Dataset schema (one record per source file; column, type, and observed range or class count):

| column | type | range / classes |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 10–805k |
| ext | string | 6 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–176 |
| max_stars_repo_name | string | lengths 7–114 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 (nullable) |
| max_issues_repo_path | string | lengths 4–176 |
| max_issues_repo_name | string | lengths 7–114 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–48.5k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 (nullable) |
| max_forks_repo_path | string | lengths 4–176 |
| max_forks_repo_name | string | lengths 7–114 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 (nullable) |
| content | string | lengths 10–805k |
| avg_line_length | float64 | 5.53–11k |
| max_line_length | int64 | 10–129k |
| alphanum_fraction | float64 | 0.13–0.93 |
| content_no_comment | string | lengths 0–449k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
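The columns above pair per-file repository metadata with the file text itself. As a rough illustration of how records with this shape might be consumed, here is a minimal, hypothetical Python sketch; the `records` variable (a list of dicts keyed by these column names) is an assumption, not something provided by this listing.

```python
# Hypothetical sketch: filtering records shaped like the schema above.
# `records` (a list of dicts keyed by the column names) is assumed to exist.
def iter_clean_python(records):
    """Yield (path, source) pairs for rows that look like well-formed Python."""
    for row in records:
        if row["ext"] != "py" or row["lang"] != "Python":
            continue
        # Skip generated or minified-looking files using the bundled line stats.
        if row["max_line_length"] > 1000 or row["alphanum_fraction"] < 0.25:
            continue
        yield row["max_stars_repo_path"], row["content"]

# Example (assuming `records` has been loaded elsewhere):
# for path, source in iter_clean_python(records):
#     print(path, len(source))
```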
hexsha: f715ed9bba993419b3f1d384d817b622367f47e1 | size: 2,536 | ext: py | lang: Python
max_stars_repo_path: pxlc/qt/DropDownSelectMenu.py | max_stars_repo_name: pxlc/pxlc_td | max_stars_repo_head_hexsha: 44d08dd9e9a9595449005f3446536e7a02113c95
max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-10-06T22:56:10.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-07T04:13:47.000Z
max_issues_repo_path: pxlc/qt/DropDownSelectMenu.py | max_issues_repo_name: pxlc/pxlc_td | max_issues_repo_head_hexsha: 44d08dd9e9a9595449005f3446536e7a02113c95
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: pxlc/qt/DropDownSelectMenu.py | max_forks_repo_name: pxlc/pxlc_td | max_forks_repo_head_hexsha: 44d08dd9e9a9595449005f3446536e7a02113c95
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -------------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2018 pxlc@github
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -------------------------------------------------------------------------------
from PySide import QtCore, QtGui
from .cb import connect_callback # local import
__INFO__ = '''
item list:
[
{
'label': 'Menu label',
'select_data': 'any type, returned if item is selected',
'style': 'style sheet string (optional)',
}
]
'''
class DropDownSelectMenu(QtGui.QComboBox):
def __init__(self, item_list=[], parent=None):
super(DropDownSelectMenu, self).__init__(parent=parent)
self.item_list = item_list[:]
def clear_all_items(self):
while self.count() > 0:
self.removeItem(0)
def load_items(self, item_list):
self.clear_all_items()
self.item_list = item_list[:]
for item in self.item_list:
label = item.get('label','')
self.addItem(label)
self.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
def set_index_changed_callback(self, index_changed_cb_fn):
connect_callback(self.currentIndexChanged, index_changed_cb_fn, {'wdg': self}, containing_obj=self)
def get_current_item(self):
curr_idx = self.currentIndex()
if curr_idx >= 0 and curr_idx < len(self.item_list):
return self.item_list[curr_idx]
return None
avg_line_length: 32.101266 | max_line_length: 107 | alphanum_fraction: 0.651025
content_no_comment:
from PySide import QtCore, QtGui
from .cb import connect_callback
__INFO__ = '''
item list:
[
{
'label': 'Menu label',
'select_data': 'any type, returned if item is selected',
'style': 'style sheet string (optional)',
}
]
'''
class DropDownSelectMenu(QtGui.QComboBox):
def __init__(self, item_list=[], parent=None):
super(DropDownSelectMenu, self).__init__(parent=parent)
self.item_list = item_list[:]
def clear_all_items(self):
while self.count() > 0:
self.removeItem(0)
def load_items(self, item_list):
self.clear_all_items()
self.item_list = item_list[:]
for item in self.item_list:
label = item.get('label','')
self.addItem(label)
self.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
def set_index_changed_callback(self, index_changed_cb_fn):
connect_callback(self.currentIndexChanged, index_changed_cb_fn, {'wdg': self}, containing_obj=self)
def get_current_item(self):
curr_idx = self.currentIndex()
if curr_idx >= 0 and curr_idx < len(self.item_list):
return self.item_list[curr_idx]
return None
is_comment_constant_removed: true | is_sharp_comment_removed: true
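The record above defines a small PySide combo-box wrapper whose `__INFO__` string documents the expected item-list structure. A minimal, hypothetical usage sketch follows; the import path is guessed from the record's repo layout, and a working PySide (Qt4) installation with a display is assumed.

```python
# Hypothetical usage of the DropDownSelectMenu class from the record above.
# The import path is an assumption based on max_stars_repo_path.
import sys
from PySide import QtGui
from pxlc.qt.DropDownSelectMenu import DropDownSelectMenu  # assumed module path

app = QtGui.QApplication(sys.argv)
menu = DropDownSelectMenu()
menu.load_items([
    {'label': 'Option A', 'select_data': 'a'},  # keys follow the __INFO__ spec
    {'label': 'Option B', 'select_data': 'b'},
])
menu.show()
print(menu.get_current_item())  # dict for the currently selected item, or None
app.exec_()
```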
hexsha: f715edafdd23569ac05010564563d3ff065388fb | size: 8,828 | ext: py | lang: Python
max_stars_repo_path: docs/source/conf.py | max_stars_repo_name: andrewseidl/ibis | max_stars_repo_head_hexsha: 1468b8c4f96d9d58f6fa147a2579b0d9e5796186
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: docs/source/conf.py | max_issues_repo_name: andrewseidl/ibis | max_issues_repo_head_hexsha: 1468b8c4f96d9d58f6fa147a2579b0d9e5796186
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 6 | max_issues_repo_issues_event_min_datetime: 2017-05-18T19:49:09.000Z | max_issues_repo_issues_event_max_datetime: 2019-03-27T15:37:14.000Z
max_forks_repo_path: docs/source/conf.py | max_forks_repo_name: andrewseidl/ibis | max_forks_repo_head_hexsha: 1468b8c4f96d9d58f6fa147a2579b0d9e5796186
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2017-06-26T15:43:35.000Z | max_forks_repo_forks_event_max_datetime: 2017-06-26T15:43:35.000Z
content:
# -*- coding: utf-8 -*-
#
# Ibis documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 10 11:06:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.mathjax',
'numpydoc',
'nbsphinx',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
autosummary_generate = glob.glob("*.rst")
# autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Ibis'
copyright = '{}, Ibis Developers'.format(datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.2'
from ibis import __version__ as version # noqa: E402
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme # noqa: E402
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo-wide.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ibisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Ibis.tex', 'Ibis Documentation', 'Ibis Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# extlinks alias
extlinks = {'issue': ('https://github.com/ibis-project/ibis/issues/%s', '#')}
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ibis', 'Ibis Documentation',
['Ibis Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Ibis', 'Ibis Documentation',
'Ibis Developers', 'Ibis', 'Pandas-like expressions for analytics',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
avg_line_length: 31.194346 | max_line_length: 79 | alphanum_fraction: 0.715224
content_no_comment:
import glob
import datetime
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.mathjax',
'numpydoc',
'nbsphinx',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
autosummary_generate = glob.glob("*.rst")
numpydoc_show_class_members = False
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'Ibis'
copyright = '{}, Ibis Developers'.format(datetime.date.today().year)
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.2'
from ibis import __version__ as version # noqa: E402
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme # noqa: E402
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo-wide.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ibisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Ibis.tex', 'Ibis Documentation', 'Ibis Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# extlinks alias
extlinks = {'issue': ('https://github.com/ibis-project/ibis/issues/%s', '
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ibis', 'Ibis Documentation',
['Ibis Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Ibis', 'Ibis Documentation',
'Ibis Developers', 'Ibis', 'Pandas-like expressions for analytics',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f715ee5b393ad823887100ea16fc12a89479e531 | size: 1,091 | ext: py | lang: Python
max_stars_repo_path: masakari-7.0.0/masakari/tests/uuidsentinel.py | max_stars_repo_name: scottwedge/OpenStack-Stein | max_stars_repo_head_hexsha: 7077d1f602031dace92916f14e36b124f474de15
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 70 | max_stars_repo_stars_event_min_datetime: 2016-07-22T21:58:00.000Z | max_stars_repo_stars_event_max_datetime: 2022-01-04T06:05:32.000Z
max_issues_repo_path: masakari-7.0.0/masakari/tests/uuidsentinel.py | max_issues_repo_name: scottwedge/OpenStack-Stein | max_issues_repo_head_hexsha: 7077d1f602031dace92916f14e36b124f474de15
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 5 | max_issues_repo_issues_event_min_datetime: 2019-08-14T06:46:03.000Z | max_issues_repo_issues_event_max_datetime: 2021-12-13T20:01:25.000Z
max_forks_repo_path: masakari-7.0.0/masakari/tests/uuidsentinel.py | max_forks_repo_name: scottwedge/OpenStack-Stein | max_forks_repo_head_hexsha: 7077d1f602031dace92916f14e36b124f474de15
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 33 | max_forks_repo_forks_event_min_datetime: 2016-07-05T02:05:25.000Z | max_forks_repo_forks_event_max_datetime: 2021-12-20T07:40:43.000Z
content:
# Copyright 2016 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
class UUIDSentinels(object):
def __init__(self):
from oslo_utils import uuidutils
self._uuid_module = uuidutils
self._sentinels = {}
def __getattr__(self, name):
if name.startswith('_'):
raise ValueError('Sentinels must not start with _')
if name not in self._sentinels:
self._sentinels[name] = self._uuid_module.generate_uuid()
return self._sentinels[name]
sys.modules[__name__] = UUIDSentinels()
avg_line_length: 32.088235 | max_line_length: 75 | alphanum_fraction: 0.711274
content_no_comment:
import sys
class UUIDSentinels(object):
def __init__(self):
from oslo_utils import uuidutils
self._uuid_module = uuidutils
self._sentinels = {}
def __getattr__(self, name):
if name.startswith('_'):
raise ValueError('Sentinels must not start with _')
if name not in self._sentinels:
self._sentinels[name] = self._uuid_module.generate_uuid()
return self._sentinels[name]
sys.modules[__name__] = UUIDSentinels()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f715ee8ea645eac7275a181386fd2444e7fa7fa0 | size: 5,420 | ext: py | lang: Python
max_stars_repo_path: sdk/python/pulumi_azure_native/machinelearningservices/v20200515preview/get_aks_service.py | max_stars_repo_name: polivbr/pulumi-azure-native | max_stars_repo_head_hexsha: 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: sdk/python/pulumi_azure_native/machinelearningservices/v20200515preview/get_aks_service.py | max_issues_repo_name: polivbr/pulumi-azure-native | max_issues_repo_head_hexsha: 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: sdk/python/pulumi_azure_native/machinelearningservices/v20200515preview/get_aks_service.py | max_forks_repo_name: polivbr/pulumi-azure-native | max_forks_repo_head_hexsha: 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAKSServiceResult',
'AwaitableGetAKSServiceResult',
'get_aks_service',
]
@pulumi.output_type
class GetAKSServiceResult:
"""
Machine Learning service object wrapped into ARM resource envelope.
"""
def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Specifies the resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
Service properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetAKSServiceResult(GetAKSServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAKSServiceResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_aks_service(expand: Optional[bool] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAKSServiceResult:
"""
Machine Learning service object wrapped into ARM resource envelope.
:param bool expand: Set to True to include Model details.
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str service_name: Name of the Azure Machine Learning service.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200515preview:getAKSService', __args__, opts=opts, typ=GetAKSServiceResult).value
return AwaitableGetAKSServiceResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
avg_line_length: 33.04878 | max_line_length: 158 | alphanum_fraction: 0.62583
content_no_comment:
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAKSServiceResult',
'AwaitableGetAKSServiceResult',
'get_aks_service',
]
@pulumi.output_type
class GetAKSServiceResult:
def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetAKSServiceResult(GetAKSServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAKSServiceResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_aks_service(expand: Optional[bool] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAKSServiceResult:
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200515preview:getAKSService', __args__, opts=opts, typ=GetAKSServiceResult).value
return AwaitableGetAKSServiceResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f715f0635c3ef7697ec0cfe38a3e89fa3c316e5f | size: 735 | ext: py | lang: Python
max_stars_repo_path: awx/main/migrations/0016_v330_non_blank_workflow.py | max_stars_repo_name: james-crowley/awx | max_stars_repo_head_hexsha: 5cd44cde991a9526810809544e7a8f12e6174711
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-12-27T14:33:10.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-27T14:33:10.000Z
max_issues_repo_path: awx/main/migrations/0016_v330_non_blank_workflow.py | max_issues_repo_name: james-crowley/awx | max_issues_repo_head_hexsha: 5cd44cde991a9526810809544e7a8f12e6174711
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 35 | max_issues_repo_issues_event_min_datetime: 2021-03-01T06:34:26.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-01T01:18:42.000Z
max_forks_repo_path: awx/main/migrations/0016_v330_non_blank_workflow.py | max_forks_repo_name: james-crowley/awx | max_forks_repo_head_hexsha: 5cd44cde991a9526810809544e7a8f12e6174711
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-11 16:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0015_v330_blank_start_args'),
]
operations = [
migrations.AlterField(
model_name='workflowjobtemplatenode',
name='workflow_job_template',
field=models.ForeignKey(
default=None, on_delete=django.db.models.deletion.CASCADE, related_name='workflow_job_template_nodes', to='main.WorkflowJobTemplate'
),
preserve_default=False,
),
]
avg_line_length: 28.269231 | max_line_length: 148 | alphanum_fraction: 0.665306
content_no_comment:
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0015_v330_blank_start_args'),
]
operations = [
migrations.AlterField(
model_name='workflowjobtemplatenode',
name='workflow_job_template',
field=models.ForeignKey(
default=None, on_delete=django.db.models.deletion.CASCADE, related_name='workflow_job_template_nodes', to='main.WorkflowJobTemplate'
),
preserve_default=False,
),
]
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f715f097181f6d815bcda6fe2acd64e76df19463 | size: 8,577 | ext: py | lang: Python
max_stars_repo_path: models_all_solvable2/fac2.py | max_stars_repo_name: grossmann-group/pyomo-MINLP-benchmarking | max_stars_repo_head_hexsha: 714f0a0dffd61675649a805683c0627af6b4929e
max_stars_repo_licenses: ["MIT"] | max_stars_count: 7 | max_stars_repo_stars_event_min_datetime: 2019-05-08T19:14:34.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-24T00:00:40.000Z
max_issues_repo_path: models_all_solvable2/fac2.py | max_issues_repo_name: grossmann-group/pyomo-MINLP-benchmarking | max_issues_repo_head_hexsha: 714f0a0dffd61675649a805683c0627af6b4929e
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: models_all_solvable2/fac2.py | max_forks_repo_name: grossmann-group/pyomo-MINLP-benchmarking | max_forks_repo_head_hexsha: 714f0a0dffd61675649a805683c0627af6b4929e
max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2020-05-21T22:15:51.000Z | max_forks_repo_forks_event_max_datetime: 2020-06-02T23:02:08.000Z
content:
# MINLP written by GAMS Convert at 05/15/20 00:50:46
#
# Equation counts
# Total E G L N X C B
# 34 22 3 9 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 67 55 12 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 217 163 54 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.b55 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b56 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b57 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b58 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b59 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b60 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr=276.28*(m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 + m.x19 + m.x20 + m.x21 + m.x22 + m.x23 + m.x24
+ m.x37 + m.x38 + m.x39 + m.x40 + m.x41 + m.x42)**2.5 + 792.912*(m.x7 + m.x8 + m.x9 + m.x10 +
m.x11 + m.x12 + m.x25 + m.x26 + m.x27 + m.x28 + m.x29 + m.x30 + m.x43 + m.x44 + m.x45 + m.x46 +
m.x47 + m.x48)**2.5 + 991.679*(m.x13 + m.x14 + m.x15 + m.x16 + m.x17 + m.x18 + m.x31 + m.x32 +
m.x33 + m.x34 + m.x35 + m.x36 + m.x49 + m.x50 + m.x51 + m.x52 + m.x53 + m.x54)**2.5 + 115.274*
m.x1 + 98.5559*m.x2 + 142.777*m.x3 + 33.9886*m.x4 + 163.087*m.x5 + 10.4376*m.x6 + 234.406*m.x7 +
142.066*m.x8 + 50.6436*m.x9 + 123.61*m.x10 + 242.356*m.x11 + 135.071*m.x12 + 10.7347*m.x13 +
56.0272*m.x14 + 14.912*m.x15 + 169.218*m.x16 + 209.028*m.x17 + 259.29*m.x18 + 165.41*m.x19 +
40.7497*m.x20 + 124.907*m.x21 + 18.495*m.x22 + 95.2789*m.x23 + 251.899*m.x24 + 114.185*m.x25 +
37.8148*m.x26 + 10.5547*m.x27 + 52.5162*m.x28 + 37.4727*m.x29 + 254.843*m.x30 + 266.645*m.x31 +
136.583*m.x32 + 15.092*m.x33 + 194.101*m.x34 + 78.768*m.x35 + 120.36*m.x36 + 257.318*m.x37 +
172.747*m.x38 + 142.813*m.x39 + 251.331*m.x40 + 15.9113*m.x41 + 48.8251*m.x42 + 289.116*m.x43 +
129.705*m.x44 + 275.621*m.x45 + 20.2235*m.x46 + 253.789*m.x47 + 56.7474*m.x48 + 201.646*m.x49 +
164.573*m.x50 + 295.157*m.x51 + 151.474*m.x52 + 221.794*m.x53 + 278.304*m.x54 + 2481400*m.b64
+ 2156460*m.b65 + 2097730*m.b66, sense=minimize)
m.c2 = Constraint(expr= m.x1 + m.x3 + m.x5 + m.x7 + m.x9 + m.x11 + m.x13 + m.x15 + m.x17 <= 60)
m.c3 = Constraint(expr= m.x2 + m.x4 + m.x6 + m.x8 + m.x10 + m.x12 + m.x14 + m.x16 + m.x18 <= 60)
m.c4 = Constraint(expr= m.x19 + m.x21 + m.x23 + m.x25 + m.x27 + m.x29 + m.x31 + m.x33 + m.x35 <= 60)
m.c5 = Constraint(expr= m.x20 + m.x22 + m.x24 + m.x26 + m.x28 + m.x30 + m.x32 + m.x34 + m.x36 <= 60)
m.c6 = Constraint(expr= m.x37 + m.x39 + m.x41 + m.x43 + m.x45 + m.x47 + m.x49 + m.x51 + m.x53 <= 60)
m.c7 = Constraint(expr= m.x38 + m.x40 + m.x42 + m.x44 + m.x46 + m.x48 + m.x50 + m.x52 + m.x54 <= 60)
m.c8 = Constraint(expr= m.x1 + m.x19 + m.x37 - 60*m.b55 == 0)
m.c9 = Constraint(expr= m.x2 + m.x20 + m.x38 - 60*m.b55 == 0)
m.c10 = Constraint(expr= m.x3 + m.x21 + m.x39 - 60*m.b56 == 0)
m.c11 = Constraint(expr= m.x4 + m.x22 + m.x40 - 60*m.b56 == 0)
m.c12 = Constraint(expr= m.x5 + m.x23 + m.x41 - 60*m.b57 == 0)
m.c13 = Constraint(expr= m.x6 + m.x24 + m.x42 - 60*m.b57 == 0)
m.c14 = Constraint(expr= m.x7 + m.x25 + m.x43 - 60*m.b58 == 0)
m.c15 = Constraint(expr= m.x8 + m.x26 + m.x44 - 60*m.b58 == 0)
m.c16 = Constraint(expr= m.x9 + m.x27 + m.x45 - 60*m.b59 == 0)
m.c17 = Constraint(expr= m.x10 + m.x28 + m.x46 - 60*m.b59 == 0)
m.c18 = Constraint(expr= m.x11 + m.x29 + m.x47 - 60*m.b60 == 0)
m.c19 = Constraint(expr= m.x12 + m.x30 + m.x48 - 60*m.b60 == 0)
m.c20 = Constraint(expr= m.x13 + m.x31 + m.x49 - 60*m.b61 == 0)
m.c21 = Constraint(expr= m.x14 + m.x32 + m.x50 - 60*m.b61 == 0)
m.c22 = Constraint(expr= m.x15 + m.x33 + m.x51 - 60*m.b62 == 0)
m.c23 = Constraint(expr= m.x16 + m.x34 + m.x52 - 60*m.b62 == 0)
m.c24 = Constraint(expr= m.x17 + m.x35 + m.x53 - 60*m.b63 == 0)
m.c25 = Constraint(expr= m.x18 + m.x36 + m.x54 - 60*m.b63 == 0)
m.c26 = Constraint(expr= 120*m.b55 + 120*m.b56 + 120*m.b57 - 2749.5*m.b64 <= 0)
m.c27 = Constraint(expr= 120*m.b58 + 120*m.b59 + 120*m.b60 - 2872.94*m.b65 <= 0)
m.c28 = Constraint(expr= 120*m.b61 + 120*m.b62 + 120*m.b63 - 2508.06*m.b66 <= 0)
m.c29 = Constraint(expr= 120*m.b55 + 120*m.b56 + 120*m.b57 - 50*m.b64 >= 0)
m.c30 = Constraint(expr= 120*m.b58 + 120*m.b59 + 120*m.b60 - 50*m.b65 >= 0)
m.c31 = Constraint(expr= 120*m.b61 + 120*m.b62 + 120*m.b63 - 50*m.b66 >= 0)
m.c32 = Constraint(expr= m.b55 + m.b58 + m.b61 == 1)
m.c33 = Constraint(expr= m.b56 + m.b59 + m.b62 == 1)
m.c34 = Constraint(expr= m.b57 + m.b60 + m.b63 == 1)
avg_line_length: 49.578035 | max_line_length: 120 | alphanum_fraction: 0.5903
content_no_comment:
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,1000),initialize=0)
m.b55 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b56 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b57 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b58 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b59 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b60 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr=276.28*(m.x1 + m.x2 + m.x3 + m.x4 + m.x5 + m.x6 + m.x19 + m.x20 + m.x21 + m.x22 + m.x23 + m.x24
+ m.x37 + m.x38 + m.x39 + m.x40 + m.x41 + m.x42)**2.5 + 792.912*(m.x7 + m.x8 + m.x9 + m.x10 +
m.x11 + m.x12 + m.x25 + m.x26 + m.x27 + m.x28 + m.x29 + m.x30 + m.x43 + m.x44 + m.x45 + m.x46 +
m.x47 + m.x48)**2.5 + 991.679*(m.x13 + m.x14 + m.x15 + m.x16 + m.x17 + m.x18 + m.x31 + m.x32 +
m.x33 + m.x34 + m.x35 + m.x36 + m.x49 + m.x50 + m.x51 + m.x52 + m.x53 + m.x54)**2.5 + 115.274*
m.x1 + 98.5559*m.x2 + 142.777*m.x3 + 33.9886*m.x4 + 163.087*m.x5 + 10.4376*m.x6 + 234.406*m.x7 +
142.066*m.x8 + 50.6436*m.x9 + 123.61*m.x10 + 242.356*m.x11 + 135.071*m.x12 + 10.7347*m.x13 +
56.0272*m.x14 + 14.912*m.x15 + 169.218*m.x16 + 209.028*m.x17 + 259.29*m.x18 + 165.41*m.x19 +
40.7497*m.x20 + 124.907*m.x21 + 18.495*m.x22 + 95.2789*m.x23 + 251.899*m.x24 + 114.185*m.x25 +
37.8148*m.x26 + 10.5547*m.x27 + 52.5162*m.x28 + 37.4727*m.x29 + 254.843*m.x30 + 266.645*m.x31 +
136.583*m.x32 + 15.092*m.x33 + 194.101*m.x34 + 78.768*m.x35 + 120.36*m.x36 + 257.318*m.x37 +
172.747*m.x38 + 142.813*m.x39 + 251.331*m.x40 + 15.9113*m.x41 + 48.8251*m.x42 + 289.116*m.x43 +
129.705*m.x44 + 275.621*m.x45 + 20.2235*m.x46 + 253.789*m.x47 + 56.7474*m.x48 + 201.646*m.x49 +
164.573*m.x50 + 295.157*m.x51 + 151.474*m.x52 + 221.794*m.x53 + 278.304*m.x54 + 2481400*m.b64
+ 2156460*m.b65 + 2097730*m.b66, sense=minimize)
m.c2 = Constraint(expr= m.x1 + m.x3 + m.x5 + m.x7 + m.x9 + m.x11 + m.x13 + m.x15 + m.x17 <= 60)
m.c3 = Constraint(expr= m.x2 + m.x4 + m.x6 + m.x8 + m.x10 + m.x12 + m.x14 + m.x16 + m.x18 <= 60)
m.c4 = Constraint(expr= m.x19 + m.x21 + m.x23 + m.x25 + m.x27 + m.x29 + m.x31 + m.x33 + m.x35 <= 60)
m.c5 = Constraint(expr= m.x20 + m.x22 + m.x24 + m.x26 + m.x28 + m.x30 + m.x32 + m.x34 + m.x36 <= 60)
m.c6 = Constraint(expr= m.x37 + m.x39 + m.x41 + m.x43 + m.x45 + m.x47 + m.x49 + m.x51 + m.x53 <= 60)
m.c7 = Constraint(expr= m.x38 + m.x40 + m.x42 + m.x44 + m.x46 + m.x48 + m.x50 + m.x52 + m.x54 <= 60)
m.c8 = Constraint(expr= m.x1 + m.x19 + m.x37 - 60*m.b55 == 0)
m.c9 = Constraint(expr= m.x2 + m.x20 + m.x38 - 60*m.b55 == 0)
m.c10 = Constraint(expr= m.x3 + m.x21 + m.x39 - 60*m.b56 == 0)
m.c11 = Constraint(expr= m.x4 + m.x22 + m.x40 - 60*m.b56 == 0)
m.c12 = Constraint(expr= m.x5 + m.x23 + m.x41 - 60*m.b57 == 0)
m.c13 = Constraint(expr= m.x6 + m.x24 + m.x42 - 60*m.b57 == 0)
m.c14 = Constraint(expr= m.x7 + m.x25 + m.x43 - 60*m.b58 == 0)
m.c15 = Constraint(expr= m.x8 + m.x26 + m.x44 - 60*m.b58 == 0)
m.c16 = Constraint(expr= m.x9 + m.x27 + m.x45 - 60*m.b59 == 0)
m.c17 = Constraint(expr= m.x10 + m.x28 + m.x46 - 60*m.b59 == 0)
m.c18 = Constraint(expr= m.x11 + m.x29 + m.x47 - 60*m.b60 == 0)
m.c19 = Constraint(expr= m.x12 + m.x30 + m.x48 - 60*m.b60 == 0)
m.c20 = Constraint(expr= m.x13 + m.x31 + m.x49 - 60*m.b61 == 0)
m.c21 = Constraint(expr= m.x14 + m.x32 + m.x50 - 60*m.b61 == 0)
m.c22 = Constraint(expr= m.x15 + m.x33 + m.x51 - 60*m.b62 == 0)
m.c23 = Constraint(expr= m.x16 + m.x34 + m.x52 - 60*m.b62 == 0)
m.c24 = Constraint(expr= m.x17 + m.x35 + m.x53 - 60*m.b63 == 0)
m.c25 = Constraint(expr= m.x18 + m.x36 + m.x54 - 60*m.b63 == 0)
m.c26 = Constraint(expr= 120*m.b55 + 120*m.b56 + 120*m.b57 - 2749.5*m.b64 <= 0)
m.c27 = Constraint(expr= 120*m.b58 + 120*m.b59 + 120*m.b60 - 2872.94*m.b65 <= 0)
m.c28 = Constraint(expr= 120*m.b61 + 120*m.b62 + 120*m.b63 - 2508.06*m.b66 <= 0)
m.c29 = Constraint(expr= 120*m.b55 + 120*m.b56 + 120*m.b57 - 50*m.b64 >= 0)
m.c30 = Constraint(expr= 120*m.b58 + 120*m.b59 + 120*m.b60 - 50*m.b65 >= 0)
m.c31 = Constraint(expr= 120*m.b61 + 120*m.b62 + 120*m.b63 - 50*m.b66 >= 0)
m.c32 = Constraint(expr= m.b55 + m.b58 + m.b61 == 1)
m.c33 = Constraint(expr= m.b56 + m.b59 + m.b62 == 1)
m.c34 = Constraint(expr= m.b57 + m.b60 + m.b63 == 1)
is_comment_constant_removed: true | is_sharp_comment_removed: true
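The record above is a GAMS-converted MINLP benchmark whose header comments summarize the model size (34 equations, 67 variables, 12 of them binary) and which builds a Pyomo ConcreteModel `m`. Purely as an illustration (not part of the dataset), here is a hedged sketch of solving such a model; the choice of Bonmin is an assumption, and any installed MINLP-capable solver would do.

```python
# Hypothetical solve of the fac2 model above; assumes the model file has been
# run or imported so that `m` (the ConcreteModel) is in scope, and that the
# Bonmin solver is installed and on PATH.
from pyomo.environ import SolverFactory, value

opt = SolverFactory('bonmin')      # assumed solver choice; any MINLP solver works
results = opt.solve(m, tee=True)   # `m` comes from the model file in the record
print('objective =', value(m.obj))
```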
hexsha: f715f09dd9a1ab89449f85a7df5a818d88fa8086 | size: 37,361 | ext: py | lang: Python
max_stars_repo_path: banana/study/mri/dwi.py | max_stars_repo_name: apoz00003/banana | max_stars_repo_head_hexsha: 50bf516cc4f7d4d93985e42d0c4dcbc62fb8058a
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: banana/study/mri/dwi.py | max_issues_repo_name: apoz00003/banana | max_issues_repo_head_hexsha: 50bf516cc4f7d4d93985e42d0c4dcbc62fb8058a
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: banana/study/mri/dwi.py | max_forks_repo_name: apoz00003/banana | max_forks_repo_head_hexsha: 50bf516cc4f7d4d93985e42d0c4dcbc62fb8058a
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from logging import getLogger
from nipype.interfaces.utility import Merge
from nipype.interfaces.fsl import (
TOPUP, ApplyTOPUP, BET, FUGUE, Merge as FslMerge)
from nipype.interfaces import fsl
from nipype.interfaces.utility import Merge as merge_lists
from nipype.interfaces.fsl.epi import PrepareFieldmap
from nipype.interfaces.mrtrix3 import ResponseSD, Tractography
from nipype.interfaces.mrtrix3.utils import BrainMask, TensorMetrics
from nipype.interfaces.mrtrix3.reconst import FitTensor, EstimateFOD
from banana.interfaces.custom.motion_correction import GenTopupConfigFiles
from banana.interfaces.mrtrix import (
DWIPreproc, MRCat, ExtractDWIorB0, MRMath, DWIBiasCorrect, DWIDenoise,
MRCalc, DWIIntensityNorm, AverageResponse, DWI2Mask)
# from nipype.workflows.dwi.fsl.tbss import create_tbss_all
# from banana.interfaces.noddi import (
# CreateROI, BatchNODDIFitting, SaveParamsAsNIfTI)
from nipype.interfaces import fsl, mrtrix3, utility
from arcana.utils.interfaces import MergeTuple, Chain
from arcana.data import FilesetSpec, InputFilesetSpec
from arcana.utils.interfaces import SelectSession
from arcana.study import ParamSpec, SwitchSpec
from arcana.exceptions import ArcanaMissingDataException, ArcanaNameError
from banana.requirement import (
fsl_req, mrtrix_req, ants_req)
from banana.interfaces.mrtrix import MRConvert, ExtractFSLGradients
from banana.study import StudyMetaClass
from banana.interfaces.custom.motion_correction import (
PrepareDWI, AffineMatrixGeneration)
from banana.interfaces.custom.dwi import TransformGradients
from banana.interfaces.utility import AppendPath
from banana.study.base import Study
from banana.bids_ import BidsInputs, BidsAssocInputs
from banana.exceptions import BananaUsageError
from banana.citation import (
mrtrix_cite, fsl_cite, eddy_cite, topup_cite, distort_correct_cite,
n4_cite, dwidenoise_cites)
from banana.file_format import (
mrtrix_image_format, nifti_gz_format, nifti_gz_x_format, fsl_bvecs_format,
fsl_bvals_format, text_format, dicom_format, eddy_par_format,
mrtrix_track_format, motion_mats_format, text_matrix_format,
directory_format, csv_format, zip_format, STD_IMAGE_FORMATS)
from .base import MriStudy
from .epi import EpiSeriesStudy, EpiStudy
logger = getLogger('banana')
class DwiStudy(EpiSeriesStudy, metaclass=StudyMetaClass):
desc = "Diffusion-weighted MRI contrast"
add_data_specs = [
InputFilesetSpec('anat_5tt', mrtrix_image_format,
desc=("A co-registered segmentation image taken from "
"freesurfer output and simplified into 5 tissue"
" types. Used in ACT streamlines tractography"),
optional=True),
InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,
desc=("Co-registered freesurfer recon-all output. "
"Used in building the connectome")),
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),
FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,
'series_coreg_pipeline',
desc=("The gradient directions coregistered to the "
"orientation of the coreg reference")),
FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('noise_residual', mrtrix_image_format,
'preprocess_pipeline',
desc=("")),
FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',
desc=("")),
FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('wm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('gm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('csf_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('avg_response', text_format, 'average_response_pipeline',
desc=("")),
FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('norm_intensity', mrtrix_image_format,
'intensity_normalisation_pipeline',
desc=("")),
FilesetSpec('norm_intens_fa_template', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_study',
desc=("")),
FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_study',
desc=("")),
FilesetSpec('global_tracks', mrtrix_track_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('wm_mask', mrtrix_image_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('connectome', csv_format, 'connectome_pipeline',
desc=(""))]
add_param_specs = [
ParamSpec('multi_tissue', True,
desc=("")),
ParamSpec('preproc_pe_dir', None, dtype=str,
desc=("")),
ParamSpec('tbss_skel_thresh', 0.2,
desc=("")),
ParamSpec('fsl_mask_f', 0.25,
desc=("")),
ParamSpec('bet_robust', True,
desc=("")),
ParamSpec('bet_f_threshold', 0.2,
desc=("")),
ParamSpec('bet_reduce_bias', False,
desc=("")),
ParamSpec('num_global_tracks', int(1e9),
desc=("")),
ParamSpec('global_tracks_cutoff', 0.05,
desc=("")),
SwitchSpec('preproc_denoise', False,
desc=("")),
SwitchSpec('response_algorithm', 'tax',
('tax', 'dhollander', 'msmt_5tt'),
desc=("")),
SwitchSpec('fod_algorithm', 'csd', ('csd', 'msmt_csd'),
desc=("")),
MriStudy.param_spec('bet_method').with_new_choices('mrtrix'),
SwitchSpec('reorient2std', False,
desc=(""))]
primary_bids_input = BidsInputs(
spec_name='series', type='dwi',
valid_formats=(nifti_gz_x_format, nifti_gz_format))
default_bids_inputs = [primary_bids_input,
BidsAssocInputs(
spec_name='bvalues',
primary=primary_bids_input,
association='grads',
type='bval',
format=fsl_bvals_format),
BidsAssocInputs(
spec_name='grad_dirs',
primary=primary_bids_input,
association='grads',
type='bvec',
format=fsl_bvecs_format),
BidsAssocInputs(
spec_name='reverse_phase',
primary=primary_bids_input,
association='epi',
format=nifti_gz_format,
drop_if_missing=True)]
RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5
primary_scan_name = 'series'
@property
def multi_tissue(self):
return self.branch('response_algorithm',
('msmt_5tt', 'dhollander'))
def fsl_grads(self, pipeline, coregistered=True):
"Adds and returns a node to the pipeline to merge the FSL grads and "
"bvecs"
try:
fslgrad = pipeline.node('fslgrad')
except ArcanaNameError:
if self.is_coregistered and coregistered:
grad_dirs = 'grad_dirs_coreg'
else:
grad_dirs = 'grad_dirs'
# Gradient merge node
fslgrad = pipeline.add(
"fslgrad",
MergeTuple(2),
inputs={
'in1': (grad_dirs, fsl_bvecs_format),
'in2': ('bvalues', fsl_bvals_format)})
return (fslgrad, 'out')
def extract_magnitude_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
'extract_magnitude',
desc="Extracts the first b==0 volume from the series",
citations=[],
name_maps=name_maps)
dwiextract = pipeline.add(
'dwiextract',
ExtractDWIorB0(
bzero=True,
out_ext='.nii.gz'),
inputs={
'in_file': ('series', nifti_gz_format),
'fslgrad': self.fsl_grads(pipeline, coregistered=False)},
requirements=[mrtrix_req.v('3.0rc3')])
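        # Keep only the first volume of the extracted b=0 series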
pipeline.add(
"extract_first_vol",
MRConvert(
coord=(3, 0)),
inputs={
'in_file': (dwiextract, 'out_file')},
outputs={
'magnitude': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def preprocess_pipeline(self, **name_maps):
"""
        Performs a series of FSL preprocessing steps, including eddy and
        topup distortion correction. The phase-encode direction can be set
        explicitly via the 'preproc_pe_dir' parameter (e.g. 'AP', 'LR', 'IS')
"""
# Determine whether we can correct for distortion, i.e. if reference
# scans are provided
# Include all references
references = [fsl_cite, eddy_cite, topup_cite,
distort_correct_cite, n4_cite]
if self.branch('preproc_denoise'):
references.extend(dwidenoise_cites)
pipeline = self.new_pipeline(
name='preprocess',
name_maps=name_maps,
desc=(
"Preprocess dMRI studies using distortion correction"),
citations=references)
        # Create nodes to convert gradients to FSL format
if self.input('series').format == dicom_format:
extract_grad = pipeline.add(
"extract_grad",
ExtractFSLGradients(),
inputs={
'in_file': ('series', dicom_format)},
outputs={
'grad_dirs': ('bvecs_file', fsl_bvecs_format),
'bvalues': ('bvals_file', fsl_bvals_format)},
requirements=[mrtrix_req.v('3.0rc3')])
grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
'in2': (extract_grad, 'bvals_file')}
elif self.provided('grad_dirs') and self.provided('bvalues'):
grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
'in2': ('bvalues', fsl_bvals_format)}
else:
raise BananaUsageError(
"Either input 'magnitude' image needs to be in DICOM format "
"or gradient directions and b-values need to be explicitly "
"provided to {}".format(self))
# Gradient merge node
grad_fsl = pipeline.add(
"grad_fsl",
MergeTuple(2),
inputs=grad_fsl_inputs)
gradients = (grad_fsl, 'out')
# Create node to reorient preproc out_file
if self.branch('reorient2std'):
reorient = pipeline.add(
'fslreorient2std',
fsl.utils.Reorient2Std(
output_type='NIFTI_GZ'),
inputs={
'in_file': ('series', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
reoriented = (reorient, 'out_file')
else:
reoriented = ('series', nifti_gz_format)
# Denoise the dwi-scan
if self.branch('preproc_denoise'):
# Run denoising
denoise = pipeline.add(
'denoise',
DWIDenoise(),
inputs={
'in_file': reoriented},
requirements=[mrtrix_req.v('3.0rc3')])
# Calculate residual noise
subtract_operands = pipeline.add(
'subtract_operands',
Merge(2),
inputs={
'in1': reoriented,
'in2': (denoise, 'noise')})
pipeline.add(
'subtract',
MRCalc(
operation='subtract'),
inputs={
'operands': (subtract_operands, 'out')},
outputs={
'noise_residual': ('out_file', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
denoised = (denoise, 'out_file')
else:
denoised = reoriented
# Preproc kwargs
preproc_kwargs = {}
preproc_inputs = {'in_file': denoised,
'grad_fsl': gradients}
if self.provided('reverse_phase'):
if self.provided('magnitude', default_okay=False):
dwi_reference = ('magnitude', mrtrix_image_format)
else:
# Extract b=0 volumes
dwiextract = pipeline.add(
'dwiextract',
ExtractDWIorB0(
bzero=True,
out_ext='.nii.gz'),
inputs={
'in_file': denoised,
'fslgrad': gradients},
requirements=[mrtrix_req.v('3.0rc3')])
# Get first b=0 from dwi b=0 volumes
extract_first_b0 = pipeline.add(
"extract_first_vol",
MRConvert(
coord=(3, 0)),
inputs={
'in_file': (dwiextract, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
dwi_reference = (extract_first_b0, 'out_file')
# Concatenate extracted forward rpe with reverse rpe
combined_images = pipeline.add(
'combined_images',
MRCat(),
inputs={
'first_scan': dwi_reference,
'second_scan': ('reverse_phase', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Create node to assign the right PED to the diffusion
prep_dwi = pipeline.add(
'prepare_dwi',
PrepareDWI(),
inputs={
'pe_dir': ('ped', float),
'ped_polarity': ('pe_angle', float)})
preproc_kwargs['rpe_pair'] = True
distortion_correction = True
preproc_inputs['se_epi'] = (combined_images, 'out_file')
else:
distortion_correction = False
preproc_kwargs['rpe_none'] = True
if self.parameter('preproc_pe_dir') is not None:
preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')
preproc = pipeline.add(
'dwipreproc',
DWIPreproc(
no_clean_up=True,
out_file_ext='.nii.gz',
# FIXME: Need to determine this programmatically
# eddy_parameters = '--data_is_shelled '
temp_dir='dwipreproc_tempdir',
**preproc_kwargs),
inputs=preproc_inputs,
outputs={
'eddy_par': ('eddy_parameters', eddy_par_format)},
requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
wall_time=60)
if distortion_correction:
pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')
mask = pipeline.add(
'dwi2mask',
BrainMask(
out_file='brainmask.nii.gz'),
inputs={
'in_file': (preproc, 'out_file'),
'grad_fsl': gradients},
requirements=[mrtrix_req.v('3.0rc3')])
# Create bias correct node
pipeline.add(
"bias_correct",
DWIBiasCorrect(
method='ants'),
inputs={
'grad_fsl': gradients, # internal
'in_file': (preproc, 'out_file'),
'mask': (mask, 'out_file')},
outputs={
'series_preproc': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])
return pipeline
def brain_extraction_pipeline(self, **name_maps):
"""
        Generates a whole-brain mask using MRtrix's 'dwi2mask' command when
        the 'bet_method' switch is set to 'mrtrix', otherwise falls back to
        the superclass brain extraction pipeline
"""
if self.branch('bet_method', 'mrtrix'):
pipeline = self.new_pipeline(
'brain_extraction',
desc="Generate brain mask from b0 images",
citations=[mrtrix_cite],
name_maps=name_maps)
if self.provided('coreg_ref'):
series = 'series_coreg'
else:
series = 'series_preproc'
# Create mask node
masker = pipeline.add(
'dwi2mask',
BrainMask(
out_file='brain_mask.nii.gz'),
inputs={
'in_file': (series, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
outputs={
'brain_mask': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
merge = pipeline.add(
'merge_operands',
Merge(2),
inputs={
'in1': ('mag_preproc', nifti_gz_format),
'in2': (masker, 'out_file')})
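            # Multiply the magnitude image by the mask to produce the
            # brain-extracted volume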
pipeline.add(
'apply_mask',
MRCalc(
operation='multiply'),
inputs={
'operands': (merge, 'out')},
outputs={
'brain': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
else:
pipeline = super().brain_extraction_pipeline(**name_maps)
return pipeline
def series_coreg_pipeline(self, **name_maps):
pipeline = super().series_coreg_pipeline(**name_maps)
# Apply coregistration transform to gradients
pipeline.add(
'transform_grads',
TransformGradients(),
inputs={
'gradients': ('grad_dirs', fsl_bvecs_format),
'transform': ('coreg_fsl_mat', text_matrix_format)},
outputs={
'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})
return pipeline
def intensity_normalisation_pipeline(self, **name_maps):
if self.num_sessions < 2:
raise ArcanaMissingDataException(
"Cannot normalise intensities of DWI images as study only "
"contains a single session")
elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
logger.warning(
"The number of sessions in the study ({}) is less than the "
"recommended number for intensity normalisation ({}). The "
"results may be unreliable".format(
self.num_sessions,
self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))
pipeline = self.new_pipeline(
name='intensity_normalization',
desc="Corrects for B1 field inhomogeneity",
citations=[mrtrix_req.v('3.0rc3')],
name_maps=name_maps)
mrconvert = pipeline.add(
'mrconvert',
MRConvert(
out_ext='.mif'),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
requirements=[mrtrix_req.v('3.0rc3')])
# Pair subject and visit ids together, expanding so they can be
# joined and chained together
session_ids = pipeline.add(
'session_ids',
utility.IdentityInterface(
['subject_id', 'visit_id']),
inputs={
'subject_id': (Study.SUBJECT_ID, int),
'visit_id': (Study.VISIT_ID, int)})
# Set up join nodes
join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
join_over_subjects = pipeline.add(
'join_over_subjects',
utility.IdentityInterface(
join_fields),
inputs={
'masks': (self.brain_mask_spec_name, nifti_gz_format),
'dwis': (mrconvert, 'out_file'),
'subject_ids': (session_ids, 'subject_id'),
'visit_ids': (session_ids, 'visit_id')},
joinsource=self.SUBJECT_ID,
joinfield=join_fields)
join_over_visits = pipeline.add(
'join_over_visits',
Chain(
join_fields),
inputs={
'dwis': (join_over_subjects, 'dwis'),
'masks': (join_over_subjects, 'masks'),
'subject_ids': (join_over_subjects, 'subject_ids'),
'visit_ids': (join_over_subjects, 'visit_ids')},
joinsource=self.VISIT_ID,
joinfield=join_fields)
# Intensity normalization
intensity_norm = pipeline.add(
'dwiintensitynorm',
DWIIntensityNorm(),
inputs={
'in_files': (join_over_visits, 'dwis'),
'masks': (join_over_visits, 'masks')},
outputs={
'norm_intens_fa_template': ('fa_template',
mrtrix_image_format),
'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Set up expand nodes
pipeline.add(
'expand', SelectSession(),
inputs={
'subject_ids': (join_over_visits, 'subject_ids'),
'visit_ids': (join_over_visits, 'visit_ids'),
'inlist': (intensity_norm, 'out_files'),
'subject_id': (Study.SUBJECT_ID, int),
'visit_id': (Study.VISIT_ID, int)},
outputs={
'norm_intensity': ('item', mrtrix_image_format)})
# Connect inputs
return pipeline
def tensor_pipeline(self, **name_maps):
"""
        Fits the apparent diffusion tensor (DT) to each voxel of the image
"""
pipeline = self.new_pipeline(
name='tensor',
desc=("Estimates the apparent diffusion tensor in each "
"voxel"),
citations=[],
name_maps=name_maps)
# Create tensor fit node
pipeline.add(
'dwi2tensor',
FitTensor(
out_file='dti.nii.gz'),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'tensor': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def tensor_metrics_pipeline(self, **name_maps):
"""
        Calculates the FA and ADC maps from the fitted diffusion tensor
"""
pipeline = self.new_pipeline(
name='fa',
desc=("Calculates the FA and ADC from a tensor image"),
citations=[],
name_maps=name_maps)
        # Create tensor metrics node
pipeline.add(
'metrics',
TensorMetrics(
out_fa='fa.nii.gz',
out_adc='adc.nii.gz'),
inputs={
'in_file': ('tensor', nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'fa': ('out_fa', nifti_gz_format),
'adc': ('out_adc', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def response_pipeline(self, **name_maps):
"""
        Estimates the fibre response function(s) used for constrained
        spherical deconvolution
        Parameters
        ----------
        response_algorithm : str
            Algorithm used to estimate the response ('tax', 'dhollander' or
            'msmt_5tt')
"""
pipeline = self.new_pipeline(
name='response',
desc=("Estimates the fibre response function"),
citations=[mrtrix_cite],
name_maps=name_maps)
        # Create response estimation node
response = pipeline.add(
'response',
ResponseSD(
algorithm=self.parameter('response_algorithm')),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'wm_response': ('wm_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# Connect to outputs
if self.multi_tissue:
            response.inputs.gm_file = 'gm.txt'
            response.inputs.csf_file = 'csf.txt'
pipeline.connect_output('gm_response', response, 'gm_file',
text_format)
pipeline.connect_output('csf_response', response, 'csf_file',
text_format)
return pipeline
def average_response_pipeline(self, **name_maps):
"""
Averages the estimate response function over all subjects in the
project
"""
pipeline = self.new_pipeline(
name='average_response',
desc=(
"Averages the fibre response function over the project"),
citations=[mrtrix_cite],
name_maps=name_maps)
join_subjects = pipeline.add(
'join_subjects',
utility.IdentityInterface(['responses']),
inputs={
'responses': ('wm_response', text_format)},
outputs={},
joinsource=self.SUBJECT_ID,
joinfield=['responses'])
join_visits = pipeline.add(
'join_visits',
Chain(['responses']),
inputs={
'responses': (join_subjects, 'responses')},
joinsource=self.VISIT_ID,
joinfield=['responses'])
pipeline.add(
'avg_response',
AverageResponse(),
inputs={
'in_files': (join_visits, 'responses')},
outputs={
'avg_response': ('out_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def fod_pipeline(self, **name_maps):
"""
Estimates the fibre orientation distribution (FOD) using constrained
spherical deconvolution
"""
pipeline = self.new_pipeline(
name='fod',
desc=("Estimates the fibre orientation distribution in each"
" voxel"),
citations=[mrtrix_cite],
name_maps=name_maps)
# Create fod fit node
dwi2fod = pipeline.add(
'dwi2fod',
EstimateFOD(
algorithm=self.parameter('fod_algorithm')),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'wm_txt': ('wm_response', text_format),
'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
outputs={
'wm_odf': ('wm_odf', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.multi_tissue:
            dwi2fod.inputs.gm_odf = 'gm.mif'
            dwi2fod.inputs.csf_odf = 'csf.mif'
            pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
                                    text_format)
            pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
                                    text_format)
            pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
                                    nifti_gz_format)
            pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
                                    nifti_gz_format)
# Check inputs/output are connected
return pipeline
def extract_b0_pipeline(self, **name_maps):
"""
Extracts the b0 images from a DWI study and takes their mean
"""
pipeline = self.new_pipeline(
name='extract_b0',
desc="Extract b0 image from a DWI study",
citations=[mrtrix_cite],
name_maps=name_maps)
# Extraction node
extract_b0s = pipeline.add(
'extract_b0s',
ExtractDWIorB0(
bzero=True,
quiet=True),
inputs={
'fslgrad': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
# FIXME: Need a registration step before the mean
# Mean calculation node
mean = pipeline.add(
"mean",
MRMath(
axis=3,
operation='mean',
quiet=True),
inputs={
'in_files': (extract_b0s, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
# Convert to Nifti
pipeline.add(
"output_conversion",
MRConvert(
out_ext='.nii.gz',
quiet=True),
inputs={
'in_file': (mean, 'out_file')},
outputs={
'b0': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def global_tracking_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='global_tracking',
desc="Extract b0 image from a DWI study",
citations=[mrtrix_cite],
name_maps=name_maps)
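        # Generate a whole-brain mask from the preprocessed series to seed
        # the streamlines from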
mask = pipeline.add(
'mask',
DWI2Mask(),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
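        # Whole-brain tractography on the white matter ODF, optionally using
        # the co-registered 5TT image for anatomically-constrained tracking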
tracking = pipeline.add(
'tracking',
Tractography(
select=self.parameter('num_global_tracks'),
cutoff=self.parameter('global_tracks_cutoff')),
inputs={
'seed_image': (mask, 'out_file'),
'in_file': ('wm_odf', mrtrix_image_format)},
outputs={
'global_tracks': ('out_file', mrtrix_track_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.provided('anat_5tt'):
pipeline.connect_input('anat_5tt', tracking, 'act_file',
mrtrix_image_format)
return pipeline
def intrascan_alignment_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='affine_mat_generation',
desc=("Generation of the affine matrices for the main dwi "
"sequence starting from eddy motion parameters"),
citations=[fsl_cite],
name_maps=name_maps)
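        # Convert the eddy motion parameters into per-volume affine matrices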
pipeline.add(
'gen_aff_mats',
AffineMatrixGeneration(),
inputs={
'reference_image': ('mag_preproc', nifti_gz_format),
'motion_parameters': ('eddy_par', eddy_par_format)},
outputs={
'align_mats': ('affine_matrices', motion_mats_format)})
return pipeline
def connectome_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='connectome',
desc=("Generate a connectome from whole brain connectivity"),
citations=[],
name_maps=name_maps)
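        # Locate the aparc+aseg parcellation within the freesurfer
        # recon-all output directory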
aseg_path = pipeline.add(
'aseg_path',
AppendPath(
sub_paths=['mri', 'aparc+aseg.mgz']),
inputs={
'base_path': ('anat_fs_recon_all', directory_format)})
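        # Map the global tracks onto the parcellation to build the
        # connectome matrix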
pipeline.add(
'connectome',
mrtrix3.BuildConnectome(),
inputs={
'in_file': ('global_tracks', mrtrix_track_format),
'in_parc': (aseg_path, 'out_path')},
outputs={
'connectome': ('out_file', csv_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
class DwiRefStudy(EpiStudy, metaclass=StudyMetaClass):
add_data_specs = [
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True)
]
desc = ("A special study used in the MR-PET motion correction algorithm to"
" perform distortion correction on the reverse-phase/reference b0 "
"scans by flipping it around and using the DWI series as the "
"reference")
def preprocess_pipeline(self, **name_maps):
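        # Only run topup-based distortion correction when a reverse-phase
        # scan has been provided, otherwise fall back to the base EPI
        # preprocessing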
if self.provided('reverse_phase'):
return self._topup_pipeline(**name_maps)
else:
return super().preprocess_pipeline(**name_maps)
def _topup_pipeline(self, **name_maps):
"""
Implementation of separate topup pipeline, moved from EPI study as it
is only really relevant for spin-echo DWI. Need to work out what to do
with it
"""
pipeline = self.new_pipeline(
name='preprocess_pipeline',
desc=("Topup distortion correction pipeline"),
citations=[fsl_cite],
name_maps=name_maps)
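        # Reorient both the forward and reverse phase-encode images to the
        # standard orientation before estimating the susceptibility field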
reorient_epi_in = pipeline.add(
'reorient_epi_in',
fsl.utils.Reorient2Std(),
inputs={
'in_file': ('magnitude', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
reorient_epi_opposite = pipeline.add(
'reorient_epi_opposite',
fsl.utils.Reorient2Std(),
inputs={
'in_file': ('reverse_phase', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
prep_dwi = pipeline.add(
'prepare_dwi',
PrepareDWI(
topup=True),
inputs={
'pe_dir': ('ped', str),
'ped_polarity': ('pe_angle', str),
'dwi': (reorient_epi_in, 'out_file'),
'dwi1': (reorient_epi_opposite, 'out_file')})
ped = pipeline.add(
'gen_config',
GenTopupConfigFiles(),
inputs={
'ped': (prep_dwi, 'pe')})
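        # Collect the forward and reverse phase-encode images and
        # concatenate them along time for topup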
merge_outputs = pipeline.add(
'merge_files',
merge_lists(2),
inputs={
'in1': (prep_dwi, 'main'),
'in2': (prep_dwi, 'secondary')})
merge = pipeline.add(
'FslMerge',
FslMerge(
dimension='t',
output_type='NIFTI_GZ'),
inputs={
'in_files': (merge_outputs, 'out')},
requirements=[fsl_req.v('5.0.9')])
topup = pipeline.add(
'topup',
TOPUP(
output_type='NIFTI_GZ'),
inputs={
'in_file': (merge, 'merged_file'),
'encoding_file': (ped, 'config_file')},
requirements=[fsl_req.v('5.0.9')])
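        # Apply the estimated field to the forward phase-encode image only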
in_apply_tp = pipeline.add(
'in_apply_tp',
merge_lists(1),
inputs={
'in1': (reorient_epi_in, 'out_file')})
pipeline.add(
'applytopup',
ApplyTOPUP(
method='jac',
in_index=[1],
output_type='NIFTI_GZ'),
inputs={
'in_files': (in_apply_tp, 'out'),
'encoding_file': (ped, 'apply_topup_config'),
'in_topup_movpar': (topup, 'out_movpar'),
'in_topup_fieldcoef': (topup, 'out_fieldcoef')},
outputs={
'mag_preproc': ('out_corrected', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
return pipeline
| 37.064484
| 79
| 0.532855
|
from logging import getLogger
from nipype.interfaces.utility import Merge
from nipype.interfaces.fsl import (
TOPUP, ApplyTOPUP, BET, FUGUE, Merge as FslMerge)
from nipype.interfaces import fsl
from nipype.interfaces.utility import Merge as merge_lists
from nipype.interfaces.fsl.epi import PrepareFieldmap
from nipype.interfaces.mrtrix3 import ResponseSD, Tractography
from nipype.interfaces.mrtrix3.utils import BrainMask, TensorMetrics
from nipype.interfaces.mrtrix3.reconst import FitTensor, EstimateFOD
from banana.interfaces.custom.motion_correction import GenTopupConfigFiles
from banana.interfaces.mrtrix import (
DWIPreproc, MRCat, ExtractDWIorB0, MRMath, DWIBiasCorrect, DWIDenoise,
MRCalc, DWIIntensityNorm, AverageResponse, DWI2Mask)
from nipype.interfaces import fsl, mrtrix3, utility
from arcana.utils.interfaces import MergeTuple, Chain
from arcana.data import FilesetSpec, InputFilesetSpec
from arcana.utils.interfaces import SelectSession
from arcana.study import ParamSpec, SwitchSpec
from arcana.exceptions import ArcanaMissingDataException, ArcanaNameError
from banana.requirement import (
fsl_req, mrtrix_req, ants_req)
from banana.interfaces.mrtrix import MRConvert, ExtractFSLGradients
from banana.study import StudyMetaClass
from banana.interfaces.custom.motion_correction import (
PrepareDWI, AffineMatrixGeneration)
from banana.interfaces.custom.dwi import TransformGradients
from banana.interfaces.utility import AppendPath
from banana.study.base import Study
from banana.bids_ import BidsInputs, BidsAssocInputs
from banana.exceptions import BananaUsageError
from banana.citation import (
mrtrix_cite, fsl_cite, eddy_cite, topup_cite, distort_correct_cite,
n4_cite, dwidenoise_cites)
from banana.file_format import (
mrtrix_image_format, nifti_gz_format, nifti_gz_x_format, fsl_bvecs_format,
fsl_bvals_format, text_format, dicom_format, eddy_par_format,
mrtrix_track_format, motion_mats_format, text_matrix_format,
directory_format, csv_format, zip_format, STD_IMAGE_FORMATS)
from .base import MriStudy
from .epi import EpiSeriesStudy, EpiStudy
logger = getLogger('banana')
class DwiStudy(EpiSeriesStudy, metaclass=StudyMetaClass):
desc = "Diffusion-weighted MRI contrast"
add_data_specs = [
InputFilesetSpec('anat_5tt', mrtrix_image_format,
desc=("A co-registered segmentation image taken from "
"freesurfer output and simplified into 5 tissue"
" types. Used in ACT streamlines tractography"),
optional=True),
InputFilesetSpec('anat_fs_recon_all', zip_format, optional=True,
desc=("Co-registered freesurfer recon-all output. "
"Used in building the connectome")),
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True),
FilesetSpec('grad_dirs', fsl_bvecs_format, 'preprocess_pipeline'),
FilesetSpec('grad_dirs_coreg', fsl_bvecs_format,
'series_coreg_pipeline',
desc=("The gradient directions coregistered to the "
"orientation of the coreg reference")),
FilesetSpec('bvalues', fsl_bvals_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('eddy_par', eddy_par_format, 'preprocess_pipeline',
desc=("")),
FilesetSpec('noise_residual', mrtrix_image_format,
'preprocess_pipeline',
desc=("")),
FilesetSpec('tensor', nifti_gz_format, 'tensor_pipeline',
desc=("")),
FilesetSpec('fa', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('adc', nifti_gz_format, 'tensor_metrics_pipeline',
desc=("")),
FilesetSpec('wm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('gm_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('csf_response', text_format, 'response_pipeline',
desc=("")),
FilesetSpec('avg_response', text_format, 'average_response_pipeline',
desc=("")),
FilesetSpec('wm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('gm_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('csf_odf', mrtrix_image_format, 'fod_pipeline',
desc=("")),
FilesetSpec('norm_intensity', mrtrix_image_format,
'intensity_normalisation_pipeline',
desc=("")),
FilesetSpec('norm_intens_fa_template', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_study',
desc=("")),
FilesetSpec('norm_intens_wm_mask', mrtrix_image_format,
'intensity_normalisation_pipeline', frequency='per_study',
desc=("")),
FilesetSpec('global_tracks', mrtrix_track_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('wm_mask', mrtrix_image_format,
'global_tracking_pipeline',
desc=("")),
FilesetSpec('connectome', csv_format, 'connectome_pipeline',
desc=(""))]
add_param_specs = [
ParamSpec('multi_tissue', True,
desc=("")),
ParamSpec('preproc_pe_dir', None, dtype=str,
desc=("")),
ParamSpec('tbss_skel_thresh', 0.2,
desc=("")),
ParamSpec('fsl_mask_f', 0.25,
desc=("")),
ParamSpec('bet_robust', True,
desc=("")),
ParamSpec('bet_f_threshold', 0.2,
desc=("")),
ParamSpec('bet_reduce_bias', False,
desc=("")),
ParamSpec('num_global_tracks', int(1e9),
desc=("")),
ParamSpec('global_tracks_cutoff', 0.05,
desc=("")),
SwitchSpec('preproc_denoise', False,
desc=("")),
SwitchSpec('response_algorithm', 'tax',
('tax', 'dhollander', 'msmt_5tt'),
desc=("")),
SwitchSpec('fod_algorithm', 'csd', ('csd', 'msmt_csd'),
desc=("")),
MriStudy.param_spec('bet_method').with_new_choices('mrtrix'),
SwitchSpec('reorient2std', False,
desc=(""))]
primary_bids_input = BidsInputs(
spec_name='series', type='dwi',
valid_formats=(nifti_gz_x_format, nifti_gz_format))
default_bids_inputs = [primary_bids_input,
BidsAssocInputs(
spec_name='bvalues',
primary=primary_bids_input,
association='grads',
type='bval',
format=fsl_bvals_format),
BidsAssocInputs(
spec_name='grad_dirs',
primary=primary_bids_input,
association='grads',
type='bvec',
format=fsl_bvecs_format),
BidsAssocInputs(
spec_name='reverse_phase',
primary=primary_bids_input,
association='epi',
format=nifti_gz_format,
drop_if_missing=True)]
RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM = 5
primary_scan_name = 'series'
@property
def multi_tissue(self):
return self.branch('response_algorithm',
('msmt_5tt', 'dhollander'))
def fsl_grads(self, pipeline, coregistered=True):
try:
fslgrad = pipeline.node('fslgrad')
except ArcanaNameError:
if self.is_coregistered and coregistered:
grad_dirs = 'grad_dirs_coreg'
else:
grad_dirs = 'grad_dirs'
fslgrad = pipeline.add(
"fslgrad",
MergeTuple(2),
inputs={
'in1': (grad_dirs, fsl_bvecs_format),
'in2': ('bvalues', fsl_bvals_format)})
return (fslgrad, 'out')
def extract_magnitude_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
'extract_magnitude',
desc="Extracts the first b==0 volume from the series",
citations=[],
name_maps=name_maps)
dwiextract = pipeline.add(
'dwiextract',
ExtractDWIorB0(
bzero=True,
out_ext='.nii.gz'),
inputs={
'in_file': ('series', nifti_gz_format),
'fslgrad': self.fsl_grads(pipeline, coregistered=False)},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
"extract_first_vol",
MRConvert(
coord=(3, 0)),
inputs={
'in_file': (dwiextract, 'out_file')},
outputs={
'magnitude': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def preprocess_pipeline(self, **name_maps):
references = [fsl_cite, eddy_cite, topup_cite,
distort_correct_cite, n4_cite]
if self.branch('preproc_denoise'):
references.extend(dwidenoise_cites)
pipeline = self.new_pipeline(
name='preprocess',
name_maps=name_maps,
desc=(
"Preprocess dMRI studies using distortion correction"),
citations=references)
if self.input('series').format == dicom_format:
extract_grad = pipeline.add(
"extract_grad",
ExtractFSLGradients(),
inputs={
'in_file': ('series', dicom_format)},
outputs={
'grad_dirs': ('bvecs_file', fsl_bvecs_format),
'bvalues': ('bvals_file', fsl_bvals_format)},
requirements=[mrtrix_req.v('3.0rc3')])
grad_fsl_inputs = {'in1': (extract_grad, 'bvecs_file'),
'in2': (extract_grad, 'bvals_file')}
elif self.provided('grad_dirs') and self.provided('bvalues'):
grad_fsl_inputs = {'in1': ('grad_dirs', fsl_bvecs_format),
'in2': ('bvalues', fsl_bvals_format)}
else:
raise BananaUsageError(
"Either input 'magnitude' image needs to be in DICOM format "
"or gradient directions and b-values need to be explicitly "
"provided to {}".format(self))
grad_fsl = pipeline.add(
"grad_fsl",
MergeTuple(2),
inputs=grad_fsl_inputs)
gradients = (grad_fsl, 'out')
if self.branch('reorient2std'):
reorient = pipeline.add(
'fslreorient2std',
fsl.utils.Reorient2Std(
output_type='NIFTI_GZ'),
inputs={
'in_file': ('series', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
reoriented = (reorient, 'out_file')
else:
reoriented = ('series', nifti_gz_format)
if self.branch('preproc_denoise'):
denoise = pipeline.add(
'denoise',
DWIDenoise(),
inputs={
'in_file': reoriented},
requirements=[mrtrix_req.v('3.0rc3')])
subtract_operands = pipeline.add(
'subtract_operands',
Merge(2),
inputs={
'in1': reoriented,
'in2': (denoise, 'noise')})
pipeline.add(
'subtract',
MRCalc(
operation='subtract'),
inputs={
'operands': (subtract_operands, 'out')},
outputs={
'noise_residual': ('out_file', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
denoised = (denoise, 'out_file')
else:
denoised = reoriented
preproc_kwargs = {}
preproc_inputs = {'in_file': denoised,
'grad_fsl': gradients}
if self.provided('reverse_phase'):
if self.provided('magnitude', default_okay=False):
dwi_reference = ('magnitude', mrtrix_image_format)
else:
dwiextract = pipeline.add(
'dwiextract',
ExtractDWIorB0(
bzero=True,
out_ext='.nii.gz'),
inputs={
'in_file': denoised,
'fslgrad': gradients},
requirements=[mrtrix_req.v('3.0rc3')])
extract_first_b0 = pipeline.add(
"extract_first_vol",
MRConvert(
coord=(3, 0)),
inputs={
'in_file': (dwiextract, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
dwi_reference = (extract_first_b0, 'out_file')
combined_images = pipeline.add(
'combined_images',
MRCat(),
inputs={
'first_scan': dwi_reference,
'second_scan': ('reverse_phase', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
prep_dwi = pipeline.add(
'prepare_dwi',
PrepareDWI(),
inputs={
'pe_dir': ('ped', float),
'ped_polarity': ('pe_angle', float)})
preproc_kwargs['rpe_pair'] = True
distortion_correction = True
preproc_inputs['se_epi'] = (combined_images, 'out_file')
else:
distortion_correction = False
preproc_kwargs['rpe_none'] = True
if self.parameter('preproc_pe_dir') is not None:
preproc_kwargs['pe_dir'] = self.parameter('preproc_pe_dir')
preproc = pipeline.add(
'dwipreproc',
DWIPreproc(
no_clean_up=True,
out_file_ext='.nii.gz',
temp_dir='dwipreproc_tempdir',
**preproc_kwargs),
inputs=preproc_inputs,
outputs={
'eddy_par': ('eddy_parameters', eddy_par_format)},
requirements=[mrtrix_req.v('3.0rc3'), fsl_req.v('5.0.10')],
wall_time=60)
if distortion_correction:
pipeline.connect(prep_dwi, 'pe', preproc, 'pe_dir')
mask = pipeline.add(
'dwi2mask',
BrainMask(
out_file='brainmask.nii.gz'),
inputs={
'in_file': (preproc, 'out_file'),
'grad_fsl': gradients},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
"bias_correct",
DWIBiasCorrect(
method='ants'),
inputs={
'grad_fsl': gradients,
'in_file': (preproc, 'out_file'),
'mask': (mask, 'out_file')},
outputs={
'series_preproc': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3'), ants_req.v('2.0')])
return pipeline
def brain_extraction_pipeline(self, **name_maps):
if self.branch('bet_method', 'mrtrix'):
pipeline = self.new_pipeline(
'brain_extraction',
desc="Generate brain mask from b0 images",
citations=[mrtrix_cite],
name_maps=name_maps)
if self.provided('coreg_ref'):
series = 'series_coreg'
else:
series = 'series_preproc'
masker = pipeline.add(
'dwi2mask',
BrainMask(
out_file='brain_mask.nii.gz'),
inputs={
'in_file': (series, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
outputs={
'brain_mask': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
merge = pipeline.add(
'merge_operands',
Merge(2),
inputs={
'in1': ('mag_preproc', nifti_gz_format),
'in2': (masker, 'out_file')})
pipeline.add(
'apply_mask',
MRCalc(
operation='multiply'),
inputs={
'operands': (merge, 'out')},
outputs={
'brain': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
else:
pipeline = super().brain_extraction_pipeline(**name_maps)
return pipeline
def series_coreg_pipeline(self, **name_maps):
pipeline = super().series_coreg_pipeline(**name_maps)
pipeline.add(
'transform_grads',
TransformGradients(),
inputs={
'gradients': ('grad_dirs', fsl_bvecs_format),
'transform': ('coreg_fsl_mat', text_matrix_format)},
outputs={
'grad_dirs_coreg': ('transformed', fsl_bvecs_format)})
return pipeline
def intensity_normalisation_pipeline(self, **name_maps):
if self.num_sessions < 2:
raise ArcanaMissingDataException(
"Cannot normalise intensities of DWI images as study only "
"contains a single session")
elif self.num_sessions < self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM:
logger.warning(
"The number of sessions in the study ({}) is less than the "
"recommended number for intensity normalisation ({}). The "
"results may be unreliable".format(
self.num_sessions,
self.RECOMMENDED_NUM_SESSIONS_FOR_INTENS_NORM))
pipeline = self.new_pipeline(
name='intensity_normalization',
desc="Corrects for B1 field inhomogeneity",
citations=[mrtrix_req.v('3.0rc3')],
name_maps=name_maps)
mrconvert = pipeline.add(
'mrconvert',
MRConvert(
out_ext='.mif'),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
requirements=[mrtrix_req.v('3.0rc3')])
session_ids = pipeline.add(
'session_ids',
utility.IdentityInterface(
['subject_id', 'visit_id']),
inputs={
'subject_id': (Study.SUBJECT_ID, int),
'visit_id': (Study.VISIT_ID, int)})
join_fields = ['dwis', 'masks', 'subject_ids', 'visit_ids']
join_over_subjects = pipeline.add(
'join_over_subjects',
utility.IdentityInterface(
join_fields),
inputs={
'masks': (self.brain_mask_spec_name, nifti_gz_format),
'dwis': (mrconvert, 'out_file'),
'subject_ids': (session_ids, 'subject_id'),
'visit_ids': (session_ids, 'visit_id')},
joinsource=self.SUBJECT_ID,
joinfield=join_fields)
join_over_visits = pipeline.add(
'join_over_visits',
Chain(
join_fields),
inputs={
'dwis': (join_over_subjects, 'dwis'),
'masks': (join_over_subjects, 'masks'),
'subject_ids': (join_over_subjects, 'subject_ids'),
'visit_ids': (join_over_subjects, 'visit_ids')},
joinsource=self.VISIT_ID,
joinfield=join_fields)
intensity_norm = pipeline.add(
'dwiintensitynorm',
DWIIntensityNorm(),
inputs={
'in_files': (join_over_visits, 'dwis'),
'masks': (join_over_visits, 'masks')},
outputs={
'norm_intens_fa_template': ('fa_template',
mrtrix_image_format),
'norm_intens_wm_mask': ('wm_mask', mrtrix_image_format)},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
'expand', SelectSession(),
inputs={
'subject_ids': (join_over_visits, 'subject_ids'),
'visit_ids': (join_over_visits, 'visit_ids'),
'inlist': (intensity_norm, 'out_files'),
'subject_id': (Study.SUBJECT_ID, int),
'visit_id': (Study.VISIT_ID, int)},
outputs={
'norm_intensity': ('item', mrtrix_image_format)})
return pipeline
def tensor_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='tensor',
desc=("Estimates the apparent diffusion tensor in each "
"voxel"),
citations=[],
name_maps=name_maps)
pipeline.add(
'dwi2tensor',
FitTensor(
out_file='dti.nii.gz'),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'tensor': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def tensor_metrics_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='fa',
desc=("Calculates the FA and ADC from a tensor image"),
citations=[],
name_maps=name_maps)
pipeline.add(
'metrics',
TensorMetrics(
out_fa='fa.nii.gz',
out_adc='adc.nii.gz'),
inputs={
'in_file': ('tensor', nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'fa': ('out_fa', nifti_gz_format),
'adc': ('out_adc', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def response_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='response',
desc=("Estimates the fibre response function"),
citations=[mrtrix_cite],
name_maps=name_maps)
response = pipeline.add(
'response',
ResponseSD(
algorithm=self.parameter('response_algorithm')),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'in_mask': (self.brain_mask_spec_name, nifti_gz_format)},
outputs={
'wm_response': ('wm_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.multi_tissue:
            response.inputs.gm_file = 'gm.txt'
            response.inputs.csf_file = 'csf.txt'
pipeline.connect_output('gm_response', response, 'gm_file',
text_format)
pipeline.connect_output('csf_response', response, 'csf_file',
text_format)
return pipeline
def average_response_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='average_response',
desc=(
"Averages the fibre response function over the project"),
citations=[mrtrix_cite],
name_maps=name_maps)
join_subjects = pipeline.add(
'join_subjects',
utility.IdentityInterface(['responses']),
inputs={
'responses': ('wm_response', text_format)},
outputs={},
joinsource=self.SUBJECT_ID,
joinfield=['responses'])
join_visits = pipeline.add(
'join_visits',
Chain(['responses']),
inputs={
'responses': (join_subjects, 'responses')},
joinsource=self.VISIT_ID,
joinfield=['responses'])
pipeline.add(
'avg_response',
AverageResponse(),
inputs={
'in_files': (join_visits, 'responses')},
outputs={
'avg_response': ('out_file', text_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def fod_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='fod',
desc=("Estimates the fibre orientation distribution in each"
" voxel"),
citations=[mrtrix_cite],
name_maps=name_maps)
dwi2fod = pipeline.add(
'dwi2fod',
EstimateFOD(
algorithm=self.parameter('fod_algorithm')),
inputs={
'in_file': (self.series_preproc_spec_name, nifti_gz_format),
'wm_txt': ('wm_response', text_format),
'mask_file': (self.brain_mask_spec_name, nifti_gz_format),
'grad_fsl': self.fsl_grads(pipeline)},
outputs={
'wm_odf': ('wm_odf', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.multi_tissue:
            dwi2fod.inputs.gm_odf = 'gm.mif'
            dwi2fod.inputs.csf_odf = 'csf.mif'
            pipeline.connect_input('gm_response', dwi2fod, 'gm_txt',
                                    text_format)
            pipeline.connect_input('csf_response', dwi2fod, 'csf_txt',
                                    text_format)
            pipeline.connect_output('gm_odf', dwi2fod, 'gm_odf',
                                    nifti_gz_format)
            pipeline.connect_output('csf_odf', dwi2fod, 'csf_odf',
                                    nifti_gz_format)
return pipeline
def extract_b0_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='extract_b0',
desc="Extract b0 image from a DWI study",
citations=[mrtrix_cite],
name_maps=name_maps)
extract_b0s = pipeline.add(
'extract_b0s',
ExtractDWIorB0(
bzero=True,
quiet=True),
inputs={
'fslgrad': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
mean = pipeline.add(
"mean",
MRMath(
axis=3,
operation='mean',
quiet=True),
inputs={
'in_files': (extract_b0s, 'out_file')},
requirements=[mrtrix_req.v('3.0rc3')])
pipeline.add(
"output_conversion",
MRConvert(
out_ext='.nii.gz',
quiet=True),
inputs={
'in_file': (mean, 'out_file')},
outputs={
'b0': ('out_file', nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
def global_tracking_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='global_tracking',
desc="Extract b0 image from a DWI study",
citations=[mrtrix_cite],
name_maps=name_maps)
mask = pipeline.add(
'mask',
DWI2Mask(),
inputs={
'grad_fsl': self.fsl_grads(pipeline),
'in_file': (self.series_preproc_spec_name, nifti_gz_format)},
requirements=[mrtrix_req.v('3.0rc3')])
tracking = pipeline.add(
'tracking',
Tractography(
select=self.parameter('num_global_tracks'),
cutoff=self.parameter('global_tracks_cutoff')),
inputs={
'seed_image': (mask, 'out_file'),
'in_file': ('wm_odf', mrtrix_image_format)},
outputs={
'global_tracks': ('out_file', mrtrix_track_format)},
requirements=[mrtrix_req.v('3.0rc3')])
if self.provided('anat_5tt'):
pipeline.connect_input('anat_5tt', tracking, 'act_file',
mrtrix_image_format)
return pipeline
def intrascan_alignment_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='affine_mat_generation',
desc=("Generation of the affine matrices for the main dwi "
"sequence starting from eddy motion parameters"),
citations=[fsl_cite],
name_maps=name_maps)
pipeline.add(
'gen_aff_mats',
AffineMatrixGeneration(),
inputs={
'reference_image': ('mag_preproc', nifti_gz_format),
'motion_parameters': ('eddy_par', eddy_par_format)},
outputs={
'align_mats': ('affine_matrices', motion_mats_format)})
return pipeline
def connectome_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='connectome',
desc=("Generate a connectome from whole brain connectivity"),
citations=[],
name_maps=name_maps)
aseg_path = pipeline.add(
'aseg_path',
AppendPath(
sub_paths=['mri', 'aparc+aseg.mgz']),
inputs={
'base_path': ('anat_fs_recon_all', directory_format)})
pipeline.add(
'connectome',
mrtrix3.BuildConnectome(),
inputs={
'in_file': ('global_tracks', mrtrix_track_format),
'in_parc': (aseg_path, 'out_path')},
outputs={
'connectome': ('out_file', csv_format)},
requirements=[mrtrix_req.v('3.0rc3')])
return pipeline
class DwiRefStudy(EpiStudy, metaclass=StudyMetaClass):
add_data_specs = [
InputFilesetSpec('reverse_phase', STD_IMAGE_FORMATS, optional=True)
]
desc = ("A special study used in the MR-PET motion correction algorithm to"
" perform distortion correction on the reverse-phase/reference b0 "
"scans by flipping it around and using the DWI series as the "
"reference")
def preprocess_pipeline(self, **name_maps):
if self.provided('reverse_phase'):
return self._topup_pipeline(**name_maps)
else:
return super().preprocess_pipeline(**name_maps)
def _topup_pipeline(self, **name_maps):
pipeline = self.new_pipeline(
name='preprocess_pipeline',
desc=("Topup distortion correction pipeline"),
citations=[fsl_cite],
name_maps=name_maps)
reorient_epi_in = pipeline.add(
'reorient_epi_in',
fsl.utils.Reorient2Std(),
inputs={
'in_file': ('magnitude', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
reorient_epi_opposite = pipeline.add(
'reorient_epi_opposite',
fsl.utils.Reorient2Std(),
inputs={
'in_file': ('reverse_phase', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
prep_dwi = pipeline.add(
'prepare_dwi',
PrepareDWI(
topup=True),
inputs={
'pe_dir': ('ped', str),
'ped_polarity': ('pe_angle', str),
'dwi': (reorient_epi_in, 'out_file'),
'dwi1': (reorient_epi_opposite, 'out_file')})
ped = pipeline.add(
'gen_config',
GenTopupConfigFiles(),
inputs={
'ped': (prep_dwi, 'pe')})
merge_outputs = pipeline.add(
'merge_files',
merge_lists(2),
inputs={
'in1': (prep_dwi, 'main'),
'in2': (prep_dwi, 'secondary')})
merge = pipeline.add(
'FslMerge',
FslMerge(
dimension='t',
output_type='NIFTI_GZ'),
inputs={
'in_files': (merge_outputs, 'out')},
requirements=[fsl_req.v('5.0.9')])
topup = pipeline.add(
'topup',
TOPUP(
output_type='NIFTI_GZ'),
inputs={
'in_file': (merge, 'merged_file'),
'encoding_file': (ped, 'config_file')},
requirements=[fsl_req.v('5.0.9')])
in_apply_tp = pipeline.add(
'in_apply_tp',
merge_lists(1),
inputs={
'in1': (reorient_epi_in, 'out_file')})
pipeline.add(
'applytopup',
ApplyTOPUP(
method='jac',
in_index=[1],
output_type='NIFTI_GZ'),
inputs={
'in_files': (in_apply_tp, 'out'),
'encoding_file': (ped, 'apply_topup_config'),
'in_topup_movpar': (topup, 'out_movpar'),
'in_topup_fieldcoef': (topup, 'out_fieldcoef')},
outputs={
'mag_preproc': ('out_corrected', nifti_gz_format)},
requirements=[fsl_req.v('5.0.9')])
return pipeline
| true
| true
|
f715f0ca82d3720e709126ea59e933b3baab9523
| 2,969
|
py
|
Python
|
test/features/steps/crud_table.py
|
lyrl/mycli
|
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
|
[
"BSD-3-Clause"
] | 10,997
|
2015-07-27T06:59:04.000Z
|
2022-03-31T07:49:26.000Z
|
test/features/steps/crud_table.py
|
lyrl/mycli
|
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
|
[
"BSD-3-Clause"
] | 937
|
2015-07-29T09:25:30.000Z
|
2022-03-30T23:54:03.000Z
|
test/features/steps/crud_table.py
|
lyrl/mycli
|
d62eefdc819a11ecdb97d93dd7ad1922d28a3795
|
[
"BSD-3-Clause"
] | 799
|
2015-07-27T13:13:49.000Z
|
2022-03-29T21:24:39.000Z
|
"""Steps for behavioral style tests are defined in this module.
Each step is defined by the string decorating it. This string is used
to call the step in "*.feature" file.
"""
import wrappers
from behave import when, then
from textwrap import dedent
@when('we create table')
def step_create_table(context):
"""Send create table."""
context.cli.sendline('create table a(x text);')
@when('we insert into table')
def step_insert_into_table(context):
"""Send insert into table."""
context.cli.sendline('''insert into a(x) values('xxx');''')
@when('we update table')
def step_update_table(context):
"""Send insert into table."""
context.cli.sendline('''update a set x = 'yyy' where x = 'xxx';''')
@when('we select from table')
def step_select_from_table(context):
"""Send select from table."""
context.cli.sendline('select * from a;')
@when('we delete from table')
def step_delete_from_table(context):
"""Send deete from table."""
context.cli.sendline('''delete from a where x = 'yyy';''')
@when('we drop table')
def step_drop_table(context):
"""Send drop table."""
context.cli.sendline('drop table a;')
@then('we see table created')
def step_see_table_created(context):
"""Wait to see create table output."""
wrappers.expect_exact(context, 'Query OK, 0 rows affected', timeout=2)
@then('we see record inserted')
def step_see_record_inserted(context):
"""Wait to see insert output."""
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see record updated')
def step_see_record_updated(context):
"""Wait to see update output."""
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see data selected')
def step_see_data_selected(context):
"""Wait to see select output."""
wrappers.expect_pager(
context, dedent("""\
+-----+\r
| x |\r
+-----+\r
| yyy |\r
+-----+\r
\r
"""), timeout=2)
wrappers.expect_exact(context, '1 row in set', timeout=2)
@then('we see record deleted')
def step_see_data_deleted(context):
"""Wait to see delete output."""
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see table dropped')
def step_see_table_dropped(context):
"""Wait to see drop output."""
wrappers.expect_exact(context, 'Query OK, 0 rows affected', timeout=2)
@when('we select null')
def step_select_null(context):
"""Send select null."""
context.cli.sendline('select null;')
@then('we see null selected')
def step_see_null_selected(context):
"""Wait to see null output."""
wrappers.expect_pager(
context, dedent("""\
+--------+\r
| NULL |\r
+--------+\r
| <null> |\r
+--------+\r
\r
"""), timeout=2)
wrappers.expect_exact(context, '1 row in set', timeout=2)
| 26.274336
| 74
| 0.629505
|
import wrappers
from behave import when, then
from textwrap import dedent
@when('we create table')
def step_create_table(context):
context.cli.sendline('create table a(x text);')
@when('we insert into table')
def step_insert_into_table(context):
context.cli.sendline('''insert into a(x) values('xxx');''')
@when('we update table')
def step_update_table(context):
context.cli.sendline('''update a set x = 'yyy' where x = 'xxx';''')
@when('we select from table')
def step_select_from_table(context):
context.cli.sendline('select * from a;')
@when('we delete from table')
def step_delete_from_table(context):
context.cli.sendline('''delete from a where x = 'yyy';''')
@when('we drop table')
def step_drop_table(context):
context.cli.sendline('drop table a;')
@then('we see table created')
def step_see_table_created(context):
wrappers.expect_exact(context, 'Query OK, 0 rows affected', timeout=2)
@then('we see record inserted')
def step_see_record_inserted(context):
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see record updated')
def step_see_record_updated(context):
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see data selected')
def step_see_data_selected(context):
wrappers.expect_pager(
context, dedent("""\
+-----+\r
| x |\r
+-----+\r
| yyy |\r
+-----+\r
\r
"""), timeout=2)
wrappers.expect_exact(context, '1 row in set', timeout=2)
@then('we see record deleted')
def step_see_data_deleted(context):
wrappers.expect_exact(context, 'Query OK, 1 row affected', timeout=2)
@then('we see table dropped')
def step_see_table_dropped(context):
wrappers.expect_exact(context, 'Query OK, 0 rows affected', timeout=2)
@when('we select null')
def step_select_null(context):
context.cli.sendline('select null;')
@then('we see null selected')
def step_see_null_selected(context):
wrappers.expect_pager(
context, dedent("""\
+--------+\r
| NULL |\r
+--------+\r
| <null> |\r
+--------+\r
\r
"""), timeout=2)
wrappers.expect_exact(context, '1 row in set', timeout=2)
| true
| true
|
f715f0d86c7d1a3a041efd85461b676d0a329b65
| 395
|
py
|
Python
|
django_dapp/migrations/0005_application_default.py
|
phonkee/django-desktopapp
|
bd89434470c9d80074e8911d24059f962934c52a
|
[
"MIT"
] | null | null | null |
django_dapp/migrations/0005_application_default.py
|
phonkee/django-desktopapp
|
bd89434470c9d80074e8911d24059f962934c52a
|
[
"MIT"
] | null | null | null |
django_dapp/migrations/0005_application_default.py
|
phonkee/django-desktopapp
|
bd89434470c9d80074e8911d24059f962934c52a
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2019-04-12 22:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_dapp', '0004_auto_20190412_2138'),
]
operations = [
migrations.AddField(
model_name='application',
name='default',
field=models.BooleanField(default=False),
),
]
| 20.789474
| 53
| 0.610127
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_dapp', '0004_auto_20190412_2138'),
]
operations = [
migrations.AddField(
model_name='application',
name='default',
field=models.BooleanField(default=False),
),
]
| true
| true
|
f715f13073c90b7260a27beedc68a5672549e84b
| 1,221
|
py
|
Python
|
desktop/libs/notebook/setup.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/libs/notebook/setup.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/libs/notebook/setup.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "notebook",
version = VERSION,
author = "Hue",
url = 'http://github.com/cloudera/hue',
description = "Type various snippets of code",
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'notebook=notebook' },
)
| 42.103448
| 74
| 0.72154
|
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "notebook",
version = VERSION,
author = "Hue",
url = 'http://github.com/cloudera/hue',
description = "Type various snippets of code",
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'notebook=notebook' },
)
| true
| true
|
f715f2ea5f3929680f1eec4293377f802b326b46
| 103
|
py
|
Python
|
game/urls.py
|
BehindLoader/strategy-try
|
f7f0007515804b2078bb18ae831a326e6e338bbd
|
[
"MIT"
] | null | null | null |
game/urls.py
|
BehindLoader/strategy-try
|
f7f0007515804b2078bb18ae831a326e6e338bbd
|
[
"MIT"
] | null | null | null |
game/urls.py
|
BehindLoader/strategy-try
|
f7f0007515804b2078bb18ae831a326e6e338bbd
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('get_all', views.get_all)
]
| 14.714286
| 34
| 0.708738
|
from django.urls import path
from . import views
urlpatterns = [
path('get_all', views.get_all)
]
| true
| true
|
f715f3a67df36230e6e7a6cbb43f59bd83e295a2
| 4,603
|
py
|
Python
|
pipeline/service/pipeline_engine_adapter/adapter_api.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | 1
|
2020-09-24T07:39:16.000Z
|
2020-09-24T07:39:16.000Z
|
pipeline/service/pipeline_engine_adapter/adapter_api.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:46:54.000Z
|
2021-06-10T22:54:45.000Z
|
pipeline/service/pipeline_engine_adapter/adapter_api.py
|
sdgdsffdsfff/bk-sops-tencent
|
e8aff91f822e79031e12b0f66943830f44ced506
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from pipeline.engine import api
from pipeline.log.models import LogEntry
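# Map fine-grained engine states onto the coarser set of states reported by
# this adapter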
STATE_MAP = {
'CREATED': 'RUNNING',
'READY': 'RUNNING',
'RUNNING': 'RUNNING',
'BLOCKED': 'BLOCKED',
'SUSPENDED': 'SUSPENDED',
'FINISHED': 'FINISHED',
'FAILED': 'FAILED',
'REVOKED': 'REVOKED'
}
def run_pipeline(pipeline_instance, instance_id=None, check_workers=True):
return api.start_pipeline(pipeline_instance, check_workers=check_workers)
def pause_pipeline(pipeline_id):
return api.pause_pipeline(pipeline_id)
def revoke_pipeline(pipeline_id):
return api.revoke_pipeline(pipeline_id)
def resume_pipeline(pipeline_id):
return api.resume_pipeline(pipeline_id)
def pause_activity(act_id):
return api.pause_node_appointment(act_id)
def resume_activity(act_id):
return api.resume_node_appointment(act_id)
def retry_activity(act_id, inputs=None):
return api.retry_node(act_id, inputs=inputs)
def skip_activity(act_id):
return api.skip_node(act_id)
def pause_subprocess(subprocess_id):
return api.pause_subprocess(subprocess_id)
def skip_exclusive_gateway(gateway_id, flow_id):
return api.skip_exclusive_gateway(gateway_id, flow_id)
def forced_fail(node_id):
return api.forced_fail(node_id)
def get_inputs(act_id):
return api.get_inputs(act_id)
def get_outputs(act_id):
return api.get_outputs(act_id)
def get_activity_histories(act_id):
histories = api.get_activity_histories(act_id)
for item in histories:
item['started_time'] = _better_time_or_none(item['started_time'])
item['finished_time'] = _better_time_or_none(item.pop('archived_time'))
return histories
def callback(act_id, data=None):
return api.activity_callback(act_id, data)
def get_state(node_id):
tree = api.get_status_tree(node_id, max_depth=100)
res = _map(tree)
    # collect the status of every descendant node
descendants = {}
_collect_descendants(tree, descendants)
res['children'] = descendants
# return
return res
def _get_node_state(tree):
status = []
    # return the mapped state when we reach a leaf node
if not tree.get('children', []):
return STATE_MAP[tree['state']]
# iterate children and get child state recursively
for identifier_code, child_tree in tree['children'].items():
status.append(_get_node_state(child_tree))
# summary parent state
return STATE_MAP[_get_parent_state_from_children_state(tree['state'], status)]
def _get_parent_state_from_children_state(parent_state, children_state_list):
"""
    @summary: Compute the parent node state from the states of its children
    @param parent_state: current state of the parent node
    @param children_state_list: list of states of the child nodes
    @return: the summarised parent state
"""
children_state_set = set(children_state_list)
if parent_state == 'BLOCKED':
if 'RUNNING' in children_state_set:
parent_state = 'RUNNING'
if 'FAILED' in children_state_set:
parent_state = 'FAILED'
return parent_state
def _collect_descendants(tree, descendants):
# iterate children for tree
for identifier_code, child_tree in tree['children'].items():
child_status = _map(child_tree)
descendants[identifier_code] = child_status
# collect children
if child_tree['children']:
_collect_descendants(child_tree, descendants)
def _better_time_or_none(time):
return time.strftime('%Y-%m-%d %H:%M:%S') if time else time
def _map(tree):
tree.setdefault('children', {})
return {
'id': tree['id'],
'state': _get_node_state(tree),
'start_time': _better_time_or_none(tree['started_time']),
'finish_time': _better_time_or_none(tree['archived_time']),
'loop': tree['loop'],
'retry': tree['retry'],
'skip': tree['skip']
}
def get_plain_log_for_node(node_id, history_id):
return LogEntry.objects.plain_log_for_node(node_id=node_id, history_id=history_id)
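A minimal usage sketch of the state aggregation defined above, assuming a configured bk-sops/Django environment in which this module is importable under the path given in this record's metadata; the status tree literal is hand-made and only carries the keys _get_node_state() actually reads.

from pipeline.service.pipeline_engine_adapter import adapter_api

def leaf(state):
    # a leaf node only needs a 'state' and an empty 'children' dict here
    return {'state': state, 'children': {}}

tree = {
    'state': 'BLOCKED',
    'children': {'node_a': leaf('FINISHED'), 'node_b': leaf('FAILED')},
}
# _get_node_state() collects ['FINISHED', 'FAILED'] from the children and
# _get_parent_state_from_children_state() promotes the BLOCKED parent to FAILED
# because at least one child failed.
print(adapter_api._get_node_state(tree))  # -> 'FAILED'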
| 27.39881
| 115
| 0.718662
|
from pipeline.engine import api
from pipeline.log.models import LogEntry
STATE_MAP = {
'CREATED': 'RUNNING',
'READY': 'RUNNING',
'RUNNING': 'RUNNING',
'BLOCKED': 'BLOCKED',
'SUSPENDED': 'SUSPENDED',
'FINISHED': 'FINISHED',
'FAILED': 'FAILED',
'REVOKED': 'REVOKED'
}
def run_pipeline(pipeline_instance, instance_id=None, check_workers=True):
return api.start_pipeline(pipeline_instance, check_workers=check_workers)
def pause_pipeline(pipeline_id):
return api.pause_pipeline(pipeline_id)
def revoke_pipeline(pipeline_id):
return api.revoke_pipeline(pipeline_id)
def resume_pipeline(pipeline_id):
return api.resume_pipeline(pipeline_id)
def pause_activity(act_id):
return api.pause_node_appointment(act_id)
def resume_activity(act_id):
return api.resume_node_appointment(act_id)
def retry_activity(act_id, inputs=None):
return api.retry_node(act_id, inputs=inputs)
def skip_activity(act_id):
return api.skip_node(act_id)
def pause_subprocess(subprocess_id):
return api.pause_subprocess(subprocess_id)
def skip_exclusive_gateway(gateway_id, flow_id):
return api.skip_exclusive_gateway(gateway_id, flow_id)
def forced_fail(node_id):
return api.forced_fail(node_id)
def get_inputs(act_id):
return api.get_inputs(act_id)
def get_outputs(act_id):
return api.get_outputs(act_id)
def get_activity_histories(act_id):
histories = api.get_activity_histories(act_id)
for item in histories:
item['started_time'] = _better_time_or_none(item['started_time'])
item['finished_time'] = _better_time_or_none(item.pop('archived_time'))
return histories
def callback(act_id, data=None):
return api.activity_callback(act_id, data)
def get_state(node_id):
tree = api.get_status_tree(node_id, max_depth=100)
res = _map(tree)
descendants = {}
_collect_descendants(tree, descendants)
res['children'] = descendants
return res
def _get_node_state(tree):
status = []
if not tree.get('children', []):
return STATE_MAP[tree['state']]
for identifier_code, child_tree in tree['children'].items():
status.append(_get_node_state(child_tree))
return STATE_MAP[_get_parent_state_from_children_state(tree['state'], status)]
def _get_parent_state_from_children_state(parent_state, children_state_list):
children_state_set = set(children_state_list)
if parent_state == 'BLOCKED':
if 'RUNNING' in children_state_set:
parent_state = 'RUNNING'
if 'FAILED' in children_state_set:
parent_state = 'FAILED'
return parent_state
def _collect_descendants(tree, descendants):
for identifier_code, child_tree in tree['children'].items():
child_status = _map(child_tree)
descendants[identifier_code] = child_status
if child_tree['children']:
_collect_descendants(child_tree, descendants)
def _better_time_or_none(time):
return time.strftime('%Y-%m-%d %H:%M:%S') if time else time
def _map(tree):
tree.setdefault('children', {})
return {
'id': tree['id'],
'state': _get_node_state(tree),
'start_time': _better_time_or_none(tree['started_time']),
'finish_time': _better_time_or_none(tree['archived_time']),
'loop': tree['loop'],
'retry': tree['retry'],
'skip': tree['skip']
}
def get_plain_log_for_node(node_id, history_id):
return LogEntry.objects.plain_log_for_node(node_id=node_id, history_id=history_id)
| true
| true
|
f715f47669d217a83d920335ed050f78d844a22c
| 5,720
|
py
|
Python
|
examples/jsonrpc/JSONRPCExample.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
examples/jsonrpc/JSONRPCExample.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
examples/jsonrpc/JSONRPCExample.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-11-18T14:17:59.000Z
|
2019-11-18T14:17:59.000Z
|
import pyjd # dummy in pyjs
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.TextArea import TextArea
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.ListBox import ListBox
from pyjamas.JSONService import JSONProxy
class JSONRPCExample:
def onModuleLoad(self):
self.TEXT_WAITING = "Waiting for response..."
self.TEXT_ERROR = "Server Error"
self.METHOD_ECHO = "Echo"
self.METHOD_REVERSE = "Reverse"
self.METHOD_UPPERCASE = "UPPERCASE"
self.METHOD_LOWERCASE = "lowercase"
self.METHOD_NONEXISTANT = "Non existant"
self.methods = [self.METHOD_ECHO, self.METHOD_REVERSE,
self.METHOD_UPPERCASE, self.METHOD_LOWERCASE,
self.METHOD_NONEXISTANT]
self.remote_php = EchoServicePHP()
self.remote_py = EchoServicePython()
self.status=Label()
self.text_area = TextArea()
self.text_area.setText("""{'Test'} [\"String\"]
\tTest Tab
Test Newline\n
after newline
""" + r"""Literal String:
{'Test'} [\"String\"]
""")
self.text_area.setCharacterWidth(80)
self.text_area.setVisibleLines(8)
self.method_list = ListBox()
self.method_list.setName("hello")
self.method_list.setVisibleItemCount(1)
for method in self.methods:
self.method_list.addItem(method)
self.method_list.setSelectedIndex(0)
method_panel = HorizontalPanel()
method_panel.add(HTML("Remote string method to call: "))
method_panel.add(self.method_list)
method_panel.setSpacing(8)
self.button_php = Button("Send to PHP Service", self)
self.button_py = Button("Send to Python Service", self)
buttons = HorizontalPanel()
buttons.add(self.button_php)
buttons.add(self.button_py)
buttons.setSpacing(8)
info = """<h2>JSON-RPC Example</h2>
<p>This example demonstrates the calling of server services with
<a href="http://json-rpc.org/">JSON-RPC</a>.
</p>
<p>Enter some text below, and press a button to send the text
to an Echo service on your server. An echo service simply sends the exact same text back that it receives.
</p>"""
panel = VerticalPanel()
panel.add(HTML(info))
panel.add(self.text_area)
panel.add(method_panel)
panel.add(buttons)
panel.add(self.status)
RootPanel().add(panel)
def onClick(self, sender):
self.status.setText(self.TEXT_WAITING)
method = self.methods[self.method_list.getSelectedIndex()]
text = self.text_area.getText()
# demonstrate proxy & callMethod()
if sender == self.button_php:
if method == self.METHOD_ECHO:
id = self.remote_php.echo(text, self)
elif method == self.METHOD_REVERSE:
id = self.remote_php.callMethod("reverse", [text], self)
elif method == self.METHOD_UPPERCASE:
id = self.remote_php.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = self.remote_php.lowercase(self, msg=text)
elif method == self.METHOD_NONEXISTANT:
id = self.remote_php.nonexistant(text, self)
else:
if method == self.METHOD_ECHO:
id = self.remote_py.echo(text, self)
elif method == self.METHOD_REVERSE:
id = self.remote_py.reverse(text, self)
elif method == self.METHOD_UPPERCASE:
id = self.remote_py.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = self.remote_py.lowercase(text, self)
elif method == self.METHOD_NONEXISTANT:
id = self.remote_py.nonexistant(text, self)
def onRemoteResponse(self, response, request_info):
self.status.setText(response)
def onRemoteError(self, code, errobj, request_info):
# onRemoteError gets the HTTP error code or 0 and
# errobj is an jsonrpc 2.0 error dict:
# {
# 'code': jsonrpc-error-code (integer) ,
# 'message': jsonrpc-error-message (string) ,
# 'data' : extra-error-data
# }
message = errobj['message']
if code != 0:
self.status.setText("HTTP error %d: %s" %
(code, message))
else:
code = errobj['code']
self.status.setText("JSONRPC Error %s: %s" %
(code, message))
class EchoServicePHP(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "services/EchoService.php", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
class EchoServicePython(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "services/EchoService.py", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
if __name__ == '__main__':
# for pyjd, set up a web server and load the HTML from there:
# this convinces the browser engine that the AJAX will be loaded
# from the same URI base as the URL, it's all a bit messy...
# Use the second pyjd.setup if you're using apache-php locally
# as described in the README
#pyjd.setup("http://127.0.0.1:8000/public/JSONRPCExample.html")
pyjd.setup("http://127.0.0.1/examples/jsonrpc/public/JSONRPCExample.html")
app = JSONRPCExample()
app.onModuleLoad()
pyjd.run()
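For orientation, a rough sketch of the JSON-RPC exchange the proxies above perform over HTTP POST; the exact payload pyjamas puts on the wire is not inspected here, so the field names below follow generic JSON-RPC conventions (the same ones the onRemoteError comment refers to) and should be treated as an assumption.

import json

# roughly what a call like remote_py.echo("Hello", self) POSTs to services/EchoService.py
request_body = json.dumps({"id": 1, "method": "echo", "params": ["Hello"]})

# a successful reply feeds onRemoteResponse() with the "result" value
ok_reply = json.loads('{"id": 1, "result": "Hello"}')

# an error reply feeds onRemoteError() with the HTTP status (or 0) plus the error dict
err_reply = json.loads(
    '{"id": 1, "error": {"code": -32601, "message": "Method not found", "data": null}}'
)
print(ok_reply["result"], err_reply["error"]["message"])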
| 38.389262
| 122
| 0.618706
|
import pyjd
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.TextArea import TextArea
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.ListBox import ListBox
from pyjamas.JSONService import JSONProxy
class JSONRPCExample:
def onModuleLoad(self):
self.TEXT_WAITING = "Waiting for response..."
self.TEXT_ERROR = "Server Error"
self.METHOD_ECHO = "Echo"
self.METHOD_REVERSE = "Reverse"
self.METHOD_UPPERCASE = "UPPERCASE"
self.METHOD_LOWERCASE = "lowercase"
self.METHOD_NONEXISTANT = "Non existant"
self.methods = [self.METHOD_ECHO, self.METHOD_REVERSE,
self.METHOD_UPPERCASE, self.METHOD_LOWERCASE,
self.METHOD_NONEXISTANT]
self.remote_php = EchoServicePHP()
self.remote_py = EchoServicePython()
self.status=Label()
self.text_area = TextArea()
self.text_area.setText("""{'Test'} [\"String\"]
\tTest Tab
Test Newline\n
after newline
""" + r"""Literal String:
{'Test'} [\"String\"]
""")
self.text_area.setCharacterWidth(80)
self.text_area.setVisibleLines(8)
self.method_list = ListBox()
self.method_list.setName("hello")
self.method_list.setVisibleItemCount(1)
for method in self.methods:
self.method_list.addItem(method)
self.method_list.setSelectedIndex(0)
method_panel = HorizontalPanel()
method_panel.add(HTML("Remote string method to call: "))
method_panel.add(self.method_list)
method_panel.setSpacing(8)
self.button_php = Button("Send to PHP Service", self)
self.button_py = Button("Send to Python Service", self)
buttons = HorizontalPanel()
buttons.add(self.button_php)
buttons.add(self.button_py)
buttons.setSpacing(8)
info = """<h2>JSON-RPC Example</h2>
<p>This example demonstrates the calling of server services with
<a href="http://json-rpc.org/">JSON-RPC</a>.
</p>
<p>Enter some text below, and press a button to send the text
to an Echo service on your server. An echo service simply sends the exact same text back that it receives.
</p>"""
panel = VerticalPanel()
panel.add(HTML(info))
panel.add(self.text_area)
panel.add(method_panel)
panel.add(buttons)
panel.add(self.status)
RootPanel().add(panel)
def onClick(self, sender):
self.status.setText(self.TEXT_WAITING)
method = self.methods[self.method_list.getSelectedIndex()]
text = self.text_area.getText()
if sender == self.button_php:
if method == self.METHOD_ECHO:
id = self.remote_php.echo(text, self)
elif method == self.METHOD_REVERSE:
id = self.remote_php.callMethod("reverse", [text], self)
elif method == self.METHOD_UPPERCASE:
id = self.remote_php.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = self.remote_php.lowercase(self, msg=text)
elif method == self.METHOD_NONEXISTANT:
id = self.remote_php.nonexistant(text, self)
else:
if method == self.METHOD_ECHO:
id = self.remote_py.echo(text, self)
elif method == self.METHOD_REVERSE:
id = self.remote_py.reverse(text, self)
elif method == self.METHOD_UPPERCASE:
id = self.remote_py.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = self.remote_py.lowercase(text, self)
elif method == self.METHOD_NONEXISTANT:
id = self.remote_py.nonexistant(text, self)
def onRemoteResponse(self, response, request_info):
self.status.setText(response)
def onRemoteError(self, code, errobj, request_info):
message = errobj['message']
if code != 0:
self.status.setText("HTTP error %d: %s" %
(code, message))
else:
code = errobj['code']
self.status.setText("JSONRPC Error %s: %s" %
(code, message))
class EchoServicePHP(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "services/EchoService.php", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
class EchoServicePython(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "services/EchoService.py", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
if __name__ == '__main__':
# Use the second pyjd.setup if you're using apache-php locally
pyjd.setup("http://127.0.0.1/examples/jsonrpc/public/JSONRPCExample.html")
app = JSONRPCExample()
app.onModuleLoad()
pyjd.run()
| true
| true
|
f715f48de694a6699da344700b6ccc25623f65f8
| 59,016
|
py
|
Python
|
haystack/nodes/reader/farm.py
|
ZanSara/haystack
|
b2e6dcc99899d9ad728d21f925c5300632683d4d
|
[
"Apache-2.0"
] | 1
|
2022-02-20T02:04:49.000Z
|
2022-02-20T02:04:49.000Z
|
haystack/nodes/reader/farm.py
|
shenyezh/haystack
|
2a674eaff7d711f38db1bd57ece9bb632fb928bd
|
[
"Apache-2.0"
] | null | null | null |
haystack/nodes/reader/farm.py
|
shenyezh/haystack
|
2a674eaff7d711f38db1bd57ece9bb632fb928bd
|
[
"Apache-2.0"
] | null | null | null |
from typing import List, Optional, Dict, Any, Union, Callable
import logging
import multiprocessing
from pathlib import Path
from collections import defaultdict
from time import perf_counter
import torch
from haystack.modeling.data_handler.data_silo import DataSilo, DistillationDataSilo
from haystack.modeling.data_handler.processor import SquadProcessor, Processor
from haystack.modeling.data_handler.dataloader import NamedDataLoader
from haystack.modeling.data_handler.inputs import QAInput, Question
from haystack.modeling.infer import QAInferencer
from haystack.modeling.model.optimization import initialize_optimizer
from haystack.modeling.model.predictions import QAPred, QACandidate
from haystack.modeling.model.adaptive_model import AdaptiveModel
from haystack.modeling.training import Trainer, DistillationTrainer, TinyBERTDistillationTrainer
from haystack.modeling.evaluation import Evaluator
from haystack.modeling.utils import set_all_seeds, initialize_device_settings
from haystack.schema import Document, Answer, Span
from haystack.document_stores import BaseDocumentStore
from haystack.nodes.reader import BaseReader
logger = logging.getLogger(__name__)
class FARMReader(BaseReader):
"""
Transformer based model for extractive Question Answering using the FARM framework (https://github.com/deepset-ai/FARM).
While the underlying model can vary (BERT, Roberta, DistilBERT, ...), the interface remains the same.
| With a FARMReader, you can:
- directly get predictions via predict()
- fine-tune the model on QA data via train()
"""
def __init__(
self,
model_name_or_path: str,
model_version: Optional[str] = None,
context_window_size: int = 150,
batch_size: int = 50,
use_gpu: bool = True,
no_ans_boost: float = 0.0,
return_no_answer: bool = False,
top_k: int = 10,
top_k_per_candidate: int = 3,
top_k_per_sample: int = 1,
num_processes: Optional[int] = None,
max_seq_len: int = 256,
doc_stride: int = 128,
progress_bar: bool = True,
duplicate_filtering: int = 0,
use_confidence_scores: bool = True,
proxies: Optional[Dict[str, str]] = None,
local_files_only=False,
force_download=False,
use_auth_token: Optional[Union[str, bool]] = None,
**kwargs,
):
"""
:param model_name_or_path: Directory of a saved model or the name of a public model e.g. 'bert-base-cased',
'deepset/bert-base-cased-squad2', 'distilbert-base-uncased-distilled-squad'.
See https://huggingface.co/models for full list of available models.
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param context_window_size: The size, in characters, of the window around the answer span that is used when
displaying the context around the answer.
:param batch_size: Number of samples the model receives in one batch for inference.
Memory consumption is much lower in inference mode. Recommendation: Increase the batch size
to a value so only a single batch is used.
:param use_gpu: Whether to use GPU (if available)
:param no_ans_boost: How much the no_answer logit is boosted/increased.
If set to 0 (default), the no_answer logit is not changed.
If a negative number, there is a lower chance of "no_answer" being predicted.
If a positive number, there is an increased chance of "no_answer"
:param return_no_answer: Whether to include no_answer predictions in the results.
:param top_k: The maximum number of answers to return
:param top_k_per_candidate: How many answers to extract for each candidate doc that is coming from the retriever (might be a long text).
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param top_k_per_sample: How many answers to extract from each small text passage that the model can process at once
(one "candidate doc" is usually split into many smaller "passages").
You usually want a very small value here, as it slows down inference
and you don't gain much of quality by having multiple answers from one passage.
Note that this is not the number of "final answers" you will receive
(see `top_k` in FARMReader.predict() or Finder.get_answers() for that)
and that FARM includes no_answer in the sorted list of predictions.
:param num_processes: The number of processes for `multiprocessing.Pool`. Set to value of 0 to disable
multiprocessing. Set to None to let Inferencer determine optimum number. If you
want to debug the Language Model, you might need to disable multiprocessing!
:param max_seq_len: Max sequence length of one input text for the model
:param doc_stride: Length of striding window for splitting long texts (used if ``len(text) > max_seq_len``)
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
:param duplicate_filtering: Answers are filtered based on their position. Both start and end position of the answers are considered.
The higher the value, the farther apart answers may lie and still be filtered out as duplicates. 0 corresponds to exact duplicates. -1 turns off duplicate removal.
:param use_confidence_scores: Sets the type of score that is returned with every predicted answer.
`True` => a scaled confidence / relevance score between [0, 1].
This score can also be further calibrated on your dataset via self.eval()
(see https://haystack.deepset.ai/components/reader#confidence-scores) .
`False` => an unscaled, raw score [-inf, +inf] which is the sum of start and end logit
from the model for the predicted span.
:param proxies: Dict of proxy servers to use for downloading external models. Example: {'http': 'some.proxy:1234', 'http://hostname': 'my.proxy:3111'}
:param local_files_only: Whether to force checking for local files only (and forbid downloads)
:param force_download: Whether to force a (re-)download even if the model exists locally in the cache.
:param use_auth_token: API token used to download private models from Huggingface. If this parameter is set to `True`,
the local token will be used, which must have been created previously via `transformers-cli login`.
Additional information can be found here https://huggingface.co/transformers/main_classes/model.html#transformers.PreTrainedModel.from_pretrained
"""
# save init parameters to enable export of component config as YAML
self.set_config(
model_name_or_path=model_name_or_path,
model_version=model_version,
context_window_size=context_window_size,
batch_size=batch_size,
use_gpu=use_gpu,
no_ans_boost=no_ans_boost,
return_no_answer=return_no_answer,
top_k=top_k,
top_k_per_candidate=top_k_per_candidate,
top_k_per_sample=top_k_per_sample,
num_processes=num_processes,
max_seq_len=max_seq_len,
doc_stride=doc_stride,
progress_bar=progress_bar,
duplicate_filtering=duplicate_filtering,
proxies=proxies,
local_files_only=local_files_only,
force_download=force_download,
use_confidence_scores=use_confidence_scores,
**kwargs,
)
self.devices, _ = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)
self.return_no_answers = return_no_answer
self.top_k = top_k
self.top_k_per_candidate = top_k_per_candidate
self.inferencer = QAInferencer.load(
model_name_or_path,
batch_size=batch_size,
gpu=use_gpu,
task_type="question_answering",
max_seq_len=max_seq_len,
doc_stride=doc_stride,
num_processes=num_processes,
revision=model_version,
disable_tqdm=not progress_bar,
strict=False,
proxies=proxies,
local_files_only=local_files_only,
force_download=force_download,
devices=self.devices,
use_auth_token=use_auth_token,
**kwargs,
)
self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
self.inferencer.model.prediction_heads[0].n_best = top_k_per_candidate + 1 # including possible no_answer
try:
self.inferencer.model.prediction_heads[0].n_best_per_sample = top_k_per_sample
except:
logger.warning("Could not set `top_k_per_sample` in FARM. Please update FARM version.")
try:
self.inferencer.model.prediction_heads[0].duplicate_filtering = duplicate_filtering
except:
logger.warning("Could not set `duplicate_filtering` in FARM. Please update FARM version.")
self.max_seq_len = max_seq_len
self.use_gpu = use_gpu
self.progress_bar = progress_bar
self.use_confidence_scores = use_confidence_scores
def _training_procedure(
self,
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 2,
learning_rate: float = 1e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
teacher_model: Optional["FARMReader"] = None,
teacher_batch_size: Optional[int] = None,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
distillation_loss_weight: float = 0.5,
distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "kl_div",
temperature: float = 1.0,
tinybert: bool = False,
processor: Optional[Processor] = None,
):
if dev_filename:
dev_split = 0
if num_processes is None:
num_processes = multiprocessing.cpu_count() - 1 or 1
set_all_seeds(seed=42)
# For these variables, by default, we use the value set when initializing the FARMReader.
# These can also be manually set when train() is called if you want a different value at train vs inference
if use_gpu is None:
use_gpu = self.use_gpu
if max_seq_len is None:
max_seq_len = self.max_seq_len
devices, n_gpu = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)
if not save_dir:
save_dir = f"../../saved_models/{self.inferencer.model.language_model.name}"
if tinybert:
save_dir += "_tinybert_stage_1"
# 1. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
label_list = ["start_token", "end_token"]
metric = "squad"
if processor is None:
processor = SquadProcessor(
tokenizer=self.inferencer.processor.tokenizer,
max_seq_len=max_seq_len,
label_list=label_list,
metric=metric,
train_filename=train_filename,
dev_filename=dev_filename,
dev_split=dev_split,
test_filename=test_filename,
data_dir=Path(data_dir),
)
data_silo: DataSilo
# 2. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them
# and calculates a few descriptive statistics of our datasets
if (
teacher_model and not tinybert
): # checks if teacher model is passed as parameter, in that case assume model distillation is used
data_silo = DistillationDataSilo(
teacher_model,
teacher_batch_size or batch_size,
device=devices[0],
processor=processor,
batch_size=batch_size,
distributed=False,
max_processes=num_processes,
caching=caching,
cache_path=cache_path,
)
else: # caching would need too much memory for tinybert distillation so in that case we use the default data silo
data_silo = DataSilo(
processor=processor,
batch_size=batch_size,
distributed=False,
max_processes=num_processes,
caching=caching,
cache_path=cache_path,
)
# 3. Create an optimizer and pass the already initialized model
model, optimizer, lr_schedule = initialize_optimizer(
model=self.inferencer.model,
# model=self.inferencer.model,
learning_rate=learning_rate,
schedule_opts={"name": "LinearWarmup", "warmup_proportion": warmup_proportion},
n_batches=len(data_silo.loaders["train"]),
n_epochs=n_epochs,
device=devices[0],
use_amp=use_amp,
)
# 4. Feed everything to the Trainer, which takes care of growing our model and evaluates it from time to time
if tinybert:
if not teacher_model:
raise ValueError("TinyBERT distillation requires a teacher model.")
trainer = TinyBERTDistillationTrainer.create_or_load_checkpoint(
model=model,
teacher_model=teacher_model.inferencer.model, # teacher needs to be passed as teacher outputs aren't cached
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=devices[0],
use_amp=use_amp,
disable_tqdm=not self.progress_bar,
checkpoint_root_dir=Path(checkpoint_root_dir),
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
)
elif (
teacher_model
): # checks again if teacher model is passed as parameter, in that case assume model distillation is used
trainer = DistillationTrainer.create_or_load_checkpoint(
model=model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=devices[0],
use_amp=use_amp,
disable_tqdm=not self.progress_bar,
checkpoint_root_dir=Path(checkpoint_root_dir),
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
distillation_loss=distillation_loss,
distillation_loss_weight=distillation_loss_weight,
temperature=temperature,
)
else:
trainer = Trainer.create_or_load_checkpoint(
model=model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=devices[0],
use_amp=use_amp,
disable_tqdm=not self.progress_bar,
checkpoint_root_dir=Path(checkpoint_root_dir),
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
)
# 5. Let it grow!
self.inferencer.model = trainer.train()
self.save(Path(save_dir))
def train(
self,
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 2,
learning_rate: float = 1e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
):
"""
Fine-tune a model on a QA dataset. Options:
- Take a plain language model (e.g. `bert-base-cased`) and train it for QA (e.g. on SQuAD data)
- Take a QA model (e.g. `deepset/bert-base-cased-squad2`) and fine-tune it for your domain (e.g. using your labels collected via the haystack annotation tool)
Checkpoints can be stored via setting `checkpoint_every` to a custom number of steps.
If any checkpoints are stored, a subsequent run of train() will resume training from the latest available checkpoint.
:param data_dir: Path to directory containing your training data in SQuAD style
:param train_filename: Filename of training data
:param dev_filename: Filename of dev / eval data
:param test_filename: Filename of test data
:param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here
that gets split off from training data for eval.
:param use_gpu: Whether to use GPU (if available)
:param batch_size: Number of samples the model receives in one batch for training
:param n_epochs: Number of iterations on the whole training data set
:param learning_rate: Learning rate of the optimizer
:param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.
:param warmup_proportion: Proportion of training steps until maximum learning rate is reached.
Until that point LR is increasing linearly. After that it's decreasing again linearly.
Options for different schedules are available in FARM.
:param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset
:param save_dir: Path to store the final model
:param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.
Set to value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from train set.
Set to None to use all CPU cores minus one.
:param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
Available options:
None (Don't use AMP)
"O0" (Normal FP32 training)
"O1" (Mixed Precision => Recommended)
"O2" (Almost FP16)
"O3" (Pure FP16).
See details on: https://nvidia.github.io/apex/amp.html
:param checkpoint_root_dir: The path of the directory where all train checkpoints are saved. For each individual
checkpoint, a subdirectory with the name epoch_{epoch_num}_step_{step_num} is created.
:param checkpoint_every: save a train checkpoint after this many steps of training.
:param checkpoints_to_keep: maximum number of train checkpoints to save.
:param caching: whether or not to use caching for preprocessed dataset
:param cache_path: Path to cache the preprocessed dataset
:param processor: The processor to use for preprocessing. If None, the default SquadProcessor is used.
:return: None
"""
return self._training_procedure(
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
use_gpu=use_gpu,
batch_size=batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
max_seq_len=max_seq_len,
warmup_proportion=warmup_proportion,
dev_split=dev_split,
evaluate_every=evaluate_every,
save_dir=save_dir,
num_processes=num_processes,
use_amp=use_amp,
checkpoint_root_dir=checkpoint_root_dir,
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
caching=caching,
cache_path=cache_path,
)
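# Illustrative fine-tuning call (added sketch, not part of the original file):
# assumes SQuAD-style JSON files exist at the given paths; the model name and
# file names are placeholders.
#
#   reader = FARMReader(model_name_or_path="bert-base-cased", use_gpu=True)
#   reader.train(
#       data_dir="data/squad20",
#       train_filename="train-v2.0.json",
#       dev_filename="dev-v2.0.json",
#       n_epochs=2,
#       batch_size=10,
#       save_dir="my_model",
#   )
#   # the fine-tuned model is written to save_dir and can be reloaded later
#   # via FARMReader(model_name_or_path="my_model")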
def distil_prediction_layer_from(
self,
teacher_model: "FARMReader",
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
student_batch_size: int = 10,
teacher_batch_size: Optional[int] = None,
n_epochs: int = 2,
learning_rate: float = 3e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
distillation_loss_weight: float = 0.5,
distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "kl_div",
temperature: float = 1.0,
):
"""
Fine-tune a model on a QA dataset using logit-based distillation. You need to provide a teacher model that is already finetuned on the dataset
and a student model that will be trained using the teacher's logits. The idea of this is to increase the accuracy of a lightweight student model
using a more complex teacher.
Originally proposed in: https://arxiv.org/pdf/1503.02531.pdf
This can also be considered as the second stage of distillation finetuning as described in the TinyBERT paper:
https://arxiv.org/pdf/1909.10351.pdf
**Example**
```python
student = FARMReader(model_name_or_path="prajjwal1/bert-medium")
teacher = FARMReader(model_name_or_path="deepset/bert-large-uncased-whole-word-masking-squad2")
student.distil_prediction_layer_from(teacher, data_dir="squad2", train_filename="train.json", test_filename="dev.json",
learning_rate=3e-5, distillation_loss_weight=1.0, temperature=5)
```
Checkpoints can be stored via setting `checkpoint_every` to a custom number of steps.
If any checkpoints are stored, a subsequent run of train() will resume training from the latest available checkpoint.
:param teacher_model: Model whose logits will be used to improve accuracy
:param data_dir: Path to directory containing your training data in SQuAD style
:param train_filename: Filename of training data
:param dev_filename: Filename of dev / eval data
:param test_filename: Filename of test data
:param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here
that gets split off from training data for eval.
:param use_gpu: Whether to use GPU (if available)
:param student_batch_size: Number of samples the student model receives in one batch for training
:param teacher_batch_size: Number of samples the teacher model receives in one batch for distillation
:param n_epochs: Number of iterations on the whole training data set
:param learning_rate: Learning rate of the optimizer
:param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.
:param warmup_proportion: Proportion of training steps until maximum learning rate is reached.
Until that point LR is increasing linearly. After that it's decreasing again linearly.
Options for different schedules are available in FARM.
:param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset
:param save_dir: Path to store the final model
:param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.
Set to value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from train set.
Set to None to use all CPU cores minus one.
:param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
Available options:
None (Don't use AMP)
"O0" (Normal FP32 training)
"O1" (Mixed Precision => Recommended)
"O2" (Almost FP16)
"O3" (Pure FP16).
See details on: https://nvidia.github.io/apex/amp.html
:param checkpoint_root_dir: The path of the directory where all train checkpoints are saved. For each individual
checkpoint, a subdirectory with the name epoch_{epoch_num}_step_{step_num} is created.
:param checkpoint_every: save a train checkpoint after this many steps of training.
:param checkpoints_to_keep: maximum number of train checkpoints to save.
:param caching: whether or not to use caching for preprocessed dataset and teacher logits
:param cache_path: Path to cache the preprocessed dataset and teacher logits
:param distillation_loss_weight: The weight of the distillation loss. A higher weight means the teacher outputs are more important.
:param distillation_loss: Specifies how teacher and model logits should be compared. Can either be a string ("mse" for mean squared error or "kl_div" for kl divergence loss) or a callable loss function (needs to have named parameters student_logits and teacher_logits)
:param temperature: The temperature for distillation. A higher temperature will result in less certainty of teacher outputs. A lower temperature means more certainty. A temperature of 1.0 does not change the certainty of the model.
:param tinybert_loss: Whether to use the TinyBERT loss function for distillation. This requires the student to be a TinyBERT model and the teacher to be a finetuned version of bert-base-uncased.
:param tinybert_epochs: Number of epochs to train the student model with the TinyBERT loss function. After this many epochs, the student model is trained with the regular distillation loss function.
:param tinybert_learning_rate: Learning rate to use when training the student model with the TinyBERT loss function.
:param tinybert_train_filename: Filename of training data to use when training the student model with the TinyBERT loss function. To best follow the original paper, this should be an augmented version of the training data created using the augment_squad.py script. If not specified, the training data from the original training is used.
:param processor: The processor to use for preprocessing. If None, the default SquadProcessor is used.
:return: None
"""
return self._training_procedure(
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
use_gpu=use_gpu,
batch_size=student_batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
max_seq_len=max_seq_len,
warmup_proportion=warmup_proportion,
dev_split=dev_split,
evaluate_every=evaluate_every,
save_dir=save_dir,
num_processes=num_processes,
use_amp=use_amp,
checkpoint_root_dir=checkpoint_root_dir,
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
teacher_model=teacher_model,
teacher_batch_size=teacher_batch_size,
caching=caching,
cache_path=cache_path,
distillation_loss_weight=distillation_loss_weight,
distillation_loss=distillation_loss,
temperature=temperature,
)
def distil_intermediate_layers_from(
self,
teacher_model: "FARMReader",
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 5,
learning_rate: float = 5e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "mse",
temperature: float = 1.0,
processor: Optional[Processor] = None,
):
"""
The first stage of distillation finetuning as described in the TinyBERT paper:
https://arxiv.org/pdf/1909.10351.pdf
**Example**
```python
student = FARMReader(model_name_or_path="prajjwal1/bert-medium")
teacher = FARMReader(model_name_or_path="huawei-noah/TinyBERT_General_6L_768D")
student.distil_intermediate_layers_from(teacher, data_dir="squad2", train_filename="train.json", test_filename="dev.json",
learning_rate=3e-5, distillation_loss_weight=1.0, temperature=5)
```
Checkpoints can be stored via setting `checkpoint_every` to a custom number of steps.
If any checkpoints are stored, a subsequent run of train() will resume training from the latest available checkpoint.
:param teacher_model: Model whose logits will be used to improve accuracy
:param data_dir: Path to directory containing your training data in SQuAD style
:param train_filename: Filename of training data. To best follow the original paper, this should be an augmented version of the training data created using the augment_squad.py script
:param dev_filename: Filename of dev / eval data
:param test_filename: Filename of test data
:param dev_split: Instead of specifying a dev_filename, you can also specify a ratio (e.g. 0.1) here
that gets split off from training data for eval.
:param use_gpu: Whether to use GPU (if available)
:param batch_size: Number of samples the student (and teacher) model receives in one batch for training and distillation
:param n_epochs: Number of iterations on the whole training data set
:param learning_rate: Learning rate of the optimizer
:param max_seq_len: Maximum text length (in tokens). Everything longer gets cut down.
:param warmup_proportion: Proportion of training steps until maximum learning rate is reached.
Until that point LR is increasing linearly. After that it's decreasing again linearly.
Options for different schedules are available in FARM.
:param evaluate_every: Evaluate the model every X steps on the hold-out eval dataset
:param save_dir: Path to store the final model
:param num_processes: The number of processes for `multiprocessing.Pool` during preprocessing.
Set to value of 1 to disable multiprocessing. When set to 1, you cannot split away a dev set from train set.
Set to None to use all CPU cores minus one.
:param use_amp: Optimization level of NVIDIA's automatic mixed precision (AMP). The higher the level, the faster the model.
Available options:
None (Don't use AMP)
"O0" (Normal FP32 training)
"O1" (Mixed Precision => Recommended)
"O2" (Almost FP16)
"O3" (Pure FP16).
See details on: https://nvidia.github.io/apex/amp.html
:param checkpoint_root_dir: The path of the directory where all train checkpoints are saved. For each individual
checkpoint, a subdirectory with the name epoch_{epoch_num}_step_{step_num} is created.
:param checkpoint_every: save a train checkpoint after this many steps of training.
:param checkpoints_to_keep: maximum number of train checkpoints to save.
:param caching: whether or not to use caching for preprocessed dataset and teacher logits
:param cache_path: Path to cache the preprocessed dataset and teacher logits
:param distillation_loss_weight: The weight of the distillation loss. A higher weight means the teacher outputs are more important.
:param distillation_loss: Specifies how teacher and model logits should be compared. Can either be a string ("mse" for mean squared error or "kl_div" for kl divergence loss) or a callable loss function (needs to have named parameters student_logits and teacher_logits)
:param temperature: The temperature for distillation. A higher temperature will result in less certainty of teacher outputs. A lower temperature means more certainty. A temperature of 1.0 does not change the certainty of the model.
:param processor: The processor to use for preprocessing. If None, the default SquadProcessor is used.
:return: None
"""
return self._training_procedure(
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
use_gpu=use_gpu,
batch_size=batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
max_seq_len=max_seq_len,
warmup_proportion=warmup_proportion,
dev_split=dev_split,
evaluate_every=evaluate_every,
save_dir=save_dir,
num_processes=num_processes,
use_amp=use_amp,
checkpoint_root_dir=checkpoint_root_dir,
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
teacher_model=teacher_model,
teacher_batch_size=batch_size,
caching=caching,
cache_path=cache_path,
distillation_loss=distillation_loss,
temperature=temperature,
tinybert=True,
processor=processor,
)
def update_parameters(
self,
context_window_size: Optional[int] = None,
no_ans_boost: Optional[float] = None,
return_no_answer: Optional[bool] = None,
max_seq_len: Optional[int] = None,
doc_stride: Optional[int] = None,
):
"""
Hot update parameters of a loaded Reader. It may not be safe when processing concurrent requests.
"""
if no_ans_boost is not None:
self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
if return_no_answer is not None:
self.return_no_answers = return_no_answer
if doc_stride is not None:
self.inferencer.processor.doc_stride = doc_stride
if context_window_size is not None:
self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
if max_seq_len is not None:
self.inferencer.processor.max_seq_len = max_seq_len
self.max_seq_len = max_seq_len
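# Example of a hot update at inference time (illustrative, not in the original
# file): make "no answer" more likely and widen the displayed context without
# reloading the model.
#
#   reader.update_parameters(no_ans_boost=0.5, return_no_answer=True,
#                            context_window_size=300)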
def save(self, directory: Path):
"""
Saves the Reader model so that it can be reused at a later point in time.
:param directory: Directory where the Reader model should be saved
"""
logger.info(f"Saving reader model to {directory}")
self.inferencer.model.save(directory)
self.inferencer.processor.save(directory)
def predict_batch(self, query_doc_list: List[dict], top_k: int = None, batch_size: int = None):
"""
Use loaded QA model to find answers for a list of queries in each query's supplied list of Document.
Returns list of dictionaries containing answers sorted by (desc.) score
:param query_doc_list: List of dictionaries containing queries with their retrieved documents
:param top_k: The maximum number of answers to return for each query
:param batch_size: Number of samples the model receives in one batch for inference
:return: List of dictionaries containing query and answers
"""
if top_k is None:
top_k = self.top_k
# convert input to FARM format
inputs = []
number_of_docs = []
labels = []
# build input objects for inference_from_objects
for query_with_docs in query_doc_list:
documents = query_with_docs["docs"]
query = query_with_docs["question"]
labels.append(query)
number_of_docs.append(len(documents))
for doc in documents:
cur = QAInput(doc_text=doc.content, questions=Question(text=query.query, uid=doc.id))
inputs.append(cur)
self.inferencer.batch_size = batch_size
# make predictions on all document-query pairs
predictions = self.inferencer.inference_from_objects(
objects=inputs, return_json=False, multiprocessing_chunksize=10
)
# group predictions together
grouped_predictions = []
left_idx = 0
right_idx = 0
for number in number_of_docs:
right_idx = left_idx + number
grouped_predictions.append(predictions[left_idx:right_idx])
left_idx = right_idx
result = []
for idx, group in enumerate(grouped_predictions):
answers, max_no_ans_gap = self._extract_answers_of_predictions(group, top_k)
query = group[0].query
cur_label = labels[idx]
result.append({"query": query, "no_ans_gap": max_no_ans_gap, "answers": answers, "label": cur_label})
return result
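# Shape of query_doc_list expected by predict_batch() above, inferred from the
# loop over query_with_docs; whether "question" holds a plain string or a
# Label-like object exposing .query depends on the caller, so treat this
# sketch as an assumption:
#
#   query_doc_list = [
#       {"question": label_or_query_1, "docs": [doc_a, doc_b]},
#       {"question": label_or_query_2, "docs": [doc_c]},
#   ]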
def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
"""
Use loaded QA model to find answers for a query in the supplied list of Document.
Returns dictionaries containing answers sorted by (desc.) score.
Example:
```python
|{
| 'query': 'Who is the father of Arya Stark?',
| 'answers':[Answer(
| 'answer': 'Eddard,',
| 'context': "She travels with her father, Eddard, to King's Landing when he is",
| 'score': 0.9787139466668613,
|                 'offsets_in_context': [Span(start=29, end=35)],
|                 'offsets_in_document': [Span(start=347, end=353)],
| 'document_id': '88d1ed769d003939d3a0d28034464ab2'
| ),...
| ]
|}
```
:param query: Query string
:param documents: List of Document in which to search for the answer
:param top_k: The maximum number of answers to return
:return: Dict containing query and answers
"""
if top_k is None:
top_k = self.top_k
# convert input to FARM format
inputs = []
for doc in documents:
cur = QAInput(doc_text=doc.content, questions=Question(text=query, uid=doc.id))
inputs.append(cur)
# get answers from QA model
# TODO: Need fix in FARM's `to_dict` function of `QAInput` class
predictions = self.inferencer.inference_from_objects(
objects=inputs, return_json=False, multiprocessing_chunksize=1
)
# assemble answers from all the different documents & format them.
answers, max_no_ans_gap = self._extract_answers_of_predictions(predictions, top_k)
# TODO: potentially simplify return here to List[Answer] and handle no_ans_gap differently
result = {"query": query, "no_ans_gap": max_no_ans_gap, "answers": answers}
return result
def eval_on_file(self, data_dir: str, test_filename: str, device: Optional[str] = None):
"""
Performs evaluation on a SQuAD-formatted file.
Returns a dict containing the following metrics:
- "EM": exact match score
- "f1": F1-Score
- "top_n_accuracy": Proportion of predicted answers that overlap with correct answer
:param data_dir: The directory in which the test set can be found
:type data_dir: Path or str
:param test_filename: The name of the file containing the test data in SQuAD format.
:type test_filename: str
:param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda" or use the Reader's device by default.
:type device: str
"""
if device is None:
device = self.devices[0]
eval_processor = SquadProcessor(
tokenizer=self.inferencer.processor.tokenizer,
max_seq_len=self.inferencer.processor.max_seq_len,
label_list=self.inferencer.processor.tasks["question_answering"]["label_list"],
metric=self.inferencer.processor.tasks["question_answering"]["metric"],
train_filename=None,
dev_filename=None,
dev_split=0,
test_filename=test_filename,
data_dir=Path(data_dir),
)
data_silo = DataSilo(processor=eval_processor, batch_size=self.inferencer.batch_size, distributed=False)
data_loader = data_silo.get_data_loader("test")
evaluator = Evaluator(data_loader=data_loader, tasks=eval_processor.tasks, device=device)
eval_results = evaluator.eval(self.inferencer.model)
results = {
"EM": eval_results[0]["EM"],
"f1": eval_results[0]["f1"],
"top_n_accuracy": eval_results[0]["top_n_accuracy"],
}
return results
def eval(
self,
document_store: BaseDocumentStore,
device: Optional[str] = None,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold-label",
calibrate_conf_scores: bool = False,
):
"""
Performs evaluation on evaluation documents in the DocumentStore.
Returns a dict containing the following metrics:
- "EM": Proportion of exact matches of predicted answers with their corresponding correct answers
- "f1": Average overlap between predicted answers and their corresponding correct answers
- "top_n_accuracy": Proportion of predicted answers that overlap with correct answer
:param document_store: DocumentStore containing the evaluation documents
:param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda" or use the Reader's device by default.
:param label_index: Index/Table name where labeled questions are stored
:param doc_index: Index/Table name where documents that are used for evaluation are stored
:param label_origin: Field name where the gold labels are stored
:param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores
"""
if device is None:
device = self.devices[0]
if self.top_k_per_candidate != 4:
logger.info(
f"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \n"
f"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \n"
f"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5"
)
# extract all questions for evaluation
filters: Dict = {"origin": [label_origin]}
labels = document_store.get_all_labels(index=label_index, filters=filters)
# Aggregate all answer labels per question
aggregated_per_doc = defaultdict(list)
for label in labels:
if not label.document.id:
logger.error(f"Label does not contain a document id")
continue
aggregated_per_doc[label.document.id].append(label)
# Create squad style dicts
d: Dict[str, Any] = {}
all_doc_ids = [x.id for x in document_store.get_all_documents(doc_index)]
for doc_id in all_doc_ids:
doc = document_store.get_document_by_id(doc_id, index=doc_index)
if not doc:
logger.error(f"Document with the ID '{doc_id}' is not present in the document store.")
continue
d[str(doc_id)] = {"context": doc.content}
# get all questions / answers
# TODO check if we can simplify this by using MultiLabel
aggregated_per_question: Dict[tuple, Any] = defaultdict(list)
if doc_id in aggregated_per_doc:
for label in aggregated_per_doc[doc_id]:
aggregation_key = (doc_id, label.query)
if label.answer is None:
logger.error(f"Label.answer was None, but Answer object was expected: {label} ")
continue
if label.answer.offsets_in_document is None:
logger.error(
f"Label.answer.offsets_in_document was None, but Span object was expected: {label} "
)
continue
else:
# add to existing answers
# TODO offsets (whole block)
if aggregation_key in aggregated_per_question.keys():
if label.no_answer:
continue
else:
# Hack to fix problem where duplicate questions are merged by doc_store processing creating a QA example with 8 annotations > 6 annotation max
if len(aggregated_per_question[aggregation_key]["answers"]) >= 6:
logger.warning(
f"Answers in this sample are being dropped because it has more than 6 answers. (doc_id: {doc_id}, question: {label.query}, label_id: {label.id})"
)
continue
aggregated_per_question[aggregation_key]["answers"].append(
{
"text": label.answer.answer,
"answer_start": label.answer.offsets_in_document[0].start,
}
)
aggregated_per_question[aggregation_key]["is_impossible"] = False
# create new one
else:
# We don't need to create an answer dict if is_impossible / no_answer
if label.no_answer == True:
aggregated_per_question[aggregation_key] = {
"id": str(hash(str(doc_id) + label.query)),
"question": label.query,
"answers": [],
"is_impossible": True,
}
else:
aggregated_per_question[aggregation_key] = {
"id": str(hash(str(doc_id) + label.query)),
"question": label.query,
"answers": [
{
"text": label.answer.answer,
"answer_start": label.answer.offsets_in_document[0].start,
}
],
"is_impossible": False,
}
# Get rid of the question key again (after we aggregated we don't need it anymore)
d[str(doc_id)]["qas"] = [v for v in aggregated_per_question.values()]
# Convert input format for FARM
farm_input = [v for v in d.values()]
n_queries = len([y for x in farm_input for y in x["qas"]])
# Create DataLoader that can be passed to the Evaluator
tic = perf_counter()
indices = range(len(farm_input))
dataset, tensor_names, problematic_ids = self.inferencer.processor.dataset_from_dicts(
farm_input, indices=indices
)
data_loader = NamedDataLoader(dataset=dataset, batch_size=self.inferencer.batch_size, tensor_names=tensor_names)
evaluator = Evaluator(data_loader=data_loader, tasks=self.inferencer.processor.tasks, device=device)
eval_results = evaluator.eval(self.inferencer.model, calibrate_conf_scores=calibrate_conf_scores)
toc = perf_counter()
reader_time = toc - tic
results = {
"EM": eval_results[0]["EM"] * 100,
"f1": eval_results[0]["f1"] * 100,
"top_n_accuracy": eval_results[0]["top_n_accuracy"] * 100,
"top_n": self.inferencer.model.prediction_heads[0].n_best,
"reader_time": reader_time,
"seconds_per_query": reader_time / n_queries,
}
return results
def _extract_answers_of_predictions(self, predictions: List[QAPred], top_k: Optional[int] = None):
# Assemble answers from all the different documents and format them.
# For the 'no answer' option, we collect all no_ans_gaps and decide how likely
# a no answer is based on all no_ans_gaps values across all documents
answers: List[Answer] = []
no_ans_gaps = []
best_score_answer = 0
for pred in predictions:
answers_per_document = []
no_ans_gaps.append(pred.no_answer_gap)
for ans in pred.prediction:
# skip 'no answers' here
if self._check_no_answer(ans):
pass
else:
cur = Answer(
answer=ans.answer,
type="extractive",
score=ans.confidence if self.use_confidence_scores else ans.score,
context=ans.context_window,
document_id=pred.id,
offsets_in_context=[
Span(
start=ans.offset_answer_start - ans.offset_context_window_start,
end=ans.offset_answer_end - ans.offset_context_window_start,
)
],
offsets_in_document=[Span(start=ans.offset_answer_start, end=ans.offset_answer_end)],
)
answers_per_document.append(cur)
if ans.score > best_score_answer:
best_score_answer = ans.score
# Only take n best candidates. Answers coming back from FARM are sorted with decreasing relevance
answers += answers_per_document[: self.top_k_per_candidate]
# calculate the score for predicting 'no answer', relative to our best positive answer score
no_ans_prediction, max_no_ans_gap = self._calc_no_answer(
no_ans_gaps, best_score_answer, self.use_confidence_scores
)
if self.return_no_answers:
answers.append(no_ans_prediction)
# sort answers by score (descending) and select top-k
answers = sorted(answers, reverse=True)
answers = answers[:top_k]
return answers, max_no_ans_gap
def calibrate_confidence_scores(
self,
document_store: BaseDocumentStore,
device: Optional[str] = None,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold_label",
):
"""
Calibrates confidence scores on evaluation documents in the DocumentStore.
:param document_store: DocumentStore containing the evaluation documents
:param device: The device on which the tensors should be processed. Choose from "cpu" and "cuda" or use the Reader's device by default.
:param label_index: Index/Table name where labeled questions are stored
:param doc_index: Index/Table name where documents that are used for evaluation are stored
:param label_origin: Field name where the gold labels are stored
"""
if device is None:
device = self.devices[0]
self.eval(
document_store=document_store,
device=device,
label_index=label_index,
doc_index=doc_index,
label_origin=label_origin,
calibrate_conf_scores=True,
)
@staticmethod
def _check_no_answer(c: QACandidate):
# check for correct value in "answer"
if c.offset_answer_start == 0 and c.offset_answer_end == 0:
if c.answer != "no_answer":
logger.error(
"Invalid 'no_answer': Got a prediction for position 0, but answer string is not 'no_answer'"
)
if c.answer == "no_answer":
return True
else:
return False
def predict_on_texts(self, question: str, texts: List[str], top_k: Optional[int] = None):
"""
Use the loaded QA model to find answers for a question in the supplied list of texts.
Returns a dictionary containing answers sorted by (descending) score.
Example:
```python
|{
| 'question': 'Who is the father of Arya Stark?',
| 'answers':[
| {'answer': 'Eddard,',
| 'context': " She travels with her father, Eddard, to King's Landing when he is ",
| 'offset_answer_start': 147,
| 'offset_answer_end': 154,
| 'score': 0.9787139466668613,
| 'document_id': '1337'
| },...
| ]
|}
```
:param question: Question string
:param texts: List of texts (as strings) in which to search for answers
:param top_k: The maximum number of answers to return
:return: Dict containing question and answers
"""
documents = []
for text in texts:
documents.append(Document(content=text))
predictions = self.predict(question, documents, top_k)
return predictions
@classmethod
def convert_to_onnx(
cls,
model_name: str,
output_path: Path,
convert_to_float16: bool = False,
quantize: bool = False,
task_type: str = "question_answering",
opset_version: int = 11,
):
"""
Convert a PyTorch BERT model to ONNX format and write it to the specified output directory. The converted
ONNX model can be loaded in the `FARMReader` by passing the export path as the `model_name_or_path` param.
Usage:
`from haystack.reader.farm import FARMReader
from pathlib import Path
onnx_model_path = Path("roberta-onnx-model")
FARMReader.convert_to_onnx(model_name="deepset/bert-base-cased-squad2", output_path=onnx_model_path)
reader = FARMReader(onnx_model_path)`
:param model_name: transformers model name
:param output_path: Path to output the converted model
:param convert_to_float16: Many models use float32 precision by default. With the half precision of float16,
inference is faster on Nvidia GPUs with Tensor Cores, such as the T4 or V100. On older GPUs,
float32 could still be more performant.
:param quantize: Convert floating point numbers to integers
:param task_type: Type of task for the model. Available options: "question_answering" or "embeddings".
:param opset_version: ONNX opset version
"""
AdaptiveModel.convert_to_onnx(
model_name=model_name,
output_path=output_path,
task_type=task_type,
convert_to_float16=convert_to_float16,
quantize=quantize,
opset_version=opset_version,
)
| 51.407666
| 344
| 0.62561
|
from typing import List, Optional, Dict, Any, Union, Callable
import logging
import multiprocessing
from pathlib import Path
from collections import defaultdict
from time import perf_counter
import torch
from haystack.modeling.data_handler.data_silo import DataSilo, DistillationDataSilo
from haystack.modeling.data_handler.processor import SquadProcessor, Processor
from haystack.modeling.data_handler.dataloader import NamedDataLoader
from haystack.modeling.data_handler.inputs import QAInput, Question
from haystack.modeling.infer import QAInferencer
from haystack.modeling.model.optimization import initialize_optimizer
from haystack.modeling.model.predictions import QAPred, QACandidate
from haystack.modeling.model.adaptive_model import AdaptiveModel
from haystack.modeling.training import Trainer, DistillationTrainer, TinyBERTDistillationTrainer
from haystack.modeling.evaluation import Evaluator
from haystack.modeling.utils import set_all_seeds, initialize_device_settings
from haystack.schema import Document, Answer, Span
from haystack.document_stores import BaseDocumentStore
from haystack.nodes.reader import BaseReader
logger = logging.getLogger(__name__)
class FARMReader(BaseReader):
def __init__(
self,
model_name_or_path: str,
model_version: Optional[str] = None,
context_window_size: int = 150,
batch_size: int = 50,
use_gpu: bool = True,
no_ans_boost: float = 0.0,
return_no_answer: bool = False,
top_k: int = 10,
top_k_per_candidate: int = 3,
top_k_per_sample: int = 1,
num_processes: Optional[int] = None,
max_seq_len: int = 256,
doc_stride: int = 128,
progress_bar: bool = True,
duplicate_filtering: int = 0,
use_confidence_scores: bool = True,
proxies: Optional[Dict[str, str]] = None,
local_files_only=False,
force_download=False,
use_auth_token: Optional[Union[str, bool]] = None,
**kwargs,
):
self.set_config(
model_name_or_path=model_name_or_path,
model_version=model_version,
context_window_size=context_window_size,
batch_size=batch_size,
use_gpu=use_gpu,
no_ans_boost=no_ans_boost,
return_no_answer=return_no_answer,
top_k=top_k,
top_k_per_candidate=top_k_per_candidate,
top_k_per_sample=top_k_per_sample,
num_processes=num_processes,
max_seq_len=max_seq_len,
doc_stride=doc_stride,
progress_bar=progress_bar,
duplicate_filtering=duplicate_filtering,
proxies=proxies,
local_files_only=local_files_only,
force_download=force_download,
use_confidence_scores=use_confidence_scores,
**kwargs,
)
self.devices, _ = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)
self.return_no_answers = return_no_answer
self.top_k = top_k
self.top_k_per_candidate = top_k_per_candidate
self.inferencer = QAInferencer.load(
model_name_or_path,
batch_size=batch_size,
gpu=use_gpu,
task_type="question_answering",
max_seq_len=max_seq_len,
doc_stride=doc_stride,
num_processes=num_processes,
revision=model_version,
disable_tqdm=not progress_bar,
strict=False,
proxies=proxies,
local_files_only=local_files_only,
force_download=force_download,
devices=self.devices,
use_auth_token=use_auth_token,
**kwargs,
)
self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
self.inferencer.model.prediction_heads[0].n_best = top_k_per_candidate + 1
try:
self.inferencer.model.prediction_heads[0].n_best_per_sample = top_k_per_sample
except:
logger.warning("Could not set `top_k_per_sample` in FARM. Please update FARM version.")
try:
self.inferencer.model.prediction_heads[0].duplicate_filtering = duplicate_filtering
except:
logger.warning("Could not set `duplicate_filtering` in FARM. Please update FARM version.")
self.max_seq_len = max_seq_len
self.use_gpu = use_gpu
self.progress_bar = progress_bar
self.use_confidence_scores = use_confidence_scores
def _training_procedure(
self,
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 2,
learning_rate: float = 1e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
teacher_model: Optional["FARMReader"] = None,
teacher_batch_size: Optional[int] = None,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
distillation_loss_weight: float = 0.5,
distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "kl_div",
temperature: float = 1.0,
tinybert: bool = False,
processor: Optional[Processor] = None,
):
if dev_filename:
dev_split = 0
if num_processes is None:
num_processes = multiprocessing.cpu_count() - 1 or 1
set_all_seeds(seed=42)
if use_gpu is None:
use_gpu = self.use_gpu
if max_seq_len is None:
max_seq_len = self.max_seq_len
devices, n_gpu = initialize_device_settings(use_cuda=use_gpu, multi_gpu=False)
if not save_dir:
save_dir = f"../../saved_models/{self.inferencer.model.language_model.name}"
if tinybert:
save_dir += "_tinybert_stage_1"
label_list = ["start_token", "end_token"]
metric = "squad"
if processor is None:
processor = SquadProcessor(
tokenizer=self.inferencer.processor.tokenizer,
max_seq_len=max_seq_len,
label_list=label_list,
metric=metric,
train_filename=train_filename,
dev_filename=dev_filename,
dev_split=dev_split,
test_filename=test_filename,
data_dir=Path(data_dir),
)
data_silo: DataSilo
if (
teacher_model and not tinybert
):
data_silo = DistillationDataSilo(
teacher_model,
teacher_batch_size or batch_size,
device=devices[0],
processor=processor,
batch_size=batch_size,
distributed=False,
max_processes=num_processes,
caching=caching,
cache_path=cache_path,
)
else:
data_silo = DataSilo(
processor=processor,
batch_size=batch_size,
distributed=False,
max_processes=num_processes,
caching=caching,
cache_path=cache_path,
)
model, optimizer, lr_schedule = initialize_optimizer(
model=self.inferencer.model,
learning_rate=learning_rate,
schedule_opts={"name": "LinearWarmup", "warmup_proportion": warmup_proportion},
n_batches=len(data_silo.loaders["train"]),
n_epochs=n_epochs,
device=devices[0],
use_amp=use_amp,
)
if tinybert:
if not teacher_model:
raise ValueError("TinyBERT distillation requires a teacher model.")
trainer = TinyBERTDistillationTrainer.create_or_load_checkpoint(
model=model,
teacher_model=teacher_model.inferencer.model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=devices[0],
use_amp=use_amp,
disable_tqdm=not self.progress_bar,
checkpoint_root_dir=Path(checkpoint_root_dir),
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
)
elif (
teacher_model
):  # if a teacher model was passed here (and tinybert is False), standard model distillation is used
trainer = DistillationTrainer.create_or_load_checkpoint(
model=model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=devices[0],
use_amp=use_amp,
disable_tqdm=not self.progress_bar,
checkpoint_root_dir=Path(checkpoint_root_dir),
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
distillation_loss=distillation_loss,
distillation_loss_weight=distillation_loss_weight,
temperature=temperature,
)
else:
trainer = Trainer.create_or_load_checkpoint(
model=model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=devices[0],
use_amp=use_amp,
disable_tqdm=not self.progress_bar,
checkpoint_root_dir=Path(checkpoint_root_dir),
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
)
# 5. Let it grow!
self.inferencer.model = trainer.train()
self.save(Path(save_dir))
def train(
self,
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 2,
learning_rate: float = 1e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
):
return self._training_procedure(
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
use_gpu=use_gpu,
batch_size=batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
max_seq_len=max_seq_len,
warmup_proportion=warmup_proportion,
dev_split=dev_split,
evaluate_every=evaluate_every,
save_dir=save_dir,
num_processes=num_processes,
use_amp=use_amp,
checkpoint_root_dir=checkpoint_root_dir,
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
caching=caching,
cache_path=cache_path,
)
def distil_prediction_layer_from(
self,
teacher_model: "FARMReader",
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
student_batch_size: int = 10,
teacher_batch_size: Optional[int] = None,
n_epochs: int = 2,
learning_rate: float = 3e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
distillation_loss_weight: float = 0.5,
distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "kl_div",
temperature: float = 1.0,
):
return self._training_procedure(
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
use_gpu=use_gpu,
batch_size=student_batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
max_seq_len=max_seq_len,
warmup_proportion=warmup_proportion,
dev_split=dev_split,
evaluate_every=evaluate_every,
save_dir=save_dir,
num_processes=num_processes,
use_amp=use_amp,
checkpoint_root_dir=checkpoint_root_dir,
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
teacher_model=teacher_model,
teacher_batch_size=teacher_batch_size,
caching=caching,
cache_path=cache_path,
distillation_loss_weight=distillation_loss_weight,
distillation_loss=distillation_loss,
temperature=temperature,
)
def distil_intermediate_layers_from(
self,
teacher_model: "FARMReader",
data_dir: str,
train_filename: str,
dev_filename: Optional[str] = None,
test_filename: Optional[str] = None,
use_gpu: Optional[bool] = None,
batch_size: int = 10,
n_epochs: int = 5,
learning_rate: float = 5e-5,
max_seq_len: Optional[int] = None,
warmup_proportion: float = 0.2,
dev_split: float = 0,
evaluate_every: int = 300,
save_dir: Optional[str] = None,
num_processes: Optional[int] = None,
use_amp: str = None,
checkpoint_root_dir: Path = Path("model_checkpoints"),
checkpoint_every: Optional[int] = None,
checkpoints_to_keep: int = 3,
caching: bool = False,
cache_path: Path = Path("cache/data_silo"),
distillation_loss: Union[str, Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = "mse",
temperature: float = 1.0,
processor: Optional[Processor] = None,
):
return self._training_procedure(
data_dir=data_dir,
train_filename=train_filename,
dev_filename=dev_filename,
test_filename=test_filename,
use_gpu=use_gpu,
batch_size=batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
max_seq_len=max_seq_len,
warmup_proportion=warmup_proportion,
dev_split=dev_split,
evaluate_every=evaluate_every,
save_dir=save_dir,
num_processes=num_processes,
use_amp=use_amp,
checkpoint_root_dir=checkpoint_root_dir,
checkpoint_every=checkpoint_every,
checkpoints_to_keep=checkpoints_to_keep,
teacher_model=teacher_model,
teacher_batch_size=batch_size,
caching=caching,
cache_path=cache_path,
distillation_loss=distillation_loss,
temperature=temperature,
tinybert=True,
processor=processor,
)
def update_parameters(
self,
context_window_size: Optional[int] = None,
no_ans_boost: Optional[float] = None,
return_no_answer: Optional[bool] = None,
max_seq_len: Optional[int] = None,
doc_stride: Optional[int] = None,
):
if no_ans_boost is not None:
self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
if return_no_answer is not None:
self.return_no_answers = return_no_answer
if doc_stride is not None:
self.inferencer.processor.doc_stride = doc_stride
if context_window_size is not None:
self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
if max_seq_len is not None:
self.inferencer.processor.max_seq_len = max_seq_len
self.max_seq_len = max_seq_len
def save(self, directory: Path):
logger.info(f"Saving reader model to {directory}")
self.inferencer.model.save(directory)
self.inferencer.processor.save(directory)
def predict_batch(self, query_doc_list: List[dict], top_k: int = None, batch_size: int = None):
if top_k is None:
top_k = self.top_k
# convert input to FARM format
inputs = []
number_of_docs = []
labels = []
# build input objects for inference_from_objects
for query_with_docs in query_doc_list:
documents = query_with_docs["docs"]
query = query_with_docs["question"]
labels.append(query)
number_of_docs.append(len(documents))
for doc in documents:
cur = QAInput(doc_text=doc.content, questions=Question(text=query.query, uid=doc.id))
inputs.append(cur)
self.inferencer.batch_size = batch_size
# make predictions on all document-query pairs
predictions = self.inferencer.inference_from_objects(
objects=inputs, return_json=False, multiprocessing_chunksize=10
)
# group predictions together
grouped_predictions = []
left_idx = 0
right_idx = 0
for number in number_of_docs:
right_idx = left_idx + number
grouped_predictions.append(predictions[left_idx:right_idx])
left_idx = right_idx
result = []
for idx, group in enumerate(grouped_predictions):
answers, max_no_ans_gap = self._extract_answers_of_predictions(group, top_k)
query = group[0].query
cur_label = labels[idx]
result.append({"query": query, "no_ans_gap": max_no_ans_gap, "answers": answers, "label": cur_label})
return result
def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
if top_k is None:
top_k = self.top_k
# convert input to FARM format
inputs = []
for doc in documents:
cur = QAInput(doc_text=doc.content, questions=Question(text=query, uid=doc.id))
inputs.append(cur)
# get answers from QA model
# TODO: Need fix in FARM's `to_dict` function of `QAInput` class
predictions = self.inferencer.inference_from_objects(
objects=inputs, return_json=False, multiprocessing_chunksize=1
)
answers, max_no_ans_gap = self._extract_answers_of_predictions(predictions, top_k)
result = {"query": query, "no_ans_gap": max_no_ans_gap, "answers": answers}
return result
def eval_on_file(self, data_dir: str, test_filename: str, device: Optional[str] = None):
if device is None:
device = self.devices[0]
eval_processor = SquadProcessor(
tokenizer=self.inferencer.processor.tokenizer,
max_seq_len=self.inferencer.processor.max_seq_len,
label_list=self.inferencer.processor.tasks["question_answering"]["label_list"],
metric=self.inferencer.processor.tasks["question_answering"]["metric"],
train_filename=None,
dev_filename=None,
dev_split=0,
test_filename=test_filename,
data_dir=Path(data_dir),
)
data_silo = DataSilo(processor=eval_processor, batch_size=self.inferencer.batch_size, distributed=False)
data_loader = data_silo.get_data_loader("test")
evaluator = Evaluator(data_loader=data_loader, tasks=eval_processor.tasks, device=device)
eval_results = evaluator.eval(self.inferencer.model)
results = {
"EM": eval_results[0]["EM"],
"f1": eval_results[0]["f1"],
"top_n_accuracy": eval_results[0]["top_n_accuracy"],
}
return results
def eval(
self,
document_store: BaseDocumentStore,
device: Optional[str] = None,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold-label",
calibrate_conf_scores: bool = False,
):
if device is None:
device = self.devices[0]
if self.top_k_per_candidate != 4:
logger.info(
f"Performing Evaluation using top_k_per_candidate = {self.top_k_per_candidate} \n"
f"and consequently, QuestionAnsweringPredictionHead.n_best = {self.top_k_per_candidate + 1}. \n"
f"This deviates from FARM's default where QuestionAnsweringPredictionHead.n_best = 5"
)
# extract all questions for evaluation
filters: Dict = {"origin": [label_origin]}
labels = document_store.get_all_labels(index=label_index, filters=filters)
# Aggregate all answer labels per question
aggregated_per_doc = defaultdict(list)
for label in labels:
if not label.document.id:
logger.error(f"Label does not contain a document id")
continue
aggregated_per_doc[label.document.id].append(label)
# Create squad style dicts
d: Dict[str, Any] = {}
all_doc_ids = [x.id for x in document_store.get_all_documents(doc_index)]
for doc_id in all_doc_ids:
doc = document_store.get_document_by_id(doc_id, index=doc_index)
if not doc:
logger.error(f"Document with the ID '{doc_id}' is not present in the document store.")
continue
d[str(doc_id)] = {"context": doc.content}
# get all questions / answers
# TODO check if we can simplify this by using MultiLabel
aggregated_per_question: Dict[tuple, Any] = defaultdict(list)
if doc_id in aggregated_per_doc:
for label in aggregated_per_doc[doc_id]:
aggregation_key = (doc_id, label.query)
if label.answer is None:
logger.error(f"Label.answer was None, but Answer object was expected: {label} ")
continue
if label.answer.offsets_in_document is None:
logger.error(
f"Label.answer.offsets_in_document was None, but Span object was expected: {label} "
)
continue
else:
# add to existing answers
# TODO offsets (whole block)
if aggregation_key in aggregated_per_question.keys():
if label.no_answer:
continue
else:
# Hack to fix a problem where duplicate questions merged by doc_store processing create a QA example with more than the 6-annotation maximum
if len(aggregated_per_question[aggregation_key]["answers"]) >= 6:
logger.warning(
f"Answers in this sample are being dropped because it has more than 6 answers. (doc_id: {doc_id}, question: {label.query}, label_id: {label.id})"
)
continue
aggregated_per_question[aggregation_key]["answers"].append(
{
"text": label.answer.answer,
"answer_start": label.answer.offsets_in_document[0].start,
}
)
aggregated_per_question[aggregation_key]["is_impossible"] = False
# create new one
else:
# We don't need to create an answer dict if is_impossible / no_answer
if label.no_answer == True:
aggregated_per_question[aggregation_key] = {
"id": str(hash(str(doc_id) + label.query)),
"question": label.query,
"answers": [],
"is_impossible": True,
}
else:
aggregated_per_question[aggregation_key] = {
"id": str(hash(str(doc_id) + label.query)),
"question": label.query,
"answers": [
{
"text": label.answer.answer,
"answer_start": label.answer.offsets_in_document[0].start,
}
],
"is_impossible": False,
}
d[str(doc_id)]["qas"] = [v for v in aggregated_per_question.values()]
# Convert input format for FARM
farm_input = [v for v in d.values()]
n_queries = len([y for x in farm_input for y in x["qas"]])
# Create DataLoader that can be passed to the Evaluator
tic = perf_counter()
indices = range(len(farm_input))
dataset, tensor_names, problematic_ids = self.inferencer.processor.dataset_from_dicts(
farm_input, indices=indices
)
data_loader = NamedDataLoader(dataset=dataset, batch_size=self.inferencer.batch_size, tensor_names=tensor_names)
evaluator = Evaluator(data_loader=data_loader, tasks=self.inferencer.processor.tasks, device=device)
eval_results = evaluator.eval(self.inferencer.model, calibrate_conf_scores=calibrate_conf_scores)
toc = perf_counter()
reader_time = toc - tic
results = {
"EM": eval_results[0]["EM"] * 100,
"f1": eval_results[0]["f1"] * 100,
"top_n_accuracy": eval_results[0]["top_n_accuracy"] * 100,
"top_n": self.inferencer.model.prediction_heads[0].n_best,
"reader_time": reader_time,
"seconds_per_query": reader_time / n_queries,
}
return results
def _extract_answers_of_predictions(self, predictions: List[QAPred], top_k: Optional[int] = None):
# Assemble answers from all the different documents and format them.
# For the 'no answer' option, we collect all no_ans_gaps and decide how likely
# a no answer is based on all no_ans_gaps values across all documents
answers: List[Answer] = []
no_ans_gaps = []
best_score_answer = 0
for pred in predictions:
answers_per_document = []
no_ans_gaps.append(pred.no_answer_gap)
for ans in pred.prediction:
# skip 'no answers' here
if self._check_no_answer(ans):
pass
else:
cur = Answer(
answer=ans.answer,
type="extractive",
score=ans.confidence if self.use_confidence_scores else ans.score,
context=ans.context_window,
document_id=pred.id,
offsets_in_context=[
Span(
start=ans.offset_answer_start - ans.offset_context_window_start,
end=ans.offset_answer_end - ans.offset_context_window_start,
)
],
offsets_in_document=[Span(start=ans.offset_answer_start, end=ans.offset_answer_end)],
)
answers_per_document.append(cur)
if ans.score > best_score_answer:
best_score_answer = ans.score
# Only take n best candidates. Answers coming back from FARM are sorted with decreasing relevance
answers += answers_per_document[: self.top_k_per_candidate]
# calculate the score for predicting 'no answer', relative to our best positive answer score
no_ans_prediction, max_no_ans_gap = self._calc_no_answer(
no_ans_gaps, best_score_answer, self.use_confidence_scores
)
if self.return_no_answers:
answers.append(no_ans_prediction)
# sort answers by score (descending) and select top-k
answers = sorted(answers, reverse=True)
answers = answers[:top_k]
return answers, max_no_ans_gap
def calibrate_confidence_scores(
self,
document_store: BaseDocumentStore,
device: Optional[str] = None,
label_index: str = "label",
doc_index: str = "eval_document",
label_origin: str = "gold_label",
):
if device is None:
device = self.devices[0]
self.eval(
document_store=document_store,
device=device,
label_index=label_index,
doc_index=doc_index,
label_origin=label_origin,
calibrate_conf_scores=True,
)
@staticmethod
def _check_no_answer(c: QACandidate):
# check for correct value in "answer"
if c.offset_answer_start == 0 and c.offset_answer_end == 0:
if c.answer != "no_answer":
logger.error(
"Invalid 'no_answer': Got a prediction for position 0, but answer string is not 'no_answer'"
)
if c.answer == "no_answer":
return True
else:
return False
def predict_on_texts(self, question: str, texts: List[str], top_k: Optional[int] = None):
documents = []
for text in texts:
documents.append(Document(content=text))
predictions = self.predict(question, documents, top_k)
return predictions
@classmethod
def convert_to_onnx(
cls,
model_name: str,
output_path: Path,
convert_to_float16: bool = False,
quantize: bool = False,
task_type: str = "question_answering",
opset_version: int = 11,
):
AdaptiveModel.convert_to_onnx(
model_name=model_name,
output_path=output_path,
task_type=task_type,
convert_to_float16=convert_to_float16,
quantize=quantize,
opset_version=opset_version,
)
| true
| true
|
f715f9eec1999c4f7c88c87f57493937d98df307
| 8,387
|
py
|
Python
|
thor/orbit.py
|
B612-Asteroid-Institute/thor
|
d3d1dcbe86f67a62c90b4cde3fc577e414825cf2
|
[
"BSD-3-Clause"
] | null | null | null |
thor/orbit.py
|
B612-Asteroid-Institute/thor
|
d3d1dcbe86f67a62c90b4cde3fc577e414825cf2
|
[
"BSD-3-Clause"
] | null | null | null |
thor/orbit.py
|
B612-Asteroid-Institute/thor
|
d3d1dcbe86f67a62c90b4cde3fc577e414825cf2
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from .utils import _checkTime
from .vectors import calcNae
from .vectors import calcDelta
from .vectors import calcXae
from .vectors import calcXa
from .vectors import calcNhat
from .vectors import calcR1
from .vectors import calcR2
from .projections import cartesianToGnomonic
from .coordinates import transformCoordinates
__all__ = ["TestOrbit"]
class TestOrbit:
"""
TestOrbit: Class that calculates and stores the rotation matrices
for a guess of heliocentric distance and velocity. To be used in
tandem with the Cell class.
Parameters
----------
elements : `~numpy.ndarray` (6)
Cartesian ecliptic orbital elements with positions in units of AU
and velocities in units of AU per day.
epoch : `~astropy.time.core.Time` (1)
Epoch at which orbital elements are defined.
"""
def __init__(self, elements, epoch):
_checkTime(epoch, "epoch")
self.elements = elements
self.epoch = epoch
def prepare(self, verbose=True):
"""
Calculate rotation matrices.
Populates the following class properties:
n_hat : vector normal to the plane of orbit
R1 : rotation matrix to rotate towards x-y plane
R2 : rotation matrix to rotate towards x-axis
M : final rotation matrix
Parameters
----------
verbose : bool, optional
Print progress statements.
[Default = True]
Returns
-------
None
"""
if verbose is True:
print("Calculating vector normal to plane of orbit...")
self.n_hat = calcNhat(self.elements[:3])
if verbose is True:
print("Calculating R1 rotation matrix...")
self.R1 = calcR1(self.elements[:3], self.n_hat)
self.x_a_xy = np.array(self.R1 @ self.elements[:3])[0]
if verbose is True:
print("Calculating R2 rotation matrix...")
self.R2 = calcR2(self.x_a_xy)
if verbose is True:
print("Calculating final rotation matrix...")
self.M = self.R2 @ self.R1
if verbose is True:
print("Done.")
print("")
return
def applyToObservations(self, observations, verbose=True):
"""
Apply the prepared rotations to the given observations. Adds the gnomonic
plane coordinates to observations (columns: theta_x_deg, theta_y_deg)
Parameters
----------
observations : `~pandas.DataFrame`
DataFrame of observations defined at the same epoch as this test orbit,
to project into the test orbit's frame.
verbose : bool, optional
Print progress statements?
[Default = True]
Returns
-------
None
"""
if verbose is True:
print("Applying rotation matrices to observations...")
print("Converting to ecliptic coordinates...")
#velocities_present = False
#if "vRAcosDec" in observations.columns and "vDec" in observations.columns:
# coords_eq_r = observations[["RA_deg", "Dec_deg"]].values
# coords_eq_v = observations[["vRAcosDec", "vDec"]].values
# coords_eq_v[:, 0] /= np.cos(np.radians(coords_eq_r[:, 1]))
# coords_eq = np.hstack([
# np.ones((len(coords_eq_r), 1)),
# coords_eq_r,
# np.zeros((len(coords_eq_r), 1)),
# coords_eq_v
# ])
# velocities_present = True
#else:
coords_eq = observations[["RA_deg", "Dec_deg"]].values
coords_eq = np.hstack([np.ones((len(coords_eq), 1)), coords_eq])
coords_ec = transformCoordinates(coords_eq,
"equatorial",
"ecliptic",
representation_in="spherical",
representation_out="spherical"
)
if verbose is True:
print("Calculating object to observer unit vector...")
n_ae = calcNae(coords_ec[:, 1:3])
x_e = observations[["obs_x", "obs_y", "obs_z"]].values
if verbose is True:
print("Calculating object to observer distance assuming r = {} AU...".format(np.linalg.norm(self.elements[:3])))
delta = np.zeros(len(n_ae))
for i in range(len(delta)):
delta[i] = calcDelta(np.linalg.norm(self.elements[:3]), x_e[i, :], n_ae[i, :])
if verbose is True:
print("Calculating object to observer position vector...")
x_ae = np.zeros([len(delta), 3])
for i, (delta_i, n_ae_i) in enumerate(zip(delta, n_ae)):
x_ae[i] = calcXae(delta_i, n_ae_i)
if verbose is True:
print("Calculating heliocentric object position vector...")
x_a = np.zeros([len(x_ae), 3])
for i, (x_ae_i, x_e_i) in enumerate(zip(x_ae, x_e)):
x_a[i] = calcXa(x_ae_i, x_e_i)
if verbose is True:
print("Applying rotation matrix M to heliocentric object position vector...")
coords_cart_rotated = np.array(self.M @ x_a.T).T
if verbose is True:
print("Performing gnomonic projection...")
gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)
observations["obj_x"] = x_a[:, 0]
observations["obj_y"] = x_a[:, 1]
observations["obj_z"] = x_a[:, 2]
observations["theta_x_deg"] = np.degrees(gnomonic_coords[:, 0])
observations["theta_y_deg"] = np.degrees(gnomonic_coords[:, 1])
observations["test_obj_x"] = self.elements[0]
observations["test_obj_y"] = self.elements[1]
observations["test_obj_z"] = self.elements[2]
observations["test_obj_vx"] = self.elements[3]
observations["test_obj_vy"] = self.elements[4]
observations["test_obj_vz"] = self.elements[5]
if verbose is True:
print("Done.")
print("")
return
def applyToEphemeris(self, ephemeris, verbose=True):
"""
Apply the prepared rotations to the given ephemerides. Adds the gnomonic
plane coordinates to the ephemeris (columns: theta_x_deg, theta_y_deg, vtheta_x_deg, and vtheta_y_deg)
Parameters
----------
ephemeris : `~pandas.DataFrame`
DataFrame of ephemeris generated by a THOR backend defined at the same epoch as this test orbit,
to project into the test orbit's frame.
verbose : bool, optional
Print progress statements?
[Default = True]
Returns
-------
None
"""
coords_cart = ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values
coords_cart_rotated = np.zeros_like(coords_cart)
if verbose is True:
print("Applying rotation matrix M to heliocentric object position vector...")
coords_cart_rotated[:, :3] = np.array(self.M @ coords_cart[:, :3].T).T
if verbose is True:
print("Applying rotation matrix M to heliocentric object velocity vector...")
# Calculate relative velocity, then rotate to projected frame
coords_cart[:, 3:] = coords_cart[:, 3:] - self.elements[3:].reshape(1, -1)
coords_cart_rotated[:, 3:] = np.array(self.M @ coords_cart[:, 3:].T).T
if verbose is True:
print("Performing gnomonic projection...")
gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)
ephemeris["theta_x_deg"] = np.degrees(gnomonic_coords[:, 0])
ephemeris["theta_y_deg"] = np.degrees(gnomonic_coords[:, 1])
ephemeris["vtheta_x_deg"] = np.degrees(gnomonic_coords[:, 2])
ephemeris["vtheta_y_deg"] = np.degrees(gnomonic_coords[:, 3])
ephemeris["test_obj_x"] = self.elements[0]
ephemeris["test_obj_y"] = self.elements[1]
ephemeris["test_obj_z"] = self.elements[2]
ephemeris["test_obj_vx"] = self.elements[3]
ephemeris["test_obj_vy"] = self.elements[4]
ephemeris["test_obj_vz"] = self.elements[5]
if verbose is True:
print("Done.")
print("")
return
| 37.779279
| 124
| 0.582211
|
import numpy as np
from .utils import _checkTime
from .vectors import calcNae
from .vectors import calcDelta
from .vectors import calcXae
from .vectors import calcXa
from .vectors import calcNhat
from .vectors import calcR1
from .vectors import calcR2
from .projections import cartesianToGnomonic
from .coordinates import transformCoordinates
__all__ = ["TestOrbit"]
class TestOrbit:
def __init__(self, elements, epoch):
_checkTime(epoch, "epoch")
self.elements = elements
self.epoch = epoch
def prepare(self, verbose=True):
if verbose is True:
print("Calculating vector normal to plane of orbit...")
self.n_hat = calcNhat(self.elements[:3])
if verbose is True:
print("Calculating R1 rotation matrix...")
self.R1 = calcR1(self.elements[:3], self.n_hat)
self.x_a_xy = np.array(self.R1 @ self.elements[:3])[0]
if verbose is True:
print("Calculating R2 rotation matrix...")
self.R2 = calcR2(self.x_a_xy)
if verbose is True:
print("Calculating final rotation matrix...")
self.M = self.R2 @ self.R1
if verbose is True:
print("Done.")
print("")
return
def applyToObservations(self, observations, verbose=True):
if verbose is True:
print("Applying rotation matrices to observations...")
print("Converting to ecliptic coordinates...")
coords_eq = observations[["RA_deg", "Dec_deg"]].values
coords_eq = np.hstack([np.ones((len(coords_eq), 1)), coords_eq])
coords_ec = transformCoordinates(coords_eq,
"equatorial",
"ecliptic",
representation_in="spherical",
representation_out="spherical"
)
if verbose is True:
print("Calculating object to observer unit vector...")
n_ae = calcNae(coords_ec[:, 1:3])
x_e = observations[["obs_x", "obs_y", "obs_z"]].values
if verbose is True:
print("Calculating object to observer distance assuming r = {} AU...".format(np.linalg.norm(self.elements[:3])))
delta = np.zeros(len(n_ae))
for i in range(len(delta)):
delta[i] = calcDelta(np.linalg.norm(self.elements[:3]), x_e[i, :], n_ae[i, :])
if verbose is True:
print("Calculating object to observer position vector...")
x_ae = np.zeros([len(delta), 3])
for i, (delta_i, n_ae_i) in enumerate(zip(delta, n_ae)):
x_ae[i] = calcXae(delta_i, n_ae_i)
if verbose is True:
print("Calculating heliocentric object position vector...")
x_a = np.zeros([len(x_ae), 3])
for i, (x_ae_i, x_e_i) in enumerate(zip(x_ae, x_e)):
x_a[i] = calcXa(x_ae_i, x_e_i)
if verbose is True:
print("Applying rotation matrix M to heliocentric object position vector...")
coords_cart_rotated = np.array(self.M @ x_a.T).T
if verbose is True:
print("Performing gnomonic projection...")
gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)
observations["obj_x"] = x_a[:, 0]
observations["obj_y"] = x_a[:, 1]
observations["obj_z"] = x_a[:, 2]
observations["theta_x_deg"] = np.degrees(gnomonic_coords[:, 0])
observations["theta_y_deg"] = np.degrees(gnomonic_coords[:, 1])
observations["test_obj_x"] = self.elements[0]
observations["test_obj_y"] = self.elements[1]
observations["test_obj_z"] = self.elements[2]
observations["test_obj_vx"] = self.elements[3]
observations["test_obj_vy"] = self.elements[4]
observations["test_obj_vz"] = self.elements[5]
if verbose is True:
print("Done.")
print("")
return
def applyToEphemeris(self, ephemeris, verbose=True):
coords_cart = ephemeris[["obj_x", "obj_y", "obj_z", "obj_vx", "obj_vy", "obj_vz"]].values
coords_cart_rotated = np.zeros_like(coords_cart)
if verbose is True:
print("Applying rotation matrix M to heliocentric object position vector...")
coords_cart_rotated[:, :3] = np.array(self.M @ coords_cart[:, :3].T).T
if verbose is True:
print("Applying rotation matrix M to heliocentric object velocity vector...")
coords_cart[:, 3:] = coords_cart[:, 3:] - self.elements[3:].reshape(1, -1)
coords_cart_rotated[:, 3:] = np.array(self.M @ coords_cart[:, 3:].T).T
if verbose is True:
print("Performing gnomonic projection...")
gnomonic_coords = cartesianToGnomonic(coords_cart_rotated)
ephemeris["theta_x_deg"] = np.degrees(gnomonic_coords[:, 0])
ephemeris["theta_y_deg"] = np.degrees(gnomonic_coords[:, 1])
ephemeris["vtheta_x_deg"] = np.degrees(gnomonic_coords[:, 2])
ephemeris["vtheta_y_deg"] = np.degrees(gnomonic_coords[:, 3])
ephemeris["test_obj_x"] = self.elements[0]
ephemeris["test_obj_y"] = self.elements[1]
ephemeris["test_obj_z"] = self.elements[2]
ephemeris["test_obj_vx"] = self.elements[3]
ephemeris["test_obj_vy"] = self.elements[4]
ephemeris["test_obj_vz"] = self.elements[5]
if verbose is True:
print("Done.")
print("")
return
| true
| true
|
f715fa3d62c55bf4f7f70f4b2e9a10454d261c5c
| 2,848
|
py
|
Python
|
python/test/testutil.py
|
AppScale/appengine-pipelines
|
277394648dac3e8214677af898935d07399ac8e1
|
[
"Apache-2.0"
] | 82
|
2015-01-13T03:24:32.000Z
|
2021-10-09T04:08:27.000Z
|
python/test/testutil.py
|
AppScale/appengine-pipelines
|
277394648dac3e8214677af898935d07399ac8e1
|
[
"Apache-2.0"
] | 57
|
2015-01-27T00:12:36.000Z
|
2020-10-30T16:47:05.000Z
|
python/test/testutil.py
|
AppScale/appengine-pipelines
|
277394648dac3e8214677af898935d07399ac8e1
|
[
"Apache-2.0"
] | 58
|
2015-01-22T21:32:26.000Z
|
2021-10-09T04:08:19.000Z
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test utilities for the Google App Engine Pipeline API."""
# Code originally from:
# http://code.google.com/p/pubsubhubbub/source/browse/trunk/hub/testutil.py
import logging
import os
import sys
import tempfile
class TestSetupMixin(object):
TEST_APP_ID = 'my-app-id'
TEST_VERSION_ID = 'my-version.1234'
def setUp(self):
super(TestSetupMixin, self).setUp()
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import memcache
from google.appengine.api import queueinfo
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
from google.appengine.ext.testbed import TASKQUEUE_SERVICE_NAME
before_level = logging.getLogger().getEffectiveLevel()
os.environ['APPLICATION_ID'] = self.TEST_APP_ID
os.environ['CURRENT_VERSION_ID'] = self.TEST_VERSION_ID
os.environ['HTTP_HOST'] = '%s.appspot.com' % self.TEST_APP_ID
os.environ['DEFAULT_VERSION_HOSTNAME'] = os.environ['HTTP_HOST']
os.environ['CURRENT_MODULE_ID'] = 'foo-module'
try:
logging.getLogger().setLevel(100)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(app_id=self.TEST_APP_ID, overwrite=True)
self.testbed.init_memcache_stub()
hr_policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
self.testbed.init_datastore_v3_stub(consistency_policy=hr_policy)
self.testbed.init_taskqueue_stub()
root_path = os.path.realpath(os.path.dirname(__file__))
# Actually need to flush, even though we've reallocated. Maybe because the
# memcache stub's cache is at the module level, not the API stub?
memcache.flush_all()
finally:
logging.getLogger().setLevel(before_level)
define_queues=['other']
taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
taskqueue_stub.queue_yaml_parser = (
lambda x: queueinfo.LoadSingleQueue(
'queue:\n- name: default\n rate: 1/s\n' +
'\n'.join('- name: %s\n rate: 1/s' % name
for name in define_queues)))
def tearDown(self):
super(TestSetupMixin, self).tearDown()
self.testbed.deactivate()
| 34.313253
| 84
| 0.720857
|
import logging
import os
import sys
import tempfile
class TestSetupMixin(object):
TEST_APP_ID = 'my-app-id'
TEST_VERSION_ID = 'my-version.1234'
def setUp(self):
super(TestSetupMixin, self).setUp()
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import memcache
from google.appengine.api import queueinfo
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
from google.appengine.ext.testbed import TASKQUEUE_SERVICE_NAME
before_level = logging.getLogger().getEffectiveLevel()
os.environ['APPLICATION_ID'] = self.TEST_APP_ID
os.environ['CURRENT_VERSION_ID'] = self.TEST_VERSION_ID
os.environ['HTTP_HOST'] = '%s.appspot.com' % self.TEST_APP_ID
os.environ['DEFAULT_VERSION_HOSTNAME'] = os.environ['HTTP_HOST']
os.environ['CURRENT_MODULE_ID'] = 'foo-module'
try:
logging.getLogger().setLevel(100)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(app_id=self.TEST_APP_ID, overwrite=True)
self.testbed.init_memcache_stub()
hr_policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
self.testbed.init_datastore_v3_stub(consistency_policy=hr_policy)
self.testbed.init_taskqueue_stub()
root_path = os.path.realpath(os.path.dirname(__file__))
# memcache stub's cache is at the module level, not the API stub?
memcache.flush_all()
finally:
logging.getLogger().setLevel(before_level)
define_queues=['other']
taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
taskqueue_stub.queue_yaml_parser = (
lambda x: queueinfo.LoadSingleQueue(
'queue:\n- name: default\n rate: 1/s\n' +
'\n'.join('- name: %s\n rate: 1/s' % name
for name in define_queues)))
def tearDown(self):
super(TestSetupMixin, self).tearDown()
self.testbed.deactivate()
| true
| true
|
f715fb59542e094790abdec20a4091318946f4e3
| 1,283
|
py
|
Python
|
ParserTest/ParserTest.py
|
isaacrez/ShowdownParser
|
965d5b35968978ad5101f3df3deede3219284154
|
[
"MIT"
] | null | null | null |
ParserTest/ParserTest.py
|
isaacrez/ShowdownParser
|
965d5b35968978ad5101f3df3deede3219284154
|
[
"MIT"
] | null | null | null |
ParserTest/ParserTest.py
|
isaacrez/ShowdownParser
|
965d5b35968978ad5101f3df3deede3219284154
|
[
"MIT"
] | null | null | null |
import unittest
from ParserTest.TestUtil import *
class TestParserMethods(unittest.TestCase):
DIRECT_KOs_ID = 2
PASSIVE_KOs_ID = 3
DEATHS_ID = 4
def test_direct_KO(self):
pokemon_data = {
"Raichu-Alola": ["p1", "Stokin' Dude!"],
"Magikarp": ["p2", "A Karp"]
}
simulator = ParserSimulator(pokemon_data)
simulator.load_all()
simulator.switch_in_all()
simulator.move("Stokin' Dude!", "Thunderbolt", "A Karp")
simulator.damage("A Karp")
def test_toxic_spikes(self):
pokemon_data = {
"Toxapex": ["p1", "The Worst"],
"Magikarp": ["p2", "Sushi Incarnate"],
"Pichu": ["p2", "Baby Pikachu"]
}
simulator = ParserSimulator(pokemon_data)
simulator.load_all()
simulator.switch_in_species("Toxapex")
simulator.switch_in_species("Magikarp")
simulator.move("The Worst", "Toxic Spikes", "Sushi Incarnate")
simulator.move("Sushi Incarnate", "Splash", "The Worst")
simulator.switch_in_species("Pichu")
simulator.damage("Baby Pikachu", "psn")
simulator.faint("Baby Pikachu")
def test_stealth_rocks(self):
pass
if __name__ == '__main__':
unittest.main()
| 27.891304
| 70
| 0.600156
|
import unittest
from ParserTest.TestUtil import *
class TestParserMethods(unittest.TestCase):
DIRECT_KOs_ID = 2
PASSIVE_KOs_ID = 3
DEATHS_ID = 4
def test_direct_KO(self):
pokemon_data = {
"Raichu-Alola": ["p1", "Stokin' Dude!"],
"Magikarp": ["p2", "A Karp"]
}
simulator = ParserSimulator(pokemon_data)
simulator.load_all()
simulator.switch_in_all()
simulator.move("Stokin' Dude!", "Thunderbolt", "A Karp")
simulator.damage("A Karp")
def test_toxic_spikes(self):
pokemon_data = {
"Toxapex": ["p1", "The Worst"],
"Magikarp": ["p2", "Sushi Incarnate"],
"Pichu": ["p2", "Baby Pikachu"]
}
simulator = ParserSimulator(pokemon_data)
simulator.load_all()
simulator.switch_in_species("Toxapex")
simulator.switch_in_species("Magikarp")
simulator.move("The Worst", "Toxic Spikes", "Sushi Incarnate")
simulator.move("Sushi Incarnate", "Splash", "The Worst")
simulator.switch_in_species("Pichu")
simulator.damage("Baby Pikachu", "psn")
simulator.faint("Baby Pikachu")
def test_stealth_rocks(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f715fcdc9f378810a87d7cb126f42c12bd2af0f1
| 1,256
|
py
|
Python
|
gan_test.py
|
Aitical/ADspeech2face
|
2e811ff8cc7333729f4b77d1b1067296253e8e38
|
[
"MIT"
] | 1
|
2022-01-27T14:19:04.000Z
|
2022-01-27T14:19:04.000Z
|
gan_test.py
|
Aitical/ADspeech2face
|
2e811ff8cc7333729f4b77d1b1067296253e8e38
|
[
"MIT"
] | null | null | null |
gan_test.py
|
Aitical/ADspeech2face
|
2e811ff8cc7333729f4b77d1b1067296253e8e38
|
[
"MIT"
] | null | null | null |
import os
import glob
import torch
import torchvision.utils as vutils
import webrtcvad
from mfcc import MFCC
from utils import voice2face
from tqdm import tqdm
import sys
from parse_config import get_model
import importlib
# initialization
vad_obj = webrtcvad.Vad(2)
mfc_obj = MFCC(nfilt=64, lowerf=20., upperf=7200., samprate=16000, nfft=1024, wlen=0.025)
config_name = sys.argv[1]
command = sys.argv[2]
model_config = importlib.import_module(f'configs.{config_name}')
dataset_config = model_config.dataset_config
model_config.generator['pretrained'] = True
e_net = get_model(model_config.voice_encoder)
g_net = get_model(model_config.generator)
voice_path = os.path.join(dataset_config['test_path'], '*/*/*.wav')
voice_list = glob.glob(voice_path)
for filename in tqdm(voice_list):
face_image = voice2face(e_net, g_net, filename, vad_obj, mfc_obj, stylegan=True)
face = face_image[0]
wav_file_path, wav_file_name = os.path.split(filename)
face_name = wav_file_name.replace('.wav', f'_{command}.png')
face_path = wav_file_path.replace('voxceleb', 'voxceleb_face')
os.makedirs(face_path, exist_ok=True)
vutils.save_image(face.detach().clamp(-1, 1),
os.path.join(face_path, face_name), normalize=True)
| 33.052632
| 89
| 0.755573
|
import os
import glob
import torch
import torchvision.utils as vutils
import webrtcvad
from mfcc import MFCC
from utils import voice2face
from tqdm import tqdm
import sys
from parse_config import get_model
import importlib
vad_obj = webrtcvad.Vad(2)
mfc_obj = MFCC(nfilt=64, lowerf=20., upperf=7200., samprate=16000, nfft=1024, wlen=0.025)
config_name = sys.argv[1]
command = sys.argv[2]
model_config = importlib.import_module(f'configs.{config_name}')
dataset_config = model_config.dataset_config
model_config.generator['pretrained'] = True
e_net = get_model(model_config.voice_encoder)
g_net = get_model(model_config.generator)
voice_path = os.path.join(dataset_config['test_path'], '*/*/*.wav')
voice_list = glob.glob(voice_path)
for filename in tqdm(voice_list):
face_image = voice2face(e_net, g_net, filename, vad_obj, mfc_obj, stylegan=True)
face = face_image[0]
wav_file_path, wav_file_name = os.path.split(filename)
face_name = wav_file_name.replace('.wav', f'_{command}.png')
face_path = wav_file_path.replace('voxceleb', 'voxceleb_face')
os.makedirs(face_path, exist_ok=True)
vutils.save_image(face.detach().clamp(-1, 1),
os.path.join(face_path, face_name), normalize=True)
| true
| true
|
f715fd4edad72a3f3aef57ff6fe4ac57d3a4a5ee
| 4,645
|
py
|
Python
|
Jokey/main.py
|
MilesWJ/Jokey
|
9d9ead1e4e643d4947327635d97a1320b898138a
|
[
"Unlicense"
] | null | null | null |
Jokey/main.py
|
MilesWJ/Jokey
|
9d9ead1e4e643d4947327635d97a1320b898138a
|
[
"Unlicense"
] | null | null | null |
Jokey/main.py
|
MilesWJ/Jokey
|
9d9ead1e4e643d4947327635d97a1320b898138a
|
[
"Unlicense"
] | null | null | null |
from datetime import datetime
import discord
from discord.ext import commands, tasks
from discord_slash import SlashCommand, SlashContext
from discord_slash.utils.manage_commands import create_option, create_choice
from json import loads
from itertools import cycle
from random import choice
from urllib import request
TOKEN = "YOUR TOKEN HERE"  # placeholder: replace with your bot token
GUILD_ID = 0  # placeholder: replace with your guild ID (an integer)
# Slash commands enabled, use those instead. ("application.commands" on discord.com/developers)
Jokey = commands.Bot(command_prefix="/")
slash = SlashCommand(Jokey, sync_commands=True)
URL = "https://v2.jokeapi.dev/joke/Any?type=twopart"
status = cycle(
["Minecraft",
"Garry's Mod",
"Grand Theft Auto V",
"Terraria",
"League of Legends"]
)
# ------------------------------------------------------------- #
# Bot Presence Loop
@tasks.loop(seconds=3600)
async def status_loop():
await Jokey.change_presence(activity=discord.Game(next(status)))
# ------------------------------------------------------------- #
# Bot Running Indicator
@Jokey.event
async def on_ready():
print(f"\n{Jokey.user} is running! (Started at {datetime.now()})")
status_loop.start()
# ------------------------------------------------------------- #
# Help Command
@slash.slash(
name="help",
description="Returns a list of available commands.",
guild_ids=[GUILD_ID],
)
async def _help(ctx: SlashContext):
messages = ["Here you go!", "Hope this helps!"]
with open("command_list.txt", "r") as command_list:
all_commands = command_list.read()
help_command_embed = discord.Embed(
title="ALL AVAILABLE COMMANDS",
color=discord.Color.blue(),
description=all_commands,
)
help_command_embed.set_author(name="Jokey", icon_url=Jokey.user.avatar_url)
await ctx.send(embed=help_command_embed)
# ------------------------------------------------------------- #
# Ping Command
@slash.slash(
name="ping",
description="Returns bot latency.",
guild_ids=[GUILD_ID],
)
async def _ping(ctx: SlashContext):
await ctx.send(f"Pong! ({round(Jokey.latency*1000)}ms)")
# ------------------------------------------------------------- #
# Invite Command
@slash.slash(
name="invite",
description="Returns the bot invite link.",
guild_ids=[GUILD_ID],
)
async def _invite(ctx: SlashContext):
invite_link = "https://discord.com/api/oauth2/authorize?client_id=873627985327030284&permissions=2147560512&scope=bot%20applications.commands"
# Required Scopes: bot, application.commands
# Required Permissions: Use Slash Commands, Send Messages, Read Message History, Manage Messages, View Channels, Add Reactions
# Permissions Integer: 2147560512
invite_command_embed = discord.Embed(
title="BOT INVITE LINK",
color=discord.Color.blue(),
description=invite_link
)
invite_command_embed.set_author(
name="Jokey", icon_url=Jokey.user.avatar_url)
await ctx.send(embed=invite_command_embed)
# ------------------------------------------------------------- #
# Clear Command
@slash.slash(
name="clear",
description="Clears a suggested amount of messages.",
guild_ids=[GUILD_ID],
options=[
create_option(
name="amount",
description="How many messages would you like to clear?",
required=True,
option_type=4,
)
]
)
@commands.has_permissions(manage_messages=True)
async def _clear(ctx: SlashContext, amount: int):
# Required Permissions: Manage Messages
if amount > 0:
if amount == 1:
await ctx.send(f"Clearing **{amount}** message...")
else:
await ctx.send(f"Clearing **{amount}** messages...")
await ctx.channel.purge(limit=amount + 1)
else:
await ctx.send(f"{ctx.author.mention} clear amount must be greater than 0.")
# ------------------------------------------------------------- #
# Joke Command (1/2)
def request_joke(url):
r = request.urlopen(url)
data = r.read()
json_data = loads(data)
information = [json_data["setup"], json_data["delivery"]]
joke = f"{information[0]} {information[1]}"
return joke
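# Shape of the two-part payload this parser expects from the URL above (hedged; the field
# values are illustrative): {"setup": "Why did ...?", "delivery": "Because ..."}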
# Joke Command (2/2)
@slash.slash(
name="joke",
description="Returns a random joke.",
guild_ids=[GUILD_ID],
)
async def _joke(ctx: SlashContext):
joke = await ctx.send(request_joke(URL))
await joke.add_reaction("👍")
await joke.add_reaction("👎")
# ------------------------------------------------------------- #
if __name__ == "__main__":
print(f"\nStarting bot...")
Jokey.run(TOKEN)
| 25.382514
| 146
| 0.60366
|
from datetime import datetime
import discord
from discord.ext import commands, tasks
from discord_slash import SlashCommand, SlashContext
from discord_slash.utils.manage_commands import create_option, create_choice
from json import loads
from itertools import cycle
from random import choice
from urllib import request
TOKEN = "YOUR TOKEN HERE"  # placeholder: replace with your bot token
GUILD_ID = 0  # placeholder: replace with your guild ID (an integer)
Jokey = commands.Bot(command_prefix="/")
slash = SlashCommand(Jokey, sync_commands=True)
URL = "https://v2.jokeapi.dev/joke/Any?type=twopart"
status = cycle(
["Minecraft",
"Garry's Mod",
"Grand Theft Auto V",
"Terraria",
"League of Legends"]
)
# ------------------------------------------------------------- #
# Bot Presence Loop
@tasks.loop(seconds=3600)
async def status_loop():
await Jokey.change_presence(activity=discord.Game(next(status)))
# ------------------------------------------------------------- #
# Bot Running Indicator
@Jokey.event
async def on_ready():
print(f"\n{Jokey.user} is running! (Started at {datetime.now()})")
status_loop.start()
# ------------------------------------------------------------- #
# Help Command
@slash.slash(
name="help",
description="Returns a list of available commands.",
guild_ids=[GUILD_ID],
)
async def _help(ctx: SlashContext):
messages = ["Here you go!", "Hope this helps!"]
with open("command_list.txt", "r") as command_list:
all_commands = command_list.read()
help_command_embed = discord.Embed(
title="ALL AVAILABLE COMMANDS",
color=discord.Color.blue(),
description=all_commands,
)
help_command_embed.set_author(name="Jokey", icon_url=Jokey.user.avatar_url)
await ctx.send(embed=help_command_embed)
# ------------------------------------------------------------- #
# Ping Command
@slash.slash(
name="ping",
description="Returns bot latency.",
guild_ids=[GUILD_ID],
)
async def _ping(ctx: SlashContext):
await ctx.send(f"Pong! ({round(Jokey.latency*1000)}ms)")
# ------------------------------------------------------------- #
# Invite Command
@slash.slash(
name="invite",
description="Returns the bot invite link.",
guild_ids=[GUILD_ID],
)
async def _invite(ctx: SlashContext):
invite_link = "https://discord.com/api/oauth2/authorize?client_id=873627985327030284&permissions=2147560512&scope=bot%20applications.commands"
# Required Scopes: bot, application.commands
# Required Permissions: Use Slash Commands, Send Messages, Read Message History, Manage Messages, View Channels, Add Reactions
# Permissions Integer: 2147560512
invite_command_embed = discord.Embed(
title="BOT INVITE LINK",
color=discord.Color.blue(),
description=invite_link
)
invite_command_embed.set_author(
name="Jokey", icon_url=Jokey.user.avatar_url)
await ctx.send(embed=invite_command_embed)
# ------------------------------------------------------------- #
# Clear Command
@slash.slash(
name="clear",
description="Clears a suggested amount of messages.",
guild_ids=[GUILD_ID],
options=[
create_option(
name="amount",
description="How many messages would you like to clear?",
required=True,
option_type=4,
)
]
)
@commands.has_permissions(manage_messages=True)
async def _clear(ctx: SlashContext, amount: int):
# Required Permissions: Manage Messages
if amount > 0:
if amount == 1:
await ctx.send(f"Clearing **{amount}** message...")
else:
await ctx.send(f"Clearing **{amount}** messages...")
await ctx.channel.purge(limit=amount + 1)
else:
await ctx.send(f"{ctx.author.mention} clear amount must be greater than 0.")
# ------------------------------------------------------------- #
# Joke Command (1/2)
def request_joke(url):
r = request.urlopen(url)
data = r.read()
json_data = loads(data)
information = [json_data["setup"], json_data["delivery"]]
joke = f"{information[0]} {information[1]}"
return joke
# Joke Command (2/2)
@slash.slash(
name="joke",
description="Returns a random joke.",
guild_ids=[GUILD_ID],
)
async def _joke(ctx: SlashContext):
joke = await ctx.send(request_joke(URL))
await joke.add_reaction("👍")
await joke.add_reaction("👎")
# ------------------------------------------------------------- #
if __name__ == "__main__":
print(f"\nStarting bot...")
Jokey.run(TOKEN)
| false
| true
|
f715fde92dd9503f60b1a71b39a46e6d2f9e42ad
| 8,020
|
py
|
Python
|
yatsm/cache.py
|
bullocke/yatsm_nrt
|
b0ded56032bf9f9dcdf6b7b749f6554ade56de1e
|
[
"MIT"
] | 2
|
2018-04-25T02:10:30.000Z
|
2021-07-30T03:57:49.000Z
|
yatsm/cache.py
|
bullocke/yatsm_nrt
|
b0ded56032bf9f9dcdf6b7b749f6554ade56de1e
|
[
"MIT"
] | null | null | null |
yatsm/cache.py
|
bullocke/yatsm_nrt
|
b0ded56032bf9f9dcdf6b7b749f6554ade56de1e
|
[
"MIT"
] | 1
|
2017-04-01T16:11:52.000Z
|
2017-04-01T16:11:52.000Z
|
""" Functions related to writing to and retrieving from cache files
"""
import os
import numpy as np
from log_yatsm import logger
_image_ID_str = 'image_IDs'
def get_line_cache_name(dataset_config, n_images, row, nbands):
""" Returns cache filename for specified config and line number
Args:
dataset_config (dict): configuration information about the dataset
n_images (int): number of images in dataset
row (int): line of the dataset for output
nbands (int): number of bands in dataset
Returns:
str: filename of cache file
"""
path = dataset_config.get('cache_line_dir')
if not path:
return
filename = 'yatsm_r%i_n%i_b%i.npy.npz' % (row, n_images, nbands)
return os.path.join(path, filename)
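# Hedged usage sketch; the config dict and numbers below are hypothetical,
# only the filename layout follows from the format string above:
#
#   cfg = {'cache_line_dir': '/tmp/yatsm_cache'}
#   get_line_cache_name(cfg, n_images=447, row=5, nbands=8)
#   # -> '/tmp/yatsm_cache/yatsm_r5_n447_b8.npy.npz'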
def get_line_cache_pattern(row, nbands, regex=False):
""" Returns a pattern for a cache file from a certain row
This function is useful for finding all cache files from a line, ignoring
the number of images in the file.
Args:
row (int): line of the dataset for output
nbands (int): number of bands in dataset
regex (bool, optional): return a regular expression instead of glob
style (default: False)
Returns:
str: filename pattern for cache files from line ``row``
"""
wildcard = '.*' if regex else '*'
pattern = 'yatsm_r{l}_n{w}_b{b}.npy.npz'.format(
l=row, w=wildcard, b=nbands)
return pattern
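# A minimal sketch of using the glob-style pattern to find every cache file
# for one line, regardless of how many images each was built from (the cache
# directory path is a placeholder):
#
#   import glob
#   pattern = get_line_cache_pattern(row=5, nbands=8)  # 'yatsm_r5_n*_b8.npy.npz'
#   matches = glob.glob(os.path.join('/tmp/yatsm_cache', pattern))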
def test_cache(dataset_config):
""" Test cache directory for ability to read from or write to
Args:
dataset_config (dict): dictionary of dataset configuration options
Returns:
tuple: tuple of bools describing ability to read from and write to
cache directory
"""
# Try to find / use cache
read_cache = False
write_cache = False
cache_dir = dataset_config.get('cache_line_dir')
if cache_dir:
# Test existence
if os.path.isdir(cache_dir):
if os.access(cache_dir, os.R_OK):
read_cache = True
if os.access(cache_dir, os.W_OK):
write_cache = True
if read_cache and not write_cache:
logger.warning('Cache directory exists but is not writable')
else:
# If it doesn't already exist, can we create it?
try:
os.makedirs(cache_dir)
            except OSError:
logger.warning('Could not create cache directory')
else:
read_cache = True
write_cache = True
logger.debug('Attempt reading in from cache directory?: {b}'.format(
b=read_cache))
logger.debug('Attempt writing to cache directory?: {b}'.format(
b=write_cache))
return read_cache, write_cache
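# Usage sketch: the returned pair simply gates cache reads and writes in the
# caller; `dataset_config` is whatever configuration dict the caller holds:
#
#   read_cache, write_cache = test_cache(dataset_config)
#   if read_cache:
#       pass  # e.g. try read_cache_file() before reading the raw imagery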
def read_cache_file(cache_filename, image_IDs=None):
""" Returns image data from a cache file
If ``image_IDs`` is not None this function will try to ensure data from
cache file come from the list of image IDs provided. If cache file does not
contain a list of image IDs, it will skip the check and return cache data.
Args:
cache_filename (str): cache filename
image_IDs (iterable, optional): list of image IDs corresponding to data
in cache file. If not specified, function will not check for
correspondence (default: None)
Returns:
np.ndarray, or None: Return Y as np.ndarray if possible and if the
cache file passes the consistency check specified by ``image_IDs``,
else None
"""
try:
cache = np.load(cache_filename)
except IOError:
return None
if _image_ID_str in cache.files and image_IDs is not None:
if not np.array_equal(image_IDs, cache[_image_ID_str]):
logger.warning('Cache file data in {f} do not match images '
'specified'.format(f=cache_filename))
return None
return cache['Y']
def write_cache_file(cache_filename, Y, image_IDs):
""" Writes data to a cache file using np.savez_compressed
Args:
cache_filename (str): cache filename
Y (np.ndarray): data to write to cache file
image_IDs (iterable): list of image IDs corresponding to data in cache
file. If not specified, function will not check for correspondence
"""
np.savez_compressed(cache_filename, **{
'Y': Y, _image_ID_str: image_IDs
})
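# Round-trip sketch combining the two helpers above; the array shape and the
# image IDs are illustrative stand-ins for real per-line data:
#
#   Y = np.zeros((8, 447, 250), dtype=np.int16)        # (bands, images, cols)
#   image_IDs = ['image_%04i' % i for i in range(447)]
#   write_cache_file('yatsm_r5_n447_b8.npy.npz', Y, image_IDs)
#   Y_back = read_cache_file('yatsm_r5_n447_b8.npy.npz', image_IDs=image_IDs)
#   # Y_back is None if the file cannot be read or the IDs do not match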
# Cache file updating
def update_cache_file(images, image_IDs,
old_cache_filename, new_cache_filename,
line, reader):
""" Modify an existing cache file to contain data within `images`
This should be useful for updating a set of cache files to reflect
modifications to the timeseries dataset without completely reading the
data into another cache file.
For example, the cache file could be updated to reflect the deletion of
a misregistered or cloudy image. Another common example would be for
updating cache files to include newly acquired observations.
Note that this updater will not handle updating cache files to include
new bands.
Args:
images (iterable): list of new image filenames
image_IDs (iterable): list of new image identifying strings
old_cache_filename (str): filename of cache file to update
new_cache_filename (str): filename of new cache file which includes
modified data
line (int): the line of data to be updated
reader (callable): GDAL or BIP image reader function from
:mod:`yatsm.io.stack_line_readers`
Raises:
ValueError: Raise error if old cache file does not record ``image_IDs``
"""
images = np.asarray(images)
image_IDs = np.asarray(image_IDs)
# Cannot proceed if old cache file doesn't store filenames
old_cache = np.load(old_cache_filename)
if _image_ID_str not in old_cache.files:
raise ValueError('Cannot update cache.'
'Old cache file does not store image IDs.')
old_IDs = old_cache[_image_ID_str]
old_Y = old_cache['Y']
nband, _, ncol = old_Y.shape
# Create new Y and add in values retained from old cache
new_Y = np.zeros((nband, image_IDs.size, ncol),
dtype=old_Y.dtype.type)
new_IDs = np.zeros(image_IDs.size, dtype=image_IDs.dtype)
# Check deletions -- find which indices to retain in new cache
retain_old = np.where(np.in1d(old_IDs, image_IDs))[0]
if retain_old.size == 0:
logger.warning('No image IDs in common in old cache file.')
else:
logger.debug(' retaining {r} of {n} images'.format(
r=retain_old.size, n=old_IDs.size))
# Find indices of old data to insert into new data
idx_old_IDs = np.argsort(old_IDs)
sorted_old_IDs = old_IDs[idx_old_IDs]
idx_IDs = np.searchsorted(sorted_old_IDs,
image_IDs[np.in1d(image_IDs, old_IDs)])
retain_old = idx_old_IDs[idx_IDs]
# Indices to insert into new data
retain_new = np.where(np.in1d(image_IDs, old_IDs))[0]
new_Y[:, retain_new, :] = old_Y[:, retain_old, :]
new_IDs[retain_new] = old_IDs[retain_old]
# Check additions -- find which indices we need to insert
insert = np.where(np.in1d(image_IDs, old_IDs, invert=True))[0]
if retain_old.size == 0 and insert.size == 0:
raise ValueError('Cannot update cache file -- '
'no data retained or added')
# Read in the remaining data from disk
if insert.size > 0:
logger.debug('Inserting {n} new images into cache'.format(
n=insert.size))
insert_Y = reader.read_row(images[insert], line)
new_Y[:, insert, :] = insert_Y
new_IDs[insert] = image_IDs[insert]
np.testing.assert_equal(new_IDs, image_IDs)
# Save
write_cache_file(new_cache_filename, new_Y, image_IDs)
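# Hedged call sketch: `new_images` / `new_IDs` would come from the refreshed
# timeseries CSV and `reader` from yatsm.io.stack_line_readers; every name
# and filename below is a placeholder:
#
#   update_cache_file(new_images, new_IDs,
#                     'yatsm_r5_n447_b8.npy.npz',
#                     'yatsm_r5_n450_b8.npy.npz',
#                     line=5, reader=gdal_reader)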
| 33.983051
| 79
| 0.646758
|
import os
import numpy as np
from log_yatsm import logger
_image_ID_str = 'image_IDs'
def get_line_cache_name(dataset_config, n_images, row, nbands):
path = dataset_config.get('cache_line_dir')
if not path:
return
filename = 'yatsm_r%i_n%i_b%i.npy.npz' % (row, n_images, nbands)
return os.path.join(path, filename)
def get_line_cache_pattern(row, nbands, regex=False):
wildcard = '.*' if regex else '*'
pattern = 'yatsm_r{l}_n{w}_b{b}.npy.npz'.format(
l=row, w=wildcard, b=nbands)
return pattern
def test_cache(dataset_config):
read_cache = False
write_cache = False
cache_dir = dataset_config.get('cache_line_dir')
if cache_dir:
if os.path.isdir(cache_dir):
if os.access(cache_dir, os.R_OK):
read_cache = True
if os.access(cache_dir, os.W_OK):
write_cache = True
if read_cache and not write_cache:
logger.warning('Cache directory exists but is not writable')
else:
try:
os.makedirs(cache_dir)
            except OSError:
logger.warning('Could not create cache directory')
else:
read_cache = True
write_cache = True
logger.debug('Attempt reading in from cache directory?: {b}'.format(
b=read_cache))
logger.debug('Attempt writing to cache directory?: {b}'.format(
b=write_cache))
return read_cache, write_cache
def read_cache_file(cache_filename, image_IDs=None):
try:
cache = np.load(cache_filename)
except IOError:
return None
if _image_ID_str in cache.files and image_IDs is not None:
if not np.array_equal(image_IDs, cache[_image_ID_str]):
logger.warning('Cache file data in {f} do not match images '
'specified'.format(f=cache_filename))
return None
return cache['Y']
def write_cache_file(cache_filename, Y, image_IDs):
np.savez_compressed(cache_filename, **{
'Y': Y, _image_ID_str: image_IDs
})
# Cache file updating
def update_cache_file(images, image_IDs,
old_cache_filename, new_cache_filename,
line, reader):
images = np.asarray(images)
image_IDs = np.asarray(image_IDs)
# Cannot proceed if old cache file doesn't store filenames
old_cache = np.load(old_cache_filename)
if _image_ID_str not in old_cache.files:
raise ValueError('Cannot update cache.'
'Old cache file does not store image IDs.')
old_IDs = old_cache[_image_ID_str]
old_Y = old_cache['Y']
nband, _, ncol = old_Y.shape
new_Y = np.zeros((nband, image_IDs.size, ncol),
dtype=old_Y.dtype.type)
new_IDs = np.zeros(image_IDs.size, dtype=image_IDs.dtype)
retain_old = np.where(np.in1d(old_IDs, image_IDs))[0]
if retain_old.size == 0:
logger.warning('No image IDs in common in old cache file.')
else:
logger.debug(' retaining {r} of {n} images'.format(
r=retain_old.size, n=old_IDs.size))
idx_old_IDs = np.argsort(old_IDs)
sorted_old_IDs = old_IDs[idx_old_IDs]
idx_IDs = np.searchsorted(sorted_old_IDs,
image_IDs[np.in1d(image_IDs, old_IDs)])
retain_old = idx_old_IDs[idx_IDs]
retain_new = np.where(np.in1d(image_IDs, old_IDs))[0]
new_Y[:, retain_new, :] = old_Y[:, retain_old, :]
new_IDs[retain_new] = old_IDs[retain_old]
insert = np.where(np.in1d(image_IDs, old_IDs, invert=True))[0]
if retain_old.size == 0 and insert.size == 0:
raise ValueError('Cannot update cache file -- '
'no data retained or added')
if insert.size > 0:
logger.debug('Inserting {n} new images into cache'.format(
n=insert.size))
insert_Y = reader.read_row(images[insert], line)
new_Y[:, insert, :] = insert_Y
new_IDs[insert] = image_IDs[insert]
np.testing.assert_equal(new_IDs, image_IDs)
write_cache_file(new_cache_filename, new_Y, image_IDs)
| true
| true
|
f715fe7e69213de66aedfbfecdb0bdd840ada5fd
| 985
|
py
|
Python
|
examples/plot_hue.py
|
mewbak/hypertools
|
bc2947737be8bd5a6e2a3bdca84132f6fee8989c
|
[
"MIT"
] | 1,681
|
2017-01-28T00:28:02.000Z
|
2022-03-11T00:57:13.000Z
|
examples/plot_hue.py
|
mewbak/hypertools
|
bc2947737be8bd5a6e2a3bdca84132f6fee8989c
|
[
"MIT"
] | 170
|
2017-01-27T22:59:09.000Z
|
2022-02-12T03:47:46.000Z
|
examples/plot_hue.py
|
mewbak/hypertools
|
bc2947737be8bd5a6e2a3bdca84132f6fee8989c
|
[
"MIT"
] | 180
|
2017-02-01T04:34:42.000Z
|
2022-02-22T15:46:23.000Z
|
# -*- coding: utf-8 -*-
"""
=============================
Grouping data by category
=============================
When plotting, it's useful to have a way to color points by some category or
variable. Hypertools does this using the `hue` kwarg, which takes a list
of string category labels or numerical values. If text labels are passed, the
data is restructured according to those labels and plotted in different colors
according to your color palette. If numerical values are passed, the values
are binned (default resolution: 100) and plotted according to your color
palette.
"""
# Code source: Andrew Heusser
# License: MIT
# import
import hypertools as hyp
import numpy as np
# load example data
geo = hyp.load('weights_sample')
data = geo.get_data()
# simulate random groups
hue=[]
for idx,i in enumerate(data):
tmp=[]
for iidx,ii in enumerate(i):
tmp.append(int(np.random.randint(1000, size=1)))
hue.append(tmp)
# plot
geo.plot(fmt='.', hue=hue)
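# The example above bins numerical hue values; passing text labels works the
# same way, with one color per unique label. A minimal sketch (the labels are
# invented and must mirror the shape of `data`):
#
# string_hue = [['early' if j < len(arr) // 2 else 'late' for j in range(len(arr))]
#               for arr in data]
# geo.plot(fmt='.', hue=string_hue)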
| 26.621622
| 78
| 0.683249
|
import hypertools as hyp
import numpy as np
geo = hyp.load('weights_sample')
data = geo.get_data()
hue=[]
for idx,i in enumerate(data):
tmp=[]
for iidx,ii in enumerate(i):
tmp.append(int(np.random.randint(1000, size=1)))
hue.append(tmp)
geo.plot(fmt='.', hue=hue)
| true
| true
|
f715ff5939535a01e6aa0c240e3f32c7ba477d37
| 1,866
|
py
|
Python
|
labyrinth_generator.py
|
ImTheTom/labyrinth-explorer
|
56fa7590aa93e11d0f2bc53f58de2194227a4034
|
[
"MIT"
] | null | null | null |
labyrinth_generator.py
|
ImTheTom/labyrinth-explorer
|
56fa7590aa93e11d0f2bc53f58de2194227a4034
|
[
"MIT"
] | null | null | null |
labyrinth_generator.py
|
ImTheTom/labyrinth-explorer
|
56fa7590aa93e11d0f2bc53f58de2194227a4034
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
#
# Python Script
#
# Copyleft © Manoel Vilela
#
#
WIDTH,HEIGHT = 2,3
from random import shuffle, randrange
def make_maze(w=WIDTH, h=HEIGHT):
vis = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)]
nowalls = []
def walk(x, y):
vis[x][y] = 1
d = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]
shuffle(d)
for (x_n, y_n) in d:
if vis[x_n][y_n]:
continue
nowalls.append((x, y, x_n, y_n))
walk(x_n, y_n)
walk(randrange(h), randrange(w))
return(nowalls)
def draw_maze(nowalls, w=WIDTH, h=HEIGHT):
ver = [["| "] * w + ['|'] for _ in range(h)] + [[]]
hor = [["+--"] * w + ['+'] for _ in range(h + 1)]
for (x, y, x_n, y_n) in nowalls:
if x_n == x:
ver[x][max(y, y_n)] = " "
if y_n == y:
hor[max(x, x_n)][y] = "+ "
arrange = []
for (a, b) in zip(hor, ver):
l = ''.join(a + ['\n'] + b).split('\n')
arrange.extend(l)
return arrange
def random_replace(maze, block):
from random import randint
x, y = randint(1, len(maze) - 2), randint(0, len(maze[0]) - 1)
if maze[x][y] == ' ':
maze[x] = maze[x][:y] + block + maze[x][y + 1:]
else:
maze = random_replace(maze, block)
return maze
def translate(maze):
from re import sub
return [sub(r'[\-\+\|]', 'W', x) for x in maze]
def draw(maze):
for x, line in enumerate(maze):
print('{:>2}'.format(x), line)
def generate(width,height,blocks='EP'):
nw = make_maze(width,height)
maze = draw_maze(nw,width,height)
# nwabs = nowallsabs(nw)
for block in blocks:
maze = random_replace(maze, block)
draw(maze)
translated = translate(maze)
return translated
if __name__ == '__main__':
    generate(WIDTH, HEIGHT)
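# Usage sketch: generate() prints the maze and returns it as a list of row
# strings in which every wall character has been replaced by 'W', with the
# extra `blocks` characters (here 'E' and 'P') dropped into random open cells.
# The call below and its output are illustrative only:
#
#   rows = generate(4, 4, blocks='EP')
#   # rows[0] -> 'WWWWWWWWWWWWW' (top border); inner rows mix 'W', spaces, 'E', 'P'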
| 21.448276
| 66
| 0.505895
|
WIDTH,HEIGHT = 2,3
from random import shuffle, randrange
def make_maze(w=WIDTH, h=HEIGHT):
vis = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)]
nowalls = []
def walk(x, y):
vis[x][y] = 1
d = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]
shuffle(d)
for (x_n, y_n) in d:
if vis[x_n][y_n]:
continue
nowalls.append((x, y, x_n, y_n))
walk(x_n, y_n)
walk(randrange(h), randrange(w))
return(nowalls)
def draw_maze(nowalls, w=WIDTH, h=HEIGHT):
ver = [["| "] * w + ['|'] for _ in range(h)] + [[]]
hor = [["+--"] * w + ['+'] for _ in range(h + 1)]
for (x, y, x_n, y_n) in nowalls:
if x_n == x:
ver[x][max(y, y_n)] = " "
if y_n == y:
hor[max(x, x_n)][y] = "+ "
arrange = []
for (a, b) in zip(hor, ver):
l = ''.join(a + ['\n'] + b).split('\n')
arrange.extend(l)
return arrange
def random_replace(maze, block):
from random import randint
x, y = randint(1, len(maze) - 2), randint(0, len(maze[0]) - 1)
if maze[x][y] == ' ':
maze[x] = maze[x][:y] + block + maze[x][y + 1:]
else:
maze = random_replace(maze, block)
return maze
def translate(maze):
from re import sub
return [sub(r'[\-\+\|]', 'W', x) for x in maze]
def draw(maze):
for x, line in enumerate(maze):
print('{:>2}'.format(x), line)
def generate(width,height,blocks='EP'):
nw = make_maze(width,height)
maze = draw_maze(nw,width,height)
for block in blocks:
maze = random_replace(maze, block)
draw(maze)
translated = translate(maze)
return translated
if __name__ == '__main__':
    generate(WIDTH, HEIGHT)
| true
| true
|
f715ffab1cd92657d37ddc8d113efdafe9821bad
| 8,233
|
py
|
Python
|
preprocess.py
|
gewoonrik/pullreqs-dnn
|
dbafd1866c1cd44424d238618e5ca54841c358c0
|
[
"MIT"
] | 1
|
2017-02-17T06:51:36.000Z
|
2017-02-17T06:51:36.000Z
|
preprocess.py
|
gewoonrik/pullreqs-dnn
|
dbafd1866c1cd44424d238618e5ca54841c358c0
|
[
"MIT"
] | null | null | null |
preprocess.py
|
gewoonrik/pullreqs-dnn
|
dbafd1866c1cd44424d238618e5ca54841c358c0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# (c) 2016 -- onwards Georgios Gousios <gousiosg@gmail.com>, Rik Nijessen <riknijessen@gmail.com>
#
from __future__ import print_function
import pickle
import random
import urllib
import numpy as np
import argparse
from config import *
from code_tokenizer import CodeTokenizer
from my_tokenizer import MyTokenizer
from keras.preprocessing.sequence import pad_sequences
@timeit
def load_pr_csv(file):
"""
Load a PR dataset, including all engineered features
:return: A pandas dataframe with all data loaded
"""
print("Loading pull requests file ", file)
pullreqs = pd.read_csv(file)
pullreqs.set_index(['project_name', 'github_id'])
return pullreqs
def ensure_diffs():
"""
Make sure that the PR diffs have been downloaded in the appropriate dir
"""
if not os.path.exists(DIFFS_DIR):
print("Downloading pull request diffs")
import tarfile
urllib.urlretrieve(DIFFS_DATA_URL, DIFFS_FILE)
tar = tarfile.open(DIFFS_FILE, "r:gz")
tar.extractall()
tar.close()
def read_title_and_comments(file):
str = open(file).read()
splitted = str.split("\n")
title = splitted[0]
# remove title and empty space
comment = str[2:]
return title, comment
@timeit
def create_code_tokenizer(code, vocabulary_size):
tokenizer = CodeTokenizer(nb_words=vocabulary_size)
tokenizer.fit_on_texts(code)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
return tokenizer
def create_text_tokenizer(texts, vocabulary_size):
tokenizer = MyTokenizer(nb_words=vocabulary_size)
tokenizer.fit_on_texts(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
return tokenizer
@timeit
def tokenize(tokenizer, texts, maxlen):
print("Tokenizing")
sequences = tokenizer.texts_to_sequences(texts)
return pad_sequences(sequences, maxlen=maxlen)
def load_data(pullreqs):
diffs = []
titles = []
comments = []
labels = []
successful = failed = 0
for i, row in pullreqs.iterrows():
try:
name = (row['project_name']).replace('/','@')+"@"+str(row['github_id'])+'.patch'
diff_file = os.path.join(DIFFS_DIR, name)
comment_file = os.path.join(TXTS_DIR, name.replace(".patch",".txt"))
diff = open(diff_file).read()
title, comment = read_title_and_comments(comment_file)
diffs.append(diff)
titles.append(title)
comments.append(comment)
labels.append(int(row['merged'] * 1))
successful += 1
except:
failed += 1
pass
print("%s diffs loaded, %s diffs failed" % (successful, failed), end='\r')
print("")
return diffs, comments, titles, labels
@timeit
def create_dataset(prefix="default",
diff_vocabulary_size=20000,
comment_vocabulary_size=20000,
title_vocabulary_size=20000,
max_diff_length=100,
max_comment_length=100,
max_title_length=100):
"""
Create a dataset for further processing
:param prefix: Name for the dataset
:param balance_ratio: The ratio between merged and unmerged PRs to include
:param num_diffs: Total number of diffs to load. Any value below 1 means load all diffs.
:param langs: Only include PRs for repos whose primary language is within this array
:param diff_vocabulary_size: (Max) size of the diff vocabulary to use for tokenizing
:param comment_vocabulary_size: (Max) size of the comment vocabulary to use for tokenizing
:param title_vocabulary_size: (Max) size of the title vocabulary to use for tokenizing
:param max_diff_length: Maximum length of the input diff sequences
:param max_comment_length: Maximum length of the input comment sequences
:param max_title_length: Maximum length of the input title sequences
:return: A training and testing dataset, along with the config used to produce it
"""
config = locals()
pullreqs_train = load_pr_csv(train_csv_file % prefix)
pullreqs_test = load_pr_csv(test_csv_file % prefix)
pullreqs_validation = load_pr_csv(validation_csv_file % prefix)
ensure_diffs()
tr_diffs, tr_comments, tr_titles, tr_labels = load_data(pullreqs_train)
val_diffs, val_comments, val_titles, val_labels = load_data(pullreqs_validation)
te_diffs, te_comments, te_titles, te_labels = load_data(pullreqs_test)
code_tokenizer = create_code_tokenizer(tr_diffs+val_diffs, diff_vocabulary_size)
diff_train = tokenize(code_tokenizer, tr_diffs, max_diff_length)
diff_val = tokenize(code_tokenizer, val_diffs, max_diff_length)
diff_test = tokenize(code_tokenizer, te_diffs, max_diff_length)
comment_tokenizer = create_text_tokenizer(tr_comments+val_comments, comment_vocabulary_size)
comment_train = tokenize(comment_tokenizer, tr_comments, max_comment_length)
    comment_val = tokenize(comment_tokenizer, val_comments, max_comment_length)
comment_test = tokenize(comment_tokenizer, te_comments, max_comment_length)
title_tokenizer = create_text_tokenizer(tr_titles+val_titles, title_vocabulary_size)
title_train = tokenize(title_tokenizer, tr_titles, max_title_length)
    title_val = tokenize(title_tokenizer, val_titles, max_title_length)
title_test = tokenize(title_tokenizer, te_titles, max_title_length)
y_train = np.asarray(tr_labels)
y_val = np.asarray(val_labels)
y_test = np.asarray(te_labels)
print('Shape of diff tensor:', diff_train.shape)
print('Shape of comment tensor:', comment_train.shape)
print('Shape of title tensor:', title_train.shape)
print('Shape of label tensor:', y_train.shape)
# Save dataset
with open(diff_vocab_file % prefix, 'w') as f:
pickle.dump(code_tokenizer, f)
with open(comment_vocab_file % prefix, 'w') as f:
pickle.dump(comment_tokenizer, f)
with open(title_vocab_file % prefix, 'w') as f:
pickle.dump(title_tokenizer, f)
with open(diff_train_file % prefix, 'w') as f:
pickle.dump(diff_train, f)
with open(comment_train_file % prefix, 'w') as f:
pickle.dump(comment_train, f)
with open(title_train_file % prefix, 'w') as f:
pickle.dump(title_train, f)
with open(y_train_file % prefix, 'w') as f:
pickle.dump(y_train, f)
with open(diff_val_file % prefix, 'w') as f:
pickle.dump(diff_val, f)
with open(comment_val_file % prefix, 'w') as f:
pickle.dump(comment_val, f)
with open(title_val_file % prefix, 'w') as f:
pickle.dump(title_val, f)
with open(y_val_file % prefix, 'w') as f:
pickle.dump(y_val, f)
# save testdata
with open(diff_test_file % prefix, 'w') as f:
pickle.dump(diff_test, f)
with open(comment_test_file % prefix, 'w') as f:
pickle.dump(comment_test, f)
with open(title_test_file % prefix, 'w') as f:
pickle.dump(title_test, f)
with open(y_test_file % prefix, 'w') as f:
pickle.dump(y_test, f)
with open(config_file % prefix, 'w') as f:
pickle.dump(config, f)
return diff_train, comment_train, title_train, y_train, diff_val, comment_val, title_val, y_val, diff_test, comment_test, title_test, y_test, config
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', default='default')
parser.add_argument('--diff_vocabulary_size', type=int, default=50000)
parser.add_argument('--comment_vocabulary_size', type=int, default=50000)
parser.add_argument('--title_vocabulary_size', type=int, default=10000)
parser.add_argument('--max_diff_sequence_length', type=int, default=150)
parser.add_argument('--max_comment_sequence_length', type=int, default=150)
parser.add_argument('--max_title_sequence_length', type=int, default=150)
args = parser.parse_args()
if __name__ == '__main__':
create_dataset(args.prefix, args.diff_vocabulary_size, args.comment_vocabulary_size, args.title_vocabulary_size, args.max_diff_sequence_length, args.max_comment_sequence_length, args.max_title_sequence_length)
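# Hedged invocation sketch; the flags mirror the argparse definitions above
# and the values shown are simply their defaults:
#
#   python preprocess.py --prefix default \
#       --diff_vocabulary_size 50000 --comment_vocabulary_size 50000 \
#       --title_vocabulary_size 10000 --max_diff_sequence_length 150 \
#       --max_comment_sequence_length 150 --max_title_sequence_length 150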
| 34.161826
| 213
| 0.703146
|
from __future__ import print_function
import pickle
import random
import urllib
import numpy as np
import argparse
from config import *
from code_tokenizer import CodeTokenizer
from my_tokenizer import MyTokenizer
from keras.preprocessing.sequence import pad_sequences
@timeit
def load_pr_csv(file):
print("Loading pull requests file ", file)
pullreqs = pd.read_csv(file)
pullreqs.set_index(['project_name', 'github_id'])
return pullreqs
def ensure_diffs():
if not os.path.exists(DIFFS_DIR):
print("Downloading pull request diffs")
import tarfile
urllib.urlretrieve(DIFFS_DATA_URL, DIFFS_FILE)
tar = tarfile.open(DIFFS_FILE, "r:gz")
tar.extractall()
tar.close()
def read_title_and_comments(file):
str = open(file).read()
splitted = str.split("\n")
title = splitted[0]
comment = str[2:]
return title, comment
@timeit
def create_code_tokenizer(code, vocabulary_size):
tokenizer = CodeTokenizer(nb_words=vocabulary_size)
tokenizer.fit_on_texts(code)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
return tokenizer
def create_text_tokenizer(texts, vocabulary_size):
tokenizer = MyTokenizer(nb_words=vocabulary_size)
tokenizer.fit_on_texts(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
return tokenizer
@timeit
def tokenize(tokenizer, texts, maxlen):
print("Tokenizing")
sequences = tokenizer.texts_to_sequences(texts)
return pad_sequences(sequences, maxlen=maxlen)
def load_data(pullreqs):
diffs = []
titles = []
comments = []
labels = []
successful = failed = 0
for i, row in pullreqs.iterrows():
try:
name = (row['project_name']).replace('/','@')+"@"+str(row['github_id'])+'.patch'
diff_file = os.path.join(DIFFS_DIR, name)
comment_file = os.path.join(TXTS_DIR, name.replace(".patch",".txt"))
diff = open(diff_file).read()
title, comment = read_title_and_comments(comment_file)
diffs.append(diff)
titles.append(title)
comments.append(comment)
labels.append(int(row['merged'] * 1))
successful += 1
except:
failed += 1
pass
print("%s diffs loaded, %s diffs failed" % (successful, failed), end='\r')
print("")
return diffs, comments, titles, labels
@timeit
def create_dataset(prefix="default",
diff_vocabulary_size=20000,
comment_vocabulary_size=20000,
title_vocabulary_size=20000,
max_diff_length=100,
max_comment_length=100,
max_title_length=100):
config = locals()
pullreqs_train = load_pr_csv(train_csv_file % prefix)
pullreqs_test = load_pr_csv(test_csv_file % prefix)
pullreqs_validation = load_pr_csv(validation_csv_file % prefix)
ensure_diffs()
tr_diffs, tr_comments, tr_titles, tr_labels = load_data(pullreqs_train)
val_diffs, val_comments, val_titles, val_labels = load_data(pullreqs_validation)
te_diffs, te_comments, te_titles, te_labels = load_data(pullreqs_test)
code_tokenizer = create_code_tokenizer(tr_diffs+val_diffs, diff_vocabulary_size)
diff_train = tokenize(code_tokenizer, tr_diffs, max_diff_length)
diff_val = tokenize(code_tokenizer, val_diffs, max_diff_length)
diff_test = tokenize(code_tokenizer, te_diffs, max_diff_length)
comment_tokenizer = create_text_tokenizer(tr_comments+val_comments, comment_vocabulary_size)
comment_train = tokenize(comment_tokenizer, tr_comments, max_comment_length)
    comment_val = tokenize(comment_tokenizer, val_comments, max_comment_length)
comment_test = tokenize(comment_tokenizer, te_comments, max_comment_length)
title_tokenizer = create_text_tokenizer(tr_titles+val_titles, title_vocabulary_size)
title_train = tokenize(title_tokenizer, tr_titles, max_title_length)
    title_val = tokenize(title_tokenizer, val_titles, max_title_length)
title_test = tokenize(title_tokenizer, te_titles, max_title_length)
y_train = np.asarray(tr_labels)
y_val = np.asarray(val_labels)
y_test = np.asarray(te_labels)
print('Shape of diff tensor:', diff_train.shape)
print('Shape of comment tensor:', comment_train.shape)
print('Shape of title tensor:', title_train.shape)
print('Shape of label tensor:', y_train.shape)
with open(diff_vocab_file % prefix, 'w') as f:
pickle.dump(code_tokenizer, f)
with open(comment_vocab_file % prefix, 'w') as f:
pickle.dump(comment_tokenizer, f)
with open(title_vocab_file % prefix, 'w') as f:
pickle.dump(title_tokenizer, f)
with open(diff_train_file % prefix, 'w') as f:
pickle.dump(diff_train, f)
with open(comment_train_file % prefix, 'w') as f:
pickle.dump(comment_train, f)
with open(title_train_file % prefix, 'w') as f:
pickle.dump(title_train, f)
with open(y_train_file % prefix, 'w') as f:
pickle.dump(y_train, f)
with open(diff_val_file % prefix, 'w') as f:
pickle.dump(diff_val, f)
with open(comment_val_file % prefix, 'w') as f:
pickle.dump(comment_val, f)
with open(title_val_file % prefix, 'w') as f:
pickle.dump(title_val, f)
with open(y_val_file % prefix, 'w') as f:
pickle.dump(y_val, f)
with open(diff_test_file % prefix, 'w') as f:
pickle.dump(diff_test, f)
with open(comment_test_file % prefix, 'w') as f:
pickle.dump(comment_test, f)
with open(title_test_file % prefix, 'w') as f:
pickle.dump(title_test, f)
with open(y_test_file % prefix, 'w') as f:
pickle.dump(y_test, f)
with open(config_file % prefix, 'w') as f:
pickle.dump(config, f)
return diff_train, comment_train, title_train, y_train, diff_val, comment_val, title_val, y_val, diff_test, comment_test, title_test, y_test, config
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', default='default')
parser.add_argument('--diff_vocabulary_size', type=int, default=50000)
parser.add_argument('--comment_vocabulary_size', type=int, default=50000)
parser.add_argument('--title_vocabulary_size', type=int, default=10000)
parser.add_argument('--max_diff_sequence_length', type=int, default=150)
parser.add_argument('--max_comment_sequence_length', type=int, default=150)
parser.add_argument('--max_title_sequence_length', type=int, default=150)
args = parser.parse_args()
if __name__ == '__main__':
create_dataset(args.prefix, args.diff_vocabulary_size, args.comment_vocabulary_size, args.title_vocabulary_size, args.max_diff_sequence_length, args.max_comment_sequence_length, args.max_title_sequence_length)
| true
| true
|
f71601eb739410c4a90886b6aae0725f85a7eaed
| 5,879
|
py
|
Python
|
test/functional/p2p_fingerprint.py
|
PitTxid/bitgreen
|
5168cb2db2a3f9d4f32b14c4224e1f41f0e69566
|
[
"MIT"
] | 14
|
2019-08-02T21:00:14.000Z
|
2020-06-22T17:23:05.000Z
|
test/functional/p2p_fingerprint.py
|
PitTxid/bitgreen
|
5168cb2db2a3f9d4f32b14c4224e1f41f0e69566
|
[
"MIT"
] | 7
|
2019-08-05T23:43:17.000Z
|
2020-07-17T17:26:54.000Z
|
test/functional/p2p_fingerprint.py
|
PitTxid/bitgreen
|
5168cb2db2a3f9d4f32b14c4224e1f41f0e69566
|
[
"MIT"
] | 25
|
2019-05-21T01:59:54.000Z
|
2020-10-18T14:09:38.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import BitGreenTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class P2PFingerprintTest(BitGreenTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
# Build a chain of blocks on top of given one
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
blocks = []
for _ in range(nblocks):
coinbase = create_coinbase(prev_height + 1)
block_time = prev_median_time + 1
block = create_block(int(prev_hash, 16), coinbase, block_time)
block.solve()
blocks.append(block)
prev_hash = block.hash
prev_height += 1
prev_median_time = block_time
return blocks
# Send a getdata request for a given block hash
def send_block_request(self, block_hash, node):
msg = msg_getdata()
msg.inv.append(CInv(2, block_hash)) # 2 == "Block"
node.send_message(msg)
# Send a getheaders request for a given single block hash
def send_header_request(self, block_hash, node):
msg = msg_getheaders()
msg.hashstop = block_hash
node.send_message(msg)
# Check whether last block received from node has a given hash
def last_block_equals(self, expected_hash, node):
block_msg = node.last_message.get("block")
return block_msg and block_msg.block.rehash() == expected_hash
# Check whether last block header received from node has a given hash
def last_header_equals(self, expected_hash, node):
headers_msg = node.last_message.get("headers")
return (headers_msg and
headers_msg.headers and
headers_msg.headers[0].rehash() == expected_hash)
# Checks that stale blocks timestamped more than a month ago are not served
# by the node while recent stale blocks and old active chain blocks are.
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
# Set node time to 60 days ago
self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
# Generating a chain of 10 blocks
block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# Create longer chain starting 2 blocks before current tip
height = len(block_hashes) - 2
block_hash = block_hashes[height - 1]
block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
new_blocks = self.build_chain(5, block_hash, height, block_time)
# Force reorg to a longer chain
node0.send_message(msg_headers(new_blocks))
node0.wait_for_getdata()
for block in new_blocks:
node0.send_and_ping(msg_block(block))
# Check that reorg succeeded
assert_equal(self.nodes[0].getblockcount(), 13)
stale_hash = int(block_hashes[-1], 16)
# Check that getdata request for stale block succeeds
self.send_block_request(stale_hash, node0)
test_function = lambda: self.last_block_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
test_function = lambda: self.last_header_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
tip = self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0]
assert_equal(self.nodes[0].getblockcount(), 14)
# Send getdata & getheaders to refresh last received getheader message
block_hash = int(tip, 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
# Request for very old stale block should now fail
self.send_block_request(stale_hash, node0)
time.sleep(3)
assert not self.last_block_equals(stale_hash, node0)
# Request for very old stale block header should now fail
self.send_header_request(stale_hash, node0)
time.sleep(3)
assert not self.last_header_equals(stale_hash, node0)
# Verify we can fetch very old blocks and headers on the active chain
block_hash = int(block_hashes[2], 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
self.send_block_request(block_hash, node0)
test_function = lambda: self.last_block_equals(block_hash, node0)
wait_until(test_function, timeout=3)
self.send_header_request(block_hash, node0)
test_function = lambda: self.last_header_equals(block_hash, node0)
wait_until(test_function, timeout=3)
if __name__ == '__main__':
P2PFingerprintTest().main()
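# Hedged note: assuming the standard Bitcoin Core functional-test layout this
# file follows, the test is normally run directly from the source tree, e.g.
#   test/functional/p2p_fingerprint.py
# or through the suite runner, e.g.
#   test/functional/test_runner.py p2p_fingerprint.py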
| 39.456376
| 110
| 0.691274
|
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv
from test_framework.mininode import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import BitGreenTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
class P2PFingerprintTest(BitGreenTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
blocks = []
for _ in range(nblocks):
coinbase = create_coinbase(prev_height + 1)
block_time = prev_median_time + 1
block = create_block(int(prev_hash, 16), coinbase, block_time)
block.solve()
blocks.append(block)
prev_hash = block.hash
prev_height += 1
prev_median_time = block_time
return blocks
def send_block_request(self, block_hash, node):
msg = msg_getdata()
msg.inv.append(CInv(2, block_hash))
node.send_message(msg)
def send_header_request(self, block_hash, node):
msg = msg_getheaders()
msg.hashstop = block_hash
node.send_message(msg)
def last_block_equals(self, expected_hash, node):
block_msg = node.last_message.get("block")
return block_msg and block_msg.block.rehash() == expected_hash
def last_header_equals(self, expected_hash, node):
headers_msg = node.last_message.get("headers")
return (headers_msg and
headers_msg.headers and
headers_msg.headers[0].rehash() == expected_hash)
def run_test(self):
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
# Set node time to 60 days ago
self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
# Generating a chain of 10 blocks
block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# Create longer chain starting 2 blocks before current tip
height = len(block_hashes) - 2
block_hash = block_hashes[height - 1]
block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
new_blocks = self.build_chain(5, block_hash, height, block_time)
# Force reorg to a longer chain
node0.send_message(msg_headers(new_blocks))
node0.wait_for_getdata()
for block in new_blocks:
node0.send_and_ping(msg_block(block))
# Check that reorg succeeded
assert_equal(self.nodes[0].getblockcount(), 13)
stale_hash = int(block_hashes[-1], 16)
# Check that getdata request for stale block succeeds
self.send_block_request(stale_hash, node0)
test_function = lambda: self.last_block_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
test_function = lambda: self.last_header_equals(stale_hash, node0)
wait_until(test_function, timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
tip = self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0]
assert_equal(self.nodes[0].getblockcount(), 14)
# Send getdata & getheaders to refresh last received getheader message
block_hash = int(tip, 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
# Request for very old stale block should now fail
self.send_block_request(stale_hash, node0)
time.sleep(3)
assert not self.last_block_equals(stale_hash, node0)
# Request for very old stale block header should now fail
self.send_header_request(stale_hash, node0)
time.sleep(3)
assert not self.last_header_equals(stale_hash, node0)
# Verify we can fetch very old blocks and headers on the active chain
block_hash = int(block_hashes[2], 16)
self.send_block_request(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
self.send_block_request(block_hash, node0)
test_function = lambda: self.last_block_equals(block_hash, node0)
wait_until(test_function, timeout=3)
self.send_header_request(block_hash, node0)
test_function = lambda: self.last_header_equals(block_hash, node0)
wait_until(test_function, timeout=3)
if __name__ == '__main__':
P2PFingerprintTest().main()
| true
| true
|
f71603f6109caf0554c9841dcf750730f5a4c731
| 760
|
py
|
Python
|
backend/gardenator_backend/urls.py
|
maany/gardenator
|
0dd02a323a71d996aeb970c730a48306c280d29e
|
[
"Apache-2.0"
] | null | null | null |
backend/gardenator_backend/urls.py
|
maany/gardenator
|
0dd02a323a71d996aeb970c730a48306c280d29e
|
[
"Apache-2.0"
] | null | null | null |
backend/gardenator_backend/urls.py
|
maany/gardenator
|
0dd02a323a71d996aeb970c730a48306c280d29e
|
[
"Apache-2.0"
] | null | null | null |
"""gardenator_backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
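# A minimal sketch following the "Including another URLconf" steps from the
# docstring above; the `blog` application and its urls module are hypothetical:
#
#   from django.urls import include, path
#   urlpatterns = [
#       path('admin/', admin.site.urls),
#       path('blog/', include('blog.urls')),
#   ]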
| 34.545455
| 77
| 0.713158
|
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| true
| true
|
f716050ed51f07345c48ef2000b6e1a8b2a7e2de
| 17,065
|
py
|
Python
|
Ui_polkitex.py
|
Trapizomba/Polkit-Explorer
|
59c9662f07a65b0aa7197418d0036501fd533793
|
[
"0BSD"
] | null | null | null |
Ui_polkitex.py
|
Trapizomba/Polkit-Explorer
|
59c9662f07a65b0aa7197418d0036501fd533793
|
[
"0BSD"
] | null | null | null |
Ui_polkitex.py
|
Trapizomba/Polkit-Explorer
|
59c9662f07a65b0aa7197418d0036501fd533793
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'polkitex.ui'
##
## Created by: Qt User Interface Compiler version 6.2.3
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
QMetaObject, QObject, QPoint, QRect,
QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QAction, QBrush, QColor, QConicalGradient,
QCursor, QFont, QFontDatabase, QGradient,
QIcon, QImage, QKeySequence, QLinearGradient,
QPainter, QPalette, QPixmap, QRadialGradient,
QTransform)
from PySide6.QtWidgets import (QApplication, QComboBox, QFrame, QLCDNumber,
QLabel, QMainWindow, QMenu, QMenuBar,
QPlainTextEdit, QSizePolicy, QTabWidget, QToolButton,
QWidget)
class Ui_PolkitExplorer(object):
def setupUi(self, PolkitExplorer):
if not PolkitExplorer.objectName():
PolkitExplorer.setObjectName(u"PolkitExplorer")
PolkitExplorer.resize(910, 530)
PolkitExplorer.setMinimumSize(QSize(910, 530))
PolkitExplorer.setMaximumSize(QSize(910, 530))
PolkitExplorer.setTabShape(QTabWidget.Rounded)
self.actionOpen = QAction(PolkitExplorer)
self.actionOpen.setObjectName(u"actionOpen")
font = QFont()
font.setPointSize(12)
font.setBold(True)
self.actionOpen.setFont(font)
self.actionAbout = QAction(PolkitExplorer)
self.actionAbout.setObjectName(u"actionAbout")
self.actionAbout.setFont(font)
self.actionQuit = QAction(PolkitExplorer)
self.actionQuit.setObjectName(u"actionQuit")
self.actionShow_Glossary = QAction(PolkitExplorer)
self.actionShow_Glossary.setObjectName(u"actionShow_Glossary")
self.centralwidget = QWidget(PolkitExplorer)
self.centralwidget.setObjectName(u"centralwidget")
self.polkitActionDescription = QLabel(self.centralwidget)
self.polkitActionDescription.setObjectName(u"polkitActionDescription")
self.polkitActionDescription.setGeometry(QRect(100, 150, 791, 31))
font1 = QFont()
font1.setPointSize(11)
font1.setBold(True)
self.polkitActionDescription.setFont(font1)
self.polkitActionDescription.setAutoFillBackground(True)
self.polkitActionDescription.setFrameShape(QFrame.Box)
self.polkitActionDescription.setFrameShadow(QFrame.Raised)
self.polkitActionDescription.setTextFormat(Qt.PlainText)
self.polkitActionDescription.setScaledContents(False)
self.polkitActionDescription.setTextInteractionFlags(Qt.TextSelectableByKeyboard|Qt.TextSelectableByMouse)
self.policyFileGrp = QLabel(self.centralwidget)
self.policyFileGrp.setObjectName(u"policyFileGrp")
self.policyFileGrp.setGeometry(QRect(10, 10, 891, 51))
font2 = QFont()
font2.setPointSize(10)
font2.setBold(True)
self.policyFileGrp.setFont(font2)
self.policyFileGrp.setFrameShape(QFrame.StyledPanel)
self.policyFileGrp.setFrameShadow(QFrame.Raised)
self.policyFileGrp.setTextFormat(Qt.PlainText)
self.policyFileGrp.setScaledContents(False)
self.policyFileGrp.setWordWrap(True)
self.policyFileGrp.setMargin(0)
self.actionDescriptionGrp = QLabel(self.centralwidget)
self.actionDescriptionGrp.setObjectName(u"actionDescriptionGrp")
self.actionDescriptionGrp.setEnabled(True)
self.actionDescriptionGrp.setGeometry(QRect(10, 70, 891, 131))
self.actionDescriptionGrp.setFont(font2)
self.actionDescriptionGrp.setFrameShape(QFrame.StyledPanel)
self.actionDescriptionGrp.setFrameShadow(QFrame.Raised)
self.actionDescriptionGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignTop)
self.actionDescriptionGrp.setMargin(0)
self.actionComboBox = QComboBox(self.centralwidget)
self.actionComboBox.setObjectName(u"actionComboBox")
self.actionComboBox.setGeometry(QRect(100, 110, 791, 29))
self.actionComboBox.setFont(font2)
self.policiesPrivsGrp = QLabel(self.centralwidget)
self.policiesPrivsGrp.setObjectName(u"policiesPrivsGrp")
self.policiesPrivsGrp.setEnabled(True)
self.policiesPrivsGrp.setGeometry(QRect(10, 210, 471, 231))
self.policiesPrivsGrp.setFont(font2)
self.policiesPrivsGrp.setFrameShape(QFrame.StyledPanel)
self.policiesPrivsGrp.setFrameShadow(QFrame.Raised)
self.policiesPrivsGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignTop)
self.actionsCounterDisplay = QLCDNumber(self.centralwidget)
self.actionsCounterDisplay.setObjectName(u"actionsCounterDisplay")
self.actionsCounterDisplay.setGeometry(QRect(20, 110, 71, 71))
font3 = QFont()
font3.setPointSize(12)
font3.setBold(True)
font3.setKerning(True)
self.actionsCounterDisplay.setFont(font3)
self.actionsCounterDisplay.setLayoutDirection(Qt.LeftToRight)
self.actionsCounterDisplay.setFrameShape(QFrame.Box)
self.actionsCounterDisplay.setFrameShadow(QFrame.Raised)
self.actionsCounterDisplay.setDigitCount(3)
self.actionsCounterDisplay.setSegmentStyle(QLCDNumber.Flat)
self.loadFileToolBtn = QToolButton(self.centralwidget)
self.loadFileToolBtn.setObjectName(u"loadFileToolBtn")
self.loadFileToolBtn.setGeometry(QRect(860, 20, 31, 31))
self.loadFileToolBtn.setFont(font)
self.loadFileToolBtn.setFocusPolicy(Qt.StrongFocus)
self.policyKitFullPath = QLabel(self.centralwidget)
self.policyKitFullPath.setObjectName(u"policyKitFullPath")
self.policyKitFullPath.setGeometry(QRect(10, 450, 891, 41))
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.policyKitFullPath.sizePolicy().hasHeightForWidth())
self.policyKitFullPath.setSizePolicy(sizePolicy)
font4 = QFont()
font4.setPointSize(10)
font4.setItalic(True)
self.policyKitFullPath.setFont(font4)
self.policyKitFullPath.setFrameShape(QFrame.Box)
self.policyKitFullPath.setFrameShadow(QFrame.Raised)
self.policyKitFullPath.setMidLineWidth(1)
self.policyKitFullPath.setTextFormat(Qt.PlainText)
self.policyKitFullPath.setMargin(1)
self.currentAllowActiveLabel = QLabel(self.centralwidget)
self.currentAllowActiveLabel.setObjectName(u"currentAllowActiveLabel")
self.currentAllowActiveLabel.setGeometry(QRect(210, 390, 250, 31))
self.currentAllowActiveLabel.setFont(font2)
self.currentAllowActiveLabel.setFrameShape(QFrame.Box)
self.currentAllowActiveLabel.setFrameShadow(QFrame.Raised)
self.currentAllowActiveLabel.setAlignment(Qt.AlignCenter)
self.allowInactiveGrp = QLabel(self.centralwidget)
self.allowInactiveGrp.setObjectName(u"allowInactiveGrp")
self.allowInactiveGrp.setGeometry(QRect(20, 310, 451, 51))
self.allowInactiveGrp.setFont(font2)
self.allowInactiveGrp.setFrameShape(QFrame.StyledPanel)
self.allowInactiveGrp.setFrameShadow(QFrame.Raised)
self.allowInactiveGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.allowInactiveGrp.setMargin(10)
self.currentAllowInactiveLabel = QLabel(self.centralwidget)
self.currentAllowInactiveLabel.setObjectName(u"currentAllowInactiveLabel")
self.currentAllowInactiveLabel.setGeometry(QRect(210, 320, 250, 31))
self.currentAllowInactiveLabel.setFont(font2)
self.currentAllowInactiveLabel.setFrameShape(QFrame.Box)
self.currentAllowInactiveLabel.setFrameShadow(QFrame.Raised)
self.currentAllowInactiveLabel.setAlignment(Qt.AlignCenter)
self.currentAllowAnyLabel = QLabel(self.centralwidget)
self.currentAllowAnyLabel.setObjectName(u"currentAllowAnyLabel")
self.currentAllowAnyLabel.setGeometry(QRect(210, 250, 250, 31))
self.currentAllowAnyLabel.setFont(font2)
self.currentAllowAnyLabel.setFrameShape(QFrame.Box)
self.currentAllowAnyLabel.setFrameShadow(QFrame.Raised)
self.currentAllowAnyLabel.setAlignment(Qt.AlignCenter)
self.allowAnyGrp = QLabel(self.centralwidget)
self.allowAnyGrp.setObjectName(u"allowAnyGrp")
self.allowAnyGrp.setGeometry(QRect(20, 240, 451, 51))
self.allowAnyGrp.setFont(font2)
self.allowAnyGrp.setFrameShape(QFrame.StyledPanel)
self.allowAnyGrp.setFrameShadow(QFrame.Raised)
self.allowAnyGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.allowAnyGrp.setMargin(10)
self.allowActiveGrp = QLabel(self.centralwidget)
self.allowActiveGrp.setObjectName(u"allowActiveGrp")
self.allowActiveGrp.setGeometry(QRect(20, 380, 451, 51))
self.allowActiveGrp.setFont(font2)
self.allowActiveGrp.setFrameShape(QFrame.StyledPanel)
self.allowActiveGrp.setFrameShadow(QFrame.Raised)
self.allowActiveGrp.setMargin(10)
self.policyKitFileName = QLabel(self.centralwidget)
self.policyKitFileName.setObjectName(u"policyKitFileName")
self.policyKitFileName.setGeometry(QRect(160, 20, 691, 31))
self.policyKitFileName.setMinimumSize(QSize(100, 20))
self.policyKitFileName.setFont(font2)
self.policyKitFileName.setAcceptDrops(False)
self.policyKitFileName.setAutoFillBackground(False)
self.policyKitFileName.setFrameShape(QFrame.Box)
self.policyKitFileName.setFrameShadow(QFrame.Raised)
self.policyKitFileName.setLineWidth(1)
self.policyKitFileName.setTextFormat(Qt.PlainText)
self.policyKitFileName.setScaledContents(False)
self.policyKitFileName.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.policyKitFileName.setMargin(0)
self.policyKitFileName.setIndent(10)
self.policyKitFileName.setTextInteractionFlags(Qt.LinksAccessibleByMouse|Qt.TextSelectableByMouse)
self.pteOutput = QPlainTextEdit(self.centralwidget)
self.pteOutput.setObjectName(u"pteOutput")
self.pteOutput.setGeometry(QRect(490, 210, 411, 231))
self.pteOutput.setFrameShadow(QFrame.Raised)
self.pteOutput.setUndoRedoEnabled(False)
self.pteOutput.setTextInteractionFlags(Qt.NoTextInteraction)
PolkitExplorer.setCentralWidget(self.centralwidget)
self.policiesPrivsGrp.raise_()
self.actionDescriptionGrp.raise_()
self.policyFileGrp.raise_()
self.actionsCounterDisplay.raise_()
self.actionComboBox.raise_()
self.polkitActionDescription.raise_()
self.loadFileToolBtn.raise_()
self.policyKitFullPath.raise_()
self.allowInactiveGrp.raise_()
self.currentAllowInactiveLabel.raise_()
self.allowAnyGrp.raise_()
self.allowActiveGrp.raise_()
self.currentAllowAnyLabel.raise_()
self.currentAllowActiveLabel.raise_()
self.pteOutput.raise_()
self.policyKitFileName.raise_()
self.menubar = QMenuBar(PolkitExplorer)
self.menubar.setObjectName(u"menubar")
self.menubar.setGeometry(QRect(0, 0, 910, 24))
self.menubar.setFont(font)
self.menuFile = QMenu(self.menubar)
self.menuFile.setObjectName(u"menuFile")
self.menuFile.setFont(font)
self.menuHelp = QMenu(self.menubar)
self.menuHelp.setObjectName(u"menuHelp")
self.menuHelp.setFont(font)
PolkitExplorer.setMenuBar(self.menubar)
QWidget.setTabOrder(self.loadFileToolBtn, self.actionComboBox)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.menuFile.addAction(self.actionOpen)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionAbout)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionShow_Glossary)
self.retranslateUi(PolkitExplorer)
self.actionComboBox.currentIndexChanged.connect(PolkitExplorer.actionComboBoxChanged)
self.actionOpen.triggered.connect(PolkitExplorer.fileOpen)
self.actionQuit.triggered.connect(PolkitExplorer.fileQuit)
self.actionAbout.triggered.connect(PolkitExplorer.fileAbout)
self.actionShow_Glossary.triggered.connect(PolkitExplorer.helpGlossary)
self.loadFileToolBtn.clicked.connect(PolkitExplorer.fileOpen)
QMetaObject.connectSlotsByName(PolkitExplorer)
# setupUi
def retranslateUi(self, PolkitExplorer):
self.actionOpen.setText(QCoreApplication.translate("PolkitExplorer", u"&Open", None))
self.actionAbout.setText(QCoreApplication.translate("PolkitExplorer", u"&About", None))
self.actionQuit.setText(QCoreApplication.translate("PolkitExplorer", u"&Quit", None))
self.actionShow_Glossary.setText(QCoreApplication.translate("PolkitExplorer", u"&Glossary", None))
#if QT_CONFIG(tooltip)
self.polkitActionDescription.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The Description of the Action as entered in the Policy file loaded. If no description is found this will tell you that fact.", None))
#endif // QT_CONFIG(tooltip)
self.polkitActionDescription.setText("")
self.policyFileGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Policy File:", None))
self.actionDescriptionGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Action(s) & Description:", None))
#if QT_CONFIG(tooltip)
self.actionComboBox.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p><span style=\" font-weight:600;\">Drop-down list of all the actions within the policy file. Clicking on this will display the drop-down list, or you can use your scrollwheel to browse through them, too.</span></p></body></html>", None))
#endif // QT_CONFIG(tooltip)
self.policiesPrivsGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Policies:", None))
#if QT_CONFIG(tooltip)
self.actionsCounterDisplay.setToolTip(QCoreApplication.translate("PolkitExplorer", u"Displays the number of Actions within a Polkit policy file.", None))
#endif // QT_CONFIG(tooltip)
self.loadFileToolBtn.setText(QCoreApplication.translate("PolkitExplorer", u"...", None))
#if QT_CONFIG(tooltip)
self.policyKitFullPath.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The full pathname of the currently opened Polkit policy file.", None))
#endif // QT_CONFIG(tooltip)
self.policyKitFullPath.setText("")
self.currentAllowActiveLabel.setText("")
#if QT_CONFIG(tooltip)
        self.allowInactiveGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>\"Inactive\" users are ones who are not directly logged into the system's console. This includes anyone who is logged in remotely, whether it be via ssh, telnet, or even RDP.</p></body></html>", None))
#endif // QT_CONFIG(tooltip)
self.allowInactiveGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Inactive", None))
self.currentAllowInactiveLabel.setText("")
self.currentAllowAnyLabel.setText("")
#if QT_CONFIG(tooltip)
        self.allowAnyGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>If set to \"yes\" will give any user permission to perform the action as described in the Description above. </p></body></html>", None))
#endif // QT_CONFIG(tooltip)
self.allowAnyGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Any", None))
#if QT_CONFIG(tooltip)
        self.allowActiveGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>\"Active\" users are ones who are directly logged into a system's console, via a locally connected terminal. Users directly logged into a GUI at the system console, for example.</p></body></html>", None))
#endif // QT_CONFIG(tooltip)
self.allowActiveGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Active", None))
#if QT_CONFIG(tooltip)
self.policyKitFileName.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The full name of the currently opened Polkit policy file.", None))
#endif // QT_CONFIG(tooltip)
self.policyKitFileName.setText(QCoreApplication.translate("PolkitExplorer", u"Please open a policy file ->", None))
self.menuFile.setTitle(QCoreApplication.translate("PolkitExplorer", u"&File", None))
self.menuHelp.setTitle(QCoreApplication.translate("PolkitExplorer", u"&Help", None))
pass
# retranslateUi
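# Hedged sketch of the window class this generated form expects: setupUi()
# connects its widgets to slots named actionComboBoxChanged, fileOpen,
# fileQuit, fileAbout and helpGlossary, so the application-side class must
# provide them. Everything below is an assumed minimal harness, not part of
# the generated file:
#
#   from PySide6.QtWidgets import QApplication, QMainWindow
#
#   class PolkitExplorer(QMainWindow):
#       def __init__(self):
#           super().__init__()
#           self.ui = Ui_PolkitExplorer()
#           self.ui.setupUi(self)
#
#       def actionComboBoxChanged(self, index): ...
#       def fileOpen(self): ...
#       def fileQuit(self): self.close()
#       def fileAbout(self): ...
#       def helpGlossary(self): ...
#
#   app = QApplication([])
#   window = PolkitExplorer()
#   window.show()
#   app.exec()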
| 58.242321
| 344
| 0.73097
|
iveLabel.setFont(font2)
self.currentAllowActiveLabel.setFrameShape(QFrame.Box)
self.currentAllowActiveLabel.setFrameShadow(QFrame.Raised)
self.currentAllowActiveLabel.setAlignment(Qt.AlignCenter)
self.allowInactiveGrp = QLabel(self.centralwidget)
self.allowInactiveGrp.setObjectName(u"allowInactiveGrp")
self.allowInactiveGrp.setGeometry(QRect(20, 310, 451, 51))
self.allowInactiveGrp.setFont(font2)
self.allowInactiveGrp.setFrameShape(QFrame.StyledPanel)
self.allowInactiveGrp.setFrameShadow(QFrame.Raised)
self.allowInactiveGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.allowInactiveGrp.setMargin(10)
self.currentAllowInactiveLabel = QLabel(self.centralwidget)
self.currentAllowInactiveLabel.setObjectName(u"currentAllowInactiveLabel")
self.currentAllowInactiveLabel.setGeometry(QRect(210, 320, 250, 31))
self.currentAllowInactiveLabel.setFont(font2)
self.currentAllowInactiveLabel.setFrameShape(QFrame.Box)
self.currentAllowInactiveLabel.setFrameShadow(QFrame.Raised)
self.currentAllowInactiveLabel.setAlignment(Qt.AlignCenter)
self.currentAllowAnyLabel = QLabel(self.centralwidget)
self.currentAllowAnyLabel.setObjectName(u"currentAllowAnyLabel")
self.currentAllowAnyLabel.setGeometry(QRect(210, 250, 250, 31))
self.currentAllowAnyLabel.setFont(font2)
self.currentAllowAnyLabel.setFrameShape(QFrame.Box)
self.currentAllowAnyLabel.setFrameShadow(QFrame.Raised)
self.currentAllowAnyLabel.setAlignment(Qt.AlignCenter)
self.allowAnyGrp = QLabel(self.centralwidget)
self.allowAnyGrp.setObjectName(u"allowAnyGrp")
self.allowAnyGrp.setGeometry(QRect(20, 240, 451, 51))
self.allowAnyGrp.setFont(font2)
self.allowAnyGrp.setFrameShape(QFrame.StyledPanel)
self.allowAnyGrp.setFrameShadow(QFrame.Raised)
self.allowAnyGrp.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.allowAnyGrp.setMargin(10)
self.allowActiveGrp = QLabel(self.centralwidget)
self.allowActiveGrp.setObjectName(u"allowActiveGrp")
self.allowActiveGrp.setGeometry(QRect(20, 380, 451, 51))
self.allowActiveGrp.setFont(font2)
self.allowActiveGrp.setFrameShape(QFrame.StyledPanel)
self.allowActiveGrp.setFrameShadow(QFrame.Raised)
self.allowActiveGrp.setMargin(10)
self.policyKitFileName = QLabel(self.centralwidget)
self.policyKitFileName.setObjectName(u"policyKitFileName")
self.policyKitFileName.setGeometry(QRect(160, 20, 691, 31))
self.policyKitFileName.setMinimumSize(QSize(100, 20))
self.policyKitFileName.setFont(font2)
self.policyKitFileName.setAcceptDrops(False)
self.policyKitFileName.setAutoFillBackground(False)
self.policyKitFileName.setFrameShape(QFrame.Box)
self.policyKitFileName.setFrameShadow(QFrame.Raised)
self.policyKitFileName.setLineWidth(1)
self.policyKitFileName.setTextFormat(Qt.PlainText)
self.policyKitFileName.setScaledContents(False)
self.policyKitFileName.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
self.policyKitFileName.setMargin(0)
self.policyKitFileName.setIndent(10)
self.policyKitFileName.setTextInteractionFlags(Qt.LinksAccessibleByMouse|Qt.TextSelectableByMouse)
self.pteOutput = QPlainTextEdit(self.centralwidget)
self.pteOutput.setObjectName(u"pteOutput")
self.pteOutput.setGeometry(QRect(490, 210, 411, 231))
self.pteOutput.setFrameShadow(QFrame.Raised)
self.pteOutput.setUndoRedoEnabled(False)
self.pteOutput.setTextInteractionFlags(Qt.NoTextInteraction)
PolkitExplorer.setCentralWidget(self.centralwidget)
self.policiesPrivsGrp.raise_()
self.actionDescriptionGrp.raise_()
self.policyFileGrp.raise_()
self.actionsCounterDisplay.raise_()
self.actionComboBox.raise_()
self.polkitActionDescription.raise_()
self.loadFileToolBtn.raise_()
self.policyKitFullPath.raise_()
self.allowInactiveGrp.raise_()
self.currentAllowInactiveLabel.raise_()
self.allowAnyGrp.raise_()
self.allowActiveGrp.raise_()
self.currentAllowAnyLabel.raise_()
self.currentAllowActiveLabel.raise_()
self.pteOutput.raise_()
self.policyKitFileName.raise_()
self.menubar = QMenuBar(PolkitExplorer)
self.menubar.setObjectName(u"menubar")
self.menubar.setGeometry(QRect(0, 0, 910, 24))
self.menubar.setFont(font)
self.menuFile = QMenu(self.menubar)
self.menuFile.setObjectName(u"menuFile")
self.menuFile.setFont(font)
self.menuHelp = QMenu(self.menubar)
self.menuHelp.setObjectName(u"menuHelp")
self.menuHelp.setFont(font)
PolkitExplorer.setMenuBar(self.menubar)
QWidget.setTabOrder(self.loadFileToolBtn, self.actionComboBox)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.menuFile.addAction(self.actionOpen)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionAbout)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionShow_Glossary)
self.retranslateUi(PolkitExplorer)
self.actionComboBox.currentIndexChanged.connect(PolkitExplorer.actionComboBoxChanged)
self.actionOpen.triggered.connect(PolkitExplorer.fileOpen)
self.actionQuit.triggered.connect(PolkitExplorer.fileQuit)
self.actionAbout.triggered.connect(PolkitExplorer.fileAbout)
self.actionShow_Glossary.triggered.connect(PolkitExplorer.helpGlossary)
self.loadFileToolBtn.clicked.connect(PolkitExplorer.fileOpen)
QMetaObject.connectSlotsByName(PolkitExplorer)
def retranslateUi(self, PolkitExplorer):
self.actionOpen.setText(QCoreApplication.translate("PolkitExplorer", u"&Open", None))
self.actionAbout.setText(QCoreApplication.translate("PolkitExplorer", u"&About", None))
self.actionQuit.setText(QCoreApplication.translate("PolkitExplorer", u"&Quit", None))
self.actionShow_Glossary.setText(QCoreApplication.translate("PolkitExplorer", u"&Glossary", None))
self.polkitActionDescription.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The Description of the Action as entered in the Policy file loaded. If no description is found this will tell you that fact.", None))
self.polkitActionDescription.setText("")
self.policyFileGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Policy File:", None))
self.actionDescriptionGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Action(s) & Description:", None))
self.actionComboBox.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p><span style=\" font-weight:600;\">Drop-down list of all the actions within the policy file. Clicking on this will display the drop-down list, or you can use your scrollwheel to browse through them, too.</span></p></body></html>", None))
self.policiesPrivsGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Policies:", None))
self.actionsCounterDisplay.setToolTip(QCoreApplication.translate("PolkitExplorer", u"Displays the number of Actions within a Polkit policy file.", None))
self.loadFileToolBtn.setText(QCoreApplication.translate("PolkitExplorer", u"...", None))
self.policyKitFullPath.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The full pathname of the currently opened Polkit policy file.", None))
self.policyKitFullPath.setText("")
self.currentAllowActiveLabel.setText("")
        self.allowInactiveGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>\"Inactive\" users are ones who are not directly logged into the system's console. This includes anyone who is logged in remotely, whether it be via ssh, telnet, or even RDP.</p></body></html>", None))
self.allowInactiveGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Inactive", None))
self.currentAllowInactiveLabel.setText("")
self.currentAllowAnyLabel.setText("")
        self.allowAnyGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>If set to \"yes\", any user is given permission to perform the action as described in the Description above.</p></body></html>", None))
self.allowAnyGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Any", None))
        self.allowActiveGrp.setToolTip(QCoreApplication.translate("PolkitExplorer", u"<html><head/><body><p>\"Active\" users are ones who are directly logged into a system's console via a locally connected terminal. For example, users logged into a GUI at the system console.</p></body></html>", None))
self.allowActiveGrp.setText(QCoreApplication.translate("PolkitExplorer", u"Allow Active", None))
self.policyKitFileName.setToolTip(QCoreApplication.translate("PolkitExplorer", u"The full name of the currently opened Polkit policy file.", None))
self.policyKitFileName.setText(QCoreApplication.translate("PolkitExplorer", u"Please open a policy file ->", None))
self.menuFile.setTitle(QCoreApplication.translate("PolkitExplorer", u"&File", None))
self.menuHelp.setTitle(QCoreApplication.translate("PolkitExplorer", u"&Help", None))
pass
| true
| true
|
f71605a096a836f32d317bfc1b1b9c580b670ceb
| 2,301
|
py
|
Python
|
pymatflow/scripts/nebmake.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6
|
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/scripts/nebmake.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/scripts/nebmake.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
#!/usr/bin/env python
import os
import argparse
from pymatflow.structure.neb import interpolate
from pymatflow.cmd.structflow import read_structure
from pymatflow.cmd.structflow import write_structure
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--images", type=str, nargs=2,
required=True,
help="the initial and final structure file")
parser.add_argument("-n", "--nimage", type=int, default=None,
required=True,
help="number of inter images")
parser.add_argument("-m", "--moving-atom", type=int, nargs="+",
required=True,
help="specifying the moving atoms, index start from 0")
parser.add_argument("-d", "--directory", type=str, default="./",
help="directory to put the generated images")
parser.add_argument("--frac", type=int, default=1,
choices=[0, 1],
help="1(default): use faractional, 0: use cartesian")
# ==============================================================
args = parser.parse_args()
initial = read_structure(args.images[0])
final = read_structure(args.images[1])
inter_images = interpolate(initial=initial, final=final, nimage=args.nimage, moving_atom=args.moving_atom)
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (0)))
write_structure(structure=initial, filepath=os.path.join(args.directory, "%.2d/POSCAR" % (0)), frac=args.frac)
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (args.nimage+1)))
write_structure(structure=final, filepath=os.path.join(args.directory, "%.2d/POSCAR" % (args.nimage+1)), frac=args.frac)
for i in range(len(inter_images)):
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (i+1)))
write_structure(structure=inter_images[i], filepath=os.path.join(args.directory, "%.2d/POSCAR" % (i+1)), frac=args.frac)
print("===========================================\n")
print("generate inter images for neb calculation\n")
print("===========================================\n")
print("-------------------------------------------\n")
if __name__ == "__main__":
main()
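
# Hedged usage sketch (not part of the original script): the file names, image
# count, and atom indices below are illustrative assumptions only.
#   python nebmake.py -i POSCAR-initial POSCAR-final -n 5 -m 0 1 -d ./neb --frac 1
# With nimage=5 this would write ./neb/00/POSCAR (initial), ./neb/01 ... ./neb/05
# (the interpolated images), and ./neb/06/POSCAR (final).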
| 39.672414
| 129
| 0.567579
|
import os
import argparse
from pymatflow.structure.neb import interpolate
from pymatflow.cmd.structflow import read_structure
from pymatflow.cmd.structflow import write_structure
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--images", type=str, nargs=2,
required=True,
help="the initial and final structure file")
parser.add_argument("-n", "--nimage", type=int, default=None,
required=True,
help="number of inter images")
parser.add_argument("-m", "--moving-atom", type=int, nargs="+",
required=True,
help="specifying the moving atoms, index start from 0")
parser.add_argument("-d", "--directory", type=str, default="./",
help="directory to put the generated images")
parser.add_argument("--frac", type=int, default=1,
choices=[0, 1],
help="1(default): use faractional, 0: use cartesian")
args = parser.parse_args()
initial = read_structure(args.images[0])
final = read_structure(args.images[1])
inter_images = interpolate(initial=initial, final=final, nimage=args.nimage, moving_atom=args.moving_atom)
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (0)))
write_structure(structure=initial, filepath=os.path.join(args.directory, "%.2d/POSCAR" % (0)), frac=args.frac)
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (args.nimage+1)))
write_structure(structure=final, filepath=os.path.join(args.directory, "%.2d/POSCAR" % (args.nimage+1)), frac=args.frac)
for i in range(len(inter_images)):
os.system("mkdir -p %s" % os.path.join(args.directory, "%.2d" % (i+1)))
write_structure(structure=inter_images[i], filepath=os.path.join(args.directory, "%.2d/POSCAR" % (i+1)), frac=args.frac)
print("===========================================\n")
print("generate inter images for neb calculation\n")
print("===========================================\n")
print("-------------------------------------------\n")
if __name__ == "__main__":
main()
| true
| true
|
f716060870df940910dece369b5b6bf64ae01993
| 13,519
|
py
|
Python
|
flexget/components/ftp/sftp.py
|
gjhenrique/Flexget
|
2dae4c7e3d002600adcce3b67c399fda115d5ce2
|
[
"MIT"
] | null | null | null |
flexget/components/ftp/sftp.py
|
gjhenrique/Flexget
|
2dae4c7e3d002600adcce3b67c399fda115d5ce2
|
[
"MIT"
] | null | null | null |
flexget/components/ftp/sftp.py
|
gjhenrique/Flexget
|
2dae4c7e3d002600adcce3b67c399fda115d5ce2
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from itertools import groupby
from pathlib import Path
from typing import List, Optional
from urllib.parse import unquote, urlparse
from loguru import logger
from flexget import plugin
from flexget.components.ftp.sftp_client import SftpClient, SftpError
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.task import Task
from flexget.utils.template import RenderError, render_from_entry
logger = logger.bind(name='sftp')
# Constants
DEFAULT_SFTP_PORT: int = 22
DEFAULT_CONNECT_TRIES: int = 3
DEFAULT_SOCKET_TIMEOUT_SEC: int = 15
SftpConfig = namedtuple(
'SftpConfig', ['host', 'port', 'username', 'password', 'private_key', 'private_key_pass']
)
class SftpList:
"""
Generate entries from SFTP. This plugin requires the pysftp Python module and its dependencies.
Configuration:
host: Host to connect to.
port: Port the remote SSH server is listening on (default 22).
username: Username to log in as.
password: The password to use. Optional if a private key is provided.
private_key: Path to the private key (if any) to log into the SSH server.
private_key_pass: Password for the private key (if needed).
recursive: Indicates whether the listing should be recursive.
      get_size: Indicates whether to calculate the size of the remote file/directory.
WARNING: This can be very slow when computing the size of directories!
      files_only: Indicates whether to omit directories from the results.
      dirs: List of remote directories to list.
socket_timeout_sec: Socket timeout in seconds (default 15 seconds).
connection_tries: Number of times to attempt to connect before failing (default 3).
Example:
sftp_list:
host: example.com
username: Username
private_key: /Users/username/.ssh/id_rsa
recursive: False
get_size: True
files_only: False
dirs:
- '/path/to/list/'
- '/another/path/'
"""
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
'files_only': {'type': 'boolean', 'default': True},
'recursive': {'type': 'boolean', 'default': False},
'get_size': {'type': 'boolean', 'default': True},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'dirs': one_or_more({'type': 'string'}),
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
        'additionalProperties': False,
'required': ['host', 'username'],
}
@staticmethod
def prepare_config(config: dict) -> dict:
"""
Sets defaults for the provided configuration
"""
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('dirs', ['.'])
return config
@classmethod
def on_task_input(cls, task: Task, config: dict) -> List[Entry]:
"""
Input task handler
"""
config = cls.prepare_config(config)
files_only: bool = config['files_only']
recursive: bool = config['recursive']
get_size: bool = config['get_size']
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
directories: List[str] = []
if isinstance(config['dirs'], list):
directories.extend(config['dirs'])
else:
directories.append(config['dirs'])
sftp_config: SftpConfig = task_config_to_sftp_config(config)
sftp: SftpClient = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
entries: List[Entry] = sftp.list_directories(directories, recursive, get_size, files_only)
sftp.close()
return entries
class SftpDownload:
"""
    Download files from an SFTP server. This plugin requires the pysftp Python module and its
dependencies.
Configuration:
to: Destination path; supports Jinja2 templating on the input entry. Fields such
as series_name must be populated prior to input into this plugin using
metainfo_series or similar.
recursive: Indicates whether to download directory contents recursively.
delete_origin: Indicates whether to delete the remote files(s) once they've been downloaded.
socket_timeout_sec: Socket timeout in seconds
connection_tries: Number of times to attempt to connect before failing (default 3).
Example:
sftp_download:
to: '/Volumes/External/Drobo/downloads'
delete_origin: False
"""
schema = {
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'recursive': {'type': 'boolean', 'default': True},
'delete_origin': {'type': 'boolean', 'default': False},
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
'required': ['to'],
'additionalProperties': False,
}
@classmethod
def download_entry(cls, entry: Entry, config: dict, sftp: SftpClient) -> None:
"""
Downloads the file(s) described in entry
"""
path: str = unquote(urlparse(entry['url']).path) or '.'
delete_origin: bool = config['delete_origin']
recursive: bool = config['recursive']
to: str = config['to']
try:
sftp.download(path, to, recursive, delete_origin)
except SftpError as e:
entry.fail(e) # type: ignore
@classmethod
def on_task_output(cls, task: Task, config: dict) -> None:
"""Register this as an output plugin"""
@classmethod
def on_task_download(cls, task: Task, config: dict) -> None:
"""
Task handler for sftp_download plugin
"""
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
# Download entries by host so we can reuse the connection
for sftp_config, entries in groupby(task.accepted, cls._get_sftp_config):
if not sftp_config:
continue
error_message: Optional[str] = None
sftp: Optional[SftpClient] = None
try:
sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
except Exception as e:
error_message = f'Failed to connect to {sftp_config.host} ({e})'
for entry in entries:
if sftp:
cls.download_entry(entry, config, sftp)
else:
entry.fail(error_message)
if sftp:
sftp.close()
@classmethod
def _get_sftp_config(cls, entry: Entry):
"""
        Parses an entry URL and returns a hashable SftpConfig, or None if the URL scheme is not sftp.
"""
# parse url
parsed = urlparse(entry['url'])
host: str = parsed.hostname
username: str = parsed.username
password: str = parsed.password
port: int = parsed.port or DEFAULT_SFTP_PORT
# get private key info if it exists
private_key: str = entry.get('private_key')
private_key_pass: str = entry.get('private_key_pass')
config: Optional[SftpConfig] = None
if parsed.scheme == 'sftp':
config = SftpConfig(host, port, username, password, private_key, private_key_pass)
else:
logger.warning('Scheme does not match SFTP: {}', entry['url'])
return config
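
        # Illustrative example (hypothetical values, not taken from the plugin): an entry URL such as
        #   sftp://user:secret@example.com:2222/downloads/file.mkv
        # would yield SftpConfig(host='example.com', port=2222, username='user', password='secret', ...)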
class SftpUpload:
"""
    Upload files to an SFTP server. This plugin requires the pysftp Python module and its
dependencies.
host: Host to connect to
port: Port the remote SSH server is listening on. Defaults to port 22.
username: Username to log in as
password: The password to use. Optional if a private key is provided.
private_key: Path to the private key (if any) to log into the SSH server
private_key_pass: Password for the private key (if needed)
to: Path to upload the file to; supports Jinja2 templating on the input entry. Fields such
as series_name must be populated prior to input into this plugin using
metainfo_series or similar.
delete_origin: Indicates whether to delete the original file after a successful
upload.
socket_timeout_sec: Socket timeout in seconds
connection_tries: Number of times to attempt to connect before failing (default 3).
Example:
sftp_list:
host: example.com
username: Username
private_key: /Users/username/.ssh/id_rsa
to: /TV/{{series_name}}/Series {{series_season}}
delete_origin: False
"""
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'to': {'type': 'string'},
'delete_origin': {'type': 'boolean', 'default': False},
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
        'additionalProperties': False,
'required': ['host', 'username'],
}
@staticmethod
def prepare_config(config: dict) -> dict:
"""
Sets defaults for the provided configuration
"""
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('to', None)
return config
@classmethod
def handle_entry(cls, entry: Entry, sftp: SftpClient, config: dict):
to: str = config['to']
location: str = entry['location']
delete_origin: bool = config['delete_origin']
if to:
try:
to = render_from_entry(to, entry)
except RenderError as e:
logger.error('Could not render path: {}', to)
entry.fail(str(e)) # type: ignore
return
try:
sftp.upload(location, to)
except SftpError as e:
entry.fail(str(e)) # type: ignore
if delete_origin and Path(location).is_file():
try:
Path(location).unlink()
except Exception as e:
logger.warning('Failed to delete file {} ({})', location, e) # type: ignore
@classmethod
def on_task_output(cls, task: Task, config: dict) -> None:
"""Uploads accepted entries to the specified SFTP server."""
config = cls.prepare_config(config)
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
sftp_config: SftpConfig = task_config_to_sftp_config(config)
sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
for entry in task.accepted:
if sftp:
logger.debug('Uploading file: {}', entry['location'])
cls.handle_entry(entry, sftp, config)
else:
entry.fail('SFTP connection failed.')
def task_config_to_sftp_config(config: dict) -> SftpConfig:
"""
Creates an SFTP connection from a Flexget config object
"""
    host: str = config['host']
port: int = config['port']
username: str = config['username']
password: str = config['password']
private_key: str = config['private_key']
private_key_pass: str = config['private_key_pass']
return SftpConfig(host, port, username, password, private_key, private_key_pass)
def sftp_connect(
sftp_config: SftpConfig, socket_timeout_sec: int, connection_tries: int
) -> SftpClient:
sftp_client: SftpClient = SftpClient(
host=sftp_config.host,
username=sftp_config.username,
private_key=sftp_config.private_key,
password=sftp_config.password,
port=sftp_config.port,
private_key_pass=sftp_config.private_key_pass,
connection_tries=connection_tries,
)
sftp_client.set_socket_timeout(socket_timeout_sec)
return sftp_client
@event('plugin.register')
def register_plugin() -> None:
plugin.register(SftpList, 'sftp_list', api_ver=2)
plugin.register(SftpDownload, 'sftp_download', api_ver=2)
plugin.register(SftpUpload, 'sftp_upload', api_ver=2)
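
# Hedged illustration (not taken from the plugin source): a task combining these plugins
# might look like the YAML below; host, paths and credentials are placeholder values.
#   tasks:
#     pull-downloads:
#       sftp_list:
#         host: example.com
#         username: user
#         private_key: /home/user/.ssh/id_rsa
#         dirs: ['/seedbox/complete/']
#       accept_all: yes
#       sftp_download:
#         to: /mnt/media/downloads
#         delete_origin: no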
| 36.050667
| 112
| 0.609734
|
from collections import namedtuple
from itertools import groupby
from pathlib import Path
from typing import List, Optional
from urllib.parse import unquote, urlparse
from loguru import logger
from flexget import plugin
from flexget.components.ftp.sftp_client import SftpClient, SftpError
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.task import Task
from flexget.utils.template import RenderError, render_from_entry
logger = logger.bind(name='sftp')
DEFAULT_SFTP_PORT: int = 22
DEFAULT_CONNECT_TRIES: int = 3
DEFAULT_SOCKET_TIMEOUT_SEC: int = 15
SftpConfig = namedtuple(
'SftpConfig', ['host', 'port', 'username', 'password', 'private_key', 'private_key_pass']
)
class SftpList:
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
'files_only': {'type': 'boolean', 'default': True},
'recursive': {'type': 'boolean', 'default': False},
'get_size': {'type': 'boolean', 'default': True},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'dirs': one_or_more({'type': 'string'}),
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
        'additionalProperties': False,
'required': ['host', 'username'],
}
@staticmethod
def prepare_config(config: dict) -> dict:
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('dirs', ['.'])
return config
@classmethod
def on_task_input(cls, task: Task, config: dict) -> List[Entry]:
config = cls.prepare_config(config)
files_only: bool = config['files_only']
recursive: bool = config['recursive']
get_size: bool = config['get_size']
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
directories: List[str] = []
if isinstance(config['dirs'], list):
directories.extend(config['dirs'])
else:
directories.append(config['dirs'])
sftp_config: SftpConfig = task_config_to_sftp_config(config)
sftp: SftpClient = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
entries: List[Entry] = sftp.list_directories(directories, recursive, get_size, files_only)
sftp.close()
return entries
class SftpDownload:
schema = {
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'recursive': {'type': 'boolean', 'default': True},
'delete_origin': {'type': 'boolean', 'default': False},
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
'required': ['to'],
'additionalProperties': False,
}
@classmethod
def download_entry(cls, entry: Entry, config: dict, sftp: SftpClient) -> None:
path: str = unquote(urlparse(entry['url']).path) or '.'
delete_origin: bool = config['delete_origin']
recursive: bool = config['recursive']
to: str = config['to']
try:
sftp.download(path, to, recursive, delete_origin)
except SftpError as e:
entry.fail(e)
@classmethod
    def on_task_output(cls, task: Task, config: dict) -> None:
        pass
@classmethod
def on_task_download(cls, task: Task, config: dict) -> None:
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
for sftp_config, entries in groupby(task.accepted, cls._get_sftp_config):
if not sftp_config:
continue
error_message: Optional[str] = None
sftp: Optional[SftpClient] = None
try:
sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
except Exception as e:
error_message = f'Failed to connect to {sftp_config.host} ({e})'
for entry in entries:
if sftp:
cls.download_entry(entry, config, sftp)
else:
entry.fail(error_message)
if sftp:
sftp.close()
@classmethod
def _get_sftp_config(cls, entry: Entry):
parsed = urlparse(entry['url'])
host: str = parsed.hostname
username: str = parsed.username
password: str = parsed.password
port: int = parsed.port or DEFAULT_SFTP_PORT
private_key: str = entry.get('private_key')
private_key_pass: str = entry.get('private_key_pass')
config: Optional[SftpConfig] = None
if parsed.scheme == 'sftp':
config = SftpConfig(host, port, username, password, private_key, private_key_pass)
else:
logger.warning('Scheme does not match SFTP: {}', entry['url'])
return config
class SftpUpload:
schema = {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'port': {'type': 'integer', 'default': DEFAULT_SFTP_PORT},
'private_key': {'type': 'string'},
'private_key_pass': {'type': 'string'},
'to': {'type': 'string'},
'delete_origin': {'type': 'boolean', 'default': False},
'socket_timeout_sec': {'type': 'integer', 'default': DEFAULT_SOCKET_TIMEOUT_SEC},
'connection_tries': {'type': 'integer', 'default': DEFAULT_CONNECT_TRIES},
},
        'additionalProperties': False,
'required': ['host', 'username'],
}
@staticmethod
def prepare_config(config: dict) -> dict:
config.setdefault('password', None)
config.setdefault('private_key', None)
config.setdefault('private_key_pass', None)
config.setdefault('to', None)
return config
@classmethod
def handle_entry(cls, entry: Entry, sftp: SftpClient, config: dict):
to: str = config['to']
location: str = entry['location']
delete_origin: bool = config['delete_origin']
if to:
try:
to = render_from_entry(to, entry)
except RenderError as e:
logger.error('Could not render path: {}', to)
entry.fail(str(e))
return
try:
sftp.upload(location, to)
except SftpError as e:
entry.fail(str(e))
if delete_origin and Path(location).is_file():
try:
Path(location).unlink()
except Exception as e:
logger.warning('Failed to delete file {} ({})', location, e)
@classmethod
def on_task_output(cls, task: Task, config: dict) -> None:
config = cls.prepare_config(config)
socket_timeout_sec: int = config['socket_timeout_sec']
connection_tries: int = config['connection_tries']
sftp_config: SftpConfig = task_config_to_sftp_config(config)
sftp = sftp_connect(sftp_config, socket_timeout_sec, connection_tries)
for entry in task.accepted:
if sftp:
logger.debug('Uploading file: {}', entry['location'])
cls.handle_entry(entry, sftp, config)
else:
entry.fail('SFTP connection failed.')
def task_config_to_sftp_config(config: dict) -> SftpConfig:
    host: str = config['host']
port: int = config['port']
username: str = config['username']
password: str = config['password']
private_key: str = config['private_key']
private_key_pass: str = config['private_key_pass']
return SftpConfig(host, port, username, password, private_key, private_key_pass)
def sftp_connect(
sftp_config: SftpConfig, socket_timeout_sec: int, connection_tries: int
) -> SftpClient:
sftp_client: SftpClient = SftpClient(
host=sftp_config.host,
username=sftp_config.username,
private_key=sftp_config.private_key,
password=sftp_config.password,
port=sftp_config.port,
private_key_pass=sftp_config.private_key_pass,
connection_tries=connection_tries,
)
sftp_client.set_socket_timeout(socket_timeout_sec)
return sftp_client
@event('plugin.register')
def register_plugin() -> None:
plugin.register(SftpList, 'sftp_list', api_ver=2)
plugin.register(SftpDownload, 'sftp_download', api_ver=2)
plugin.register(SftpUpload, 'sftp_upload', api_ver=2)
| true
| true
|
f71609666f0531ac8c06b96d69a4042a2ac3a5bc
| 853
|
py
|
Python
|
paperboy/resources/config.py
|
datalayer-externals/papermill-paperboy
|
b27bfdbb4ed27dea597ff1d6346eb831542ae81f
|
[
"Apache-2.0"
] | 233
|
2018-11-01T09:17:08.000Z
|
2022-03-22T08:27:24.000Z
|
paperboy/resources/config.py
|
datalayer-externals/papermill-paperboy
|
b27bfdbb4ed27dea597ff1d6346eb831542ae81f
|
[
"Apache-2.0"
] | 99
|
2018-10-17T21:48:42.000Z
|
2021-05-07T08:33:36.000Z
|
paperboy/resources/config.py
|
datalayer-externals/papermill-paperboy
|
b27bfdbb4ed27dea597ff1d6346eb831542ae81f
|
[
"Apache-2.0"
] | 29
|
2018-11-01T11:33:08.000Z
|
2022-01-12T22:12:19.000Z
|
import falcon
import json
from .base import BaseResource
class ConfigResource(BaseResource):
'''Falcon resource to get form entries'''
def __init__(self, *args, **kwargs):
super(ConfigResource, self).__init__(*args, **kwargs)
def on_get(self, req, resp):
'''Get configuration page to create a new notebook/job/report'''
resp.content_type = 'application/json'
type = req.params.get('type', None)
if type is None:
resp.body = json.dumps(self.config.to_dict())
elif type == 'notebooks':
resp.body = json.dumps(self.db.notebooks.form())
elif type == 'jobs':
resp.body = json.dumps(self.db.jobs.form())
elif type == 'reports':
resp.body = json.dumps(self.db.reports.form())
else:
resp.status = falcon.HTTP_404
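
# Minimal wiring sketch (assumptions: the route path and app setup below are illustrative,
# and the ConfigResource constructor arguments are elided because they come from BaseResource).
#   api = falcon.API()
#   api.add_route('/api/v1/config', ConfigResource(...))
# A GET to /api/v1/config?type=notebooks would then return the notebook form JSON.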
| 32.807692
| 72
| 0.607268
|
import falcon
import json
from .base import BaseResource
class ConfigResource(BaseResource):
def __init__(self, *args, **kwargs):
super(ConfigResource, self).__init__(*args, **kwargs)
def on_get(self, req, resp):
resp.content_type = 'application/json'
type = req.params.get('type', None)
if type is None:
resp.body = json.dumps(self.config.to_dict())
elif type == 'notebooks':
resp.body = json.dumps(self.db.notebooks.form())
elif type == 'jobs':
resp.body = json.dumps(self.db.jobs.form())
elif type == 'reports':
resp.body = json.dumps(self.db.reports.form())
else:
resp.status = falcon.HTTP_404
| true
| true
|
f71609b6af41be400030f87cd3c4bfcdfc294a4a
| 10,236
|
py
|
Python
|
READMIT/alpha/fp_VBHC_READMIT_BEA_FIPS_alpha.py
|
andrewcistola/value-based-healthcare
|
12583c33bff8dee83a7daf5aaaf1e7c39883a279
|
[
"MIT"
] | 1
|
2021-03-12T07:11:14.000Z
|
2021-03-12T07:11:14.000Z
|
READMIT/alpha/fp_VBHC_READMIT_BEA_FIPS_alpha.py
|
andrewcistola/value-based-healthcare
|
12583c33bff8dee83a7daf5aaaf1e7c39883a279
|
[
"MIT"
] | null | null | null |
READMIT/alpha/fp_VBHC_READMIT_BEA_FIPS_alpha.py
|
andrewcistola/value-based-healthcare
|
12583c33bff8dee83a7daf5aaaf1e7c39883a279
|
[
"MIT"
] | null | null | null |
# FractureProof
## Value Based Healthcare Project
### Outcome
#### CMS Hospital-Wide Readmission Rate 2018
### Predictors
#### BEA 2018 County-wide Economic Measures
### Table Key
#### State County FIPS
### Set working directory to project folder
import os # Operating system navigation (imported here so the chdir call below works)
os.chdir("C:/Users/drewc/GitHub/allocativ") # Set wd to project repository
### Set file title and path
title = "fp_VBHC_READMIT_BEA_FIPS_alpha"
path = "fp/VBHC/READMIT/"
## Section A: Collect Possible Predictors from Public Access Data
### Import Python Libraries
import os # Operating system navigation
import sqlite3 # SQLite database manager
### Import data science libraries
import pandas as pd # Widely used data manipulation library with R/Excel like tables named 'data frames'
import numpy as np # Widely used matrix library for numerical processes
### Import scikit-learn libraries: data preparation
from sklearn.preprocessing import StandardScaler # Standard scaling for easier use of machine learning algorithms
from sklearn.impute import SimpleImputer # Univariate imputation for missing data
### Step 1: Import and Join Data
### Import BEA
df_bea = pd.read_csv("hnb/BEA/2018/BEA_2018_FIPS_full.csv", low_memory = 'false') # Import dataset saved as csv in _data folder
### Import CMS Data and Join
df_cms = pd.read_csv("hnb/CMS/CMS_2018_FIPS_full.csv", low_memory = 'false') # Import dataset saved as csv in _data folder
df_cms = df_cms.filter(["Rate of readmission after discharge from hospital (hospital-wide)", "FIPS"]) # Keep only selected columns
df_join = pd.merge(df_cms, df_bea, on = "FIPS", how = "inner") # Join by column while keeping only items that exist in both, select outer or left for other options
df_cms = 0 # Clear variable
df_bea = 0 # Clear variable
### Rename and Verify
df_step1 = df_join
df_join = 0
df_step1.info() # Get class, memory, and column info: names, data types, obs.
df_step1.head() # Print first 5 observations
### Step 2: Data Manipulation
### Import Datasets
### Drop ID variables
df_man = df_step1.drop(columns = ["FIPS"]) # Drop Unwanted Columns
### Rename outcome and test
df_man = df_man.rename(columns = {"Rate of readmission after discharge from hospital (hospital-wide)": "outcome"}) # Rename multiple columns in place
### Rename and Verify
df_step2 = df_man
df_man = 0
df_step2.info() # Get class, memory, and column info: names, data types, obs.
df_step2.head() # Print first 5 observations
## Step 3: Data Standardization
### Remove outcome and test
df_NA = df_step2
outcome = df_NA.pop("outcome") # 'pop' column from df
### Drop features with less than 75% data
df_NA = df_NA.dropna(axis = 1, thresh = 0.75*len(df_NA)) # Drop features less than 75% non-NA count for all columns
### Impute missing values
df_NA = pd.DataFrame(SimpleImputer(strategy = "median").fit_transform(df_NA), columns = df_NA.columns) # Impute missing data
### Standard Scale Values
df_NA = pd.DataFrame(StandardScaler().fit_transform(df_NA.values), columns = df_NA.columns) # convert the normalized features into a tabular format with the help of DataFrame.
### Reattach outcome
df_NA.insert(0, "outcome", outcome) # reinsert in index
### Drop all remaining rows (should be none)
df_NA = df_NA.dropna() # Drop all rows with NA values
### Rename and Verify
df_step3 = df_NA
df_NA = 0
df_step3.info() # Get class, memory, and column info: names, data types, obs.
df_step3.head() # Print first 5 observations
## Section B: Identify Significant Predictors with Reduction Algorithms
### Import scikit-learn: machine learning
from sklearn.decomposition import PCA # Principal components analysis from sklearn
from sklearn.ensemble import RandomForestClassifier # Random Forest classification component
from sklearn.ensemble import RandomForestRegressor # Random Forest classification component
from sklearn.feature_selection import RFECV # Recursive Feature elimination with cross validation
from sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome
### Step 4: Principal Component Analysis
### Setup initial PCA model
df_pca = df_step3.drop(columns = ["outcome"]) # Drop outcome variable
degree = len(df_step3.columns) - 2 # Save number of features -1 to get degrees of freedom
pca = PCA(n_components = degree) # you will pass the number of components to make PCA model based on degrees of freedom
### Fit initial PCA model
pca.fit(df_pca) # fit to data
### Setup final PCA model
df_ev = pd.DataFrame(pca.explained_variance_) # Print explained variance of components
df_ev = df_ev[(df_ev[0] > 1)] # Save eigenvalues above 1
components = len(df_ev.index) # Save count of values for Variable reduction
pca = PCA(n_components = components) # you will pass the number of components to make PCA model
### Fit final PCA model
pca.fit_transform(df_pca) # finally call fit_transform on the aggregate data to create PCA results object
### Collect feature list from PCA
df_pca2 = pd.DataFrame(pca.components_, columns = df_pca.columns) # Export eigenvectors to data frame
df_pca2["Variance"] = pca.explained_variance_ratio_ # Save eigenvalues as their own column
df_pca2 = df_pca2[df_pca2.Variance > df_pca2.Variance.mean()] # Subset by eigenvalues with above average explained variance ratio
df_pca2 = df_pca2.abs() # get absolute value for column or data frame
df_pca3 = pd.DataFrame(df_pca2.max(), columns = ["MaxEV"]) # select maximum eigenvector for each feature
df_pc = df_pca3[df_pca3.MaxEV > df_pca3.MaxEV.mean()] # Subset by above average max eigenvalues
df_pc = df_pc.reset_index() # Add a new index of ascending values, existing index becomes column named "index"
df_pc = df_pc.rename(columns = {"index": "Features"}) # Rename multiple columns in place
### Rename and Verify
df_step4 = df_pc
df_step4.info() # Get class, memory, and column info: names, data types, obs.
df_step4.head() # Print first 5 observations
### Step 5: Random Forest Regressor
### Setup RF model
Y = df_step3["outcome"] # Isolate Outcome variable
X = df_step3.drop(columns = ["outcome"]) # Drop Unwanted Columns # Save features columns as predictor data frame
forest = RandomForestRegressor(n_estimators = 1000, max_depth = 10) #Use default values except for number of trees. For a further explanation see readme included in repository.
### Fit Forest model
forest.fit(X, Y) # This will take time
### Collect features from RF
gini = forest.feature_importances_ # Output importances of features
l_gini = list(zip(X, gini)) # Create list of variables alongside importance scores
df_gini = pd.DataFrame(l_gini, columns = ["Features", "Gini"]) # Create data frame of importances with variables and gini column names
df_gini = df_gini.sort_values(by = ["Gini"], ascending = False) # Sort data frame by gini value in desceding order
df_gini = df_gini[(df_gini["Gini"] > df_gini["Gini"].mean())] # Subset by Gini values higher than mean
### Rename and Verify
df_step5 = df_gini
df_step5.info() # Get class, memory, and column info: names, data types, obs.
df_step5.head() # Print first 5 observations
### Step 6: Recursive Feature Elimination
### Collect features from RF and PC
df_pc_gini = pd.merge(df_pc, df_gini, on = "Features", how = "inner") # Join by column while keeping only items that exist in both, select outer or left for other options
pc_gini_features = df_pc_gini["Features"].tolist() # Save features from data frame
df_rfecv = df_step3[pc_gini_features] # Add selected features to df
### Setup RFE model
X = df_rfecv # Save features columns as predictor data frame
Y = df_step3["outcome"] # Use outcome data frame
RFE = LinearRegression() # Use regression coefficient as estimator
selector = RFECV(estimator = RFE, min_features_to_select = 10) # define selection parameters; keep at least 10 features. See Readme for more info
### Fit RFE model
selected = selector.fit(X, Y) # This will take time
### Collect features from RFE model
ar_rfe = selected.support_ # Save Boolean values as numpy array
l_rfe = list(zip(X, ar_rfe)) # Create list of variables alongside RFE value
df_rfe = pd.DataFrame(l_rfe, columns = ["Features", "RFE"]) # Create data frame of importances with variables and gini column names
df_rfe = df_rfe[df_rfe.RFE == True] # Select Variables that were True
df_rfe = df_rfe.reset_index() # Reset Index
df_rfe = df_rfe.filter(["Features"]) # Keep only selected columns
### Rename and Verify
df_step6 = df_rfe
df_step6.info() # Get class, memory, and column info: names, data types, obs.
df_step6.head() # Print first 5 observations
## Section C: Evaluate Significant Features with Modeling and Prediction
### Import scikit-learn libraries: regression
from sklearn.linear_model import LogisticRegression # Used for machine learning with categorical outcome
from sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome
### Import scikit-learn: neural network
from sklearn.neural_network import MLPRegressor
### Step 7: Multiple Regression
### Setup MR Model
features = list(df_step6["Features"]) # Save chosen featres as list
x = df_step3.filter(features) # Keep only selected columns from rfe
y = df_step3["outcome"] # Add outcome variable
LR = LinearRegression() # Linear Regression in scikit learn
### Fit MR model
regression = LR.fit(x, y) # Fit model
### Collect features from MR model
coef = regression.coef_ # Coefficient models as scipy array
l_reg = list(zip(x, coef)) # Create list of variables alongside coefficient
df_reg = pd.DataFrame(l_reg, columns = ["Features", "Coefficients"]) # Create data frame of importances with variables and gini column names
### Export feature attributes
df_pc_gini_reg = pd.merge(df_pc_gini, df_reg, on = "Features", how = "inner") # Join by column while keeping only items that exist in both, select outer or left for other options
df_pc_gini_reg.to_csv(r"fp/VBHC/READMIT/fp_VBHC_READMIT_BEA_FIPS_alpha.csv") # Export df as csv
print(df_pc_gini_reg)
### Collect prediction results
determination = regression.score(x, y) # R-squared value, coefficient of determination
print(determination)
### Rename and Verify
df_step7 = df_pc_gini_reg
df_step7.info() # Get class, memory, and column info: names, data types, obs.
df_step7.head() # Print first 5 observations
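### Hedged follow-up sketch (not part of the original pipeline; the path matches the export above)
# df_results = pd.read_csv("fp/VBHC/READMIT/fp_VBHC_READMIT_BEA_FIPS_alpha.csv", index_col = 0) # Reload exported feature attributes
# df_results.sort_values(by = ["Coefficients"], ascending = False).head(10) # Inspect the strongest positive predictors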
| 45.901345
| 178
| 0.762016
|
df_pca2 = df_pca2[df_pca2.Variance > df_pca2.Variance.mean()]
df_pca2 = df_pca2.abs()
df_pca3 = pd.DataFrame(df_pca2.max(), columns = ["MaxEV"])
df_pc = df_pca3[df_pca3.MaxEV > df_pca3.MaxEV.mean()]
df_pc = df_pc.reset_index()
df_pc = df_pc.rename(columns = {"index": "Features"})
df_step4.head()
df_gini = pd.DataFrame(l_gini, columns = ["Features", "Gini"])
df_gini = df_gini.sort_values(by = ["Gini"], ascending = False)
df_gini = df_gini[(df_gini["Gini"] > df_gini["Gini"].mean())]
df_step5.head()
df_rfecv = df_step3[pc_gini_features]
Y = df_step3["outcome"]
RFE = LinearRegression()
selector = RFECV(estimator = RFE, min_features_to_select = 10)
df_rfe = pd.DataFrame(l_rfe, columns = ["Features", "RFE"])
df_rfe = df_rfe[df_rfe.RFE == True]
df_rfe = df_rfe.reset_index()
df_rfe = df_rfe.filter(["Features"])
df_step6.head()
df_reg = pd.DataFrame(l_reg, columns = ["Features", "Coefficients"])
df_pc_gini_reg = pd.merge(df_pc_gini, df_reg, on = "Features", how = "inner")
df_pc_gini_reg.to_csv(r"fp/VBHC/READMIT/fp_VBHC_READMIT_BEA_FIPS_alpha.csv")
print(df_pc_gini_reg)
print(determination)
df_step7.info()
df_step7.head()
| true
| true
|
f71609b7fa048b07e8e6b96a59e540a8a2785e0a
| 2,731
|
py
|
Python
|
06_2-paino_buzzer/paino_buzzer.py
|
sujunmin/gpio-game-console
|
23cdde4ae16527993adb89f29f21616b3e12e837
|
[
"BSD-3-Clause"
] | null | null | null |
06_2-paino_buzzer/paino_buzzer.py
|
sujunmin/gpio-game-console
|
23cdde4ae16527993adb89f29f21616b3e12e837
|
[
"BSD-3-Clause"
] | null | null | null |
06_2-paino_buzzer/paino_buzzer.py
|
sujunmin/gpio-game-console
|
23cdde4ae16527993adb89f29f21616b3e12e837
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|R|a|s|p|b|e|r|r|y|P|i|.|c|o|m|.|t|w|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# Copyright (c) 2016, raspberrypi.com.tw
# All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# paino_buzzer.py
# Make the buzzer sound like a piano
#
# Author : sosorry
# Date : 06/22/2014
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
BUZZER_PIN = 7
BTN_PIN_0 = 11
BTN_PIN_1 = 12
BTN_PIN_2 = 13
BTN_PIN_3 = 15
BTN_PIN_4 = 16
BTN_PIN_5 = 18
BTN_PIN_6 = 22
MELODY_DO = 523
MELODY_RE = 587
MELODY_ME = 659
MELODY_FA = 698
MELODY_SO = 784
MELODY_LA = 880
MELODY_SI = 988
WAIT_TIME = 200
DURATION = 0.2
GPIO.setup(BUZZER_PIN, GPIO.OUT)
GPIO.setup(BTN_PIN_0, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_3, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_5, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_6, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def buzz(pitch) :
period = 1.0 / pitch
half_period = period / 2
cycles = int(DURATION * pitch)
for i in xrange(cycles) :
GPIO.output(BUZZER_PIN, GPIO.HIGH)
time.sleep(half_period)
GPIO.output(BUZZER_PIN, GPIO.LOW)
time.sleep(half_period)
def mycallback(channel):
print "Button pressed @:", channel, time.ctime()
if channel == BTN_PIN_0:
buzz(MELODY_DO)
elif channel == BTN_PIN_1:
buzz(MELODY_RE)
elif channel == BTN_PIN_2:
buzz(MELODY_ME)
elif channel == BTN_PIN_3:
buzz(MELODY_FA)
elif channel == BTN_PIN_4:
buzz(MELODY_SO)
elif channel == BTN_PIN_5:
buzz(MELODY_LA)
elif channel == BTN_PIN_6:
buzz(MELODY_SI)
try:
GPIO.add_event_detect(BTN_PIN_0, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_1, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_2, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_3, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_4, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_5, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_6, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
while True:
time.sleep(1)
except KeyboardInterrupt:
print "Exception: KeyboardInterrupt"
finally:
GPIO.cleanup()
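
# Worked example (illustrative): for MELODY_DO = 523 Hz the buzz() routine uses
# period = 1/523 s ~= 1.91 ms, half_period ~= 0.96 ms, and cycles = int(0.2 * 523) = 104,
# so the buzzer pin is toggled high/low 104 times to sound the note for roughly DURATION seconds.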
| 28.447917
| 93
| 0.697181
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
BUZZER_PIN = 7
BTN_PIN_0 = 11
BTN_PIN_1 = 12
BTN_PIN_2 = 13
BTN_PIN_3 = 15
BTN_PIN_4 = 16
BTN_PIN_5 = 18
BTN_PIN_6 = 22
MELODY_DO = 523
MELODY_RE = 587
MELODY_ME = 659
MELODY_FA = 698
MELODY_SO = 784
MELODY_LA = 880
MELODY_SI = 988
WAIT_TIME = 200
DURATION = 0.2
GPIO.setup(BUZZER_PIN, GPIO.OUT)
GPIO.setup(BTN_PIN_0, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_3, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_5, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_PIN_6, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def buzz(pitch) :
period = 1.0 / pitch
half_period = period / 2
cycles = int(DURATION * pitch)
for i in xrange(cycles) :
GPIO.output(BUZZER_PIN, GPIO.HIGH)
time.sleep(half_period)
GPIO.output(BUZZER_PIN, GPIO.LOW)
time.sleep(half_period)
def mycallback(channel):
print "Button pressed @:", channel, time.ctime()
if channel == BTN_PIN_0:
buzz(MELODY_DO)
elif channel == BTN_PIN_1:
buzz(MELODY_RE)
elif channel == BTN_PIN_2:
buzz(MELODY_ME)
elif channel == BTN_PIN_3:
buzz(MELODY_FA)
elif channel == BTN_PIN_4:
buzz(MELODY_SO)
elif channel == BTN_PIN_5:
buzz(MELODY_LA)
elif channel == BTN_PIN_6:
buzz(MELODY_SI)
try:
GPIO.add_event_detect(BTN_PIN_0, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_1, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_2, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_3, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_4, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_5, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
GPIO.add_event_detect(BTN_PIN_6, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
while True:
time.sleep(1)
except KeyboardInterrupt:
print "Exception: KeyboardInterrupt"
finally:
GPIO.cleanup()
| false
| true
|
f7160b02cd2fe254d7a127f34fecad15c020c378
| 4,926
|
py
|
Python
|
optimus/engines/base/dataframe/columns.py
|
niallscc/Optimus
|
35218401556e5acc4beb2859084128ebcd1ab4e5
|
[
"Apache-2.0"
] | null | null | null |
optimus/engines/base/dataframe/columns.py
|
niallscc/Optimus
|
35218401556e5acc4beb2859084128ebcd1ab4e5
|
[
"Apache-2.0"
] | null | null | null |
optimus/engines/base/dataframe/columns.py
|
niallscc/Optimus
|
35218401556e5acc4beb2859084128ebcd1ab4e5
|
[
"Apache-2.0"
] | null | null | null |
from functools import reduce
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler
from optimus.engines.base.columns import BaseColumns
from optimus.helpers.columns import parse_columns, name_col
from optimus.helpers.constants import Actions
from optimus.helpers.raiseit import RaiseIt
class DataFrameBaseColumns(BaseColumns):
def __init__(self, df):
super(DataFrameBaseColumns, self).__init__(df)
@staticmethod
def exec_agg(exprs, compute=None):
"""
        Execute an aggregation.
        Expressions in non-Dask dataframes cannot handle compute. See the Dask implementation of exec_agg.
:param exprs:
:param compute:
:return:
"""
return exprs
def qcut(self, columns, num_buckets, handle_invalid="skip"):
pass
@staticmethod
def correlation(input_cols, method="pearson", output="json"):
pass
@staticmethod
def scatter(columns, buckets=10):
pass
def standard_scaler(self, input_cols="*", output_cols=None):
df = self.root
def _standard_scaler(_value):
return StandardScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_standard_scaler, output_cols=output_cols, meta_action=Actions.STANDARD_SCALER.value)
def max_abs_scaler(self, input_cols="*", output_cols=None):
df = self.root
def _max_abs_scaler(_value):
return MaxAbsScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_max_abs_scaler, output_cols=output_cols,meta_action=Actions.MAX_ABS_SCALER.value )
def min_max_scaler(self, input_cols, output_cols=None):
# https://github.com/dask/dask/issues/2690
df = self.root
def _min_max_scaler(_value):
return MinMaxScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_min_max_scaler, output_cols=output_cols, meta_action=Actions.MIN_MAX_SCALER.value )
def replace_regex(self, input_cols, regex=None, value="", output_cols=None):
"""
Use a Regex to replace values
        :param input_cols: '*', list of column names or a single column name.
:param output_cols:
:param regex: values to look at to be replaced
:param value: new value to replace the old one
:return:
"""
df = self.root
def _replace_regex(_value, _regex, _replace):
return _value.replace(_regex, _replace, regex=True)
return df.cols.apply(input_cols, func=_replace_regex, args=(regex, value,), output_cols=output_cols,
filter_col_by_dtypes=df.constants.STRING_TYPES + df.constants.NUMERIC_TYPES)
def reverse(self, input_cols, output_cols=None):
def _reverse(value):
return str(value)[::-1]
df = self.root
return df.cols.apply(input_cols, _reverse, func_return_type=str,
filter_col_by_dtypes=df.constants.STRING_TYPES,
output_cols=output_cols, set_index=True)
@staticmethod
def astype(*args, **kwargs):
pass
@staticmethod
def apply_by_dtypes(columns, func, func_return_type, args=None, func_type=None, data_type=None):
pass
@staticmethod
def to_timestamp(input_cols, date_format=None, output_cols=None):
pass
def nest(self, input_cols, separator="", output_col=None, shape="string", drop=False):
df = self.root
dfd = df.data
if output_col is None:
output_col = name_col(input_cols)
input_cols = parse_columns(df, input_cols)
output_ordered_columns = df.cols.names()
        # cuDF does not support apply or agg join for this operation
if shape == "vector" or shape == "array":
raise NotImplementedError("Not implemented yet")
# https://stackoverflow.com/questions/43898035/pandas-combine-column-values-into-a-list-in-a-new-column/43898233
# t['combined'] = t.values.tolist()
# dfds = [dfd[input_col] for input_col in input_cols]
# dfd[output_col] = dfd[input_cols].values.tolist()
elif shape == "string":
dfds = [dfd[input_col].astype(str) for input_col in input_cols]
dfd = dfd.assign(**{output_col:reduce((lambda x, y: x + separator + y), dfds)})
if output_col not in output_ordered_columns:
col_index = output_ordered_columns.index(input_cols[-1]) + 1
output_ordered_columns[col_index:col_index] = [output_col]
if drop is True:
for input_col in input_cols:
if input_col in output_ordered_columns and input_col != output_col:
output_ordered_columns.remove(input_col)
return self.root.new(dfd).cols.select(output_ordered_columns)
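
# Hedged usage sketch (assumes an Optimus dataframe `df`; the column names below are
# placeholders, not taken from this file):
#   df = df.cols.min_max_scaler("price", output_cols="price_scaled")
#   df = df.cols.nest(["first_name", "last_name"], separator=" ", output_col="full_name")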
| 35.695652
| 131
| 0.661186
|
from functools import reduce
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler
from optimus.engines.base.columns import BaseColumns
from optimus.helpers.columns import parse_columns, name_col
from optimus.helpers.constants import Actions
from optimus.helpers.raiseit import RaiseIt
class DataFrameBaseColumns(BaseColumns):
def __init__(self, df):
super(DataFrameBaseColumns, self).__init__(df)
@staticmethod
def exec_agg(exprs, compute=None):
return exprs
def qcut(self, columns, num_buckets, handle_invalid="skip"):
pass
@staticmethod
def correlation(input_cols, method="pearson", output="json"):
pass
@staticmethod
def scatter(columns, buckets=10):
pass
def standard_scaler(self, input_cols="*", output_cols=None):
df = self.root
def _standard_scaler(_value):
return StandardScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_standard_scaler, output_cols=output_cols, meta_action=Actions.STANDARD_SCALER.value)
def max_abs_scaler(self, input_cols="*", output_cols=None):
df = self.root
def _max_abs_scaler(_value):
return MaxAbsScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_max_abs_scaler, output_cols=output_cols,meta_action=Actions.MAX_ABS_SCALER.value )
def min_max_scaler(self, input_cols, output_cols=None):
df = self.root
def _min_max_scaler(_value):
return MinMaxScaler().fit_transform(_value.values.reshape(-1, 1))
return df.cols.apply(input_cols, func=_min_max_scaler, output_cols=output_cols, meta_action=Actions.MIN_MAX_SCALER.value )
def replace_regex(self, input_cols, regex=None, value="", output_cols=None):
df = self.root
def _replace_regex(_value, _regex, _replace):
return _value.replace(_regex, _replace, regex=True)
return df.cols.apply(input_cols, func=_replace_regex, args=(regex, value,), output_cols=output_cols,
filter_col_by_dtypes=df.constants.STRING_TYPES + df.constants.NUMERIC_TYPES)
def reverse(self, input_cols, output_cols=None):
def _reverse(value):
return str(value)[::-1]
df = self.root
return df.cols.apply(input_cols, _reverse, func_return_type=str,
filter_col_by_dtypes=df.constants.STRING_TYPES,
output_cols=output_cols, set_index=True)
@staticmethod
def astype(*args, **kwargs):
pass
@staticmethod
def apply_by_dtypes(columns, func, func_return_type, args=None, func_type=None, data_type=None):
pass
@staticmethod
def to_timestamp(input_cols, date_format=None, output_cols=None):
pass
def nest(self, input_cols, separator="", output_col=None, shape="string", drop=False):
df = self.root
dfd = df.data
if output_col is None:
output_col = name_col(input_cols)
input_cols = parse_columns(df, input_cols)
output_ordered_columns = df.cols.names()
if shape == "vector" or shape == "array":
raise NotImplementedError("Not implemented yet")
elif shape == "string":
dfds = [dfd[input_col].astype(str) for input_col in input_cols]
dfd = dfd.assign(**{output_col:reduce((lambda x, y: x + separator + y), dfds)})
if output_col not in output_ordered_columns:
col_index = output_ordered_columns.index(input_cols[-1]) + 1
output_ordered_columns[col_index:col_index] = [output_col]
if drop is True:
for input_col in input_cols:
if input_col in output_ordered_columns and input_col != output_col:
output_ordered_columns.remove(input_col)
return self.root.new(dfd).cols.select(output_ordered_columns)
| true
| true
|
f7160b89bbc0f0135dfed20cc0e5c8f6d06c5128
| 2,407
|
py
|
Python
|
arviz/wrappers/wrap_pystan.py
|
brandonwillard/arviz
|
1358a04cbb7759a6a15459a3d4e4f7259626484c
|
[
"Apache-2.0"
] | null | null | null |
arviz/wrappers/wrap_pystan.py
|
brandonwillard/arviz
|
1358a04cbb7759a6a15459a3d4e4f7259626484c
|
[
"Apache-2.0"
] | null | null | null |
arviz/wrappers/wrap_pystan.py
|
brandonwillard/arviz
|
1358a04cbb7759a6a15459a3d4e4f7259626484c
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=arguments-differ
"""Base class for PyStan wrappers."""
from ..data import from_pystan
from .base import SamplingWrapper
class PyStanSamplingWrapper(SamplingWrapper):
"""PyStan sampling wrapper base class.
See the documentation on :class:`~arviz.SamplingWrapper` for a more detailed
description. An example of ``PyStanSamplingWrapper`` usage can be found
in the :ref:`pystan_refitting` notebook.
Warnings
--------
Sampling wrappers are an experimental feature in a very early stage. Please use them
with caution.
"""
def sel_observations(self, idx):
"""Select a subset of the observations in idata_orig.
**Not implemented**: This method must be implemented on a model basis.
It is documented here to show its format and call signature.
Parameters
----------
idx
Indexes to separate from the rest of the observed data.
Returns
-------
modified_observed_data : dict
Dictionary containing both excluded and included data but properly divided
in the different keys. Passed to ``data`` argument of ``model.sampling``.
excluded_observed_data : str
Variable name containing the pointwise log likelihood data of the excluded
data. As PyStan cannot call C++ functions and log_likelihood__i is already
            calculated *during* the simulation, instead of the value on which to evaluate
the likelihood, ``log_likelihood__i`` expects a string so it can extract the
corresponding data from the InferenceData object.
"""
raise NotImplementedError("sel_observations must be implemented on a model basis")
def sample(self, modified_observed_data):
"""Resample the PyStan model stored in self.model on modified_observed_data."""
fit = self.model.sampling(data=modified_observed_data, **self.sample_kwargs)
return fit
def get_inference_data(self, fit):
"""Convert the fit object returned by ``self.sample`` to InferenceData."""
idata = from_pystan(posterior=fit, **self.idata_kwargs)
return idata
def log_likelihood__i(self, excluded_obs_log_like, idata__i):
"""Retrieve the log likelihood of the excluded observations from ``idata__i``."""
return idata__i.log_likelihood[excluded_obs_log_like]
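The docstring above leaves sel_observations to be implemented per model. Below is a hedged sketch of one possible subclass for a model whose observations are a single vector "y" of length "N"; all variable and data names, and the assumption that the original InferenceData is available as self.idata_orig, are illustrative and not part of arviz.
import numpy as np

class LinRegWrapper(PyStanSamplingWrapper):
    def sel_observations(self, idx):
        # Illustrative only: split the observed vector "y" into kept and excluded parts.
        y = np.asarray(self.idata_orig.observed_data["y"])
        mask = np.ones(len(y), dtype=bool)
        mask[idx] = False
        modified_observed_data = {
            "N": int(mask.sum()), "y": y[mask],
            "N_ex": int((~mask).sum()), "y_ex": y[~mask],
        }
        # The PyStan wrapper expects the *name* of the excluded log-likelihood variable.
        excluded_observed_data = "log_lik_ex"
        return modified_observed_data, excluded_observed_data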
| 41.5
| 90
| 0.687993
|
from ..data import from_pystan
from .base import SamplingWrapper
class PyStanSamplingWrapper(SamplingWrapper):
def sel_observations(self, idx):
raise NotImplementedError("sel_observations must be implemented on a model basis")
def sample(self, modified_observed_data):
fit = self.model.sampling(data=modified_observed_data, **self.sample_kwargs)
return fit
def get_inference_data(self, fit):
idata = from_pystan(posterior=fit, **self.idata_kwargs)
return idata
def log_likelihood__i(self, excluded_obs_log_like, idata__i):
return idata__i.log_likelihood[excluded_obs_log_like]
| true
| true
|
f7160d0ab638df715f56f4ffaaf4cc3e1943ef2c
| 1,835
|
py
|
Python
|
project/server/auth/wrapper.py
|
RaihanSabique/Flask-Restful-JWT-Auth
|
a6be0cc72d4f697ac3cdfa41551de9633f6feb35
|
[
"MIT"
] | null | null | null |
project/server/auth/wrapper.py
|
RaihanSabique/Flask-Restful-JWT-Auth
|
a6be0cc72d4f697ac3cdfa41551de9633f6feb35
|
[
"MIT"
] | null | null | null |
project/server/auth/wrapper.py
|
RaihanSabique/Flask-Restful-JWT-Auth
|
a6be0cc72d4f697ac3cdfa41551de9633f6feb35
|
[
"MIT"
] | null | null | null |
import functools
from flask import Flask, request, make_response, jsonify
from flask_restful import Resource, Api, abort
from project.server.models import User
def login_required(method):
@functools.wraps(method)
def wrapper(self):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, message='Bearer token malformed.')
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
print(resp)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
if(user.is_active):
return method(self, user)
abort(400, message='Provide a valid auth token.')
else:
abort(400, message='No auth token')
return wrapper
def admin_required(method):
@functools.wraps(method)
def wrapper(self):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, message='Bearer token malformed.')
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
print(resp)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
if(user.admin):
return method(self, user)
else:
abort(400, message='Admin required.')
abort(400, message='Provide a valid auth token.')
else:
abort(400, message='No auth token')
return wrapper
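A hedged usage sketch of the decorators above on a flask_restful resource; the Secret resource and its route are illustrative and not part of this project.
from flask import Flask, jsonify
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class Secret(Resource):
    # login_required (defined above) resolves the bearer token and passes the User in as `user`.
    @login_required
    def get(self, user):
        return jsonify({"message": "hello", "user_id": user.id})

api.add_resource(Secret, "/secret")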
| 35.288462
| 61
| 0.559128
|
import functools
from flask import Flask, request, make_response, jsonify
from flask_restful import Resource, Api, abort
from project.server.models import User
def login_required(method):
@functools.wraps(method)
def wrapper(self):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, message='Bearer token malformed.')
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
print(resp)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
if(user.is_active):
return method(self, user)
abort(400, message='Provide a valid auth token.')
else:
abort(400, message='No auth token')
return wrapper
def admin_required(method):
@functools.wraps(method)
def wrapper(self):
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, message='Bearer token malformed.')
else:
auth_token = ''
if auth_token:
resp = User.decode_auth_token(auth_token)
print(resp)
if not isinstance(resp, str):
user = User.query.filter_by(id=resp).first()
if(user.admin):
return method(self, user)
else:
abort(400, message='Admin required.')
abort(400, message='Provide a valid auth token.')
else:
abort(400, message='No auth token')
return wrapper
| true
| true
|
f7160d32694f94438915434613085cbed64d24f9
| 4,686
|
py
|
Python
|
setup.py
|
itsalexis962/pycroscopy
|
8a6557408ffdc332cef102616be16e26a396532f
|
[
"MIT"
] | 191
|
2016-06-19T18:34:40.000Z
|
2022-03-28T08:30:30.000Z
|
setup.py
|
itsalexis962/pycroscopy
|
8a6557408ffdc332cef102616be16e26a396532f
|
[
"MIT"
] | 115
|
2016-09-20T22:07:52.000Z
|
2022-03-04T20:41:57.000Z
|
setup.py
|
itsalexis962/pycroscopy
|
8a6557408ffdc332cef102616be16e26a396532f
|
[
"MIT"
] | 72
|
2016-09-20T10:19:22.000Z
|
2022-03-05T12:18:48.000Z
|
from codecs import open
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
long_description = f.read()
with open(os.path.join(here, 'pycroscopy/__version__.py')) as f:
__version__ = f.read().split("'")[1]
# TODO: Move requirements to requirements.txt
requirements = ['numpy>=1.13.0',
'scipy>=0.17.1',
'scikit-image>=0.12.3',
'scikit-learn>=0.17.1',
'matplotlib>=2.0.0',
'torch>=1.0.0',
'tensorly>=0.6.0',
'psutil',
'six',
'pillow',
'joblib>=0.11.0',
'ipywidgets>=5.2.2',
'ipython>=5.1.0,<6;python_version<"3.3"', # IPython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2
'ipython>=6.0;python_version>="3.3"', # Beginning with IPython 6.0, Python 3.3 and above is required.
'unittest2;python_version<"3.0"',
'sidpy>=0.0.1',
'pyUSID>=0.0.8',
]
setup(
name='pycroscopy',
version=__version__,
description='Python library for scientific analysis of microscopy data',
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Information Analysis'],
keywords=['EELS', 'STEM', 'TEM', 'XRD', 'AFM', 'SPM', 'STS', 'band excitation', 'BE', 'BEPS', 'Raman', 'NanoIR',
'ptychography', 'g-mode', 'general mode', 'electron microscopy', ' scanning probe', ' x-rays', 'probe',
              'atomic force microscopy', 'SIMS', 'energy', 'spectroscopy', 'imaging', 'microscopy', 'spectra',
'characterization', 'spectrogram', 'hyperspectral', 'multidimensional', 'data format', 'universal',
'clustering', 'decomposition', 'curve fitting', 'data analysis PCA', ' SVD', ' NMF', ' DBSCAN', ' kMeans',
'machine learning', 'bayesian inference', 'fft filtering', 'signal processing', 'image cleaning',
'denoising', 'model', 'msa', 'quantification',
'png', 'tiff', 'hdf5', 'igor', 'ibw', 'dm3', 'oneview', 'KPFM', 'FORC', 'ndata',
'Asylum', 'MFP3D', 'Cypher', 'Omicron', 'Nion', 'Nanonis', 'FEI'],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
url='https://pycroscopy.github.io/pycroscopy/about.html',
license='MIT',
author='S. Somnath, C. R. Smith, N. Laanait',
author_email='pycroscopy@gmail.com',
install_requires=requirements,
setup_requires=['pytest-runner'],
tests_require=['pytest'],
platforms=['Linux', 'Mac OSX', 'Windows 10/8.1/8/7'],
# package_data={'sample':['dataset_1.dat']}
test_suite='pytest',
extras_require={
'legacy_guis': ['pyqt5;python_version>="3.5"',
'pyqtgraph>=0.10']},
# dependency='',
# dependency_links=[''],
include_package_data=True,
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
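The requirement strings above use PEP 508 environment markers (for example the two mutually exclusive ipython pins). A small sketch of how such a marker can be inspected with the packaging library; packaging itself is an assumption here and is not a dependency declared in this setup.py.
from packaging.requirements import Requirement

req = Requirement('ipython>=6.0;python_version>="3.3"')
print(req.name, str(req.specifier))  # ipython >=6.0
print(req.marker.evaluate())         # True on any interpreter with python_version >= "3.3"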
| 43.388889
| 124
| 0.588988
|
from codecs import open
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
long_description = f.read()
with open(os.path.join(here, 'pycroscopy/__version__.py')) as f:
__version__ = f.read().split("'")[1]
# TODO: Move requirements to requirements.txt
requirements = ['numpy>=1.13.0',
'scipy>=0.17.1',
'scikit-image>=0.12.3',
'scikit-learn>=0.17.1',
'matplotlib>=2.0.0',
'torch>=1.0.0',
'tensorly>=0.6.0',
'psutil',
'six',
'pillow',
'joblib>=0.11.0',
'ipywidgets>=5.2.2',
'ipython>=5.1.0,<6;python_version<"3.3"', # IPython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2
'ipython>=6.0;python_version>="3.3"', # Beginning with IPython 6.0, Python 3.3 and above is required.
'unittest2;python_version<"3.0"',
'sidpy>=0.0.1',
'pyUSID>=0.0.8',
]
setup(
name='pycroscopy',
version=__version__,
description='Python library for scientific analysis of microscopy data',
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Information Analysis'],
keywords=['EELS', 'STEM', 'TEM', 'XRD', 'AFM', 'SPM', 'STS', 'band excitation', 'BE', 'BEPS', 'Raman', 'NanoIR',
'ptychography', 'g-mode', 'general mode', 'electron microscopy', ' scanning probe', ' x-rays', 'probe',
              'atomic force microscopy', 'SIMS', 'energy', 'spectroscopy', 'imaging', 'microscopy', 'spectra',
'characterization', 'spectrogram', 'hyperspectral', 'multidimensional', 'data format', 'universal',
'clustering', 'decomposition', 'curve fitting', 'data analysis PCA', ' SVD', ' NMF', ' DBSCAN', ' kMeans',
'machine learning', 'bayesian inference', 'fft filtering', 'signal processing', 'image cleaning',
'denoising', 'model', 'msa', 'quantification',
'png', 'tiff', 'hdf5', 'igor', 'ibw', 'dm3', 'oneview', 'KPFM', 'FORC', 'ndata',
'Asylum', 'MFP3D', 'Cypher', 'Omicron', 'Nion', 'Nanonis', 'FEI'],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
url='https://pycroscopy.github.io/pycroscopy/about.html',
license='MIT',
author='S. Somnath, C. R. Smith, N. Laanait',
author_email='pycroscopy@gmail.com',
install_requires=requirements,
setup_requires=['pytest-runner'],
tests_require=['pytest'],
platforms=['Linux', 'Mac OSX', 'Windows 10/8.1/8/7'],
# package_data={'sample':['dataset_1.dat']}
test_suite='pytest',
extras_require={
'legacy_guis': ['pyqt5;python_version>="3.5"',
'pyqtgraph>=0.10']},
# dependency='',
# dependency_links=[''],
include_package_data=True,
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| true
| true
|
f7160db0d5fb20f368cc9ea3007c25dccdf69f7c
| 4,197
|
py
|
Python
|
tests/clvm/coin_store.py
|
Plotter-Network/plotter-blockchain
|
13d10557496f37b9a001786ff837bdf34d8f1bcb
|
[
"Apache-2.0"
] | 1
|
2021-07-10T12:50:30.000Z
|
2021-07-10T12:50:30.000Z
|
tests/clvm/coin_store.py
|
Plotter-Network/plotter-blockchain
|
13d10557496f37b9a001786ff837bdf34d8f1bcb
|
[
"Apache-2.0"
] | null | null | null |
tests/clvm/coin_store.py
|
Plotter-Network/plotter-blockchain
|
13d10557496f37b9a001786ff837bdf34d8f1bcb
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from dataclasses import dataclass, replace
from typing import Dict, Iterator, Set
from plotter.full_node.mempool_check_conditions import mempool_check_conditions_dict
from plotter.types.blockchain_format.coin import Coin
from plotter.types.blockchain_format.sized_bytes import bytes32
from plotter.types.coin_record import CoinRecord
from plotter.types.spend_bundle import SpendBundle
from plotter.util.condition_tools import (
conditions_dict_for_solution,
coin_announcement_names_for_conditions_dict,
puzzle_announcement_names_for_conditions_dict,
)
from plotter.util.ints import uint32, uint64
class BadSpendBundleError(Exception):
pass
@dataclass
class CoinTimestamp:
seconds: int
height: int
class CoinStore:
def __init__(self):
self._db: Dict[bytes32, CoinRecord] = dict()
self._ph_index = defaultdict(list)
def farm_coin(self, puzzle_hash: bytes32, birthday: CoinTimestamp, amount: int = 1024) -> Coin:
parent = birthday.height.to_bytes(32, "big")
coin = Coin(parent, puzzle_hash, uint64(amount))
self._add_coin_entry(coin, birthday)
return coin
def validate_spend_bundle(
self,
spend_bundle: SpendBundle,
now: CoinTimestamp,
max_cost: int,
) -> int:
# this should use blockchain consensus code
coin_announcements: Set[bytes32] = set()
puzzle_announcements: Set[bytes32] = set()
conditions_dicts = []
for coin_solution in spend_bundle.coin_solutions:
err, conditions_dict, cost = conditions_dict_for_solution(
coin_solution.puzzle_reveal, coin_solution.solution, max_cost
)
if conditions_dict is None:
raise BadSpendBundleError(f"clvm validation failure {err}")
conditions_dicts.append(conditions_dict)
coin_announcements.update(
coin_announcement_names_for_conditions_dict(conditions_dict, coin_solution.coin.name())
)
puzzle_announcements.update(
puzzle_announcement_names_for_conditions_dict(conditions_dict, coin_solution.coin.puzzle_hash)
)
for coin_solution, conditions_dict in zip(spend_bundle.coin_solutions, conditions_dicts):
prev_transaction_block_height = now.height
timestamp = now.seconds
coin_record = self._db[coin_solution.coin.name()]
err = mempool_check_conditions_dict(
coin_record,
coin_announcements,
puzzle_announcements,
conditions_dict,
uint32(prev_transaction_block_height),
uint64(timestamp),
)
if err is not None:
raise BadSpendBundleError(f"condition validation failure {err}")
return 0
def update_coin_store_for_spend_bundle(self, spend_bundle: SpendBundle, now: CoinTimestamp, max_cost: int):
err = self.validate_spend_bundle(spend_bundle, now, max_cost)
if err != 0:
raise BadSpendBundleError(f"validation failure {err}")
for spent_coin in spend_bundle.removals():
coin_name = spent_coin.name()
coin_record = self._db[coin_name]
self._db[coin_name] = replace(coin_record, spent_block_index=now.height, spent=True)
for new_coin in spend_bundle.additions():
self._add_coin_entry(new_coin, now)
def coins_for_puzzle_hash(self, puzzle_hash: bytes32) -> Iterator[Coin]:
for coin_name in self._ph_index[puzzle_hash]:
coin_entry = self._db[coin_name]
assert coin_entry.coin.puzzle_hash == puzzle_hash
yield coin_entry.coin
def all_coins(self) -> Iterator[Coin]:
for coin_entry in self._db.values():
yield coin_entry.coin
def _add_coin_entry(self, coin: Coin, birthday: CoinTimestamp) -> None:
name = coin.name()
assert name not in self._db
self._db[name] = CoinRecord(coin, uint32(birthday.height), uint32(0), False, False, uint64(birthday.seconds))
self._ph_index[coin.puzzle_hash].append(name)
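A hedged usage sketch of the CoinStore above: farm a coin and read it back. The 32-byte puzzle hash is a placeholder, and running this requires the plotter package imported at the top of the file.
store = CoinStore()
birthday = CoinTimestamp(seconds=1600000000, height=1)
puzzle_hash = bytes32(b"\x01" * 32)  # placeholder puzzle hash

coin = store.farm_coin(puzzle_hash, birthday, amount=1024)
print(coin in list(store.coins_for_puzzle_hash(puzzle_hash)))  # True
print(len(list(store.all_coins())))                            # 1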
| 38.861111
| 117
| 0.681677
|
from collections import defaultdict
from dataclasses import dataclass, replace
from typing import Dict, Iterator, Set
from plotter.full_node.mempool_check_conditions import mempool_check_conditions_dict
from plotter.types.blockchain_format.coin import Coin
from plotter.types.blockchain_format.sized_bytes import bytes32
from plotter.types.coin_record import CoinRecord
from plotter.types.spend_bundle import SpendBundle
from plotter.util.condition_tools import (
conditions_dict_for_solution,
coin_announcement_names_for_conditions_dict,
puzzle_announcement_names_for_conditions_dict,
)
from plotter.util.ints import uint32, uint64
class BadSpendBundleError(Exception):
pass
@dataclass
class CoinTimestamp:
seconds: int
height: int
class CoinStore:
def __init__(self):
self._db: Dict[bytes32, CoinRecord] = dict()
self._ph_index = defaultdict(list)
def farm_coin(self, puzzle_hash: bytes32, birthday: CoinTimestamp, amount: int = 1024) -> Coin:
parent = birthday.height.to_bytes(32, "big")
coin = Coin(parent, puzzle_hash, uint64(amount))
self._add_coin_entry(coin, birthday)
return coin
def validate_spend_bundle(
self,
spend_bundle: SpendBundle,
now: CoinTimestamp,
max_cost: int,
) -> int:
coin_announcements: Set[bytes32] = set()
puzzle_announcements: Set[bytes32] = set()
conditions_dicts = []
for coin_solution in spend_bundle.coin_solutions:
err, conditions_dict, cost = conditions_dict_for_solution(
coin_solution.puzzle_reveal, coin_solution.solution, max_cost
)
if conditions_dict is None:
raise BadSpendBundleError(f"clvm validation failure {err}")
conditions_dicts.append(conditions_dict)
coin_announcements.update(
coin_announcement_names_for_conditions_dict(conditions_dict, coin_solution.coin.name())
)
puzzle_announcements.update(
puzzle_announcement_names_for_conditions_dict(conditions_dict, coin_solution.coin.puzzle_hash)
)
for coin_solution, conditions_dict in zip(spend_bundle.coin_solutions, conditions_dicts):
prev_transaction_block_height = now.height
timestamp = now.seconds
coin_record = self._db[coin_solution.coin.name()]
err = mempool_check_conditions_dict(
coin_record,
coin_announcements,
puzzle_announcements,
conditions_dict,
uint32(prev_transaction_block_height),
uint64(timestamp),
)
if err is not None:
raise BadSpendBundleError(f"condition validation failure {err}")
return 0
def update_coin_store_for_spend_bundle(self, spend_bundle: SpendBundle, now: CoinTimestamp, max_cost: int):
err = self.validate_spend_bundle(spend_bundle, now, max_cost)
if err != 0:
raise BadSpendBundleError(f"validation failure {err}")
for spent_coin in spend_bundle.removals():
coin_name = spent_coin.name()
coin_record = self._db[coin_name]
self._db[coin_name] = replace(coin_record, spent_block_index=now.height, spent=True)
for new_coin in spend_bundle.additions():
self._add_coin_entry(new_coin, now)
def coins_for_puzzle_hash(self, puzzle_hash: bytes32) -> Iterator[Coin]:
for coin_name in self._ph_index[puzzle_hash]:
coin_entry = self._db[coin_name]
assert coin_entry.coin.puzzle_hash == puzzle_hash
yield coin_entry.coin
def all_coins(self) -> Iterator[Coin]:
for coin_entry in self._db.values():
yield coin_entry.coin
def _add_coin_entry(self, coin: Coin, birthday: CoinTimestamp) -> None:
name = coin.name()
assert name not in self._db
self._db[name] = CoinRecord(coin, uint32(birthday.height), uint32(0), False, False, uint64(birthday.seconds))
self._ph_index[coin.puzzle_hash].append(name)
| true
| true
|
f7160e0c8e9137a12f16d8b789254f485f26bc0b
| 598
|
py
|
Python
|
src/lib/datasets/dataset_factory.py
|
nerminsamet/HPRNet
|
a23e691102ed50bd24391e6295c74f452592cdae
|
[
"MIT"
] | 34
|
2021-06-09T16:47:59.000Z
|
2022-03-29T08:03:46.000Z
|
src/lib/datasets/dataset_factory.py
|
nerminsamet/HPRNet
|
a23e691102ed50bd24391e6295c74f452592cdae
|
[
"MIT"
] | 3
|
2021-12-14T11:47:06.000Z
|
2022-03-17T04:08:39.000Z
|
src/lib/datasets/dataset_factory.py
|
nerminsamet/HPRNet
|
a23e691102ed50bd24391e6295c74f452592cdae
|
[
"MIT"
] | 4
|
2021-06-10T07:44:15.000Z
|
2021-08-30T07:12:40.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .sample.multi_pose import MultiPoseDataset
from .sample.landmark import LandmarkDataset
from src.lib.datasets.dataset.coco_hp import COCOHP
from src.lib.datasets.dataset.coco_body import COCOBODY
dataset_factory = {
'coco_hp': COCOHP,
'coco_body': COCOBODY
}
_sample_factory = {
'multi_pose': MultiPoseDataset,
'landmark': LandmarkDataset,
}
def get_dataset(dataset, task):
class Dataset(dataset_factory[dataset], _sample_factory[task]):
pass
return Dataset
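A hedged sketch of how the factory above is typically consumed; the constructor arguments of the composed class depend on COCOHP/MultiPoseDataset and are only hinted at here.
# Compose a concrete dataset class from the two registries defined above.
Dataset = get_dataset('coco_hp', 'multi_pose')
print(Dataset.__mro__[:3])  # (Dataset, COCOHP, MultiPoseDataset, ...)

# Instantiation is repo-specific (illustrative only):
# dataset = Dataset(opt, split='train')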
| 21.357143
| 65
| 0.792642
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .sample.multi_pose import MultiPoseDataset
from .sample.landmark import LandmarkDataset
from src.lib.datasets.dataset.coco_hp import COCOHP
from src.lib.datasets.dataset.coco_body import COCOBODY
dataset_factory = {
'coco_hp': COCOHP,
'coco_body': COCOBODY
}
_sample_factory = {
'multi_pose': MultiPoseDataset,
'landmark': LandmarkDataset,
}
def get_dataset(dataset, task):
class Dataset(dataset_factory[dataset], _sample_factory[task]):
pass
return Dataset
| true
| true
|
f7160e1009ab83e8020f0a7d0f081242b48b6c74
| 1,089
|
py
|
Python
|
users/migrations/0001_initial.py
|
pollitosabroson/retoglobal
|
456af32516935fb834c9f78359754614635e9910
|
[
"Apache-2.0"
] | null | null | null |
users/migrations/0001_initial.py
|
pollitosabroson/retoglobal
|
456af32516935fb834c9f78359754614635e9910
|
[
"Apache-2.0"
] | null | null | null |
users/migrations/0001_initial.py
|
pollitosabroson/retoglobal
|
456af32516935fb834c9f78359754614635e9910
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-10 13:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('genres', '0001_initial'),
('hobbies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genres.Genre')),
('hobbies', models.ManyToManyField(to='hobbies.Hobbie')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Userss',
},
),
]
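For reference, the migration above corresponds roughly to the following model sketch; the field definitions are recovered from the CreateModel call, while the surrounding app structure is an assumption.
from django.db import models

class User(models.Model):
    # Mirrors the fields declared in the 0001_initial migration above.
    age = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    genre = models.ForeignKey('genres.Genre', on_delete=models.CASCADE)
    hobbies = models.ManyToManyField('hobbies.Hobbie')

    class Meta:
        verbose_name = 'User'
        verbose_name_plural = 'Userss'  # sic, as written in the migration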
| 31.114286
| 114
| 0.56933
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('genres', '0001_initial'),
('hobbies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('age', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genres.Genre')),
('hobbies', models.ManyToManyField(to='hobbies.Hobbie')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Userss',
},
),
]
| true
| true
|
f7160e675c92028bb3c1d073fe9fa38bb5bb5e4a
| 1,365
|
py
|
Python
|
graduation/test/test.py
|
zhangsh950618/graduation
|
9951c3a382e97ec802b6d34aabd4b70011ea83e6
|
[
"Apache-2.0"
] | null | null | null |
graduation/test/test.py
|
zhangsh950618/graduation
|
9951c3a382e97ec802b6d34aabd4b70011ea83e6
|
[
"Apache-2.0"
] | null | null | null |
graduation/test/test.py
|
zhangsh950618/graduation
|
9951c3a382e97ec802b6d34aabd4b70011ea83e6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
import datetime
import math
import numpy as np
a = "赞[123]"
a_re = re.compile("赞.*")
print a_re.findall(a)
a = "今天"
if a == "今天":
print "yes"
print ((datetime.datetime.now() - datetime.timedelta(minutes=2)).strftime("%Y-%m-%d %H:%M"))
a = u"评论[10]"
a_re = re.compile(u'^评论\[\d+\]$')
if a_re.match(a):
print "yes"
print re.match(u'今天', u'今天 10:01')
# unicode test
f = u""
L = [u"你好", u"北京", u"天安门"]
for l in L:
f += l
print f
a = [1, 2, 3, 4]
# a = np.array(a)
sum = 0
for aa in a:
sum += aa ** 2
norm = math.sqrt(sum)
for i in range(len(a)):
a[i] /= norm
print a
b = [0, 1, 1, 1]
sum = 0
for bb in b:
sum += bb ** 2
norm = math.sqrt(sum)
for i in range(len(b)):
b[i] /= norm
print np.vdot(np.array(a), np.array(b))
def normalize(segs):
sum = 0
for seg, weight in segs:
sum += weight ** 2
norm = math.sqrt(sum)
for i in range(len(segs)):
segs[1] /= norm
segs = [[u"你好", 2.3], [u"你好帅", 3.4]]
for seg, weight in segs:
print seg, weight
def normalize(segs):
sum = 0
for seg, weight in segs:
sum += weight ** 2
norm = math.sqrt(sum)
for i in range(len(segs)):
segs[i] = list(segs[i])
print segs[i]
segs[i][1] /= norm
return segs
print segs
print normalize(segs)
for seg, weight in segs:
print seg, weight
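The manual normalisation loops above can be written much more compactly with numpy; here is a Python 3 sketch of the same cosine-similarity computation, equivalent in intent but not part of the original file.
import numpy as np

a = np.array([1, 2, 3, 4], dtype=float)
b = np.array([0, 1, 1, 1], dtype=float)

# Normalise each vector to unit length, then take the dot product (cosine similarity).
a /= np.linalg.norm(a)
b /= np.linalg.norm(b)
print(np.vdot(a, b))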
| 17.278481
| 92
| 0.552381
|
import re
import datetime
import math
import numpy as np
a = "赞[123]"
a_re = re.compile("赞.*")
print a_re.findall(a)
a = "今天"
if a == "今天":
print "yes"
print ((datetime.datetime.now() - datetime.timedelta(minutes=2)).strftime("%Y-%m-%d %H:%M"))
a = u"评论[10]"
a_re = re.compile(u'^评论\[\d+\]$')
if a_re.match(a):
print "yes"
print re.match(u'今天', u'今天 10:01')
f = u""
L = [u"你好", u"北京", u"天安门"]
for l in L:
f += l
print f
a = [1, 2, 3, 4]
sum = 0
for aa in a:
sum += aa ** 2
norm = math.sqrt(sum)
for i in range(len(a)):
a[i] /= norm
print a
b = [0, 1, 1, 1]
sum = 0
for bb in b:
sum += bb ** 2
norm = math.sqrt(sum)
for i in range(len(b)):
b[i] /= norm
print np.vdot(np.array(a), np.array(b))
def normalize(segs):
sum = 0
for seg, weight in segs:
sum += weight ** 2
norm = math.sqrt(sum)
for i in range(len(segs)):
segs[1] /= norm
segs = [[u"你好", 2.3], [u"你好帅", 3.4]]
for seg, weight in segs:
print seg, weight
def normalize(segs):
sum = 0
for seg, weight in segs:
sum += weight ** 2
norm = math.sqrt(sum)
for i in range(len(segs)):
segs[i] = list(segs[i])
print segs[i]
segs[i][1] /= norm
return segs
print segs
print normalize(segs)
for seg, weight in segs:
print seg, weight
| false
| true
|
f7160f1c838fc7c07e3729784983c83446032a75
| 8,147
|
py
|
Python
|
mctsPlayer.py
|
dspub99/betazero
|
b1adf9885166e6fb4974952292653efeea1b19dc
|
[
"MIT"
] | 11
|
2018-11-23T10:48:00.000Z
|
2020-11-24T07:51:32.000Z
|
mctsPlayer.py
|
dspub99/betazero
|
b1adf9885166e6fb4974952292653efeea1b19dc
|
[
"MIT"
] | null | null | null |
mctsPlayer.py
|
dspub99/betazero
|
b1adf9885166e6fb4974952292653efeea1b19dc
|
[
"MIT"
] | 1
|
2018-11-25T15:43:41.000Z
|
2018-11-25T15:43:41.000Z
|
#!/usr/bin/env python
import numpy as np
from randomPlayer import RandomPlayer
import game
import play
# Run MCTS with MC to estimate the rest of the game.
# http://mcts.ai/about/index.html
# http://ccg.doc.gold.ac.uk/wp-content/uploads/2016/10/browne_tciaig12_1.pdf
class UCT:
def __init__(self, c):
self._c = c
def parts(self, pNode, node):
return (node.sum/node.n, 2*self._c*np.sqrt(2*np.log(pNode.n) / node.n))
def __call__(self, pNode, node):
if node.n == 0:
return np.inf
(exploit, explore) = self.parts( pNode, node )
return exploit + explore
class UCTNegamax:
def __init__(self, c):
self._uct = UCT(c)
def __call__(self, pNode, node):
if node.n == 0:
return np.inf
# pNode.chi gives us negamax
# Actually, our scores (like node.sum/node.n) are in [0,1] not [-1,1].
# So to change to the opponent's perspective, we might prefer
# scoreOpponent_A = 1 - score
# to
# scoreOpponent_B = -score
# Note that scoreOpponent_B = scoreOpponent_A - 1. This offset of -1 in exploit
# won't affect which node maximizes exploit + explore.
(exploit, explore) = self._uct.parts( pNode, node )
return pNode.chi*exploit + explore
class Node:
def __init__(self, nprand, ttt, chi, maxPlies, parent=None, move=None):
self._nprand = nprand
# each Node has a clone of ttt with the Node's game state
self.maxPlies = maxPlies
self.chi = chi
self.parent = parent
self.ttt = ttt
self.move = move
self.sum = 0
self.n = 0
self.children = []
self._needMoves = list(self.ttt.validMoves())
def dump(self):
n = 0
queue = [self]
while len(queue) > 0:
# queue[0].ttt.dump()
s = [str(n), " "*n]
newQueue = []
n += 1
for node in queue:
s.append("%d/%d(%d)" % (2*node.sum, 2*node.n, node.maxPlies))
newQueue.extend(node.children)
print (' '.join(s))
queue = newQueue
def check_parentage(self):
        # Am I my children's parent?
for c in self.children:
assert(c.parent == self)
c.check_parentage()
def bestChild(self, uct):
assert(len(self.children)>0)
phis = []
for c in self.children:
# print ("CHILD:", uct(self, c))
phis.append(uct(self, c))
phis = np.array(phis)
i = self._nprand.choice(np.where(phis > phis.max() - 1e-6)[0])
return self.children[i]
def findBoard(self, ttt):
# exactly one ply ahead
for c in self.children:
if ttt.equivBoard(c.ttt.board()):
return c
return None
def select(self, uct):
# "Starting at the root node, a child selection policy is recursively applied to descend
# through the tree until the most urgent expandable node is reached. A node is expandable if
# it represents a nonterminal state and has unvisited (i.e. unexpanded) children"
if len(self._needMoves) > 0:
return self
if len(self.children)==0:
return None
return self.bestChild(uct).select(uct)
def expand(self):
# "One (or more) child nodes are added to expand the tree, according to the
# available actions."
assert( len(self._needMoves) > 0 )
if self.maxPlies==0:
# just run another sim from here
return self
m = self._nprand.choice(self._needMoves)
self._needMoves.remove(m)
ttt = self.ttt.clone()
ttt.add(m)
c = Node(self._nprand, ttt, -self.chi, self.maxPlies - 1, self, m.clone())
self.children.append(c)
return c
def backpropagate(self, score):
# "The simulation result is “backed up” (i.e. backpropagated)
# through the selected nodes to update their statistics."
self.n += 1
self.sum += score
if self.parent is not None:
self.parent.backpropagate(score)
def __str__(self):
return "sum = %.4f n = %d nChildren = %d self = %s parent = %s" % (self.sum, self.n, len(self.children), id(self), id(self.parent))
class MCTSPlayer:
def __init__(self, nPlay, maxPlies, bNegamax, cUct = 1/np.sqrt(2), bDump=False):
self._nPlay = nPlay
self._maxPlies = maxPlies
if bNegamax:
self._uct = UCTNegamax(cUct)
else:
self._uct = UCT(cUct)
self._cUct = cUct
self._bNegamax = bNegamax
self._bDump = bDump
self._uctMove = UCT(0)
self._rp = RandomPlayer()
self._nprand = np.random.RandomState()
self._root = None
def __str__(self):
return ("%s nPlay = %d maxPlies = %d bNegamax = %s cUct = %.4f" %
(self.__class__.__name__, self._nPlay, self._maxPlies,
self._bNegamax, self._cUct))
def _simulate(self, node):
# "A simulation is run from the new node(s) according to the
# default policy to produce an outcome."
return play.playRest(self._rp, self._rp, node.ttt.clone(), False, 99999)[0]
def setSeed(self, seed):
self._nprand.seed(seed)
self._rp.setSeed(seed+1)
def move(self, ttt):
if self._root is not None:
self._root = self._root.findBoard(ttt)
if self._root is None:
self._root = Node(self._nprand, ttt, 1, maxPlies=self._maxPlies)
marker = ttt.whoseTurn()
for _ in range(self._nPlay):
nodeLeaf = self._root.select(self._uct)
if nodeLeaf is not None:
nodeSim = nodeLeaf.expand()
if nodeSim is not None:
# print ("START:", nodeSim.maxPlies, nodeSim.move)
w = self._simulate(nodeSim)
if w == ttt.whoseTurn():
score = 1
elif w == game.Draw:
score = .5
else:
score = 0
# print ("SCORE:", marker, w, score)
nodeSim.backpropagate(score)
if self._bDump:
self._root.dump()
self._root = self._root.bestChild(self._uctMove)
return self._root.move
def tests(self):
self._root.check_parentage()
if __name__ == "__main__":
from ticTacToe import TicTacToe
from mmPlayer import MMPlayer
from mcPlayer import MCPlayer
nPlay = 100
maxPlies = 1000
bNegamax = True
cUct = 1/np.sqrt(2)
if True:
mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,
cUct = cUct, bDump=True)
mcts.setSeed(1)
mc10 = MCPlayer(nPlay=10)
mc10.setSeed(2)
play.play(TicTacToe, mcts, mc10, bShow = True)
else:
score = []
for _ in range(100):
mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,
cUct = cUct)
# mc10 vs. mc10 gives .79, fyi
# mcts100_mp=1_c=1e6 vs. mc 10 gives .82
# mcts100_mp=1_c=1/sqrt(2) vs. mc 10 gives .82
# mcts100_mp=1_c=0 vs. mc 10 gives .82
# mcts100_mp=2_c=0 vs. mc 10 gives .855
# mcts100_mp=3_c=0 vs. mc 10 gives .83
# mcts100_mp=3_c=1/sqrt(2) vs. mc 10 gives .86
# mcts100_mp=3_c=1/sqrt(2)_negamax vs. mc 10 gives .86
# mcts100_mp=1000_c=1/sqrt(2)_negamax vs. mc 10 gives .83
# mcts1000_mp=1000_c=1/sqrt(2)_negamax vs. mc 10 gives .94
# mcts1000_mp=1000_c=1/sqrt(2) vs. mc 10 gives .83
w = play.play(TicTacToe, MCPlayer(nPlay=100), mcts, bShow = False)
if w == 'X':
score.append(1)
elif w == 'D':
score.append(.5)
else:
score.append(0)
print (np.array(score).mean())
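The UCT class above scores a child as exploitation plus exploration, i.e. mean reward plus 2*c*sqrt(2*ln(parent visits)/child visits). A tiny standalone check of that formula with made-up visit counts follows.
import numpy as np

c = 1 / np.sqrt(2)
parent_visits, child_visits, child_sum = 50, 10, 6.0

exploit = child_sum / child_visits                                   # mean score of the child
explore = 2 * c * np.sqrt(2 * np.log(parent_visits) / child_visits)  # UCT exploration bonus
print(exploit, explore, exploit + explore)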
| 31.577519
| 139
| 0.551737
|
import numpy as np
from randomPlayer import RandomPlayer
import game
import play
class UCT:
def __init__(self, c):
self._c = c
def parts(self, pNode, node):
return (node.sum/node.n, 2*self._c*np.sqrt(2*np.log(pNode.n) / node.n))
def __call__(self, pNode, node):
if node.n == 0:
return np.inf
(exploit, explore) = self.parts( pNode, node )
return exploit + explore
class UCTNegamax:
def __init__(self, c):
self._uct = UCT(c)
def __call__(self, pNode, node):
if node.n == 0:
return np.inf
# scoreOpponent_A = 1 - score
# to
# scoreOpponent_B = -score
# Note that scoreOpponent_B = scoreOpponent_A - 1. This offset of -1 in exploit
# won't affect which node maximizes exploit + explore.
(exploit, explore) = self._uct.parts( pNode, node )
return pNode.chi*exploit + explore
class Node:
def __init__(self, nprand, ttt, chi, maxPlies, parent=None, move=None):
self._nprand = nprand
self.maxPlies = maxPlies
self.chi = chi
self.parent = parent
self.ttt = ttt
self.move = move
self.sum = 0
self.n = 0
self.children = []
self._needMoves = list(self.ttt.validMoves())
def dump(self):
n = 0
queue = [self]
while len(queue) > 0:
# queue[0].ttt.dump()
s = [str(n), " "*n]
newQueue = []
n += 1
for node in queue:
s.append("%d/%d(%d)" % (2*node.sum, 2*node.n, node.maxPlies))
newQueue.extend(node.children)
print (' '.join(s))
queue = newQueue
def check_parentage(self):
        # Am I my children's parent?
for c in self.children:
assert(c.parent == self)
c.check_parentage()
def bestChild(self, uct):
assert(len(self.children)>0)
phis = []
for c in self.children:
phis.append(uct(self, c))
phis = np.array(phis)
i = self._nprand.choice(np.where(phis > phis.max() - 1e-6)[0])
return self.children[i]
def findBoard(self, ttt):
for c in self.children:
if ttt.equivBoard(c.ttt.board()):
return c
return None
def select(self, uct):
# through the tree until the most urgent expandable node is reached. A node is expandable if
# it represents a nonterminal state and has unvisited (i.e. unexpanded) children"
if len(self._needMoves) > 0:
return self
if len(self.children)==0:
return None
return self.bestChild(uct).select(uct)
def expand(self):
# available actions."
assert( len(self._needMoves) > 0 )
if self.maxPlies==0:
return self
m = self._nprand.choice(self._needMoves)
self._needMoves.remove(m)
ttt = self.ttt.clone()
ttt.add(m)
c = Node(self._nprand, ttt, -self.chi, self.maxPlies - 1, self, m.clone())
self.children.append(c)
return c
def backpropagate(self, score):
# through the selected nodes to update their statistics."
self.n += 1
self.sum += score
if self.parent is not None:
self.parent.backpropagate(score)
def __str__(self):
return "sum = %.4f n = %d nChildren = %d self = %s parent = %s" % (self.sum, self.n, len(self.children), id(self), id(self.parent))
class MCTSPlayer:
def __init__(self, nPlay, maxPlies, bNegamax, cUct = 1/np.sqrt(2), bDump=False):
self._nPlay = nPlay
self._maxPlies = maxPlies
if bNegamax:
self._uct = UCTNegamax(cUct)
else:
self._uct = UCT(cUct)
self._cUct = cUct
self._bNegamax = bNegamax
self._bDump = bDump
self._uctMove = UCT(0)
self._rp = RandomPlayer()
self._nprand = np.random.RandomState()
self._root = None
def __str__(self):
return ("%s nPlay = %d maxPlies = %d bNegamax = %s cUct = %.4f" %
(self.__class__.__name__, self._nPlay, self._maxPlies,
self._bNegamax, self._cUct))
def _simulate(self, node):
# default policy to produce an outcome."
return play.playRest(self._rp, self._rp, node.ttt.clone(), False, 99999)[0]
def setSeed(self, seed):
self._nprand.seed(seed)
self._rp.setSeed(seed+1)
def move(self, ttt):
if self._root is not None:
self._root = self._root.findBoard(ttt)
if self._root is None:
self._root = Node(self._nprand, ttt, 1, maxPlies=self._maxPlies)
marker = ttt.whoseTurn()
for _ in range(self._nPlay):
nodeLeaf = self._root.select(self._uct)
if nodeLeaf is not None:
nodeSim = nodeLeaf.expand()
if nodeSim is not None:
w = self._simulate(nodeSim)
if w == ttt.whoseTurn():
score = 1
elif w == game.Draw:
score = .5
else:
score = 0
nodeSim.backpropagate(score)
if self._bDump:
self._root.dump()
self._root = self._root.bestChild(self._uctMove)
return self._root.move
def tests(self):
self._root.check_parentage()
if __name__ == "__main__":
from ticTacToe import TicTacToe
from mmPlayer import MMPlayer
from mcPlayer import MCPlayer
nPlay = 100
maxPlies = 1000
bNegamax = True
cUct = 1/np.sqrt(2)
if True:
mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,
cUct = cUct, bDump=True)
mcts.setSeed(1)
mc10 = MCPlayer(nPlay=10)
mc10.setSeed(2)
play.play(TicTacToe, mcts, mc10, bShow = True)
else:
score = []
for _ in range(100):
mcts = MCTSPlayer(nPlay = nPlay, maxPlies = maxPlies, bNegamax = bNegamax,
cUct = cUct)
w = play.play(TicTacToe, MCPlayer(nPlay=100), mcts, bShow = False)
if w == 'X':
score.append(1)
elif w == 'D':
score.append(.5)
else:
score.append(0)
print (np.array(score).mean())
| true
| true
|
f7161020c4bf4dad2c0c0ebf7e4bb050b02a52e1
| 15,471
|
py
|
Python
|
tests/controllers/test_api_controller.py
|
Moesif/moesifapi-python
|
c1e8b0feab51fdd830154bf981a102c5162943ac
|
[
"Apache-2.0"
] | 5
|
2017-01-28T17:09:28.000Z
|
2020-03-10T19:59:31.000Z
|
tests/controllers/test_api_controller.py
|
Moesif/moesifapi-python
|
c1e8b0feab51fdd830154bf981a102c5162943ac
|
[
"Apache-2.0"
] | null | null | null |
tests/controllers/test_api_controller.py
|
Moesif/moesifapi-python
|
c1e8b0feab51fdd830154bf981a102c5162943ac
|
[
"Apache-2.0"
] | 1
|
2019-05-12T18:37:28.000Z
|
2019-05-12T18:37:28.000Z
|
# -*- coding: utf-8 -*-
"""
tests.controllers.test_api_controller
"""
import jsonpickle
from .controller_test_base import *
from moesifapi.models import *
from datetime import *
class ApiControllerTests(ControllerTestBase):
@classmethod
def setUpClass(cls):
super(ApiControllerTests, cls).setUpClass()
cls.controller = cls.api_client.api
    # Add Single Event via Ingestion API
def test_add_event(self):
# Parameters for the API call
req_headers = APIHelper.json_deserialize(""" {
"Host": "api.acmeinc.com",
"Accept": "*/*",
"Connection": "Keep-Alive",
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)",
"Content-Type": "application/json",
"Content-Length": "126",
"Accept-Encoding": "gzip"
} """)
req_body = APIHelper.json_deserialize( """{
"items": [
{
"type": 1,
"id": "fwfrf"
},
{
"type": 2,
"id": "d43d3f"
}
]
}""")
rsp_headers = APIHelper.json_deserialize(""" {
"Date": "Tue, 20 Aug 2019 23:46:49 GMT",
"Vary": "Accept-Encoding",
"Pragma": "no-cache",
"Expires": "-1",
"Content-Type": "application/json; charset=utf-8",
"Cache-Control": "no-cache"
} """)
rsp_body = APIHelper.json_deserialize( """{
"Error": "InvalidArgumentException",
"Message": "Missing field field_a"
}""")
metadata = APIHelper.json_deserialize("""{
"field1": "foo",
"field2": "bar"
}""")
event_req = EventRequestModel(time = datetime.utcnow() - timedelta(seconds=1),
uri = "https://api.acmeinc.com/items/reviews?&page=0&page_size=12®ion[]=Overig&sort=relevance",
verb = "PATCH",
api_version = "1.1.0",
ip_address = "61.48.220.123",
headers = req_headers,
body = req_body)
event_rsp = EventResponseModel(time = datetime.utcnow(),
status = 200,
headers = rsp_headers,
body = rsp_body)
event_model = EventModel(request = event_req,
response = event_rsp,
user_id = "my_user_id",
company_id = "my_company_id",
session_token = "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata = metadata)
# Perform the API call through the SDK function
self.controller.create_event(event_model)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Add Batched Events via Ingestion API
def test_add_batched_events(self):
# Parameters for the API call
body = APIHelper.json_deserialize('[{ "metadata": { "foo" : "bar" }, "request": { "time": "2016-09-09T04:45:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:45:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:46:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:46:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:47:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:47:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:48:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", 
"api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:48:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "exfzweachxjgznvKUYrxFcxv]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:49:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:49:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:50:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:50:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "recvreedfef", "session_token": "xcvkrjmcfghwuignrmcmhxdhaaezse4w]s98y18cx98q3yhwmnhcfx43f" } ]', EventModel.from_dictionary)
for val in body:
val.request.time = datetime.utcnow() - timedelta(seconds=1)
val.response.time = datetime.utcnow()
# Perform the API call through the SDK function
self.controller.create_events_batch(body)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
    # Update Single User via Ingestion API
def test_update_user(self):
# Parameters for the API call
metadata = APIHelper.json_deserialize(""" {
"email": "pythonapiuser@email.com",
"name": "pythonapiuser",
"custom": "testdata"
} """)
user_model = UserModel(
user_id="12345",
company_id="67890",
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
modified_time=datetime.utcnow(),
metadata=metadata,
campaign=CampaignModel(utm_source="Newsletter", utm_medium="Email"))
# Perform the API call through the SDK function
self.controller.update_user(user_model)
# Test response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Update Batched Users via Ingestion API
def test_update_users_batch(self):
# Parameter for the API call
body = [UserModel(user_id="1234", company_id="6789", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f", ),
UserModel(user_id="12345", company_id="67890", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata=APIHelper.json_deserialize(""" {"email": "pythonapiuser@email.com",
"name": "pythonapiuser", "string_field": "value_1", "number_field": 0 } """))]
# Perform the API call through the SDK function
self.controller.update_users_batch(body)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Get Application configuration
def test_get_app_config(self):
# Perform the API call through the SDK function
response = self.controller.get_app_config().__dict__
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 200)
self.assertIsNotNone(response["raw_body"])
self.assertIsNotNone(response["headers"]["X-Moesif-Config-ETag"])
    # Add Single Company via Ingestion API
def test_update_company(self):
# Parameter for the API call
company_model = CompanyModel(
company_id="67890",
modified_time=datetime.utcnow(),
campaign=CampaignModel(utm_source="Adwords", utm_medium="Twitter"))
# Perform the API call through the SDK function
self.controller.update_company(company_model)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
# Add Batched Companies via Ingestion API
def test_update_companies_batch(self):
# Parameter for the API call
body = [CompanyModel(company_id="67890", modified_time=datetime.utcnow(), company_domain="moesif"),
CompanyModel(company_id="6789", modified_time=datetime.utcnow(), company_domain="moesif",
metadata=APIHelper.json_deserialize(""" {"string_field": "value_1", "number_field": 0 } """))]
# Perform the API call through the SDK function
self.controller.update_companies_batch(body)
# Test Response code
self.assertEquals(self.response_catcher.response.status_code, 201)
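Outside the test harness, a single event can be sent with the same models plus the SDK client entry point. This is a hedged sketch: the application id is a placeholder and the MoesifAPIClient import path is assumed from the SDK, not shown in this file.
from datetime import datetime, timedelta
from moesifapi.moesif_api_client import MoesifAPIClient
from moesifapi.models import EventModel, EventRequestModel, EventResponseModel

api = MoesifAPIClient("YOUR_COLLECTOR_APPLICATION_ID").api  # placeholder application id

event = EventModel(
    request=EventRequestModel(time=datetime.utcnow() - timedelta(seconds=1),
                              uri="https://api.acmeinc.com/items/reviews",
                              verb="GET",
                              headers={"Accept": "*/*"}),
    response=EventResponseModel(time=datetime.utcnow(),
                                status=200,
                                headers={"Content-Type": "application/json"},
                                body={"ok": True}),
    user_id="my_user_id")

api.create_event(event)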
| 82.73262
| 8,873
| 0.548316
|
import jsonpickle
from .controller_test_base import *
from moesifapi.models import *
from datetime import *
class ApiControllerTests(ControllerTestBase):
@classmethod
def setUpClass(cls):
super(ApiControllerTests, cls).setUpClass()
cls.controller = cls.api_client.api
def test_add_event(self):
req_headers = APIHelper.json_deserialize(""" {
"Host": "api.acmeinc.com",
"Accept": "*/*",
"Connection": "Keep-Alive",
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)",
"Content-Type": "application/json",
"Content-Length": "126",
"Accept-Encoding": "gzip"
} """)
req_body = APIHelper.json_deserialize( """{
"items": [
{
"type": 1,
"id": "fwfrf"
},
{
"type": 2,
"id": "d43d3f"
}
]
}""")
rsp_headers = APIHelper.json_deserialize(""" {
"Date": "Tue, 20 Aug 2019 23:46:49 GMT",
"Vary": "Accept-Encoding",
"Pragma": "no-cache",
"Expires": "-1",
"Content-Type": "application/json; charset=utf-8",
"Cache-Control": "no-cache"
} """)
rsp_body = APIHelper.json_deserialize( """{
"Error": "InvalidArgumentException",
"Message": "Missing field field_a"
}""")
metadata = APIHelper.json_deserialize("""{
"field1": "foo",
"field2": "bar"
}""")
event_req = EventRequestModel(time = datetime.utcnow() - timedelta(seconds=1),
uri = "https://api.acmeinc.com/items/reviews?&page=0&page_size=12®ion[]=Overig&sort=relevance",
verb = "PATCH",
api_version = "1.1.0",
ip_address = "61.48.220.123",
headers = req_headers,
body = req_body)
event_rsp = EventResponseModel(time = datetime.utcnow(),
status = 200,
headers = rsp_headers,
body = rsp_body)
event_model = EventModel(request = event_req,
response = event_rsp,
user_id = "my_user_id",
company_id = "my_company_id",
session_token = "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata = metadata)
self.controller.create_event(event_model)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_add_batched_events(self):
body = APIHelper.json_deserialize('[{ "metadata": { "foo" : "bar" }, "request": { "time": "2016-09-09T04:45:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:45:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:46:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:46:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:47:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:47:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:48:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", 
"api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:48:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "exfzweachxjgznvKUYrxFcxv]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:49:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:49:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "mndug437f43", "session_token": "23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f" }, { "request": { "time": "2016-09-09T04:50:42.914", "uri": "https://api.acmeinc.com/items/reviews/", "verb": "PATCH", "api_version": "1.1.0", "ip_address": "61.48.220.123", "headers": { "Host": "api.acmeinc.com", "Accept": "*/*", "Connection": "Keep-Alive", "User-Agent": "Dalvik/2.1.0 (Linux; U; Android 5.0.2; C6906 Build/14.5.A.0.242)", "Content-Type": "application/json", "Content-Length": "126", "Accept-Encoding": "gzip" }, "body": { "items": [ { "direction_type": 1, "discovery_id": "fwfrf", "liked": false }, { "direction_type": 2, "discovery_id": "d43d3f", "liked": true } ] } }, "response": { "time": "2016-09-09T04:50:42.914", "status": 500, "headers": { "Date": "Tue, 23 Aug 2016 23:46:49 GMT", "Vary": "Accept-Encoding", "Pragma": "no-cache", "Expires": "-1", "Content-Type": "application/json; charset=utf-8", "X-Powered-By": "ARR/3.0", "Cache-Control": "no-cache", "Arr-Disable-Session-Affinity": "true" }, "body": { "Error": "InvalidArgumentException", "Message": "Missing field field_a" } }, "user_id": "recvreedfef", "session_token": "xcvkrjmcfghwuignrmcmhxdhaaezse4w]s98y18cx98q3yhwmnhcfx43f" } ]', EventModel.from_dictionary)
for val in body:
val.request.time = datetime.utcnow() - timedelta(seconds=1)
val.response.time = datetime.utcnow()
self.controller.create_events_batch(body)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_update_user(self):
metadata = APIHelper.json_deserialize(""" {
"email": "pythonapiuser@email.com",
"name": "pythonapiuser",
"custom": "testdata"
} """)
user_model = UserModel(
user_id="12345",
company_id="67890",
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
modified_time=datetime.utcnow(),
metadata=metadata,
campaign=CampaignModel(utm_source="Newsletter", utm_medium="Email"))
self.controller.update_user(user_model)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_update_users_batch(self):
body = [UserModel(user_id="1234", company_id="6789", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f", ),
UserModel(user_id="12345", company_id="67890", modified_time=datetime.utcnow(),
session_token="23jdf0owekfmcn4u3qypxg09w4d8ayrcdx8nu2ng]s98y18cx98q3yhwmnhcfx43f",
metadata=APIHelper.json_deserialize(""" {"email": "pythonapiuser@email.com",
"name": "pythonapiuser", "string_field": "value_1", "number_field": 0 } """))]
self.controller.update_users_batch(body)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_get_app_config(self):
response = self.controller.get_app_config().__dict__
self.assertEquals(self.response_catcher.response.status_code, 200)
self.assertIsNotNone(response["raw_body"])
self.assertIsNotNone(response["headers"]["X-Moesif-Config-ETag"])
def test_update_company(self):
company_model = CompanyModel(
company_id="67890",
modified_time=datetime.utcnow(),
campaign=CampaignModel(utm_source="Adwords", utm_medium="Twitter"))
self.controller.update_company(company_model)
self.assertEquals(self.response_catcher.response.status_code, 201)
def test_update_companies_batch(self):
body = [CompanyModel(company_id="67890", modified_time=datetime.utcnow(), company_domain="moesif"),
CompanyModel(company_id="6789", modified_time=datetime.utcnow(), company_domain="moesif",
metadata=APIHelper.json_deserialize(""" {"string_field": "value_1", "number_field": 0 } """))]
self.controller.update_companies_batch(body)
self.assertEquals(self.response_catcher.response.status_code, 201)
| true
| true
|
f716105610f9aba80608e6aac525ea5bef34d12c
| 2,465
|
py
|
Python
|
lib/ruleset_apply.py
|
brennonyork/budget-buddy
|
f64dc5ab5248794f101cc704e3754b2882f1d3c3
|
[
"MIT"
] | null | null | null |
lib/ruleset_apply.py
|
brennonyork/budget-buddy
|
f64dc5ab5248794f101cc704e3754b2882f1d3c3
|
[
"MIT"
] | null | null | null |
lib/ruleset_apply.py
|
brennonyork/budget-buddy
|
f64dc5ab5248794f101cc704e3754b2882f1d3c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Arg1 - ruleset file that contains all rulesets
# Arg2 - cleaned, sorted single file with all transactions
#
# Transforms a given file - Arg2 - into the column set form below
# with the rulesets written and applied for the given financial
# source - Arg1
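#
# Illustrative sketch of the expected inputs (the field meanings are assumptions
# inferred from the parsing code below, not documented by the original author):
#
#   ruleset file line (category, regex, optional trailing '#' comment):
#       Groceries,(?i)whole ?foods   # grocery stores
#   transaction file line (assumed: date, merchant, category, price):
#       2020-01-15,WHOLEFDS 123,Uncategorized,42.17
#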
import re
import sys
if len(sys.argv) < 3:
print("ERROR: need to supply a ruleset file and transaction file")
exit()
ruleset_file = sys.argv[1]
merge_file = sys.argv[2]
incl_history = None
# if an extra arg is passed then we include the historical transaction before
# we change it with the ruleset regexes
if len(sys.argv) == 4:
incl_history = sys.argv[3]
rule_map = []
with open(ruleset_file, 'r') as rules:
for rule in rules:
        # skip the line if it's only a newline or if it starts with a '#' character
if rule == "\n" or rule[0] == '#':
continue
else:
# else split by a '#' if it exists and take everything before it
category, regex = map(lambda x: x.strip(), rule.split('#')[0].split(','))
rule_map.append([category, regex])
with open(merge_file, 'r') as transactions:
for transaction in transactions:
d, m, c, p = map(lambda x: x.strip(), transaction.split(',', 4))
regex_matches = list(map(lambda x: re.search(x, m),
map(lambda y: y[1],
rule_map)))
if any(regex_matches):
            # find the longest match by computing each match's length from its
            # `span()` (end minus start), keeping the rule index alongside the
            # length
longest_match = max([[i, j.span()[1]-j.span()[0]] for i, j in enumerate(regex_matches) if j],
key=lambda x: x[1])
# pull the new category by taking the index from the longest
# match, looking up that index in the rule_map, and then taking
# the first element from that list (ie the category, not the
# regex assigned to that category label)
new_category = rule_map[longest_match[0]][0]
if incl_history:
if not(c): c = "Empty"
sys.stdout.write(d+','+m+','+new_category+','+p+','+c+'\n')
else:
sys.stdout.write(d+','+m+','+new_category+','+p+'\n')
else:
sys.stdout.write(d+','+m+','+c+','+p+'\n')
| 37.923077
| 105
| 0.574037
|
import re
import sys
if len(sys.argv) < 3:
print("ERROR: need to supply a ruleset file and transaction file")
exit()
ruleset_file = sys.argv[1]
merge_file = sys.argv[2]
incl_history = None
if len(sys.argv) == 4:
incl_history = sys.argv[3]
rule_map = []
with open(ruleset_file, 'r') as rules:
for rule in rules:
if rule == "\n" or rule[0] == '#':
continue
else:
category, regex = map(lambda x: x.strip(), rule.split('#')[0].split(','))
rule_map.append([category, regex])
with open(merge_file, 'r') as transactions:
for transaction in transactions:
d, m, c, p = map(lambda x: x.strip(), transaction.split(',', 4))
regex_matches = list(map(lambda x: re.search(x, m),
map(lambda y: y[1],
rule_map)))
if any(regex_matches):
longest_match = max([[i, j.span()[1]-j.span()[0]] for i, j in enumerate(regex_matches) if j],
key=lambda x: x[1])
new_category = rule_map[longest_match[0]][0]
if incl_history:
if not(c): c = "Empty"
sys.stdout.write(d+','+m+','+new_category+','+p+','+c+'\n')
else:
sys.stdout.write(d+','+m+','+new_category+','+p+'\n')
else:
sys.stdout.write(d+','+m+','+c+','+p+'\n')
| true
| true
|
f71610eccde5fef5a72814ac19d392b5c15c9201
| 7,703
|
py
|
Python
|
src/sage/groups/matrix_gps/unitary.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | 5
|
2015-01-04T07:15:06.000Z
|
2022-03-04T15:15:18.000Z
|
src/sage/groups/matrix_gps/unitary.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | null | null | null |
src/sage/groups/matrix_gps/unitary.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | 10
|
2016-09-28T13:12:40.000Z
|
2022-02-12T09:28:34.000Z
|
r"""
Unitary Groups `GU(n,q)` and `SU(n,q)`
These are `n \times n` unitary matrices with entries in
`GF(q^2)`.
EXAMPLES::
sage: G = SU(3,5)
sage: G.order()
378000
sage: G
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: G.gens()
(
[ a 0 0] [4*a 4 1]
[ 0 2*a + 2 0] [ 4 4 0]
[ 0 0 3*a], [ 1 0 0]
)
sage: G.base_ring()
Finite Field in a of size 5^2
AUTHORS:
- David Joyner (2006-03): initial version, modified from
special_linear (by W. Stein)
- David Joyner (2006-05): minor additions (examples, _latex_, __str__,
gens)
- William Stein (2006-12): rewrite
- Volker Braun (2013-1) port to new Parent, libGAP, extreme refactoring.
"""
#*********************************************************************************
# Copyright (C) 2006 David Joyner and William Stein
# Copyright (C) 2013 Volker Braun <vbraun.name@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*********************************************************************************
from sage.rings.all import ZZ, is_FiniteField, GF
from sage.misc.latex import latex
from sage.groups.matrix_gps.named_group import (
normalize_args_vectorspace, NamedMatrixGroup_generic, NamedMatrixGroup_gap )
def finite_field_sqrt(ring):
"""
Helper function.
INPUT:
A ring.
OUTPUT:
Integer q such that ``ring`` is the finite field with `q^2` elements.
EXAMPLES::
sage: from sage.groups.matrix_gps.unitary import finite_field_sqrt
sage: finite_field_sqrt(GF(4, 'a'))
2
"""
if not is_FiniteField(ring):
raise ValueError('not a finite field')
q, rem = ring.cardinality().sqrtrem()
if rem != 0:
        raise ValueError('cardinality not a square')
return q
###############################################################################
# General Unitary Group
###############################################################################
def GU(n, R, var='a'):
r"""
Return the general unitary group.
The general unitary group `GU( d, R )` consists of all `d \times
    d` matrices that preserve a nondegenerate sesquilinear form over
the ring `R`.
.. note::
For a finite field the matrices that preserve a sesquilinear
form over `F_q` live over `F_{q^2}`. So ``GU(n,q)`` for
integer ``q`` constructs the matrix group over the base ring
``GF(q^2)``.
.. note::
This group is also available via ``groups.matrix.GU()``.
INPUT:
- ``n`` -- a positive integer.
- ``R`` -- ring or an integer. If an integer is specified, the
corresponding finite field is used.
- ``var`` -- variable used to represent generator of the finite
field, if needed.
OUTPUT:
Return the general unitary group.
EXAMPLES::
sage: G = GU(3, 7); G
General Unitary Group of degree 3 over Finite Field in a of size 7^2
sage: G.gens()
(
[ a 0 0] [6*a 6 1]
[ 0 1 0] [ 6 6 0]
[ 0 0 5*a], [ 1 0 0]
)
sage: GU(2,QQ)
General Unitary Group of degree 2 over Rational Field
sage: G = GU(3, 5, var='beta')
sage: G.base_ring()
Finite Field in beta of size 5^2
sage: G.gens()
(
[ beta 0 0] [4*beta 4 1]
[ 0 1 0] [ 4 4 0]
[ 0 0 3*beta], [ 1 0 0]
)
TESTS::
sage: groups.matrix.GU(2, 3)
General Unitary Group of degree 2 over Finite Field in a of size 3^2
"""
degree, ring = normalize_args_vectorspace(n, R, var=var)
if is_FiniteField(ring):
q = ring.cardinality()
ring = GF(q ** 2, name=var)
name = 'General Unitary Group of degree {0} over {1}'.format(degree, ring)
ltx = r'\text{{GU}}_{{{0}}}({1})'.format(degree, latex(ring))
if is_FiniteField(ring):
cmd = 'GU({0}, {1})'.format(degree, q)
return UnitaryMatrixGroup_gap(degree, ring, False, name, ltx, cmd)
else:
return UnitaryMatrixGroup_generic(degree, ring, False, name, ltx)
###############################################################################
# Special Unitary Group
###############################################################################
def SU(n, R, var='a'):
"""
The special unitary group `SU( d, R )` consists of all `d \times d`
matrices that preserve a nondegenerate sequilinear form over the
ring `R` and have determinant one.
.. note::
For a finite field the matrices that preserve a sesquilinear
form over `F_q` live over `F_{q^2}`. So ``SU(n,q)`` for
integer ``q`` constructs the matrix group over the base ring
``GF(q^2)``.
.. note::
This group is also available via ``groups.matrix.SU()``.
INPUT:
- ``n`` -- a positive integer.
- ``R`` -- ring or an integer. If an integer is specified, the
corresponding finite field is used.
- ``var`` -- variable used to represent generator of the finite
field, if needed.
OUTPUT:
Return the special unitary group.
EXAMPLES::
sage: SU(3,5)
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: SU(3, GF(5))
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: SU(3,QQ)
Special Unitary Group of degree 3 over Rational Field
TESTS::
sage: groups.matrix.SU(2, 3)
Special Unitary Group of degree 2 over Finite Field in a of size 3^2
"""
degree, ring = normalize_args_vectorspace(n, R, var=var)
if is_FiniteField(ring):
q = ring.cardinality()
ring = GF(q ** 2, name=var)
name = 'Special Unitary Group of degree {0} over {1}'.format(degree, ring)
ltx = r'\text{{SU}}_{{{0}}}({1})'.format(degree, latex(ring))
if is_FiniteField(ring):
cmd = 'SU({0}, {1})'.format(degree, q)
return UnitaryMatrixGroup_gap(degree, ring, True, name, ltx, cmd)
else:
return UnitaryMatrixGroup_generic(degree, ring, True, name, ltx)
########################################################################
# Unitary Group class
########################################################################
class UnitaryMatrixGroup_generic(NamedMatrixGroup_generic):
r"""
General Unitary Group over arbitrary rings.
EXAMPLES::
sage: G = GU(3, GF(7)); G
General Unitary Group of degree 3 over Finite Field in a of size 7^2
sage: latex(G)
\text{GU}_{3}(\Bold{F}_{7^{2}})
sage: G = SU(3, GF(5)); G
Special Unitary Group of degree 3 over Finite Field in a of size 5^2
sage: latex(G)
\text{SU}_{3}(\Bold{F}_{5^{2}})
"""
def _check_matrix(self, x, *args):
"""a
Check whether the matrix ``x`` is unitary.
See :meth:`~sage.groups.matrix_gps.matrix_group._check_matrix`
for details.
EXAMPLES::
sage: G = GU(2, GF(5))
sage: G._check_matrix(G.an_element().matrix())
sage: G = SU(2, GF(5))
sage: G._check_matrix(G.an_element().matrix())
"""
if self._special and x.determinant() != 1:
raise TypeError('matrix must have determinant one')
if not x.is_unitary():
raise TypeError('matrix must be unitary')
class UnitaryMatrixGroup_gap(UnitaryMatrixGroup_generic, NamedMatrixGroup_gap):
pass
| 29.288973
| 82
| 0.537583
|
from sage.rings.all import ZZ, is_FiniteField, GF
from sage.misc.latex import latex
from sage.groups.matrix_gps.named_group import (
normalize_args_vectorspace, NamedMatrixGroup_generic, NamedMatrixGroup_gap )
def finite_field_sqrt(ring):
if not is_FiniteField(ring):
raise ValueError('not a finite field')
q, rem = ring.cardinality().sqrtrem()
if rem != 0:
        raise ValueError('cardinality not a square')
return q
| true
| true
|
f71611444874d1fdc566b5e40bd2782abdfab6c2
| 3,694
|
py
|
Python
|
more_one_memo/slack/model/response.py
|
nonylene/more-one-memo
|
2c1007bb0bbafe47cba1ac63f237cd4aa66c3374
|
[
"MIT"
] | 1
|
2018-06-07T01:20:42.000Z
|
2018-06-07T01:20:42.000Z
|
more_one_memo/slack/model/response.py
|
nonylene/more-one-memo
|
2c1007bb0bbafe47cba1ac63f237cd4aa66c3374
|
[
"MIT"
] | 5
|
2021-06-02T00:13:17.000Z
|
2022-02-26T23:38:56.000Z
|
more_one_memo/slack/model/response.py
|
nonylene/more-one-memo
|
2c1007bb0bbafe47cba1ac63f237cd4aa66c3374
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from dataclasses import dataclass
UserID = str
BotID = str
ChannelID = str
@dataclass
class Channel:
"""
https://api.slack.com/types/channel
"""
id: ChannelID
name: str
is_archived: bool
is_member: bool
@staticmethod
def from_json(json: dict):
return Channel(json['id'], json['name'], json['is_archived'], json['is_member'])
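# Minimal usage sketch (the payload below is illustrative, not taken from a real
# Slack API response):
#   channel = Channel.from_json(
#       {'id': 'C0123456', 'name': 'general', 'is_archived': False, 'is_member': True})
#   assert channel.name == 'general'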
@dataclass
class User:
"""
https://api.slack.com/types/user
"""
@dataclass
class Profile:
image_72: Optional[str]
image_192: Optional[str]
def get_image(self) -> Optional[str]:
if self.image_192 is not None:
return self.image_192
if self.image_72 is not None:
return self.image_72
return None
@staticmethod
def from_json(json: dict):
return User.Profile(json['image_72'], json['image_192'])
id: UserID
name: str
profile: Profile
@staticmethod
def from_json(json: dict):
return User(json['id'], json['name'], User.Profile.from_json(json['profile']))
@dataclass
class Conversations:
# https://api.slack.com/methods/conversations.list
@dataclass
class ResponseMetadata:
next_cursor: Optional[str]
@staticmethod
def from_json(json: dict):
return Conversations.ResponseMetadata(json.get('next_cursor'))
channels: List[Channel] # Regard Conversation as Channel
response_metadata: ResponseMetadata
@staticmethod
def from_json(json: dict):
return Conversations(
[Channel.from_json(obj) for obj in json['channels']],
Conversations.ResponseMetadata.from_json(json['response_metadata'])
)
@dataclass
class Users:
# https://api.slack.com/methods/users.list
@dataclass
class ResponseMetadata:
next_cursor: Optional[str]
@staticmethod
def from_json(json: dict):
return Users.ResponseMetadata(json.get('next_cursor'))
members: List[User]
response_metadata: ResponseMetadata
@staticmethod
def from_json(json: dict):
return Users(
[User.from_json(obj) for obj in json['members']],
Users.ResponseMetadata.from_json(json['response_metadata'])
)
@dataclass
class RtmStart:
# https://api.slack.com/methods/rtm.start
@dataclass
class Self:
id: UserID
@dataclass
class Prefs:
muted_channels: List[str]
@staticmethod
def from_json(json: dict):
return RtmStart.Self.Prefs(json['muted_channels'])
prefs: Prefs
@staticmethod
def from_json(json: dict):
return RtmStart.Self(
json['id'],
RtmStart.Self.Prefs.from_json(json['prefs'])
)
@dataclass
class Team:
domain: str
@staticmethod
def from_json(json: dict):
return RtmStart.Team(json['domain'])
url: str
self_: Self
team: Team
users: List[User]
channels: List[Channel]
@staticmethod
def from_json(json: dict):
return RtmStart(
json['url'],
RtmStart.Self.from_json(json['self']),
RtmStart.Team.from_json(json['team']),
[User.from_json(user) for user in json['users']],
[Channel.from_json(channel) for channel in json['channels']],
)
@dataclass
class RtmConnect:
# https://api.slack.com/methods/rtm.connect
url: str
@staticmethod
def from_json(json: dict):
return RtmConnect(
json['url'],
)
| 22.387879
| 88
| 0.600433
|
from typing import List, Optional
from dataclasses import dataclass
UserID = str
BotID = str
ChannelID = str
@dataclass
class Channel:
id: ChannelID
name: str
is_archived: bool
is_member: bool
@staticmethod
def from_json(json: dict):
return Channel(json['id'], json['name'], json['is_archived'], json['is_member'])
@dataclass
class User:
@dataclass
class Profile:
image_72: Optional[str]
image_192: Optional[str]
def get_image(self) -> Optional[str]:
if self.image_192 is not None:
return self.image_192
if self.image_72 is not None:
return self.image_72
return None
@staticmethod
def from_json(json: dict):
return User.Profile(json['image_72'], json['image_192'])
id: UserID
name: str
profile: Profile
@staticmethod
def from_json(json: dict):
return User(json['id'], json['name'], User.Profile.from_json(json['profile']))
@dataclass
class Conversations:
@dataclass
class ResponseMetadata:
next_cursor: Optional[str]
@staticmethod
def from_json(json: dict):
return Conversations.ResponseMetadata(json.get('next_cursor'))
channels: List[Channel]
response_metadata: ResponseMetadata
@staticmethod
def from_json(json: dict):
return Conversations(
[Channel.from_json(obj) for obj in json['channels']],
Conversations.ResponseMetadata.from_json(json['response_metadata'])
)
@dataclass
class Users:
@dataclass
class ResponseMetadata:
next_cursor: Optional[str]
@staticmethod
def from_json(json: dict):
return Users.ResponseMetadata(json.get('next_cursor'))
members: List[User]
response_metadata: ResponseMetadata
@staticmethod
def from_json(json: dict):
return Users(
[User.from_json(obj) for obj in json['members']],
Users.ResponseMetadata.from_json(json['response_metadata'])
)
@dataclass
class RtmStart:
@dataclass
class Self:
id: UserID
@dataclass
class Prefs:
muted_channels: List[str]
@staticmethod
def from_json(json: dict):
return RtmStart.Self.Prefs(json['muted_channels'])
prefs: Prefs
@staticmethod
def from_json(json: dict):
return RtmStart.Self(
json['id'],
RtmStart.Self.Prefs.from_json(json['prefs'])
)
@dataclass
class Team:
domain: str
@staticmethod
def from_json(json: dict):
return RtmStart.Team(json['domain'])
url: str
self_: Self
team: Team
users: List[User]
channels: List[Channel]
@staticmethod
def from_json(json: dict):
return RtmStart(
json['url'],
RtmStart.Self.from_json(json['self']),
RtmStart.Team.from_json(json['team']),
[User.from_json(user) for user in json['users']],
[Channel.from_json(channel) for channel in json['channels']],
)
@dataclass
class RtmConnect:
url: str
@staticmethod
def from_json(json: dict):
return RtmConnect(
json['url'],
)
| true
| true
|
f716116e261e01c85b7274d3654d5b780989190b
| 2,740
|
py
|
Python
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/phys/Phys_Studio_LongRange.py
|
PascalGuenther/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 82
|
2016-06-29T17:24:43.000Z
|
2021-04-16T06:49:17.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/phys/Phys_Studio_LongRange.py
|
PascalGuenther/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 6
|
2022-01-12T18:22:08.000Z
|
2022-03-25T10:19:27.000Z
|
platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/phys/Phys_Studio_LongRange.py
|
PascalGuenther/gecko_sdk
|
2e82050dc8823c9fe0e8908c1b2666fb83056230
|
[
"Zlib"
] | 56
|
2016-08-02T10:50:50.000Z
|
2021-07-19T08:57:34.000Z
|
from pyradioconfig.parts.ocelot.phys.Phys_Studio_LongRange import PHYS_OQPSK_LoRa_Ocelot
from pyradioconfig.calculator_model_framework.decorators.phy_decorators import do_not_inherit_phys
@do_not_inherit_phys
class PHYS_Studio_LongRange_Sol(PHYS_OQPSK_LoRa_Ocelot):
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-81
def PHY_Longrange_915M_OQPSK_DSSS8_80p0kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_80p0kbps(model, phy_name=phy_name)
return phy
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-80
def PHY_Longrange_915M_OQPSK_DSSS8_38p4kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_38p4kbps(model, phy_name=phy_name)
return phy
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-79
def PHY_Longrange_915M_OQPSK_DSSS8_19p2kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_19p2kbps(model, phy_name=phy_name)
return phy
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-78
def PHY_Longrange_915M_OQPSK_DSSS8_9p6kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_9p6kbps(model, phy_name=phy_name)
return phy
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-77
def PHY_Longrange_915M_OQPSK_DSSS8_4p8kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_4p8kbps(model, phy_name=phy_name)
return phy
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-76
def PHY_Longrange_490M_OQPSK_DSSS8_19p2kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_490M_OQPSK_DSSS8_19p2kbps(model, phy_name=phy_name)
return phy
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-75
def PHY_Longrange_490M_OQPSK_DSSS8_9p6kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_490M_OQPSK_DSSS8_9p6kbps(model, phy_name=phy_name)
return phy
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-74
def PHY_Longrange_490M_OQPSK_DSSS8_4p8kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_490M_OQPSK_DSSS8_4p8kbps(model, phy_name=phy_name)
return phy
# Owner: Casey Weltzin
# Jira Link: https://jira.silabs.com/browse/PGSOLVALTEST-73
def PHY_Longrange_490M_OQPSK_DSSS8_2p4kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_490M_OQPSK_DSSS8_2p4kbps(model, phy_name=phy_name)
return phy
| 46.440678
| 98
| 0.75219
|
from pyradioconfig.parts.ocelot.phys.Phys_Studio_LongRange import PHYS_OQPSK_LoRa_Ocelot
from pyradioconfig.calculator_model_framework.decorators.phy_decorators import do_not_inherit_phys
@do_not_inherit_phys
class PHYS_Studio_LongRange_Sol(PHYS_OQPSK_LoRa_Ocelot):
def PHY_Longrange_915M_OQPSK_DSSS8_80p0kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_80p0kbps(model, phy_name=phy_name)
return phy
def PHY_Longrange_915M_OQPSK_DSSS8_38p4kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_38p4kbps(model, phy_name=phy_name)
return phy
def PHY_Longrange_915M_OQPSK_DSSS8_19p2kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_19p2kbps(model, phy_name=phy_name)
return phy
def PHY_Longrange_915M_OQPSK_DSSS8_9p6kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_9p6kbps(model, phy_name=phy_name)
return phy
def PHY_Longrange_915M_OQPSK_DSSS8_4p8kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_915M_OQPSK_DSSS8_4p8kbps(model, phy_name=phy_name)
return phy
def PHY_Longrange_490M_OQPSK_DSSS8_19p2kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_490M_OQPSK_DSSS8_19p2kbps(model, phy_name=phy_name)
return phy
def PHY_Longrange_490M_OQPSK_DSSS8_9p6kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_490M_OQPSK_DSSS8_9p6kbps(model, phy_name=phy_name)
return phy
def PHY_Longrange_490M_OQPSK_DSSS8_4p8kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_490M_OQPSK_DSSS8_4p8kbps(model, phy_name=phy_name)
return phy
def PHY_Longrange_490M_OQPSK_DSSS8_2p4kbps(self, model, phy_name=None):
phy = super().PHY_Longrange_490M_OQPSK_DSSS8_2p4kbps(model, phy_name=phy_name)
return phy
| true
| true
|
f716119887849d0bffc5971384860939823a8114
| 4,839
|
py
|
Python
|
tensorflow_datasets/core/dataset_utils.py
|
Global19-atlassian-net/datasets
|
db298928fe0e45907fcd61443d2319665a933afc
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/core/dataset_utils.py
|
Global19-atlassian-net/datasets
|
db298928fe0e45907fcd61443d2319665a933afc
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/core/dataset_utils.py
|
Global19-atlassian-net/datasets
|
db298928fe0e45907fcd61443d2319665a933afc
|
[
"Apache-2.0"
] | 1
|
2020-08-03T20:19:12.000Z
|
2020-08-03T20:19:12.000Z
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with tf.data.Dataset."""
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import tf_compat
from tensorflow_datasets.core import utils
def _eager_dataset_iterator(dataset):
for item in dataset:
flat = tf.nest.flatten(item)
flat = [t if isinstance(t, tf.RaggedTensor) else t.numpy() for t in flat]
yield tf.nest.pack_sequence_as(item, flat)
def _graph_dataset_iterator(ds_iter, graph=None):
"""Constructs a Python generator from a tf.data.Iterator."""
with utils.maybe_with_graph(graph, create_if_none=False):
init = ds_iter.initializer
ds_item = ds_iter.get_next()
with utils.nogpu_session(graph) as sess:
sess.run(init)
while True:
try:
yield sess.run(ds_item)
except tf.errors.OutOfRangeError:
break
def as_numpy(dataset, *, graph=None):
"""Converts a `tf.data.Dataset` to an iterable of NumPy arrays.
`as_numpy` converts a possibly nested structure of `tf.data.Dataset`s
and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively.
Note that because TensorFlow has support for ragged tensors and NumPy has
no equivalent representation,
[`tf.RaggedTensor`s](https://www.tensorflow.org/api_docs/python/tf/RaggedTensor)
are left as-is for the user to deal with them (e.g. using `to_list()`).
In TF 1 (i.e. graph mode), `tf.RaggedTensor`s are returned as
`tf.ragged.RaggedTensorValue`s.
Example:
```
ds = tfds.load(name="mnist", split="train")
ds_numpy = tfds.as_numpy(ds) # Convert `tf.data.Dataset` to Python generator
for ex in ds_numpy:
# `{'image': np.array(shape=(28, 28, 1)), 'labels': np.array(shape=())}`
print(ex)
```
Args:
dataset: a possibly nested structure of `tf.data.Dataset`s and/or
`tf.Tensor`s.
graph: `tf.Graph`, optional, explicitly set the graph to use.
Returns:
A structure matching `dataset` where `tf.data.Dataset`s are converted to
generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
"""
nested_ds = dataset
del dataset
# Flatten
flat_ds = tf.nest.flatten(nested_ds)
flat_np = []
# Type check for Tensors and Datasets
for ds_el in flat_ds:
types = [type(el) for el in flat_ds]
types = tf.nest.pack_sequence_as(nested_ds, types)
if not (
isinstance(ds_el, (tf.Tensor, tf.RaggedTensor)) or
tf_compat.is_dataset(ds_el)):
raise ValueError("Arguments to as_numpy must be tf.Tensors or "
"tf.data.Datasets. Got: %s" % types)
if tf.executing_eagerly():
# Eager mode
for ds_el in flat_ds:
if isinstance(ds_el, tf.Tensor):
np_el = ds_el.numpy()
elif isinstance(ds_el, tf.RaggedTensor):
np_el = ds_el
elif tf_compat.is_dataset(ds_el):
np_el = _eager_dataset_iterator(ds_el)
else:
assert False
flat_np.append(np_el)
else:
# Graph mode
# First create iterators for datasets
with utils.maybe_with_graph(graph, create_if_none=False):
ds_iters = [
tf.compat.v1.data.make_initializable_iterator(ds_el)
for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
]
ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
# Then create numpy arrays for tensors
with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor
# Calling sess.run once so that randomness is shared.
np_arrays = sess.run([tensor for tensor in flat_ds
if not tf_compat.is_dataset(tensor)])
# Merge the dataset iterators and np arrays
iter_ds = iter(ds_iters)
iter_array = iter(np_arrays)
flat_np = [
next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
for ds_el in flat_ds
]
# Nest
return tf.nest.pack_sequence_as(nested_ds, flat_np)
def dataset_shape_is_fully_defined(ds):
output_shapes = tf.compat.v1.data.get_output_shapes(ds)
return all([ts.is_fully_defined() for ts in tf.nest.flatten(output_shapes)])
def features_shape_is_fully_defined(features):
return all([tf.TensorShape(info.shape).is_fully_defined() for info in
tf.nest.flatten(features.get_tensor_info())])
| 34.077465
| 82
| 0.701178
|
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import tf_compat
from tensorflow_datasets.core import utils
def _eager_dataset_iterator(dataset):
for item in dataset:
flat = tf.nest.flatten(item)
flat = [t if isinstance(t, tf.RaggedTensor) else t.numpy() for t in flat]
yield tf.nest.pack_sequence_as(item, flat)
def _graph_dataset_iterator(ds_iter, graph=None):
with utils.maybe_with_graph(graph, create_if_none=False):
init = ds_iter.initializer
ds_item = ds_iter.get_next()
with utils.nogpu_session(graph) as sess:
sess.run(init)
while True:
try:
yield sess.run(ds_item)
except tf.errors.OutOfRangeError:
break
def as_numpy(dataset, *, graph=None):
nested_ds = dataset
del dataset
flat_ds = tf.nest.flatten(nested_ds)
flat_np = []
for ds_el in flat_ds:
types = [type(el) for el in flat_ds]
types = tf.nest.pack_sequence_as(nested_ds, types)
if not (
isinstance(ds_el, (tf.Tensor, tf.RaggedTensor)) or
tf_compat.is_dataset(ds_el)):
raise ValueError("Arguments to as_numpy must be tf.Tensors or "
"tf.data.Datasets. Got: %s" % types)
if tf.executing_eagerly():
for ds_el in flat_ds:
if isinstance(ds_el, tf.Tensor):
np_el = ds_el.numpy()
elif isinstance(ds_el, tf.RaggedTensor):
np_el = ds_el
elif tf_compat.is_dataset(ds_el):
np_el = _eager_dataset_iterator(ds_el)
else:
assert False
flat_np.append(np_el)
else:
with utils.maybe_with_graph(graph, create_if_none=False):
ds_iters = [
tf.compat.v1.data.make_initializable_iterator(ds_el)
for ds_el in flat_ds if tf_compat.is_dataset(ds_el)
]
ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters]
with utils.nogpu_session(graph) as sess:
np_arrays = sess.run([tensor for tensor in flat_ds
if not tf_compat.is_dataset(tensor)])
iter_ds = iter(ds_iters)
iter_array = iter(np_arrays)
flat_np = [
next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array)
for ds_el in flat_ds
]
return tf.nest.pack_sequence_as(nested_ds, flat_np)
def dataset_shape_is_fully_defined(ds):
output_shapes = tf.compat.v1.data.get_output_shapes(ds)
return all([ts.is_fully_defined() for ts in tf.nest.flatten(output_shapes)])
def features_shape_is_fully_defined(features):
return all([tf.TensorShape(info.shape).is_fully_defined() for info in
tf.nest.flatten(features.get_tensor_info())])
| true
| true
|
f71611a25dd8760de2e03dd6de23a77dc59b5b29
| 7,560
|
py
|
Python
|
datary/datasets/test/test_datasets.py
|
Datary/python-sdk
|
2790a50e1ad262cbe3210665dc34f497625e923d
|
[
"MIT"
] | null | null | null |
datary/datasets/test/test_datasets.py
|
Datary/python-sdk
|
2790a50e1ad262cbe3210665dc34f497625e923d
|
[
"MIT"
] | null | null | null |
datary/datasets/test/test_datasets.py
|
Datary/python-sdk
|
2790a50e1ad262cbe3210665dc34f497625e923d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Datary python sdk Datasets test file
"""
import mock
from datary.test.test_datary import DataryTestCase
from datary.test.mock_requests import MockRequestResponse
class DataryDatasetsTestCase(DataryTestCase):
"""
DataryDatasets Test case
"""
@mock.patch('datary.requests.requests.requests.get')
def test_get_kern(self, mock_request):
"""
Test Datary datasets get_kern
"""
mock_request.return_value = MockRequestResponse(
"", json=self.element.get('data', {}).get('kern'))
kern = self.datary.get_kern(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(kern, dict))
self.assertEqual(kern, self.element.get('data', {}).get('kern'))
mock_request.return_value = MockRequestResponse("", status_code=500)
kern2 = self.datary.get_kern(self.dataset_uuid, self.repo_uuid)
self.assertTrue(isinstance(kern2, dict))
self.assertEqual(kern2, {})
@mock.patch('datary.requests.requests.requests.get')
def test_get_metadata(self, mock_request):
"""
Test Datary datasets get_metadata
"""
mock_request.return_value = MockRequestResponse(
"", json=self.element.get('data', {}).get('meta'))
metadata = self.datary.get_metadata(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(metadata, dict))
self.assertEqual(metadata, self.element.get('data', {}).get('meta'))
mock_request.return_value = MockRequestResponse("", status_code=500)
metadata2 = self.datary.get_metadata(self.dataset_uuid, self.repo_uuid)
self.assertTrue(isinstance(metadata2, dict))
self.assertEqual(metadata2, {})
@mock.patch('datary.requests.requests.requests.get')
def test_get_original(self, mock_request):
"""
Test Datary datasets get_original
"""
mock_request.return_value = MockRequestResponse("", json=self.original)
original = self.datary.get_original(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original, dict))
self.assertEqual(original, self.original)
mock_request.reset_mock()
# not dataset_uuid, introduced
original2 = self.datary.get_original(
self.dataset_uuid, self.repo_uuid, self.wdir_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original2, dict))
self.assertEqual(original2, self.original)
mock_request.reset_mock()
# not dataset_uuid, introduced
original3 = self.datary.get_original(
self.dataset_uuid, wdir_uuid=self.wdir_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original3, dict))
self.assertEqual(original3, self.original)
mock_request.reset_mock()
mock_request.side_effect = iter([
MockRequestResponse("", status_code=500),
MockRequestResponse("", json=self.original)
])
original4 = self.datary.get_original(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 2)
self.assertTrue(isinstance(original4, dict))
self.assertEqual(original4, self.original)
mock_request.reset_mock()
mock_request.side_effect = iter([
MockRequestResponse("", status_code=500),
MockRequestResponse("", status_code=500)
])
original4b = self.datary.get_original(
self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 2)
self.assertTrue(isinstance(original4b, dict))
self.assertEqual(original4b, {})
mock_request.reset_mock()
# not dataset_uuid, introduced
original5 = self.datary.get_original(
MockRequestResponse("", status_code=500))
self.assertEqual(mock_request.call_count, 0)
self.assertTrue(isinstance(original5, dict))
self.assertEqual(original5, {})
mock_request.reset_mock()
# scope
mock_request.side_effect = iter(
[MockRequestResponse("", json=self.original),
MockRequestResponse("", json=self.original)])
original6 = self.datary.get_original(
self.dataset_uuid, self.repo_uuid, scope='repo')
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original6, dict))
self.assertEqual(original6, self.original)
@mock.patch('datary.workdirs.DataryWorkdirs.get_wdir_filetree')
@mock.patch('datary.workdirs.DataryWorkdirs.get_wdir_changes')
def test_get_dataset_uuid(self, mock_get_wdir_changes,
mock_get_wdir_filetree):
"""
        Test Datary datasets get_dataset_uuid
"""
mock_get_wdir_filetree.return_value = self.workdir
mock_get_wdir_changes.return_value = self.changes
path = 'b'
basename = 'bb'
empty_result = self.datary.get_dataset_uuid(self.wdir_uuid)
self.assertEqual(empty_result, None)
from_changes_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(from_changes_result, 'inode1_changes')
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
mock_get_wdir_filetree.reset_mock()
mock_get_wdir_changes.reset_mock()
        # retrieve from workdir
path = ''
basename = 'c'
from_commit_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(from_commit_result, 'c_sha1')
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
mock_get_wdir_filetree.reset_mock()
mock_get_wdir_changes.reset_mock()
# NOT exists
path = 'bb'
basename = 'b'
no_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(no_result, None)
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
@mock.patch('datary.requests.requests.requests.get')
def test_get_commited_dataset_uuid(self, mock_request):
"""
Test Datary get_commited_dataset_uuid
"""
# no args path and basename introduced
mock_request.return_value = MockRequestResponse(
"", json=self.dataset_uuid)
result_no_pathname = self.datary.get_commited_dataset_uuid(
self.wdir_uuid)
self.assertEqual(result_no_pathname, {})
self.assertEqual(mock_request.call_count, 0)
# good case
result = self.datary.get_commited_dataset_uuid(
self.wdir_uuid, 'path', 'basename')
self.assertEqual(result, self.dataset_uuid)
self.assertEqual(mock_request.call_count, 1)
# datary request return None
mock_request.reset_mock()
mock_request.return_value = MockRequestResponse("", status_code=500)
no_response_result = self.datary.get_commited_dataset_uuid(
self.wdir_uuid, 'path', 'basename')
self.assertEqual(no_response_result, {})
self.assertEqual(mock_request.call_count, 1)
| 37.98995
| 79
| 0.668915
|
import mock
from datary.test.test_datary import DataryTestCase
from datary.test.mock_requests import MockRequestResponse
class DataryDatasetsTestCase(DataryTestCase):
@mock.patch('datary.requests.requests.requests.get')
def test_get_kern(self, mock_request):
mock_request.return_value = MockRequestResponse(
"", json=self.element.get('data', {}).get('kern'))
kern = self.datary.get_kern(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(kern, dict))
self.assertEqual(kern, self.element.get('data', {}).get('kern'))
mock_request.return_value = MockRequestResponse("", status_code=500)
kern2 = self.datary.get_kern(self.dataset_uuid, self.repo_uuid)
self.assertTrue(isinstance(kern2, dict))
self.assertEqual(kern2, {})
@mock.patch('datary.requests.requests.requests.get')
def test_get_metadata(self, mock_request):
mock_request.return_value = MockRequestResponse(
"", json=self.element.get('data', {}).get('meta'))
metadata = self.datary.get_metadata(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(metadata, dict))
self.assertEqual(metadata, self.element.get('data', {}).get('meta'))
mock_request.return_value = MockRequestResponse("", status_code=500)
metadata2 = self.datary.get_metadata(self.dataset_uuid, self.repo_uuid)
self.assertTrue(isinstance(metadata2, dict))
self.assertEqual(metadata2, {})
@mock.patch('datary.requests.requests.requests.get')
def test_get_original(self, mock_request):
mock_request.return_value = MockRequestResponse("", json=self.original)
original = self.datary.get_original(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original, dict))
self.assertEqual(original, self.original)
mock_request.reset_mock()
original2 = self.datary.get_original(
self.dataset_uuid, self.repo_uuid, self.wdir_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original2, dict))
self.assertEqual(original2, self.original)
mock_request.reset_mock()
original3 = self.datary.get_original(
self.dataset_uuid, wdir_uuid=self.wdir_uuid)
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original3, dict))
self.assertEqual(original3, self.original)
mock_request.reset_mock()
mock_request.side_effect = iter([
MockRequestResponse("", status_code=500),
MockRequestResponse("", json=self.original)
])
original4 = self.datary.get_original(self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 2)
self.assertTrue(isinstance(original4, dict))
self.assertEqual(original4, self.original)
mock_request.reset_mock()
mock_request.side_effect = iter([
MockRequestResponse("", status_code=500),
MockRequestResponse("", status_code=500)
])
original4b = self.datary.get_original(
self.dataset_uuid, self.repo_uuid)
self.assertEqual(mock_request.call_count, 2)
self.assertTrue(isinstance(original4b, dict))
self.assertEqual(original4b, {})
mock_request.reset_mock()
original5 = self.datary.get_original(
MockRequestResponse("", status_code=500))
self.assertEqual(mock_request.call_count, 0)
self.assertTrue(isinstance(original5, dict))
self.assertEqual(original5, {})
mock_request.reset_mock()
mock_request.side_effect = iter(
[MockRequestResponse("", json=self.original),
MockRequestResponse("", json=self.original)])
original6 = self.datary.get_original(
self.dataset_uuid, self.repo_uuid, scope='repo')
self.assertEqual(mock_request.call_count, 1)
self.assertTrue(isinstance(original6, dict))
self.assertEqual(original6, self.original)
@mock.patch('datary.workdirs.DataryWorkdirs.get_wdir_filetree')
@mock.patch('datary.workdirs.DataryWorkdirs.get_wdir_changes')
def test_get_dataset_uuid(self, mock_get_wdir_changes,
mock_get_wdir_filetree):
mock_get_wdir_filetree.return_value = self.workdir
mock_get_wdir_changes.return_value = self.changes
path = 'b'
basename = 'bb'
empty_result = self.datary.get_dataset_uuid(self.wdir_uuid)
self.assertEqual(empty_result, None)
from_changes_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(from_changes_result, 'inode1_changes')
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
mock_get_wdir_filetree.reset_mock()
mock_get_wdir_changes.reset_mock()
path = ''
basename = 'c'
from_commit_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(from_commit_result, 'c_sha1')
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
mock_get_wdir_filetree.reset_mock()
mock_get_wdir_changes.reset_mock()
path = 'bb'
basename = 'b'
no_result = self.datary.get_dataset_uuid(
self.wdir_uuid, path, basename)
self.assertEqual(no_result, None)
self.assertEqual(mock_get_wdir_filetree.call_count, 1)
self.assertEqual(mock_get_wdir_changes.call_count, 1)
@mock.patch('datary.requests.requests.requests.get')
def test_get_commited_dataset_uuid(self, mock_request):
mock_request.return_value = MockRequestResponse(
"", json=self.dataset_uuid)
result_no_pathname = self.datary.get_commited_dataset_uuid(
self.wdir_uuid)
self.assertEqual(result_no_pathname, {})
self.assertEqual(mock_request.call_count, 0)
result = self.datary.get_commited_dataset_uuid(
self.wdir_uuid, 'path', 'basename')
self.assertEqual(result, self.dataset_uuid)
self.assertEqual(mock_request.call_count, 1)
mock_request.reset_mock()
mock_request.return_value = MockRequestResponse("", status_code=500)
no_response_result = self.datary.get_commited_dataset_uuid(
self.wdir_uuid, 'path', 'basename')
self.assertEqual(no_response_result, {})
self.assertEqual(mock_request.call_count, 1)
| true
| true
|
f71612467e9b5dc259949c2813d2f39841a075f0
| 78
|
py
|
Python
|
src/main.py
|
jadmz/pygame-box2d-template
|
cd5ef75940b1c919aade5acb11924cbfba8e7c60
|
[
"MIT"
] | null | null | null |
src/main.py
|
jadmz/pygame-box2d-template
|
cd5ef75940b1c919aade5acb11924cbfba8e7c60
|
[
"MIT"
] | null | null | null |
src/main.py
|
jadmz/pygame-box2d-template
|
cd5ef75940b1c919aade5acb11924cbfba8e7c60
|
[
"MIT"
] | 1
|
2020-03-22T18:20:54.000Z
|
2020-03-22T18:20:54.000Z
|
from game import Game
game = Game("Pygame with Box2d Template")
game.run()
| 11.142857
| 41
| 0.717949
|
from game import Game
game = Game("Pygame with Box2d Template")
game.run()
| true
| true
|
f716125d67e85c57e3e02321d8def2b0570ba241
| 1,953
|
py
|
Python
|
TwitchApiPy/TwitchApiPy.py
|
xegepa/Twitch-Api-Py
|
84613dd32654315422481d24bb9afc1ab3967d3d
|
[
"MIT"
] | 2
|
2020-08-16T12:54:23.000Z
|
2021-02-11T20:43:42.000Z
|
TwitchApiPy/TwitchApiPy.py
|
xegepa/Twitch-Api-Py
|
84613dd32654315422481d24bb9afc1ab3967d3d
|
[
"MIT"
] | null | null | null |
TwitchApiPy/TwitchApiPy.py
|
xegepa/Twitch-Api-Py
|
84613dd32654315422481d24bb9afc1ab3967d3d
|
[
"MIT"
] | null | null | null |
import requests
class TwitchApiPy():
def __init__(self):
self.ClientID = ""
self.OAuth = ""
"""
You don't really use this its for other requests
"""
def GetUserID(self,name):
r = requests.get(url = "https://api.twitch.tv/helix/users?login={}".format(name), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
id = r["data"][0]['id']
return id
"""
This part will get you number of followers of asked channel
"""
def GetFollowerCount(self,name):
id = self.GetUserID(name)
r = requests.get(url="https://api.twitch.tv/helix/users/follows?to_id={}".format(id), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
return r['total']
"""
This part will say that if the streamer is online or not and the language the streamer streams
"""
def GetChannelStatus(self, name):
r = requests.get(url="https://api.twitch.tv/helix/search/channels?query={}".format(name), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
is_live=r["data"][0]['is_live']
lang =r["data"][0]['broadcaster_language']
total_info = {
"islive": is_live,
"language": lang,
}
return total_info
"""
This part will get you general info about channel
"""
def GetChannelInfo(self,name):
id = self.GetUserID(name)
r = requests.get(url="https://api.twitch.tv/helix/channels?broadcaster_id={}".format(id), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
name= r["data"][0]["broadcaster_name"]
game = r["data"][0]["game_name"]
title = r["data"][0]["title"]
total_info = {
"name" : name,
"game" : game,
"title" : title
}
return total_info
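# Minimal usage sketch (a guess at typical usage; the credential values are
# placeholders, not real Twitch credentials):
if __name__ == "__main__":
    api = TwitchApiPy()
    api.ClientID = "your-client-id"          # assumption: Client-ID of a registered Twitch app
    api.OAuth = "Bearer your-oauth-token"    # assumption: OAuth token in the header format used above
    print(api.GetFollowerCount("somechannel"))
    print(api.GetChannelStatus("somechannel"))
    print(api.GetChannelInfo("somechannel"))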
| 34.875
| 165
| 0.573989
|
import requests
class TwitchApiPy():
def __init__(self):
self.ClientID = ""
self.OAuth = ""
def GetUserID(self,name):
r = requests.get(url = "https://api.twitch.tv/helix/users?login={}".format(name), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
id = r["data"][0]['id']
return id
def GetFollowerCount(self,name):
id = self.GetUserID(name)
r = requests.get(url="https://api.twitch.tv/helix/users/follows?to_id={}".format(id), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
return r['total']
def GetChannelStatus(self, name):
r = requests.get(url="https://api.twitch.tv/helix/search/channels?query={}".format(name), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
is_live=r["data"][0]['is_live']
lang =r["data"][0]['broadcaster_language']
total_info = {
"islive": is_live,
"language": lang,
}
return total_info
def GetChannelInfo(self,name):
id = self.GetUserID(name)
r = requests.get(url="https://api.twitch.tv/helix/channels?broadcaster_id={}".format(id), headers = {'Client-ID': self.ClientID,'Authorization': self.OAuth})
r = r.json()
name= r["data"][0]["broadcaster_name"]
game = r["data"][0]["game_name"]
title = r["data"][0]["title"]
total_info = {
"name" : name,
"game" : game,
"title" : title
}
return total_info
| true
| true
|
f71612ddef304fe8e27a1500d0a1c4bde6565bb6
| 35,689
|
py
|
Python
|
fhirclient/models/medicationrequest.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/models/medicationrequest.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
fhirclient/models/medicationrequest.py
|
mdx-dev/client-py
|
f6c16c9bd386c5b05d69753b89c6519d568814ac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MedicationRequest) on 2019-01-22.
# 2019, SMART Health IT.
from . import domainresource
class MedicationRequest(domainresource.DomainResource):
"""
O
r
d
e
r
i
n
g
o
f
m
e
d
i
c
a
t
i
o
n
f
o
r
p
a
t
i
e
n
t
o
r
g
r
o
u
p
.
A
n
o
r
d
e
r
o
r
r
e
q
u
e
s
t
f
o
r
b
o
t
h
s
u
p
p
l
y
o
f
t
h
e
m
e
d
i
c
a
t
i
o
n
a
n
d
t
h
e
i
n
s
t
r
u
c
t
i
o
n
s
f
o
r
a
d
m
i
n
i
s
t
r
a
t
i
o
n
o
f
t
h
e
m
e
d
i
c
a
t
i
o
n
t
o
a
p
a
t
i
e
n
t
.
T
h
e
r
e
s
o
u
r
c
e
i
s
c
a
l
l
e
d
"
M
e
d
i
c
a
t
i
o
n
R
e
q
u
e
s
t
"
r
a
t
h
e
r
t
h
a
n
"
M
e
d
i
c
a
t
i
o
n
P
r
e
s
c
r
i
p
t
i
o
n
"
o
r
"
M
e
d
i
c
a
t
i
o
n
O
r
d
e
r
"
t
o
g
e
n
e
r
a
l
i
z
e
t
h
e
u
s
e
a
c
r
o
s
s
i
n
p
a
t
i
e
n
t
a
n
d
o
u
t
p
a
t
i
e
n
t
s
e
t
t
i
n
g
s
,
i
n
c
l
u
d
i
n
g
c
a
r
e
p
l
a
n
s
,
e
t
c
.
,
a
n
d
t
o
h
a
r
m
o
n
i
z
e
w
i
t
h
w
o
r
k
f
l
o
w
p
a
t
t
e
r
n
s
.
"""
resource_type = "MedicationRequest"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.authoredOn = None
""" When request was initially authored.
Type `FHIRDate` (represented as `str` in JSON). """
self.basedOn = None
""" What request fulfills.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.category = None
""" Type of medication usage.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.courseOfTherapyType = None
""" Overall pattern of medication administration.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.detectedIssue = None
""" Clinical Issue with action.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.dispenseRequest = None
""" Medication supply authorization.
Type `MedicationRequestDispenseRequest` (represented as `dict` in JSON). """
self.doNotPerform = None
""" True if request is prohibiting action.
Type `bool`. """
self.dosageInstruction = None
""" How the medication should be taken.
List of `Dosage` items (represented as `dict` in JSON). """
self.encounter = None
""" Encounter created as part of encounter/admission/stay.
Type `FHIRReference` (represented as `dict` in JSON). """
self.eventHistory = None
""" A list of events of interest in the lifecycle.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.groupIdentifier = None
""" Composite request this is part of.
Type `Identifier` (represented as `dict` in JSON). """
self.identifier = None
""" External ids for this request.
List of `Identifier` items (represented as `dict` in JSON). """
self.instantiatesCanonical = None
""" Instantiates FHIR protocol or definition.
List of `str` items. """
self.instantiatesUri = None
""" Instantiates external protocol or definition.
List of `str` items. """
self.insurance = None
""" Associated insurance coverage.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.intent = None
""" proposal | plan | order | original-order | instance-order | option.
Type `str`. """
self.medicationCodeableConcept = None
""" Medication to be taken.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.medicationReference = None
""" Medication to be taken.
Type `FHIRReference` (represented as `dict` in JSON). """
self.note = None
""" Information about the prescription.
List of `Annotation` items (represented as `dict` in JSON). """
self.performer = None
""" Intended performer of administration.
Type `FHIRReference` (represented as `dict` in JSON). """
self.performerType = None
""" Desired kind of performer of the medication administration.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.priorPrescription = None
""" An order/prescription that is being replaced.
Type `FHIRReference` (represented as `dict` in JSON). """
self.priority = None
""" routine | urgent | asap | stat.
Type `str`. """
self.reasonCode = None
""" Reason or indication for ordering or not ordering the medication.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.reasonReference = None
""" Condition or observation that supports why the prescription is being written.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.recorder = None
""" Person who entered the request.
Type `FHIRReference` (represented as `dict` in JSON). """
self.reportedBoolean = None
""" Reported rather than primary record.
Type `bool`. """
self.reportedReference = None
""" Reported rather than primary record.
Type `FHIRReference` (represented as `dict` in JSON). """
self.requester = None
""" Who/What requested the Request.
Type `FHIRReference` (represented as `dict` in JSON). """
self.status = None
""" active | on-hold | cancelled | completed | entered-in-error | stopped | draft | unknown.
Type `str`. """
self.statusReason = None
""" Reason for current status.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.subject = None
""" Who or group medication request is for.
Type `FHIRReference` (represented as `dict` in JSON). """
self.substitution = None
""" Any restrictions on medication substitution.
Type `MedicationRequestSubstitution` (represented as `dict` in JSON). """
self.supportingInformation = None
""" Information to support ordering of the medication.
List of `FHIRReference` items (represented as `dict` in JSON). """
super(MedicationRequest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequest, self).elementProperties()
js.extend([
("authoredOn", "authoredOn", fhirdate.FHIRDate, False, None, False),
("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
("category", "category", codeableconcept.CodeableConcept, True, None, False),
("courseOfTherapyType", "courseOfTherapyType", codeableconcept.CodeableConcept, False, None, False),
("detectedIssue", "detectedIssue", fhirreference.FHIRReference, True, None, False),
("dispenseRequest", "dispenseRequest", MedicationRequestDispenseRequest, False, None, False),
("doNotPerform", "doNotPerform", bool, False, None, False),
("dosageInstruction", "dosageInstruction", dosage.Dosage, True, None, False),
("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
("eventHistory", "eventHistory", fhirreference.FHIRReference, True, None, False),
("groupIdentifier", "groupIdentifier", identifier.Identifier, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("instantiatesCanonical", "instantiatesCanonical", str, True, None, False),
("instantiatesUri", "instantiatesUri", str, True, None, False),
("insurance", "insurance", fhirreference.FHIRReference, True, None, False),
("intent", "intent", str, False, None, True),
("medicationCodeableConcept", "medicationCodeableConcept", codeableconcept.CodeableConcept, False, "medication", True),
("medicationReference", "medicationReference", fhirreference.FHIRReference, False, "medication", True),
("note", "note", annotation.Annotation, True, None, False),
("performer", "performer", fhirreference.FHIRReference, False, None, False),
("performerType", "performerType", codeableconcept.CodeableConcept, False, None, False),
("priorPrescription", "priorPrescription", fhirreference.FHIRReference, False, None, False),
("priority", "priority", str, False, None, False),
("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
("recorder", "recorder", fhirreference.FHIRReference, False, None, False),
("reportedBoolean", "reportedBoolean", bool, False, "reported", False),
("reportedReference", "reportedReference", fhirreference.FHIRReference, False, "reported", False),
("requester", "requester", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, True),
("statusReason", "statusReason", codeableconcept.CodeableConcept, False, None, False),
("subject", "subject", fhirreference.FHIRReference, False, None, True),
("substitution", "substitution", MedicationRequestSubstitution, False, None, False),
("supportingInformation", "supportingInformation", fhirreference.FHIRReference, True, None, False),
])
return js
from . import backboneelement
class MedicationRequestDispenseRequest(backboneelement.BackboneElement):
"""
M
e
d
i
c
a
t
i
o
n
s
u
p
p
l
y
a
u
t
h
o
r
i
z
a
t
i
o
n
.
I
n
d
i
c
a
t
e
s
t
h
e
s
p
e
c
i
f
i
c
d
e
t
a
i
l
s
f
o
r
t
h
e
d
i
s
p
e
n
s
e
o
r
m
e
d
i
c
a
t
i
o
n
s
u
p
p
l
y
p
a
r
t
o
f
a
m
e
d
i
c
a
t
i
o
n
r
e
q
u
e
s
t
(
a
l
s
o
k
n
o
w
n
a
s
a
M
e
d
i
c
a
t
i
o
n
P
r
e
s
c
r
i
p
t
i
o
n
o
r
M
e
d
i
c
a
t
i
o
n
O
r
d
e
r
)
.
N
o
t
e
t
h
a
t
t
h
i
s
i
n
f
o
r
m
a
t
i
o
n
i
s
n
o
t
a
l
w
a
y
s
s
e
n
t
w
i
t
h
t
h
e
o
r
d
e
r
.
T
h
e
r
e
m
a
y
b
e
i
n
s
o
m
e
s
e
t
t
i
n
g
s
(
e
.
g
.
h
o
s
p
i
t
a
l
s
)
i
n
s
t
i
t
u
t
i
o
n
a
l
o
r
s
y
s
t
e
m
s
u
p
p
o
r
t
f
o
r
c
o
m
p
l
e
t
i
n
g
t
h
e
d
i
s
p
e
n
s
e
d
e
t
a
i
l
s
i
n
t
h
e
p
h
a
r
m
a
c
y
d
e
p
a
r
t
m
e
n
t
.
"""
resource_type = "MedicationRequestDispenseRequest"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dispenseInterval = None
""" Minimum period of time between dispenses.
Type `Duration` (represented as `dict` in JSON). """
self.expectedSupplyDuration = None
""" Number of days supply per dispense.
Type `Duration` (represented as `dict` in JSON). """
self.initialFill = None
""" First fill details.
Type `MedicationRequestDispenseRequestInitialFill` (represented as `dict` in JSON). """
self.numberOfRepeatsAllowed = None
""" Number of refills authorized.
Type `int`. """
self.performer = None
""" Intended dispenser.
Type `FHIRReference` (represented as `dict` in JSON). """
self.quantity = None
""" Amount of medication to supply per dispense.
Type `Quantity` (represented as `dict` in JSON). """
self.validityPeriod = None
""" Time period supply is authorized for.
Type `Period` (represented as `dict` in JSON). """
super(MedicationRequestDispenseRequest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestDispenseRequest, self).elementProperties()
js.extend([
("dispenseInterval", "dispenseInterval", duration.Duration, False, None, False),
("expectedSupplyDuration", "expectedSupplyDuration", duration.Duration, False, None, False),
("initialFill", "initialFill", MedicationRequestDispenseRequestInitialFill, False, None, False),
("numberOfRepeatsAllowed", "numberOfRepeatsAllowed", int, False, None, False),
("performer", "performer", fhirreference.FHIRReference, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
("validityPeriod", "validityPeriod", period.Period, False, None, False),
])
return js
class MedicationRequestDispenseRequestInitialFill(backboneelement.BackboneElement):
"""
F
i
r
s
t
f
i
l
l
d
e
t
a
i
l
s
.
I
n
d
i
c
a
t
e
s
t
h
e
q
u
a
n
t
i
t
y
o
r
d
u
r
a
t
i
o
n
f
o
r
t
h
e
f
i
r
s
t
d
i
s
p
e
n
s
e
o
f
t
h
e
m
e
d
i
c
a
t
i
o
n
.
"""
resource_type = "MedicationRequestDispenseRequestInitialFill"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.duration = None
""" First fill duration.
Type `Duration` (represented as `dict` in JSON). """
self.quantity = None
""" First fill quantity.
Type `Quantity` (represented as `dict` in JSON). """
super(MedicationRequestDispenseRequestInitialFill, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestDispenseRequestInitialFill, self).elementProperties()
js.extend([
("duration", "duration", duration.Duration, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
])
return js
class MedicationRequestSubstitution(backboneelement.BackboneElement):
"""
A
n
y
r
e
s
t
r
i
c
t
i
o
n
s
o
n
m
e
d
i
c
a
t
i
o
n
s
u
b
s
t
i
t
u
t
i
o
n
.
I
n
d
i
c
a
t
e
s
w
h
e
t
h
e
r
o
r
n
o
t
s
u
b
s
t
i
t
u
t
i
o
n
c
a
n
o
r
s
h
o
u
l
d
b
e
p
a
r
t
o
f
t
h
e
d
i
s
p
e
n
s
e
.
I
n
s
o
m
e
c
a
s
e
s
,
s
u
b
s
t
i
t
u
t
i
o
n
m
u
s
t
h
a
p
p
e
n
,
i
n
o
t
h
e
r
c
a
s
e
s
s
u
b
s
t
i
t
u
t
i
o
n
m
u
s
t
n
o
t
h
a
p
p
e
n
.
T
h
i
s
b
l
o
c
k
e
x
p
l
a
i
n
s
t
h
e
p
r
e
s
c
r
i
b
e
r
'
s
i
n
t
e
n
t
.
I
f
n
o
t
h
i
n
g
i
s
s
p
e
c
i
f
i
e
d
s
u
b
s
t
i
t
u
t
i
o
n
m
a
y
b
e
d
o
n
e
.
"""
resource_type = "MedicationRequestSubstitution"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.allowedBoolean = None
""" Whether substitution is allowed or not.
Type `bool`. """
self.allowedCodeableConcept = None
""" Whether substitution is allowed or not.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.reason = None
""" Why should (not) substitution be made.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationRequestSubstitution, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestSubstitution, self).elementProperties()
js.extend([
("allowedBoolean", "allowedBoolean", bool, False, "allowed", True),
("allowedCodeableConcept", "allowedCodeableConcept", codeableconcept.CodeableConcept, False, "allowed", True),
("reason", "reason", codeableconcept.CodeableConcept, False, None, False),
])
return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
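# --- Editor's note: a minimal sketch, not part of the generated file, showing
# how the MedicationRequest model above can be built from a FHIR JSON dict.
# All field values below are illustrative; per the class docstring, with
# strict=True the constructor raises FHIRValidationError if required elements
# (status, intent, medication[x], subject) are missing or malformed.
if __name__ == "__main__":
    example = {
        "resourceType": "MedicationRequest",
        "status": "active",
        "intent": "order",
        "medicationCodeableConcept": {"text": "Example medication"},
        "subject": {"reference": "Patient/example"},
    }
    req = MedicationRequest(jsondict=example, strict=True)
    print(req.status, req.intent)  # -> active order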
| 12.053023
| 131
| 0.304548
|
from . import domainresource
class MedicationRequest(domainresource.DomainResource):
resource_type = "MedicationRequest"
def __init__(self, jsondict=None, strict=True):
self.authoredOn = None
self.basedOn = None
self.category = None
self.courseOfTherapyType = None
self.detectedIssue = None
self.dispenseRequest = None
self.doNotPerform = None
self.dosageInstruction = None
self.encounter = None
self.eventHistory = None
self.groupIdentifier = None
self.identifier = None
self.instantiatesCanonical = None
self.instantiatesUri = None
self.insurance = None
self.intent = None
self.medicationCodeableConcept = None
self.medicationReference = None
self.note = None
self.performer = None
self.performerType = None
self.priorPrescription = None
self.priority = None
self.reasonCode = None
self.reasonReference = None
self.recorder = None
self.reportedBoolean = None
self.reportedReference = None
self.requester = None
self.status = None
self.statusReason = None
self.subject = None
self.substitution = None
self.supportingInformation = None
super(MedicationRequest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequest, self).elementProperties()
js.extend([
("authoredOn", "authoredOn", fhirdate.FHIRDate, False, None, False),
("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
("category", "category", codeableconcept.CodeableConcept, True, None, False),
("courseOfTherapyType", "courseOfTherapyType", codeableconcept.CodeableConcept, False, None, False),
("detectedIssue", "detectedIssue", fhirreference.FHIRReference, True, None, False),
("dispenseRequest", "dispenseRequest", MedicationRequestDispenseRequest, False, None, False),
("doNotPerform", "doNotPerform", bool, False, None, False),
("dosageInstruction", "dosageInstruction", dosage.Dosage, True, None, False),
("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
("eventHistory", "eventHistory", fhirreference.FHIRReference, True, None, False),
("groupIdentifier", "groupIdentifier", identifier.Identifier, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("instantiatesCanonical", "instantiatesCanonical", str, True, None, False),
("instantiatesUri", "instantiatesUri", str, True, None, False),
("insurance", "insurance", fhirreference.FHIRReference, True, None, False),
("intent", "intent", str, False, None, True),
("medicationCodeableConcept", "medicationCodeableConcept", codeableconcept.CodeableConcept, False, "medication", True),
("medicationReference", "medicationReference", fhirreference.FHIRReference, False, "medication", True),
("note", "note", annotation.Annotation, True, None, False),
("performer", "performer", fhirreference.FHIRReference, False, None, False),
("performerType", "performerType", codeableconcept.CodeableConcept, False, None, False),
("priorPrescription", "priorPrescription", fhirreference.FHIRReference, False, None, False),
("priority", "priority", str, False, None, False),
("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
("recorder", "recorder", fhirreference.FHIRReference, False, None, False),
("reportedBoolean", "reportedBoolean", bool, False, "reported", False),
("reportedReference", "reportedReference", fhirreference.FHIRReference, False, "reported", False),
("requester", "requester", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, True),
("statusReason", "statusReason", codeableconcept.CodeableConcept, False, None, False),
("subject", "subject", fhirreference.FHIRReference, False, None, True),
("substitution", "substitution", MedicationRequestSubstitution, False, None, False),
("supportingInformation", "supportingInformation", fhirreference.FHIRReference, True, None, False),
])
return js
from . import backboneelement
class MedicationRequestDispenseRequest(backboneelement.BackboneElement):
resource_type = "MedicationRequestDispenseRequest"
def __init__(self, jsondict=None, strict=True):
self.dispenseInterval = None
self.expectedSupplyDuration = None
self.initialFill = None
self.numberOfRepeatsAllowed = None
self.performer = None
self.quantity = None
self.validityPeriod = None
super(MedicationRequestDispenseRequest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestDispenseRequest, self).elementProperties()
js.extend([
("dispenseInterval", "dispenseInterval", duration.Duration, False, None, False),
("expectedSupplyDuration", "expectedSupplyDuration", duration.Duration, False, None, False),
("initialFill", "initialFill", MedicationRequestDispenseRequestInitialFill, False, None, False),
("numberOfRepeatsAllowed", "numberOfRepeatsAllowed", int, False, None, False),
("performer", "performer", fhirreference.FHIRReference, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
("validityPeriod", "validityPeriod", period.Period, False, None, False),
])
return js
class MedicationRequestDispenseRequestInitialFill(backboneelement.BackboneElement):
resource_type = "MedicationRequestDispenseRequestInitialFill"
def __init__(self, jsondict=None, strict=True):
self.duration = None
self.quantity = None
super(MedicationRequestDispenseRequestInitialFill, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestDispenseRequestInitialFill, self).elementProperties()
js.extend([
("duration", "duration", duration.Duration, False, None, False),
("quantity", "quantity", quantity.Quantity, False, None, False),
])
return js
class MedicationRequestSubstitution(backboneelement.BackboneElement):
resource_type = "MedicationRequestSubstitution"
def __init__(self, jsondict=None, strict=True):
self.allowedBoolean = None
self.allowedCodeableConcept = None
self.reason = None
super(MedicationRequestSubstitution, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationRequestSubstitution, self).elementProperties()
js.extend([
("allowedBoolean", "allowedBoolean", bool, False, "allowed", True),
("allowedCodeableConcept", "allowedCodeableConcept", codeableconcept.CodeableConcept, False, "allowed", True),
("reason", "reason", codeableconcept.CodeableConcept, False, None, False),
])
return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
| true
| true
|
f716130a5e4aa592b5742e419a3914560c7330fc
| 1,320
|
py
|
Python
|
homeassistant/components/synology_dsm/const.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 1,635
|
2015-01-01T14:59:18.000Z
|
2016-04-13T02:36:16.000Z
|
homeassistant/components/synology_dsm/const.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 1,463
|
2015-01-06T06:18:07.000Z
|
2016-04-12T22:30:37.000Z
|
homeassistant/components/synology_dsm/const.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 659
|
2015-01-05T14:02:23.000Z
|
2016-04-12T23:39:31.000Z
|
"""Constants for Synology DSM."""
from __future__ import annotations
from synology_dsm.api.surveillance_station.const import SNAPSHOT_PROFILE_BALANCED
from homeassistant.const import Platform
DOMAIN = "synology_dsm"
ATTRIBUTION = "Data provided by Synology"
PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.BUTTON,
Platform.CAMERA,
Platform.SENSOR,
Platform.SWITCH,
Platform.UPDATE,
]
COORDINATOR_CAMERAS = "coordinator_cameras"
COORDINATOR_CENTRAL = "coordinator_central"
COORDINATOR_SWITCHES = "coordinator_switches"
SYSTEM_LOADED = "system_loaded"
EXCEPTION_DETAILS = "details"
EXCEPTION_UNKNOWN = "unknown"
# Entry keys
SYNO_API = "syno_api"
UNDO_UPDATE_LISTENER = "undo_update_listener"
# Configuration
CONF_SERIAL = "serial"
CONF_VOLUMES = "volumes"
CONF_DEVICE_TOKEN = "device_token"
CONF_SNAPSHOT_QUALITY = "snap_profile_type"
DEFAULT_USE_SSL = True
DEFAULT_VERIFY_SSL = False
DEFAULT_PORT = 5000
DEFAULT_PORT_SSL = 5001
# Options
DEFAULT_SCAN_INTERVAL = 15 # min
DEFAULT_TIMEOUT = 10 # sec
DEFAULT_SNAPSHOT_QUALITY = SNAPSHOT_PROFILE_BALANCED
ENTITY_UNIT_LOAD = "load"
# Signals
SIGNAL_CAMERA_SOURCE_CHANGED = "synology_dsm.camera_stream_source_changed"
# Services
SERVICE_REBOOT = "reboot"
SERVICE_SHUTDOWN = "shutdown"
SERVICES = [
SERVICE_REBOOT,
SERVICE_SHUTDOWN,
]
| 23.571429
| 81
| 0.79697
|
from __future__ import annotations
from synology_dsm.api.surveillance_station.const import SNAPSHOT_PROFILE_BALANCED
from homeassistant.const import Platform
DOMAIN = "synology_dsm"
ATTRIBUTION = "Data provided by Synology"
PLATFORMS = [
Platform.BINARY_SENSOR,
Platform.BUTTON,
Platform.CAMERA,
Platform.SENSOR,
Platform.SWITCH,
Platform.UPDATE,
]
COORDINATOR_CAMERAS = "coordinator_cameras"
COORDINATOR_CENTRAL = "coordinator_central"
COORDINATOR_SWITCHES = "coordinator_switches"
SYSTEM_LOADED = "system_loaded"
EXCEPTION_DETAILS = "details"
EXCEPTION_UNKNOWN = "unknown"
SYNO_API = "syno_api"
UNDO_UPDATE_LISTENER = "undo_update_listener"
CONF_SERIAL = "serial"
CONF_VOLUMES = "volumes"
CONF_DEVICE_TOKEN = "device_token"
CONF_SNAPSHOT_QUALITY = "snap_profile_type"
DEFAULT_USE_SSL = True
DEFAULT_VERIFY_SSL = False
DEFAULT_PORT = 5000
DEFAULT_PORT_SSL = 5001
DEFAULT_SCAN_INTERVAL = 15
DEFAULT_TIMEOUT = 10
DEFAULT_SNAPSHOT_QUALITY = SNAPSHOT_PROFILE_BALANCED
ENTITY_UNIT_LOAD = "load"
SIGNAL_CAMERA_SOURCE_CHANGED = "synology_dsm.camera_stream_source_changed"
SERVICE_REBOOT = "reboot"
SERVICE_SHUTDOWN = "shutdown"
SERVICES = [
SERVICE_REBOOT,
SERVICE_SHUTDOWN,
]
| true
| true
|
f716137a258773159a3f46fb247a0224787d63af
| 85
|
py
|
Python
|
2020/09/30/Django Pagination Tutorial/library/library/books/apps.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 492
|
2019-06-25T12:54:31.000Z
|
2022-03-30T12:38:28.000Z
|
2020/09/30/Django Pagination Tutorial/library/library/books/apps.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 122
|
2018-10-06T21:31:24.000Z
|
2020-11-09T15:04:56.000Z
|
2020/09/30/Django Pagination Tutorial/library/library/books/apps.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 1,734
|
2019-06-03T06:25:13.000Z
|
2022-03-31T23:57:53.000Z
|
from django.apps import AppConfig
class BooksConfig(AppConfig):
name = 'books'
| 14.166667
| 33
| 0.741176
|
from django.apps import AppConfig
class BooksConfig(AppConfig):
name = 'books'
| true
| true
|
f71613f7207decd88d8c0d1641da8ad9b079d689
| 2,085
|
py
|
Python
|
code/generateTimeline.py
|
sahilmgandhi/sahilmgandhi.github.io
|
e2d6aba9d90f53a4ebfbbd36b6b1d301dce039d3
|
[
"CC-BY-3.0"
] | null | null | null |
code/generateTimeline.py
|
sahilmgandhi/sahilmgandhi.github.io
|
e2d6aba9d90f53a4ebfbbd36b6b1d301dce039d3
|
[
"CC-BY-3.0"
] | null | null | null |
code/generateTimeline.py
|
sahilmgandhi/sahilmgandhi.github.io
|
e2d6aba9d90f53a4ebfbbd36b6b1d301dce039d3
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/python
import random, sys, string, csv, argparse, subprocess
parser=argparse.ArgumentParser(
description='''This script generates the HTML code for the timeline boxes''',
epilog="""Have fun!""")
parser.add_argument('-i', default='movies.csv', dest='inputFile', help='Name of the csv file. Default is movies.csv')
parser.add_argument('-o', default='reviews.txt', dest='outputFile', help='Name of the output file. Default is reviews.txt')
args=parser.parse_args()
outputFile = open(args.outputFile, 'w')
currRating = 9
counter = 0
htmlFile = 'movieReviews.html'
htmlEndingLine = 112
htmlDesiredLine = 74
if args.outputFile != 'reviews.txt':
htmlFile
with open(args.inputFile, 'r') as movies:
movieEntries = csv.reader(movies)
outputFile.write("<div id=\"9\">")
outputFile.write("<div id=\"ratingsBanner\"><h2>%d.00/10 - %d/10</h2></div>" % (currRating, (currRating + 1)))
for row in movieEntries:
if int(float(row[0])) < currRating:
currRating = int(float(row[0]))
outputFile.write("</div>")
outputFile.write("<div id=\"%d\">" % (currRating))
outputFile.write("<div id=\"ratingsBanner\"><h2>%d.00/10 - %d.99/10</h2></div>" % (currRating, (currRating)))
if counter % 2 == 0:
outputFile.write("<div class=\"container left\">")
else:
outputFile.write("<div class=\"container right\">")
outputFile.write("<div class=\"timelineContent\">")
if row[1] == 'None':
outputFile.write("<p>No movies that are ranked in the %d's yet</p>" % (currRating))
else:
outputFile.write("<h2>%.2f</h2>" % (float(row[0])))
outputFile.write("<p>%s</p>" % (str(row[1])))
outputFile.write("</div></div>")
counter += 1
outputFile.write("</div>")
subprocess.call('sed -i \'/.*<div id="9">.*/d\' ../movieReviews.html', shell=True)
subprocess.call('cat %s >> ../movieReviews.html' % args.outputFile, shell=True)
subprocess.call('printf \'112m74\nw\n\' | ed ../movieReviews.html', shell=True)
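# --- Editor's note: an illustrative helper, not part of the original script,
# that writes a CSV in the layout the loop above expects: column 0 is a
# numeric rating and column 1 is the movie title, ordered from highest to
# lowest rating (the script walks the rating bands downward from 9). The file
# name and rows are invented for demonstration.
import csv

def write_sample_movies(path='movies.csv'):
    rows = [
        (9.25, 'Example Movie A'),
        (8.70, 'Example Movie B'),
        (7.10, 'Example Movie C'),
    ]
    with open(path, 'w', newline='') as fh:
        writer = csv.writer(fh)
        for rating, title in rows:
            writer.writerow([rating, title])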
| 40.882353
| 123
| 0.617746
|
import random, sys, string, csv, argparse, subprocess
parser=argparse.ArgumentParser(
description='''This script generates the HTML code for the timeline boxes''',
epilog="""Have fun!""")
parser.add_argument('-i', default='movies.csv', dest='inputFile', help='Name of the csv file. Default is movies.csv')
parser.add_argument('-o', default='reviews.txt', dest='outputFile', help='Name of the output file. Default is reviews.txt')
args=parser.parse_args()
outputFile = open(args.outputFile, 'w')
currRating = 9
counter = 0
htmlFile = 'movieReviews.html'
htmlEndingLine = 112
htmlDesiredLine = 74
if args.outputFile != 'reviews.txt':
htmlFile
with open(args.inputFile, 'r') as movies:
movieEntries = csv.reader(movies)
outputFile.write("<div id=\"9\">")
outputFile.write("<div id=\"ratingsBanner\"><h2>%d.00/10 - %d/10</h2></div>" % (currRating, (currRating + 1)))
for row in movieEntries:
if int(float(row[0])) < currRating:
currRating = int(float(row[0]))
outputFile.write("</div>")
outputFile.write("<div id=\"%d\">" % (currRating))
outputFile.write("<div id=\"ratingsBanner\"><h2>%d.00/10 - %d.99/10</h2></div>" % (currRating, (currRating)))
if counter % 2 == 0:
outputFile.write("<div class=\"container left\">")
else:
outputFile.write("<div class=\"container right\">")
outputFile.write("<div class=\"timelineContent\">")
if row[1] == 'None':
outputFile.write("<p>No movies that are ranked in the %d's yet</p>" % (currRating))
else:
outputFile.write("<h2>%.2f</h2>" % (float(row[0])))
outputFile.write("<p>%s</p>" % (str(row[1])))
outputFile.write("</div></div>")
counter += 1
outputFile.write("</div>")
subprocess.call('sed -i \'/.*<div id="9">.*/d\' ../movieReviews.html', shell=True)
subprocess.call('cat %s >> ../movieReviews.html' % args.outputFile, shell=True)
subprocess.call('printf \'112m74\nw\n\' | ed ../movieReviews.html', shell=True)
| true
| true
|
f71614d3da0d9da31c0fa08bb2b57c555c07181a
| 4,274
|
py
|
Python
|
deprecated/pycqed/instrument_drivers/physical_instruments/_controlbox/xiangs_timing_tape_code.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 60
|
2016-08-03T10:00:18.000Z
|
2021-11-10T11:46:16.000Z
|
deprecated/pycqed/instrument_drivers/physical_instruments/_controlbox/xiangs_timing_tape_code.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 512
|
2016-08-03T17:10:02.000Z
|
2022-03-31T14:03:43.000Z
|
deprecated/pycqed/instrument_drivers/physical_instruments/_controlbox/xiangs_timing_tape_code.py
|
nuttamas/PycQED_py3
|
1ee35c7428d36ed42ba4afb5d4bda98140b2283e
|
[
"MIT"
] | 34
|
2016-10-19T12:00:52.000Z
|
2022-03-19T04:43:26.000Z
|
def set_conditional_tape(self, awg_nr, tape_nr, tape):
'''
set the conditional tape content for an awg
@param awg : the awg of the dac, (0,1,2).
@param tape_nr : the number of the tape, integer ranging (0~6)
@param tape : the array of entries, with a maximum number of entries 512.
Every entry is an integer that has the following structure:
|WaitingTime (9 bits) | Pulse number (3 bits) | EndofSegment marker (1 bit)|
WaitingTime: The waiting time before the end of last pulse or trigger, in ns.
Pulse number: 0~7, indicating which pulse to be output
EndofSegment marker: 1 if the entry is the last entry of the tape, otherwise 0.
@return stat : 0 if the upload succeeded and 1 if the upload failed.
'''
length = len(tape)
tape_addr_width = 9
entry_length = 9 + 3 + 1
# Check out of bounds
if awg_nr < 0 or awg_nr > 2:
raise ValueError
if tape_nr < 0 or tape_nr > 6:
raise ValueError
if length < 1 or length > 512:
raise ValueError
cmd = defHeaders.AwgCondionalTape
data_bytes = []
data_bytes.append(self.encode_byte(awg_nr, 4))
data_bytes.append(self.encode_byte(tape_nr, 4))
data_bytes.append(self.encode_byte(length-1, 7,
signed_integer_length=tape_addr_width,
expected_number_of_bytes=np.ceil(tape_addr_width/7.0)))
for sample_data in tape:
data_bytes.append(self.encode_byte(self.convert_to_signed(sample_data, entry_length), 7,
signed_integer_length=entry_length,
expected_number_of_bytes=np.ceil(entry_length/7.0)))
message = self.create_message(cmd, data_bytes)
(stat, mesg) = self.serial_write(message)
return (stat, mesg)
def set_segmented_tape(self, awg_nr, tape):
'''
set the conditional tape content for an awg
@param awg : the awg of the dac, (0,1,2).
@param tape : the array of entries, with a maximum number of entries 29184.
Every entry is an integer that has the following structure:
|WaitingTime (9 bits) | Pulse number (3 bits) | EndofSegment marker (1 bit)|
WaitingTime: The waiting time before the end of last pulse or trigger, in ns.
Pulse number: 0~7, indicating which pulse to be output
EndofSegment marker: 1 if the entry is the last entry of a segment, otherwise 0.
@return stat : 0 if the upload succeeded and 1 if the upload failed.
'''
length = len(tape)
tape_addr_width = 15
entry_length = 9 + 3 + 1
# Check out of bounds
if awg_nr < 0 or awg_nr > 2:
raise ValueError
if length < 1 or length > 29184:
raise ValueError
cmd = defHeaders.AwgSegmentedTape
data_bytes = []
data_bytes.append(self.encode_byte(awg_nr, 4))
data_bytes.append(self.encode_byte(length-1, 7,
signed_integer_length=tape_addr_width,
expected_number_of_bytes=np.ceil(tape_addr_width / 7.0)))
for sample_data in tape:
data_bytes.append(self.encode_byte(self.convert_to_signed(sample_data, entry_length), 7,
signed_integer_length=entry_length,
expected_number_of_bytes=np.ceil(entry_length / 7.0)))
message = self.create_message(cmd, data_bytes)
(stat, mesg) = self.serial_write(message)
return (stat, mesg)
def create_entry(self, interval, pulse_num, end_of_marker):
'''
@param interval : The waiting time before the end of last pulse or trigger in ns,
ranging from 0ns to 2560ns with minimum step of 5ns.
@param pulse_num : 0~7, indicating which pulse to be output
@param end_of_marker : 1 if the entry is the last entry of a segment, otherwise 0.
'''
if interval < 0 or interval > 2560:
raise ValueError
if pulse_num < 0 or pulse_num > 7:
raise ValueError
if end_of_marker < 0 or end_of_marker > 1:
raise ValueError
entry_bits = BitArray(Bits(uint=interval, length=9))
entry_bits.append(BitArray(Bits(uint=pulse_num, length=3)))
entry_bits.append(BitArray(Bits(uint=end_of_marker, length=1)))
# print "The entry generated is: ",
# print entry_bits.uint
return entry_bits.uint
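# --- Editor's note: a standalone sketch, not part of the original driver,
# making the tape-entry bit layout from the docstrings above concrete:
# | waiting time (9 bits) | pulse number (3 bits) | end-of-segment (1 bit) |
# so the packed value equals (interval << 4) | (pulse_num << 1) | marker.
def pack_tape_entry(interval, pulse_num, end_of_marker):
    # Same packing create_entry() performs with BitArray, written as shifts.
    assert 0 <= interval < 2 ** 9       # must fit in 9 bits
    assert 0 <= pulse_num < 2 ** 3      # must fit in 3 bits
    assert end_of_marker in (0, 1)
    return (interval << 4) | (pulse_num << 1) | end_of_marker

# Example: wait 100, play pulse 3, mark end of segment -> 100*16 + 3*2 + 1 = 1607
assert pack_tape_entry(100, 3, 1) == 1607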
| 39.943925
| 96
| 0.662611
|
def set_conditional_tape(self, awg_nr, tape_nr, tape):
length = len(tape)
tape_addr_width = 9
entry_length = 9 + 3 + 1
if awg_nr < 0 or awg_nr > 2:
raise ValueError
if tape_nr < 0 or tape_nr > 6:
raise ValueError
if length < 1 or length > 512:
raise ValueError
cmd = defHeaders.AwgCondionalTape
data_bytes = []
data_bytes.append(self.encode_byte(awg_nr, 4))
data_bytes.append(self.encode_byte(tape_nr, 4))
data_bytes.append(self.encode_byte(length-1, 7,
signed_integer_length=tape_addr_width,
expected_number_of_bytes=np.ceil(tape_addr_width/7.0)))
for sample_data in tape:
data_bytes.append(self.encode_byte(self.convert_to_signed(sample_data, entry_length), 7,
signed_integer_length=entry_length,
expected_number_of_bytes=np.ceil(entry_length/7.0)))
message = self.create_message(cmd, data_bytes)
(stat, mesg) = self.serial_write(message)
return (stat, mesg)
def set_segmented_tape(self, awg_nr, tape):
length = len(tape)
tape_addr_width = 15
entry_length = 9 + 3 + 1
if awg_nr < 0 or awg_nr > 2:
raise ValueError
if length < 1 or length > 29184:
raise ValueError
cmd = defHeaders.AwgSegmentedTape
data_bytes = []
data_bytes.append(self.encode_byte(awg_nr, 4))
data_bytes.append(self.encode_byte(length-1, 7,
signed_integer_length=tape_addr_width,
expected_number_of_bytes=np.ceil(tape_addr_width / 7.0)))
for sample_data in tape:
data_bytes.append(self.encode_byte(self.convert_to_signed(sample_data, entry_length), 7,
signed_integer_length=entry_length,
expected_number_of_bytes=np.ceil(entry_length / 7.0)))
message = self.create_message(cmd, data_bytes)
(stat, mesg) = self.serial_write(message)
return (stat, mesg)
def create_entry(self, interval, pulse_num, end_of_marker):
if interval < 0 or interval > 2560:
raise ValueError
if pulse_num < 0 or pulse_num > 7:
raise ValueError
if end_of_marker < 0 or end_of_marker > 1:
raise ValueError
entry_bits = BitArray(Bits(uint=interval, length=9))
entry_bits.append(BitArray(Bits(uint=pulse_num, length=3)))
entry_bits.append(BitArray(Bits(uint=end_of_marker, length=1)))
return entry_bits.uint
| true
| true
|
f716155583711d06a3bf11dab07383b6f8697428
| 1,801
|
py
|
Python
|
securityheaders/checkers/cors/exposeheaders/test_exposesensitiveheaders.py
|
th3cyb3rc0p/securityheaders
|
941264be581dc01afe28f6416f2d7bed79aecfb3
|
[
"Apache-2.0"
] | 151
|
2018-07-29T22:34:43.000Z
|
2022-03-22T05:08:27.000Z
|
securityheaders/checkers/cors/exposeheaders/test_exposesensitiveheaders.py
|
th3cyb3rc0p/securityheaders
|
941264be581dc01afe28f6416f2d7bed79aecfb3
|
[
"Apache-2.0"
] | 5
|
2019-04-24T07:31:36.000Z
|
2021-04-15T14:31:23.000Z
|
securityheaders/checkers/cors/exposeheaders/test_exposesensitiveheaders.py
|
th3cyb3rc0p/securityheaders
|
941264be581dc01afe28f6416f2d7bed79aecfb3
|
[
"Apache-2.0"
] | 42
|
2018-07-31T08:18:59.000Z
|
2022-03-28T08:18:32.000Z
|
import unittest
from securityheaders.checkers.cors import AccessControlExposeHeadersSensitiveChecker
class AccessControlExposeHeadersSensitiveCheckerTest(unittest.TestCase):
def setUp(self):
self.x = AccessControlExposeHeadersSensitiveChecker()
def test_checkNoHeader(self):
nox = dict()
nox['test'] = 'value'
self.assertEqual(self.x.check(nox), [])
def test_checkNone(self):
nonex = None
self.assertEqual(self.x.check(nonex), [])
def test_checkNone2(self):
hasx = dict()
hasx['access-control-expose-headers'] = None
self.assertEqual(self.x.check(hasx), [])
def test_checkInvalid(self):
hasx2 = dict()
hasx2['access-control-expose-headers'] = "Authentication-Token"
result = self.x.check(hasx2)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid2(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Authorization"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid3(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Session"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid4(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Session, Authentication-Token, PUT"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 2)
def test_checkValid2(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "PUT"
self.assertEqual(self.x.check(hasx5), [])
if __name__ == '__main__':
unittest.main()
| 31.051724
| 84
| 0.655747
|
import unittest
from securityheaders.checkers.cors import AccessControlExposeHeadersSensitiveChecker
class AccessControlExposeHeadersSensitiveCheckerTest(unittest.TestCase):
def setUp(self):
self.x = AccessControlExposeHeadersSensitiveChecker()
def test_checkNoHeader(self):
nox = dict()
nox['test'] = 'value'
self.assertEqual(self.x.check(nox), [])
def test_checkNone(self):
nonex = None
self.assertEqual(self.x.check(nonex), [])
def test_checkNone2(self):
hasx = dict()
hasx['access-control-expose-headers'] = None
self.assertEqual(self.x.check(hasx), [])
def test_checkInvalid(self):
hasx2 = dict()
hasx2['access-control-expose-headers'] = "Authentication-Token"
result = self.x.check(hasx2)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid2(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Authorization"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid3(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Session"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 1)
def test_checkInvalid4(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "Session, Authentication-Token, PUT"
result = self.x.check(hasx5)
self.assertIsNotNone(result)
self.assertEqual(len(result), 2)
def test_checkValid2(self):
hasx5 = dict()
hasx5['access-control-expose-headers'] = "PUT"
self.assertEqual(self.x.check(hasx5), [])
if __name__ == '__main__':
unittest.main()
| true
| true
|
f71617895efc3dfd23246121c700461891099a24
| 6,196
|
py
|
Python
|
docs/conf.py
|
EVEprosper/ProsperDatareader
|
31f0d77074c21222161774f4d653326925611167
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
EVEprosper/ProsperDatareader
|
31f0d77074c21222161774f4d653326925611167
|
[
"MIT"
] | 14
|
2017-08-14T02:25:42.000Z
|
2018-11-16T19:15:52.000Z
|
docs/conf.py
|
EVEprosper/ProsperDatareader
|
31f0d77074c21222161774f4d653326925611167
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ProsperDatareader documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 31 09:30:33 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
## vv TODO vv: autodocs ##
import os
import sys
sys.path.insert(0, os.path.abspath('../prosper/datareader'))
sys.path.insert(0, os.path.abspath('../prosper'))
from _version import __version__
## ^^ TODO ^^ ##
import alabaster
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.napoleon',
'alabaster',
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ProsperDatareader'
copyright = '2017, John Purcell'
author = 'John Purcell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_static_path = ['_static']
templates_path = ['templates']
html_show_sourcelink = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'logo-colour-sm.png',
'description': 'Uniform Data Collection',
'description_font_style': 'italic',
'github_user': 'eveprosper',
'github_repo': 'prosperdatareader',
'github_banner': True,
}
html_favicon = "static/prosper.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'index': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html',
],
'**': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html'
]
}
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProsperDatareaderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ProsperDatareader.tex', 'ProsperDatareader Documentation',
'John Purcell', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'prosperdatareader', 'ProsperDatareader Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ProsperDatareader', 'ProsperDatareader Documentation',
author, 'ProsperDatareader', 'One line description of project.',
'Miscellaneous'),
]
| 29.788462
| 80
| 0.675274
|
import os
import sys
sys.path.insert(0, os.path.abspath('../prosper/datareader'))
sys.path.insert(0, os.path.abspath('../prosper'))
from _version import __version__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.napoleon',
'alabaster',
]
source_suffix = '.rst'
master_doc = 'index'
project = 'ProsperDatareader'
copyright = '2017, John Purcell'
author = 'John Purcell'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_static_path = ['_static']
templates_path = ['templates']
html_show_sourcelink = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'logo-colour-sm.png',
'description': 'Uniform Data Collection',
'description_font_style': 'italic',
'github_user': 'eveprosper',
'github_repo': 'prosperdatareader',
'github_banner': True,
}
html_favicon = "static/prosper.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'index': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html',
],
'**': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html'
]
}
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProsperDatareaderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ProsperDatareader.tex', 'ProsperDatareader Documentation',
'John Purcell', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'prosperdatareader', 'ProsperDatareader Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ProsperDatareader', 'ProsperDatareader Documentation',
author, 'ProsperDatareader', 'One line description of project.',
'Miscellaneous'),
]
| true
| true
|
f716179af5712ede1126fe27e7a30594aafd8164
| 5,204
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/logic/latest/get_integration_account_batch_configuration.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/logic/latest/get_integration_account_batch_configuration.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/logic/latest/get_integration_account_batch_configuration.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetIntegrationAccountBatchConfigurationResult',
'AwaitableGetIntegrationAccountBatchConfigurationResult',
'get_integration_account_batch_configuration',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:logic:getIntegrationAccountBatchConfiguration'.""", DeprecationWarning)
@pulumi.output_type
class GetIntegrationAccountBatchConfigurationResult:
"""
The batch configuration resource definition.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Gets the resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.BatchConfigurationPropertiesResponse':
"""
The batch configuration properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Gets the resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetIntegrationAccountBatchConfigurationResult(GetIntegrationAccountBatchConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIntegrationAccountBatchConfigurationResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_integration_account_batch_configuration(batch_configuration_name: Optional[str] = None,
integration_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationAccountBatchConfigurationResult:
"""
The batch configuration resource definition.
Latest API Version: 2019-05-01.
:param str batch_configuration_name: The batch configuration name.
:param str integration_account_name: The integration account name.
:param str resource_group_name: The resource group name.
"""
pulumi.log.warn("get_integration_account_batch_configuration is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:logic:getIntegrationAccountBatchConfiguration'.")
__args__ = dict()
__args__['batchConfigurationName'] = batch_configuration_name
__args__['integrationAccountName'] = integration_account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:logic/latest:getIntegrationAccountBatchConfiguration', __args__, opts=opts, typ=GetIntegrationAccountBatchConfigurationResult).value
return AwaitableGetIntegrationAccountBatchConfigurationResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
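# Editor's note: the block below is an illustrative usage sketch, not part of the
# generated SDK module above. The resource names are hypothetical placeholders; only
# the signature of get_integration_account_batch_configuration() is taken from the
# code above.
def _example_lookup_batch_configuration():
    result = get_integration_account_batch_configuration(
        batch_configuration_name="example-batch-config",
        integration_account_name="example-integration-account",
        resource_group_name="example-rg")
    # result is an AwaitableGetIntegrationAccountBatchConfigurationResult; its
    # .properties field holds the BatchConfigurationPropertiesResponse output.
    return result.properties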
| 37.438849
| 236
| 0.662183
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetIntegrationAccountBatchConfigurationResult',
'AwaitableGetIntegrationAccountBatchConfigurationResult',
'get_integration_account_batch_configuration',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:logic:getIntegrationAccountBatchConfiguration'.""", DeprecationWarning)
@pulumi.output_type
class GetIntegrationAccountBatchConfigurationResult:
def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.BatchConfigurationPropertiesResponse':
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetIntegrationAccountBatchConfigurationResult(GetIntegrationAccountBatchConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIntegrationAccountBatchConfigurationResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_integration_account_batch_configuration(batch_configuration_name: Optional[str] = None,
integration_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationAccountBatchConfigurationResult:
pulumi.log.warn("get_integration_account_batch_configuration is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:logic:getIntegrationAccountBatchConfiguration'.")
__args__ = dict()
__args__['batchConfigurationName'] = batch_configuration_name
__args__['integrationAccountName'] = integration_account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:logic/latest:getIntegrationAccountBatchConfiguration', __args__, opts=opts, typ=GetIntegrationAccountBatchConfigurationResult).value
return AwaitableGetIntegrationAccountBatchConfigurationResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| true
| true
|
f7161889a0f2637bcacb6385931f3df8ce3d1eb6
| 2,639
|
py
|
Python
|
PYTHON/skyscrapper.py
|
iamsuryakant/100-days-of-code
|
eaf4863d98dc273f03a989fe87d010d201d91516
|
[
"MIT"
] | 1
|
2020-07-04T12:45:50.000Z
|
2020-07-04T12:45:50.000Z
|
PYTHON/skyscrapper.py
|
iamsuryakant/100-days-of-code
|
eaf4863d98dc273f03a989fe87d010d201d91516
|
[
"MIT"
] | 1
|
2020-08-08T02:23:46.000Z
|
2020-08-08T02:47:56.000Z
|
PYTHON/skyscrapper.py
|
iamsuryakant/100-days-of-code
|
eaf4863d98dc273f03a989fe87d010d201d91516
|
[
"MIT"
] | null | null | null |
class Solution:
def getSkyline(self, buildings: 'List[List[int]]') -> 'List[List[int]]':
"""
        Divide-and-conquer algorithm to solve the skyline problem,
        which is similar to the merge sort algorithm.
"""
n = len(buildings)
# The base cases
if n == 0:
return []
if n == 1:
x_start, x_end, y = buildings[0]
return [[x_start, y], [x_end, 0]]
# If there is more than one building,
# recursively divide the input into two subproblems.
left_skyline = self.getSkyline(buildings[: n // 2])
right_skyline = self.getSkyline(buildings[n // 2:])
# Merge the results of subproblem together.
return self.merge_skylines(left_skyline, right_skyline)
def merge_skylines(self, left, right):
"""
Merge two skylines together.
"""
def update_output(x, y):
"""
Update the final output with the new element.
"""
# if skyline change is not vertical -
# add the new point
if not output or output[-1][0] != x:
output.append([x, y])
# if skyline change is vertical -
# update the last point
else:
output[-1][1] = y
def append_skyline(p, lst, n, y, curr_y):
"""
            Append the rest of the skyline elements, from index p up to n,
to the final output.
"""
while p < n:
x, y = lst[p]
p += 1
if curr_y != y:
update_output(x, y)
curr_y = y
n_l, n_r = len(left), len(right)
p_l = p_r = 0
curr_y = left_y = right_y = 0
output = []
# while we're in the region where both skylines are present
while p_l < n_l and p_r < n_r:
point_l, point_r = left[p_l], right[p_r]
# pick up the smallest x
if point_l[0] < point_r[0]:
x, left_y = point_l
p_l += 1
else:
x, right_y = point_r
p_r += 1
# max height (i.e. y) between both skylines
max_y = max(left_y, right_y)
# if there is a skyline change
if curr_y != max_y:
update_output(x, max_y)
curr_y = max_y
# there is only left skyline
append_skyline(p_l, left, n_l, left_y, curr_y)
# there is only right skyline
append_skyline(p_r, right, n_r, right_y, curr_y)
return output
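# Editor's note: an illustrative driver for the Solution class above, not part of the
# original file. The input is the classic LeetCode 218 example; the expected key
# points in the comment are given for reference only.
def _example_skyline():
    buildings = [[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]]
    skyline = Solution().getSkyline(buildings)
    # Expected: [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10], [20, 8], [24, 0]]
    return skyline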
| 32.580247
| 76
| 0.497537
|
class Solution:
def getSkyline(self, buildings: 'List[List[int]]') -> 'List[List[int]]':
n = len(buildings)
if n == 0:
return []
if n == 1:
x_start, x_end, y = buildings[0]
return [[x_start, y], [x_end, 0]]
left_skyline = self.getSkyline(buildings[: n // 2])
right_skyline = self.getSkyline(buildings[n // 2:])
return self.merge_skylines(left_skyline, right_skyline)
def merge_skylines(self, left, right):
def update_output(x, y):
if not output or output[-1][0] != x:
output.append([x, y])
else:
output[-1][1] = y
def append_skyline(p, lst, n, y, curr_y):
while p < n:
x, y = lst[p]
p += 1
if curr_y != y:
update_output(x, y)
curr_y = y
n_l, n_r = len(left), len(right)
p_l = p_r = 0
curr_y = left_y = right_y = 0
output = []
while p_l < n_l and p_r < n_r:
point_l, point_r = left[p_l], right[p_r]
# pick up the smallest x
if point_l[0] < point_r[0]:
x, left_y = point_l
p_l += 1
else:
x, right_y = point_r
p_r += 1
# max height (i.e. y) between both skylines
max_y = max(left_y, right_y)
# if there is a skyline change
if curr_y != max_y:
update_output(x, max_y)
curr_y = max_y
# there is only left skyline
append_skyline(p_l, left, n_l, left_y, curr_y)
# there is only right skyline
append_skyline(p_r, right, n_r, right_y, curr_y)
return output
| true
| true
|
f71618cfbb589aebbaf72299adad2c12c7a31751
| 2,060
|
py
|
Python
|
lib/surface/dataproc/workflow_templates/instantiate.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/dataproc/workflow_templates/instantiate.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/dataproc/workflow_templates/instantiate.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:13:29.000Z
|
2020-07-24T20:13:29.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Instantiate a workflow template."""
import uuid
from googlecloudsdk.api_lib.dataproc import dataproc as dp
from googlecloudsdk.api_lib.dataproc import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataproc import flags
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Instantiate(base.CreateCommand):
"""Instantiate a workflow template."""
@staticmethod
def Args(parser):
flags.AddTemplateFlag(parser, 'run')
flags.AddTimeoutFlag(parser, default='35m')
base.ASYNC_FLAG.AddToParser(parser)
def Run(self, args):
dataproc = dp.Dataproc(self.ReleaseTrack())
msgs = dataproc.messages
template = util.ParseWorkflowTemplates(args.template, dataproc)
instantiate_request = dataproc.messages.InstantiateWorkflowTemplateRequest()
instantiate_request.instanceId = uuid.uuid4().hex # request UUID
request = msgs.DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest(
instantiateWorkflowTemplateRequest=instantiate_request,
name=template.RelativeName())
operation = dataproc.client.projects_regions_workflowTemplates.Instantiate(
request)
if args.async:
log.status.Print('Instantiating [{0}] with operation [{1}].'.format(
template.Name(), operation.name))
return
operation = util.WaitForWorkflowTemplateOperation(
dataproc, operation, timeout_s=args.timeout)
return operation
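# Editor's note: illustrative helper, not part of the original command implementation.
# It mirrors the request-id scheme used in Run() above: a fresh random UUID per
# invocation (uuid is already imported at the top of this module). On the CLI this
# command surfaces as something like
# `gcloud beta dataproc workflow-templates instantiate TEMPLATE`, though the exact
# flags may vary by SDK release.
def _example_instantiation_request_id():
    # 32-character lowercase hex string, unique per call, so the Dataproc service
    # can deduplicate retried instantiate requests.
    return uuid.uuid4().hex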
| 36.785714
| 80
| 0.762621
|
"""Instantiate a workflow template."""
import uuid
from googlecloudsdk.api_lib.dataproc import dataproc as dp
from googlecloudsdk.api_lib.dataproc import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataproc import flags
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Instantiate(base.CreateCommand):
"""Instantiate a workflow template."""
@staticmethod
def Args(parser):
flags.AddTemplateFlag(parser, 'run')
flags.AddTimeoutFlag(parser, default='35m')
base.ASYNC_FLAG.AddToParser(parser)
def Run(self, args):
dataproc = dp.Dataproc(self.ReleaseTrack())
msgs = dataproc.messages
template = util.ParseWorkflowTemplates(args.template, dataproc)
instantiate_request = dataproc.messages.InstantiateWorkflowTemplateRequest()
instantiate_request.instanceId = uuid.uuid4().hex
request = msgs.DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest(
instantiateWorkflowTemplateRequest=instantiate_request,
name=template.RelativeName())
operation = dataproc.client.projects_regions_workflowTemplates.Instantiate(
request)
if args.async:
log.status.Print('Instantiating [{0}] with operation [{1}].'.format(
template.Name(), operation.name))
return
operation = util.WaitForWorkflowTemplateOperation(
dataproc, operation, timeout_s=args.timeout)
return operation
| false
| true
|
f71618eae8454fc424b1ab0fecf5817c6c652137
| 31,411
|
py
|
Python
|
tests/models/bloom/test_modeling_bloom.py
|
JingyaHuang/transformers
|
6589e510fa4e6c442059de2fab84752535de9b23
|
[
"Apache-2.0"
] | null | null | null |
tests/models/bloom/test_modeling_bloom.py
|
JingyaHuang/transformers
|
6589e510fa4e6c442059de2fab84752535de9b23
|
[
"Apache-2.0"
] | null | null | null |
tests/models/bloom/test_modeling_bloom.py
|
JingyaHuang/transformers
|
6589e510fa4e6c442059de2fab84752535de9b23
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import unittest
from transformers import BloomConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_generation_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomTokenizerFast,
)
@require_torch
class BloomModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=False,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def get_large_model_config(self):
return BloomConfig.from_pretrained("bigscience/bloom")
def prepare_config_and_inputs(self, gradient_checkpointing=False):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config(gradient_checkpointing=gradient_checkpointing)
return (config, input_ids, input_mask, sequence_labels)
def get_config(self, gradient_checkpointing=False, slow_but_exact=True):
return BloomConfig(
vocab_size=self.vocab_size,
seq_length=self.seq_length,
hidden_size=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
resid_pdrop=self.hidden_dropout_prob,
attn_pdrop=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
num_labels=self.num_labels,
gradient_checkpointing=gradient_checkpointing,
slow_but_exact=slow_but_exact,
dtype="float32",
)
def create_and_check_bloom_model(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_bloom_model_past(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True)
outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids))
outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids))
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_bloom_model_attention_mask_past(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_bloom_model_past_large_inputs(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
output, past = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[
"last_hidden_state"
]
self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):
model = BloomForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args):
config.num_labels = self.num_labels
model = BloomForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args):
model = BloomForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, *args, gradient_checkpointing=False
):
model = BloomForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_bloom_weight_initialization(self, config, *args):
model = BloomModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, sequence_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
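# Editor's note: illustrative comment block, not part of the original test file.
# BloomModelTester above is not itself a TestCase; the TestCase below instantiates it
# in setUp() and each test_* method chains its helpers, roughly:
#
#   config_and_inputs = self.model_tester.prepare_config_and_inputs()
#   self.model_tester.create_and_check_bloom_model(*config_and_inputs)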
@require_torch
class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
BloomModel,
BloomForCausalLM,
BloomForSequenceClassification,
BloomForTokenClassification,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else ()
fx_compatible = False
test_missing_keys = False
test_pruning = False
    test_torchscript = True  # torch.autograd functions seem not to be supported
def setUp(self):
self.model_tester = BloomModelTester(self)
self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_bloom_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model(*config_and_inputs)
def test_bloom_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_past(*config_and_inputs)
def test_bloom_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_attention_mask_past(*config_and_inputs)
def test_bloom_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_past_large_inputs(*config_and_inputs)
def test_bloom_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_bloom_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs)
def test_bloom_token_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_token_classification_model(*config_and_inputs)
def test_bloom_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def test_bloom_weight_initialization(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BloomModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch_gpu
def test_simple_generation(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m)
input_sentence = "I enjoy walking with my cute dog"
EXPECTED_OUTPUT = (
"I enjoy walking with my cute dog, and I love to watch the kids play. I am a very active person, and I am"
" a very good listener. I am a very good person, and I am a very good person. I am a"
)
input_ids = tokenizer.encode(input_sentence, return_tensors="pt")
greedy_output = model.generate(input_ids.cuda(), max_length=50)
self.assertEqual(tokenizer.decode(greedy_output[0], skip_special_tokens=True), EXPECTED_OUTPUT)
@slow
@require_torch_gpu
def test_batch_generation(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")
input_sentence = ["I enjoy walking with my cute dog", "I enjoy walking with my cute dog"]
input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True)
greedy_output = model.generate(
input_ids["input_ids"].cuda(), attention_mask=input_ids["attention_mask"], max_length=50, do_sample=False
)
self.assertEqual(
tokenizer.decode(greedy_output[0], skip_special_tokens=True),
tokenizer.decode(greedy_output[1], skip_special_tokens=True),
)
@slow
@require_torch_gpu
def test_batch_generation_padd(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")
input_sentence = ["I enjoy walking with my cute dog", "Hello my name is"]
input_sentence_without_pad = "Hello my name is"
input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True)
input_ids_without_pad = tokenizer.encode(input_sentence_without_pad, return_tensors="pt")
greedy_output = model.generate(
input_ids["input_ids"].cuda(), attention_mask=input_ids["attention_mask"], max_length=50, do_sample=False
)
greedy_output_without_pad = model.generate(input_ids_without_pad.cuda(), max_length=50, do_sample=False)
# test token values
self.assertEqual(greedy_output[-1, 3:].tolist(), greedy_output_without_pad[0, :-3].tolist())
# test reconstructions
self.assertEqual(
tokenizer.decode(greedy_output[-1, 3:], skip_special_tokens=True),
tokenizer.decode(greedy_output_without_pad[0, :-3], skip_special_tokens=True),
)
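# Editor's note: not part of the original file. The generation tests above are marked
# @slow and @require_torch_gpu, so they are skipped by default; in the transformers
# test suite they are typically opted into on a GPU machine with something like:
#
#   RUN_SLOW=1 python -m pytest tests/models/bloom/test_modeling_bloom.py -k "generation" -v
#
# The pytest invocation is illustrative; only the RUN_SLOW opt-in and the decorators
# come from the surrounding code.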
@require_torch
class BloomEmbeddingTest(unittest.TestCase):
"""
    The goal here is to compare the embeddings generated by the model trained
    using Megatron-LM with those produced by the transformers library, using a small
    GPT2-like model, to ensure that the conversion from Megatron-LM to transformers has been done successfully.
The script compares the logits of the embedding layer and the transformer layers.
    WARNING: It is expected that these logits will not have exactly the same statistics when running
    the code on CPU versus GPU. For more info, please visit:
- https://github.com/pytorch/pytorch/issues/76052#issuecomment-1103193548
- https://discuss.pytorch.org/t/reproducibility-issue-between-intel-and-amd-cpus/144779/9
You need to install tokenizers following this readme:
- https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
Tokenizer used during training:
- https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
# TODO change the script (or just add skip) when building the env with tokenizers 0.12.0
"""
def setUp(self):
super().setUp()
self.path_bigscience_model = "bigscience/bigscience-small-testing"
@require_torch
def test_embeddings(self):
model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype="auto") # load in fp32
model.eval()
EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = {
3478: 0.0002307891845703125,
368: -0.000568389892578125,
109586: -0.0003910064697265625,
35433: -0.000194549560546875,
2: 0.0004138946533203125,
77: 0.000659942626953125,
132619: -0.00031280517578125,
2175: 0.000457763671875,
23714: 0.000263214111328125,
73173: -0.000286102294921875,
144252: 0.00052642822265625,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM = {"value": 0.08203125}
EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN = {
132619: -0.00031256675720214844,
3478: 0.00023090839385986328,
368: -0.0005702972412109375,
109586: -0.00039124488830566406,
35433: -0.000194549560546875,
2: 0.0004146099090576172,
2175: 0.0004572868347167969,
23714: 0.00026416778564453125,
73173: -0.0002865791320800781,
144252: 0.0005254745483398438,
77: 0.0006618499755859375,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_SUM = {"value": 0.0821533203125}
EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN = {
132619: -0.00031267106533050537,
3478: 0.00023087859153747559,
368: -0.0005701072514057159,
109586: -0.0003911703824996948,
35433: -0.0001944899559020996,
2: 0.0004146844148635864,
2175: 0.00045740045607089996,
23714: 0.0002641640603542328,
73173: -0.0002864748239517212,
144252: 0.0005256589502096176,
77: 0.0006617321632802486,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_SUM = {"value": 0.08217757940292358}
TEST_EMBEDDINGS = {
"torch.bfloat16": {
"mean": EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM,
},
"torch.float32": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
},
"torch.float": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
},
"torch.float16": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_16_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_16_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_16_SUM,
},
}
# fmt: off
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
# fmt: on
EMBEDDINGS_DS_AFTER_LN_MEAN = {
3478: -6.580352783203125e-05,
368: 0.0001316070556640625,
109586: -0.00030517578125,
35433: 4.00543212890625e-05,
2: -7.2479248046875e-05,
77: -8.96453857421875e-05,
132619: 0.0001583099365234375,
2175: 2.1219253540039062e-05,
23714: -0.000247955322265625,
73173: -0.00021839141845703125,
144252: -0.0001430511474609375,
}
EMBEDDINGS_DS_AFTER_LN_MIN = {
3478: -1.6953125,
368: -1.6875,
109586: -1.6875,
35433: -2.125,
2: -1.390625,
77: -1.5390625,
132619: -1.875,
2175: -1.4609375,
23714: -2.296875,
73173: -1.3515625,
144252: -1.78125,
}
EMBEDDINGS_DS_AFTER_LN_MAX = {
3478: 2.265625,
368: 2.28125,
109586: 1.953125,
35433: 1.90625,
2: 2.703125,
77: 2.828125,
132619: 1.65625,
2175: 2.015625,
23714: 2.234375,
73173: 2.171875,
144252: 1.828125,
}
EMBEDDINGS_DS_AFTER_LN = {
"mean": EMBEDDINGS_DS_AFTER_LN_MEAN,
"min": EMBEDDINGS_DS_AFTER_LN_MIN,
"max": EMBEDDINGS_DS_AFTER_LN_MAX,
}
tensor_ids = torch.LongTensor([EXAMPLE_IDS])
with torch.no_grad():
embeddings = model.transformer.word_embeddings(tensor_ids)
            embeddings_ln = model.transformer.word_embeddings_layernorm(embeddings)
# first check the embeddings before LN
output_dict = {"min": {}, "max": {}, "mean": {}, "sum": {"value": embeddings.sum().item()}}
for i, idx in enumerate(EXAMPLE_IDS):
output_dict["min"][idx] = embeddings.min(dim=-1).values[0][i].item()
output_dict["max"][idx] = embeddings.max(dim=-1).values[0][i].item()
output_dict["mean"][idx] = embeddings.mean(dim=-1)[0][i].item()
for key in TEST_EMBEDDINGS[str(model.dtype)].keys():
self.assertDictEqual(TEST_EMBEDDINGS[str(model.dtype)][key], output_dict[key])
output_dict_norm = {"min": {}, "max": {}, "mean": {}}
for i, idx in enumerate(EXAMPLE_IDS):
output_dict_norm["min"][idx] = embeddings_ln.min(dim=-1).values[0][i].item()
output_dict_norm["max"][idx] = embeddings_ln.max(dim=-1).values[0][i].item()
output_dict_norm["mean"][idx] = embeddings_ln.mean(dim=-1)[0][i].item()
# This test does not pass when places = 2
for i, key in enumerate(output_dict_norm.keys()):
for j, idx in enumerate(output_dict[key].keys()):
self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1)
@require_torch
def test_hidden_states_transformers(self):
cuda_available = torch.cuda.is_available()
model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
torch_device
)
model.eval()
# fmt: off
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
# fmt: on
MEAN_VALUE_LAST_LM = -4.3392181396484375e-05
MIN_MAX_DICT = {"min": -2.0625, "max": 2.75}
tensor_ids = torch.LongTensor([EXAMPLE_IDS])
with torch.no_grad():
logits = model(tensor_ids.to(torch_device))
output_dict = {
"min": logits.last_hidden_state.min(dim=-1).values[0][0].item(),
"max": logits.last_hidden_state.max(dim=-1).values[0][0].item(),
}
if cuda_available:
self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=4)
else:
self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=3)
self.assertDictEqual(MIN_MAX_DICT, output_dict)
@require_torch
def test_logits(self):
cuda_available = torch.cuda.is_available()
model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
torch_device
) # load in bf16
model.eval()
# fmt: off
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
# fmt: on
MEAN_LOGITS_GPU_1 = -1.823902130126953e-05
MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05
tensor_ids = torch.LongTensor([EXAMPLE_IDS]).to(torch_device)
with torch.no_grad():
output = model(tensor_ids).logits
output_gpu_1, output_gpu_2 = output.split(125440, dim=-1)
if cuda_available:
self.assertEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1)
self.assertEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2)
else:
self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6) # 1e-06 precision!!
self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6)
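# Editor's note: a minimal, hedged sketch of the shape check performed by
# create_and_check_bloom_model above, not part of the original file. The tiny
# dimensions are hypothetical; BloomConfig, BloomModel, and torch are imported at the
# top of this module when torch is available.
def _example_tiny_bloom_forward():
    config = BloomConfig(vocab_size=99, hidden_size=32, n_layer=2, n_head=4)
    model = BloomModel(config)
    model.eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 7), dtype=torch.long)
    with torch.no_grad():
        out = model(input_ids)
    # Expected: torch.Size([1, 7, 32]) == (batch_size, seq_length, hidden_size)
    return out.last_hidden_state.shape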
| 41.439314
| 155
| 0.661934
|
import math
import unittest
from transformers import BloomConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_generation_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomTokenizerFast,
)
@require_torch
class BloomModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=False,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def get_large_model_config(self):
return BloomConfig.from_pretrained("bigscience/bloom")
def prepare_config_and_inputs(self, gradient_checkpointing=False):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config(gradient_checkpointing=gradient_checkpointing)
return (config, input_ids, input_mask, sequence_labels)
def get_config(self, gradient_checkpointing=False, slow_but_exact=True):
return BloomConfig(
vocab_size=self.vocab_size,
seq_length=self.seq_length,
hidden_size=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
resid_pdrop=self.hidden_dropout_prob,
attn_pdrop=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
num_labels=self.num_labels,
gradient_checkpointing=gradient_checkpointing,
slow_but_exact=slow_but_exact,
dtype="float32",
)
def create_and_check_bloom_model(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_bloom_model_past(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True)
outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids))
outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids))
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past = outputs["past_key_values"]
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_bloom_model_attention_mask_past(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_bloom_model_past_large_inputs(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
output, past = outputs.to_tuple()
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[
"last_hidden_state"
]
self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):
model = BloomForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args):
config.num_labels = self.num_labels
model = BloomForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args):
model = BloomForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, *args, gradient_checkpointing=False
):
model = BloomForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_bloom_weight_initialization(self, config, *args):
model = BloomModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, sequence_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_torch
class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
BloomModel,
BloomForCausalLM,
BloomForSequenceClassification,
BloomForTokenClassification,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else ()
fx_compatible = False
test_missing_keys = False
test_pruning = False
test_torchscript = True
def setUp(self):
self.model_tester = BloomModelTester(self)
self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_bloom_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model(*config_and_inputs)
def test_bloom_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_past(*config_and_inputs)
def test_bloom_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_attention_mask_past(*config_and_inputs)
def test_bloom_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_past_large_inputs(*config_and_inputs)
def test_bloom_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_bloom_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs)
def test_bloom_token_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_token_classification_model(*config_and_inputs)
def test_bloom_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def test_bloom_weight_initialization(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BloomModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch_gpu
def test_simple_generation(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m)
input_sentence = "I enjoy walking with my cute dog"
EXPECTED_OUTPUT = (
"I enjoy walking with my cute dog, and I love to watch the kids play. I am a very active person, and I am"
" a very good listener. I am a very good person, and I am a very good person. I am a"
)
input_ids = tokenizer.encode(input_sentence, return_tensors="pt")
greedy_output = model.generate(input_ids.cuda(), max_length=50)
self.assertEqual(tokenizer.decode(greedy_output[0], skip_special_tokens=True), EXPECTED_OUTPUT)
@slow
@require_torch_gpu
def test_batch_generation(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")
input_sentence = ["I enjoy walking with my cute dog", "I enjoy walking with my cute dog"]
input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True)
greedy_output = model.generate(
input_ids["input_ids"].cuda(), attention_mask=input_ids["attention_mask"], max_length=50, do_sample=False
)
self.assertEqual(
tokenizer.decode(greedy_output[0], skip_special_tokens=True),
tokenizer.decode(greedy_output[1], skip_special_tokens=True),
)
@slow
@require_torch_gpu
def test_batch_generation_padd(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")
input_sentence = ["I enjoy walking with my cute dog", "Hello my name is"]
input_sentence_without_pad = "Hello my name is"
input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True)
input_ids_without_pad = tokenizer.encode(input_sentence_without_pad, return_tensors="pt")
greedy_output = model.generate(
input_ids["input_ids"].cuda(), attention_mask=input_ids["attention_mask"], max_length=50, do_sample=False
)
greedy_output_without_pad = model.generate(input_ids_without_pad.cuda(), max_length=50, do_sample=False)
self.assertEqual(greedy_output[-1, 3:].tolist(), greedy_output_without_pad[0, :-3].tolist())
self.assertEqual(
tokenizer.decode(greedy_output[-1, 3:], skip_special_tokens=True),
tokenizer.decode(greedy_output_without_pad[0, :-3], skip_special_tokens=True),
)
@require_torch
class BloomEmbeddingTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.path_bigscience_model = "bigscience/bigscience-small-testing"
@require_torch
def test_embeddings(self):
model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype="auto")
model.eval()
EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = {
3478: 0.0002307891845703125,
368: -0.000568389892578125,
109586: -0.0003910064697265625,
35433: -0.000194549560546875,
2: 0.0004138946533203125,
77: 0.000659942626953125,
132619: -0.00031280517578125,
2175: 0.000457763671875,
23714: 0.000263214111328125,
73173: -0.000286102294921875,
144252: 0.00052642822265625,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM = {"value": 0.08203125}
EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN = {
132619: -0.00031256675720214844,
3478: 0.00023090839385986328,
368: -0.0005702972412109375,
109586: -0.00039124488830566406,
35433: -0.000194549560546875,
2: 0.0004146099090576172,
2175: 0.0004572868347167969,
23714: 0.00026416778564453125,
73173: -0.0002865791320800781,
144252: 0.0005254745483398438,
77: 0.0006618499755859375,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_SUM = {"value": 0.0821533203125}
EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN = {
132619: -0.00031267106533050537,
3478: 0.00023087859153747559,
368: -0.0005701072514057159,
109586: -0.0003911703824996948,
35433: -0.0001944899559020996,
2: 0.0004146844148635864,
2175: 0.00045740045607089996,
23714: 0.0002641640603542328,
73173: -0.0002864748239517212,
144252: 0.0005256589502096176,
77: 0.0006617321632802486,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_SUM = {"value": 0.08217757940292358}
TEST_EMBEDDINGS = {
"torch.bfloat16": {
"mean": EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM,
},
"torch.float32": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
},
"torch.float": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
},
"torch.float16": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_16_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_16_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_16_SUM,
},
}
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
EMBEDDINGS_DS_AFTER_LN_MEAN = {
3478: -6.580352783203125e-05,
368: 0.0001316070556640625,
109586: -0.00030517578125,
35433: 4.00543212890625e-05,
2: -7.2479248046875e-05,
77: -8.96453857421875e-05,
132619: 0.0001583099365234375,
2175: 2.1219253540039062e-05,
23714: -0.000247955322265625,
73173: -0.00021839141845703125,
144252: -0.0001430511474609375,
}
EMBEDDINGS_DS_AFTER_LN_MIN = {
3478: -1.6953125,
368: -1.6875,
109586: -1.6875,
35433: -2.125,
2: -1.390625,
77: -1.5390625,
132619: -1.875,
2175: -1.4609375,
23714: -2.296875,
73173: -1.3515625,
144252: -1.78125,
}
EMBEDDINGS_DS_AFTER_LN_MAX = {
3478: 2.265625,
368: 2.28125,
109586: 1.953125,
35433: 1.90625,
2: 2.703125,
77: 2.828125,
132619: 1.65625,
2175: 2.015625,
23714: 2.234375,
73173: 2.171875,
144252: 1.828125,
}
EMBEDDINGS_DS_AFTER_LN = {
"mean": EMBEDDINGS_DS_AFTER_LN_MEAN,
"min": EMBEDDINGS_DS_AFTER_LN_MIN,
"max": EMBEDDINGS_DS_AFTER_LN_MAX,
}
tensor_ids = torch.LongTensor([EXAMPLE_IDS])
with torch.no_grad():
embeddings = model.transformer.word_embeddings(tensor_ids)
embeddings_ln = model.transformer.word_embeddings_layernorm(embeddings)
output_dict = {"min": {}, "max": {}, "mean": {}, "sum": {"value": embeddings.sum().item()}}
for i, idx in enumerate(EXAMPLE_IDS):
output_dict["min"][idx] = embeddings.min(dim=-1).values[0][i].item()
output_dict["max"][idx] = embeddings.max(dim=-1).values[0][i].item()
output_dict["mean"][idx] = embeddings.mean(dim=-1)[0][i].item()
for key in TEST_EMBEDDINGS[str(model.dtype)].keys():
self.assertDictEqual(TEST_EMBEDDINGS[str(model.dtype)][key], output_dict[key])
output_dict_norm = {"min": {}, "max": {}, "mean": {}}
for i, idx in enumerate(EXAMPLE_IDS):
output_dict_norm["min"][idx] = embeddings_ln.min(dim=-1).values[0][i].item()
output_dict_norm["max"][idx] = embeddings_ln.max(dim=-1).values[0][i].item()
output_dict_norm["mean"][idx] = embeddings_ln.mean(dim=-1)[0][i].item()
for i, key in enumerate(output_dict_norm.keys()):
for j, idx in enumerate(output_dict[key].keys()):
self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1)
@require_torch
def test_hidden_states_transformers(self):
cuda_available = torch.cuda.is_available()
model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
torch_device
)
model.eval()
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
MEAN_VALUE_LAST_LM = -4.3392181396484375e-05
MIN_MAX_DICT = {"min": -2.0625, "max": 2.75}
tensor_ids = torch.LongTensor([EXAMPLE_IDS])
with torch.no_grad():
logits = model(tensor_ids.to(torch_device))
output_dict = {
"min": logits.last_hidden_state.min(dim=-1).values[0][0].item(),
"max": logits.last_hidden_state.max(dim=-1).values[0][0].item(),
}
if cuda_available:
self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=4)
else:
self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=3)
self.assertDictEqual(MIN_MAX_DICT, output_dict)
@require_torch
def test_logits(self):
cuda_available = torch.cuda.is_available()
model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
torch_device
)
model.eval()
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
MEAN_LOGITS_GPU_1 = -1.823902130126953e-05
MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05
tensor_ids = torch.LongTensor([EXAMPLE_IDS]).to(torch_device)
with torch.no_grad():
output = model(tensor_ids).logits
output_gpu_1, output_gpu_2 = output.split(125440, dim=-1)
if cuda_available:
self.assertEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1)
self.assertEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2)
else:
self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6)
self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6)
| true
| true
|
f71619031253fb486e6ba783dca022105538c931
| 3,935
|
py
|
Python
|
server/website/website/parser/parser.py
|
mjain2/ottertune
|
011e896bf89df831fb1189b1ab4c9a7d7dca420a
|
[
"Apache-2.0"
] | 1
|
2019-08-16T19:35:35.000Z
|
2019-08-16T19:35:35.000Z
|
server/website/website/parser/parser.py
|
mjain2/ottertune
|
011e896bf89df831fb1189b1ab4c9a7d7dca420a
|
[
"Apache-2.0"
] | null | null | null |
server/website/website/parser/parser.py
|
mjain2/ottertune
|
011e896bf89df831fb1189b1ab4c9a7d7dca420a
|
[
"Apache-2.0"
] | null | null | null |
#
# OtterTune - parser.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
'''
Created on Dec 12, 2017
@author: dvanaken
'''
from website.models import DBMSCatalog
from website.types import DBMSType
from .myrocks import MyRocks56Parser
from .mysql import MySql57Parser
from .postgres import Postgres96Parser, PostgresOldParser
from .oracle import Oracle19Parser
class Parser(object):
__DBMS_UTILS_IMPLS = None
@staticmethod
def __utils(dbms_id=None):
if Parser.__DBMS_UTILS_IMPLS is None:
Parser.__DBMS_UTILS_IMPLS = {
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.3').pk: PostgresOldParser('9.3'),
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.2').pk: PostgresOldParser('9.2'),
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.6').pk: Postgres96Parser('9.6'),
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.4').pk: Postgres96Parser('9.4'),
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.5').pk: Postgres96Parser('9.5'),
DBMSCatalog.objects.get(
type=DBMSType.MYROCKS, version='5.6').pk: MyRocks56Parser(),
DBMSCatalog.objects.get(
type=DBMSType.ORACLE, version='19.0.0.0.0').pk: Oracle19Parser(),
DBMSCatalog.objects.get(
type=DBMSType.MYSQL, version='5.7').pk: MySql57Parser()
}
try:
if dbms_id is None:
return Parser.__DBMS_UTILS_IMPLS
return Parser.__DBMS_UTILS_IMPLS[dbms_id]
except KeyError:
raise NotImplementedError(
'Implement me! ({})'.format(dbms_id))
@staticmethod
def parse_version_string(dbms_type, version_string):
for k, v in list(Parser.__utils().items()):
dbms = DBMSCatalog.objects.get(pk=k)
if dbms.type == dbms_type:
try:
return v.parse_version_string(version_string)
except AttributeError:
pass
return None
@staticmethod
def convert_dbms_knobs(dbms_id, knobs):
return Parser.__utils(dbms_id).convert_dbms_knobs(knobs)
@staticmethod
def convert_dbms_metrics(dbms_id, numeric_metrics, observation_time, target_objective=None):
return Parser.__utils(dbms_id).convert_dbms_metrics(
numeric_metrics, observation_time, target_objective)
@staticmethod
def parse_dbms_knobs(dbms_id, knobs):
return Parser.__utils(dbms_id).parse_dbms_knobs(knobs)
@staticmethod
def parse_dbms_metrics(dbms_id, metrics):
return Parser.__utils(dbms_id).parse_dbms_metrics(metrics)
@staticmethod
def get_nondefault_knob_settings(dbms_id, knobs):
return Parser.__utils(dbms_id).get_nondefault_knob_settings(knobs)
@staticmethod
def create_knob_configuration(dbms_id, tuning_knobs):
return Parser.__utils(dbms_id).create_knob_configuration(tuning_knobs)
@staticmethod
def format_dbms_knobs(dbms_id, knobs):
return Parser.__utils(dbms_id).format_dbms_knobs(knobs)
@staticmethod
def get_knob_configuration_filename(dbms_id):
return Parser.__utils(dbms_id).knob_configuration_filename
@staticmethod
def filter_numeric_metrics(dbms_id, metrics):
return Parser.__utils(dbms_id).filter_numeric_metrics(metrics)
@staticmethod
def filter_tunable_knobs(dbms_id, knobs):
return Parser.__utils(dbms_id).filter_tunable_knobs(knobs)
@staticmethod
def calculate_change_in_metrics(dbms_id, metrics_start, metrics_end):
return Parser.__utils(dbms_id).calculate_change_in_metrics(
metrics_start, metrics_end)
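# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a minimal, hypothetical
# example of calling the static Parser facade defined above. It assumes a
# configured Django environment containing the DBMSCatalog rows referenced in
# __utils(); `knob_data` is a made-up dict of knob name -> raw value.
def _example_convert_knobs(knob_data):
    dbms_id = DBMSCatalog.objects.get(type=DBMSType.POSTGRES, version='9.6').pk
    tunable = Parser.filter_tunable_knobs(dbms_id, knob_data)
    return Parser.convert_dbms_knobs(dbms_id, tunable)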
| 35.45045
| 96
| 0.662516
|
from website.models import DBMSCatalog
from website.types import DBMSType
from .myrocks import MyRocks56Parser
from .mysql import MySql57Parser
from .postgres import Postgres96Parser, PostgresOldParser
from .oracle import Oracle19Parser
class Parser(object):
__DBMS_UTILS_IMPLS = None
@staticmethod
def __utils(dbms_id=None):
if Parser.__DBMS_UTILS_IMPLS is None:
Parser.__DBMS_UTILS_IMPLS = {
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.3').pk: PostgresOldParser('9.3'),
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.2').pk: PostgresOldParser('9.2'),
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.6').pk: Postgres96Parser('9.6'),
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.4').pk: Postgres96Parser('9.4'),
DBMSCatalog.objects.get(
type=DBMSType.POSTGRES, version='9.5').pk: Postgres96Parser('9.5'),
DBMSCatalog.objects.get(
type=DBMSType.MYROCKS, version='5.6').pk: MyRocks56Parser(),
DBMSCatalog.objects.get(
type=DBMSType.ORACLE, version='19.0.0.0.0').pk: Oracle19Parser(),
DBMSCatalog.objects.get(
type=DBMSType.MYSQL, version='5.7').pk: MySql57Parser()
}
try:
if dbms_id is None:
return Parser.__DBMS_UTILS_IMPLS
return Parser.__DBMS_UTILS_IMPLS[dbms_id]
except KeyError:
raise NotImplementedError(
'Implement me! ({})'.format(dbms_id))
@staticmethod
def parse_version_string(dbms_type, version_string):
for k, v in list(Parser.__utils().items()):
dbms = DBMSCatalog.objects.get(pk=k)
if dbms.type == dbms_type:
try:
return v.parse_version_string(version_string)
except AttributeError:
pass
return None
@staticmethod
def convert_dbms_knobs(dbms_id, knobs):
return Parser.__utils(dbms_id).convert_dbms_knobs(knobs)
@staticmethod
def convert_dbms_metrics(dbms_id, numeric_metrics, observation_time, target_objective=None):
return Parser.__utils(dbms_id).convert_dbms_metrics(
numeric_metrics, observation_time, target_objective)
@staticmethod
def parse_dbms_knobs(dbms_id, knobs):
return Parser.__utils(dbms_id).parse_dbms_knobs(knobs)
@staticmethod
def parse_dbms_metrics(dbms_id, metrics):
return Parser.__utils(dbms_id).parse_dbms_metrics(metrics)
@staticmethod
def get_nondefault_knob_settings(dbms_id, knobs):
return Parser.__utils(dbms_id).get_nondefault_knob_settings(knobs)
@staticmethod
def create_knob_configuration(dbms_id, tuning_knobs):
return Parser.__utils(dbms_id).create_knob_configuration(tuning_knobs)
@staticmethod
def format_dbms_knobs(dbms_id, knobs):
return Parser.__utils(dbms_id).format_dbms_knobs(knobs)
@staticmethod
def get_knob_configuration_filename(dbms_id):
return Parser.__utils(dbms_id).knob_configuration_filename
@staticmethod
def filter_numeric_metrics(dbms_id, metrics):
return Parser.__utils(dbms_id).filter_numeric_metrics(metrics)
@staticmethod
def filter_tunable_knobs(dbms_id, knobs):
return Parser.__utils(dbms_id).filter_tunable_knobs(knobs)
@staticmethod
def calculate_change_in_metrics(dbms_id, metrics_start, metrics_end):
return Parser.__utils(dbms_id).calculate_change_in_metrics(
metrics_start, metrics_end)
| true
| true
|
f716194f5cc205b886a9dd79a6796056afa57b63
| 15,927
|
py
|
Python
|
old/fastai/structured.py
|
fjaragones/fastai
|
be48d209a4526191f71dc7adaef090828897b9ec
|
[
"Apache-2.0"
] | 2
|
2019-02-19T18:34:29.000Z
|
2019-12-09T17:51:41.000Z
|
old/fastai/structured.py
|
fjaragones/fastai
|
be48d209a4526191f71dc7adaef090828897b9ec
|
[
"Apache-2.0"
] | 4
|
2020-02-25T20:46:35.000Z
|
2022-02-26T04:45:55.000Z
|
old/fastai/structured.py
|
fjaragones/fastai
|
be48d209a4526191f71dc7adaef090828897b9ec
|
[
"Apache-2.0"
] | 1
|
2019-01-16T08:10:48.000Z
|
2019-01-16T08:10:48.000Z
|
from .imports import *
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.ensemble import forest
from sklearn.tree import export_graphviz
def set_plot_sizes(sml, med, big):
plt.rc('font', size=sml) # controls default text sizes
plt.rc('axes', titlesize=sml) # fontsize of the axes title
plt.rc('axes', labelsize=med) # fontsize of the x and y labels
plt.rc('xtick', labelsize=sml) # fontsize of the tick labels
plt.rc('ytick', labelsize=sml) # fontsize of the tick labels
plt.rc('legend', fontsize=sml) # legend fontsize
plt.rc('figure', titlesize=big) # fontsize of the figure title
def parallel_trees(m, fn, n_jobs=8):
return list(ProcessPoolExecutor(n_jobs).map(fn, m.estimators_))
def draw_tree(t, df, size=10, ratio=0.6, precision=0):
""" Draws a representation of a random forest in IPython.
Parameters:
-----------
t: The tree you wish to draw
df: The data used to train the tree. This is used to get the names of the features.
"""
s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,
special_characters=True, rotate=True, precision=precision)
IPython.display.display(graphviz.Source(re.sub('Tree {',
f'Tree {{ size={size}; ratio={ratio}', s)))
def combine_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,
seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):
years = np.asarray(years) - 1970
months = np.asarray(months) - 1
days = np.asarray(days) - 1
types = ('<M8[Y]', '<m8[M]', '<m8[D]', '<m8[W]', '<m8[h]',
'<m8[m]', '<m8[s]', '<m8[ms]', '<m8[us]', '<m8[ns]')
vals = (years, months, days, weeks, hours, minutes, seconds,
milliseconds, microseconds, nanoseconds)
return sum(np.asarray(v, dtype=t) for t, v in zip(types, vals)
if v is not None)
def get_sample(df,n):
""" Gets a random sample of n rows from df, without replacement.
Parameters:
-----------
df: A pandas data frame, that you wish to sample from.
n: The number of rows you wish to sample.
Returns:
--------
return value: A random sample of n rows of df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
>>> get_sample(df, 2)
col1 col2
1 2 b
2 3 a
"""
idxs = sorted(np.random.permutation(len(df))[:n])
return df.iloc[idxs].copy()
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
"""add_datepart converts a column of df from a datetime64 to many columns containing
the information from the date. This applies changes inplace.
Parameters:
-----------
df: A pandas data frame. df gains several new columns.
fldname: A string that is the name of the date column you wish to expand.
If it is not a datetime64 series, it will be converted to one with pd.to_datetime.
drop: If true then the original date column will be removed.
time: If true time features: Hour, Minute, Second will be added.
Examples:
---------
>>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) })
>>> df
A
0 2000-03-11
1 2000-03-12
2 2000-03-13
>>> add_datepart(df, 'A')
>>> df
AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed
0 2000 3 10 11 5 71 False False False False False False 952732800
1 2000 3 10 12 6 72 False False False False False False 952819200
2 2000 3 11 13 0 73 False False False False False False 952905600
"""
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
def is_date(x): return np.issubdtype(x.dtype, np.datetime64)
def train_cats(df):
"""Change any columns of strings in a panda's dataframe to a column of
categorical values. This applies the changes inplace.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category
"""
for n,c in df.items():
if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()
def apply_cats(df, trn):
"""Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
trn: A pandas dataframe. When creating a category for df, it looks up
what the category codes were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
now the type of col2 is category {a : 1, b : 2}
"""
for n,c in df.items():
if (n in trn.columns) and (trn[n].dtype.name=='category'):
df[n] = c.astype('category').cat.as_ordered()
df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True)
def fix_missing(df, col, name, na_dict):
""" Fill missing data in a column of df with the median, and add a {name}_na column
which specifies if the data was missing.
Parameters:
-----------
df: The data frame that will be changed.
col: The column of data to fix by filling in missing data.
name: The name of the new filled column in df.
na_dict: A dictionary of values to create na's of and the value to insert. If
name is not a key of na_dict the median will fill any missing data. Also
if name is not a key of na_dict and there is no missing data in col, then
no {name}_na column is created.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
col1 col2
0 1 5
1 nan 2
2 3 2
>>> fix_missing(df, df['col1'], 'col1', {})
>>> df
col1 col2 col1_na
0 1 5 False
1 2 2 True
2 3 2 False
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
col1 col2
0 1 5
1 nan 2
2 3 2
>>> fix_missing(df, df['col2'], 'col2', {})
>>> df
col1 col2
0 1 5
1 nan 2
2 3 2
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
col1 col2
0 1 5
1 nan 2
2 3 2
>>> fix_missing(df, df['col1'], 'col1', {'col1' : 500})
>>> df
col1 col2 col1_na
0 1 5 False
1 500 2 True
2 3 2 False
"""
if is_numeric_dtype(col):
if pd.isnull(col).sum() or (name in na_dict):
df[name+'_na'] = pd.isnull(col)
filler = na_dict[name] if name in na_dict else col.median()
df[name] = col.fillna(filler)
na_dict[name] = filler
return na_dict
def numericalize(df, col, name, max_n_cat):
""" Changes the column col from a categorical type to it's integer codes.
Parameters:
-----------
df: A pandas dataframe. df[name] will be filled with the integer codes from
col.
col: The column you wish to change into the categories.
name: The column name you wish to insert into df. This column will hold the
integer codes.
max_n_cat: If col has more categories than max_n_cat it will not change
it to its integer codes. If max_n_cat is None, then col will always be
converted.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category { a : 1, b : 2}
>>> numericalize(df, df['col2'], 'col3', None)
col1 col2 col3
0 1 a 1
1 2 b 2
2 3 a 1
"""
if not is_numeric_dtype(col) and ( max_n_cat is None or len(col.cat.categories)>max_n_cat):
df[name] = col.cat.codes+1
def scale_vars(df, mapper):
warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)
if mapper is None:
map_f = [([n],StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]
mapper = DataFrameMapper(map_f).fit(df)
df[mapper.transformed_names_] = mapper.transform(df)
return mapper
def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,
preproc_fn=None, max_n_cat=None, subset=None, mapper=None):
""" proc_df takes a data frame df and splits off the response variable, and
changes the df into an entirely numeric dataframe. For each column of df
which is not in skip_flds nor in ignore_flds, na values are replaced by the
median value of the column.
Parameters:
-----------
df: The data frame you wish to process.
y_fld: The name of the response variable
skip_flds: A list of fields that are dropped from df.
ignore_flds: A list of fields that are ignored during processing.
do_scale: Standardizes each column in df. Takes boolean values (True, False).
na_dict: a dictionary of na columns to add. Na columns are also added if there
are any missing values.
preproc_fn: A function that gets applied to df.
max_n_cat: The maximum number of categories to break into dummy values, instead
of integer codes.
subset: Takes a random subset of size subset from df.
mapper: If do_scale is set as True, the mapper variable
calculates the values used for scaling of variables during training time (mean and standard deviation).
Returns:
--------
[x, y, nas, mapper(optional)]:
x: x is the transformed version of df. x will not have the response variable
and is entirely numeric.
y: y is the response variable
nas: returns a dictionary of which nas it created, and the associated median.
mapper: A DataFrameMapper which stores the mean and standard deviation of the corresponding continuous
variables, which is then used for scaling during test time.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category { a : 1, b : 2}
>>> x, y, nas = proc_df(df, 'col1')
>>> x
col2
0 1
1 2
2 1
>>> data = DataFrame(pet=["cat", "dog", "dog", "fish", "cat", "dog", "cat", "fish"],
children=[4., 6, 3, 3, 2, 3, 5, 4],
salary=[90, 24, 44, 27, 32, 59, 36, 27])
>>> mapper = DataFrameMapper([(:pet, LabelBinarizer()),
([:children], StandardScaler())])
>>>round(fit_transform!(mapper, copy(data)), 2)
8x4 Array{Float64,2}:
1.0 0.0 0.0 0.21
0.0 1.0 0.0 1.88
0.0 1.0 0.0 -0.63
0.0 0.0 1.0 -0.63
1.0 0.0 0.0 -1.46
0.0 1.0 0.0 -0.63
1.0 0.0 0.0 1.04
0.0 0.0 1.0 0.21
"""
if not ignore_flds: ignore_flds=[]
if not skip_flds: skip_flds=[]
if subset: df = get_sample(df,subset)
else: df = df.copy()
ignored_flds = df.loc[:, ignore_flds]
df.drop(ignore_flds, axis=1, inplace=True)
if preproc_fn: preproc_fn(df)
if y_fld is None: y = None
else:
if not is_numeric_dtype(df[y_fld]): df[y_fld] = df[y_fld].cat.codes
y = df[y_fld].values
skip_flds += [y_fld]
df.drop(skip_flds, axis=1, inplace=True)
if na_dict is None: na_dict = {}
else: na_dict = na_dict.copy()
na_dict_initial = na_dict.copy()
for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
if len(na_dict_initial.keys()) > 0:
df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)
if do_scale: mapper = scale_vars(df, mapper)
for n,c in df.items(): numericalize(df, c, n, max_n_cat)
df = pd.get_dummies(df, dummy_na=True)
df = pd.concat([ignored_flds, df], axis=1)
res = [df, y, na_dict]
if do_scale: res = res + [mapper]
return res
def rf_feat_importance(m, df):
return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}
).sort_values('imp', ascending=False)
def set_rf_samples(n):
""" Changes Scikit learn's random forests to give each tree a random sample of
n random rows.
"""
forest._generate_sample_indices = (lambda rs, n_samples:
forest.check_random_state(rs).randint(0, n_samples, n))
def reset_rf_samples():
""" Undoes the changes produced by set_rf_samples.
"""
forest._generate_sample_indices = (lambda rs, n_samples:
forest.check_random_state(rs).randint(0, n_samples, n_samples))
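# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): how set_rf_samples /
# reset_rf_samples above are typically used around a scikit-learn forest.
# The synthetic data is purely illustrative; `np` is assumed to come from the
# module's wildcard import, and the sketch assumes a scikit-learn version in
# which forest._generate_sample_indices still exists (as this module does).
def _example_rf_subsampling():
    from sklearn.ensemble import RandomForestRegressor
    x_trn, y_trn = np.random.rand(1000, 5), np.random.rand(1000)
    set_rf_samples(200)   # each tree now draws 200 bootstrap rows
    m = RandomForestRegressor(n_estimators=10, n_jobs=-1)
    m.fit(x_trn, y_trn)
    reset_rf_samples()    # restore the default bootstrap size
    return m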
def get_nn_mappers(df, cat_vars, contin_vars):
# Replace nulls with an out-of-range value (max+100) for continuous, '#NA#' for categorical.
for v in contin_vars: df[v] = df[v].fillna(df[v].max()+100,)
for v in cat_vars: df[v].fillna('#NA#', inplace=True)
# list of tuples, containing variable and instance of a transformer for that variable
# for categoricals, use LabelEncoder to map to integers. For continuous, standardize
cat_maps = [(o, LabelEncoder()) for o in cat_vars]
contin_maps = [([o], StandardScaler()) for o in contin_vars]
return DataFrameMapper(cat_maps).fit(df), DataFrameMapper(contin_maps).fit(df)
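# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): an illustrative call to
# get_nn_mappers above on a tiny made-up frame. Column names are hypothetical
# and `pd` is assumed to come from the module's wildcard import.
def _example_nn_mappers():
    df = pd.DataFrame({'store': ['a', 'b', 'a'], 'sales': [10.0, None, 30.0]})
    cat_map, contin_map = get_nn_mappers(df, ['store'], ['sales'])
    return cat_map.transform(df), contin_map.transform(df)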
| 32.975155
| 155
| 0.585547
|
from .imports import *
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.ensemble import forest
from sklearn.tree import export_graphviz
def set_plot_sizes(sml, med, big):
plt.rc('font', size=sml)
plt.rc('axes', titlesize=sml)
plt.rc('axes', labelsize=med)
plt.rc('xtick', labelsize=sml)
plt.rc('ytick', labelsize=sml)
plt.rc('legend', fontsize=sml)
plt.rc('figure', titlesize=big)
def parallel_trees(m, fn, n_jobs=8):
return list(ProcessPoolExecutor(n_jobs).map(fn, m.estimators_))
def draw_tree(t, df, size=10, ratio=0.6, precision=0):
s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,
special_characters=True, rotate=True, precision=precision)
IPython.display.display(graphviz.Source(re.sub('Tree {',
f'Tree {{ size={size}; ratio={ratio}', s)))
def combine_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,
seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):
years = np.asarray(years) - 1970
months = np.asarray(months) - 1
days = np.asarray(days) - 1
types = ('<M8[Y]', '<m8[M]', '<m8[D]', '<m8[W]', '<m8[h]',
'<m8[m]', '<m8[s]', '<m8[ms]', '<m8[us]', '<m8[ns]')
vals = (years, months, days, weeks, hours, minutes, seconds,
milliseconds, microseconds, nanoseconds)
return sum(np.asarray(v, dtype=t) for t, v in zip(types, vals)
if v is not None)
def get_sample(df,n):
idxs = sorted(np.random.permutation(len(df))[:n])
return df.iloc[idxs].copy()
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
def is_date(x): return np.issubdtype(x.dtype, np.datetime64)
def train_cats(df):
for n,c in df.items():
if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()
def apply_cats(df, trn):
for n,c in df.items():
if (n in trn.columns) and (trn[n].dtype.name=='category'):
df[n] = c.astype('category').cat.as_ordered()
df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True)
def fix_missing(df, col, name, na_dict):
if is_numeric_dtype(col):
if pd.isnull(col).sum() or (name in na_dict):
df[name+'_na'] = pd.isnull(col)
filler = na_dict[name] if name in na_dict else col.median()
df[name] = col.fillna(filler)
na_dict[name] = filler
return na_dict
def numericalize(df, col, name, max_n_cat):
if not is_numeric_dtype(col) and ( max_n_cat is None or len(col.cat.categories)>max_n_cat):
df[name] = col.cat.codes+1
def scale_vars(df, mapper):
warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)
if mapper is None:
map_f = [([n],StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]
mapper = DataFrameMapper(map_f).fit(df)
df[mapper.transformed_names_] = mapper.transform(df)
return mapper
def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,
preproc_fn=None, max_n_cat=None, subset=None, mapper=None):
if not ignore_flds: ignore_flds=[]
if not skip_flds: skip_flds=[]
if subset: df = get_sample(df,subset)
else: df = df.copy()
ignored_flds = df.loc[:, ignore_flds]
df.drop(ignore_flds, axis=1, inplace=True)
if preproc_fn: preproc_fn(df)
if y_fld is None: y = None
else:
if not is_numeric_dtype(df[y_fld]): df[y_fld] = df[y_fld].cat.codes
y = df[y_fld].values
skip_flds += [y_fld]
df.drop(skip_flds, axis=1, inplace=True)
if na_dict is None: na_dict = {}
else: na_dict = na_dict.copy()
na_dict_initial = na_dict.copy()
for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
if len(na_dict_initial.keys()) > 0:
df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))], axis=1, inplace=True)
if do_scale: mapper = scale_vars(df, mapper)
for n,c in df.items(): numericalize(df, c, n, max_n_cat)
df = pd.get_dummies(df, dummy_na=True)
df = pd.concat([ignored_flds, df], axis=1)
res = [df, y, na_dict]
if do_scale: res = res + [mapper]
return res
def rf_feat_importance(m, df):
return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}
).sort_values('imp', ascending=False)
def set_rf_samples(n):
forest._generate_sample_indices = (lambda rs, n_samples:
forest.check_random_state(rs).randint(0, n_samples, n))
def reset_rf_samples():
forest._generate_sample_indices = (lambda rs, n_samples:
forest.check_random_state(rs).randint(0, n_samples, n_samples))
def get_nn_mappers(df, cat_vars, contin_vars):
for v in contin_vars: df[v] = df[v].fillna(df[v].max()+100,)
for v in cat_vars: df[v].fillna('#NA#', inplace=True)
cat_maps = [(o, LabelEncoder()) for o in cat_vars]
contin_maps = [([o], StandardScaler()) for o in contin_vars]
return DataFrameMapper(cat_maps).fit(df), DataFrameMapper(contin_maps).fit(df)
| true
| true
|
f7161a30a649ebcfed2157ecee02dcc94672948b
| 4,221
|
py
|
Python
|
mighty/trainer/autoencoder.py
|
dizcza/pytorch-mighty
|
942c53b529377c9100bffc2f7f20ec740763e6ae
|
[
"BSD-3-Clause"
] | 1
|
2020-11-14T20:15:07.000Z
|
2020-11-14T20:15:07.000Z
|
mighty/trainer/autoencoder.py
|
dizcza/pytorch-mighty
|
942c53b529377c9100bffc2f7f20ec740763e6ae
|
[
"BSD-3-Clause"
] | null | null | null |
mighty/trainer/autoencoder.py
|
dizcza/pytorch-mighty
|
942c53b529377c9100bffc2f7f20ec740763e6ae
|
[
"BSD-3-Clause"
] | 2
|
2021-01-15T05:52:53.000Z
|
2021-03-26T17:41:17.000Z
|
from typing import Union
import torch
import torch.nn as nn
import torch.utils.data
from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau
from torch.optim.optimizer import Optimizer
from mighty.loss import LossPenalty
from mighty.models import AutoencoderLinear
from mighty.monitor.monitor import MonitorAutoencoder
from mighty.utils.var_online import MeanOnline
from mighty.utils.signal import peak_to_signal_noise_ratio
from mighty.utils.common import input_from_batch, batch_to_cuda
from mighty.utils.data import DataLoader
from .embedding import TrainerEmbedding
__all__ = [
"TrainerAutoencoder"
]
class TrainerAutoencoder(TrainerEmbedding):
"""
An unsupervised AutoEncoder trainer that not only transforms inputs to
meaningful embeddings but also aims to restore the input signal from it.
Parameters
----------
model : nn.Module
A neural network to train.
criterion : nn.Module
A loss function.
data_loader : DataLoader
A data loader.
optimizer : Optimizer
An optimizer (Adam, SGD, etc.).
scheduler : _LRScheduler or ReduceLROnPlateau, or None
A learning rate scheduler.
Default: None
accuracy_measure : AccuracyEmbedding, optional
Calculates the accuracy of embedding vectors.
Default: ``AccuracyEmbedding()``
**kwargs
Passed to the base class.
"""
watch_modules = TrainerEmbedding.watch_modules + (AutoencoderLinear,)
def __init__(self,
model: nn.Module,
criterion: nn.Module,
data_loader: DataLoader,
optimizer: Optimizer,
scheduler: Union[_LRScheduler, ReduceLROnPlateau] = None,
**kwargs):
super().__init__(model, criterion=criterion, data_loader=data_loader,
optimizer=optimizer, scheduler=scheduler, **kwargs)
def _init_monitor(self, mutual_info) -> MonitorAutoencoder:
monitor = MonitorAutoencoder(
mutual_info=mutual_info,
normalize_inverse=self.data_loader.normalize_inverse
)
return monitor
def _init_online_measures(self):
online = super()._init_online_measures()
# peak signal-to-noise ratio
online['psnr-train'] = MeanOnline()
online['psnr-test'] = MeanOnline()
return online
def _get_loss(self, batch, output):
input = input_from_batch(batch)
latent, reconstructed = output
if isinstance(self.criterion, LossPenalty):
loss = self.criterion(reconstructed, input, latent)
else:
loss = self.criterion(reconstructed, input)
return loss
def _on_forward_pass_batch(self, batch, output, train):
input = input_from_batch(batch)
latent, reconstructed = output
if isinstance(self.criterion, nn.BCEWithLogitsLoss):
reconstructed = reconstructed.sigmoid()
psnr = peak_to_signal_noise_ratio(input, reconstructed)
fold = 'train' if train else 'test'
if torch.isfinite(psnr):
self.online[f'psnr-{fold}'].update(psnr.cpu())
super()._on_forward_pass_batch(batch, latent, train)
def _epoch_finished(self, loss):
self.plot_autoencoder()
for fold in ('train', 'test'):
self.monitor.plot_psnr(self.online[f'psnr-{fold}'].get_mean(),
mode=fold)
super()._epoch_finished(loss)
def plot_autoencoder(self):
"""
Plots AutoEncoder reconstruction.
"""
batch = self.data_loader.sample()
batch = batch_to_cuda(batch)
mode_saved = self.model.training
self.model.train(False)
with torch.no_grad():
latent, reconstructed = self._forward(batch)
if isinstance(self.criterion, nn.BCEWithLogitsLoss):
reconstructed = reconstructed.sigmoid()
self._plot_autoencoder(batch, reconstructed)
self.model.train(mode_saved)
def _plot_autoencoder(self, batch, reconstructed, mode='train'):
input = input_from_batch(batch)
self.monitor.plot_autoencoder(input, reconstructed, mode=mode)
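# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): roughly how the trainer
# above is wired together. The model, criterion and data loader are assumed to
# be constructed elsewhere; only the __init__ signature shown above is relied on.
def _example_trainer(model, criterion, data_loader):
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    trainer = TrainerAutoencoder(model, criterion=criterion,
                                 data_loader=data_loader, optimizer=optimizer)
    return trainer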
| 34.317073
| 77
| 0.662876
|
from typing import Union
import torch
import torch.nn as nn
import torch.utils.data
from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau
from torch.optim.optimizer import Optimizer
from mighty.loss import LossPenalty
from mighty.models import AutoencoderLinear
from mighty.monitor.monitor import MonitorAutoencoder
from mighty.utils.var_online import MeanOnline
from mighty.utils.signal import peak_to_signal_noise_ratio
from mighty.utils.common import input_from_batch, batch_to_cuda
from mighty.utils.data import DataLoader
from .embedding import TrainerEmbedding
__all__ = [
"TrainerAutoencoder"
]
class TrainerAutoencoder(TrainerEmbedding):
watch_modules = TrainerEmbedding.watch_modules + (AutoencoderLinear,)
def __init__(self,
model: nn.Module,
criterion: nn.Module,
data_loader: DataLoader,
optimizer: Optimizer,
scheduler: Union[_LRScheduler, ReduceLROnPlateau] = None,
**kwargs):
super().__init__(model, criterion=criterion, data_loader=data_loader,
optimizer=optimizer, scheduler=scheduler, **kwargs)
def _init_monitor(self, mutual_info) -> MonitorAutoencoder:
monitor = MonitorAutoencoder(
mutual_info=mutual_info,
normalize_inverse=self.data_loader.normalize_inverse
)
return monitor
def _init_online_measures(self):
online = super()._init_online_measures()
online['psnr-train'] = MeanOnline()
online['psnr-test'] = MeanOnline()
return online
def _get_loss(self, batch, output):
input = input_from_batch(batch)
latent, reconstructed = output
if isinstance(self.criterion, LossPenalty):
loss = self.criterion(reconstructed, input, latent)
else:
loss = self.criterion(reconstructed, input)
return loss
def _on_forward_pass_batch(self, batch, output, train):
input = input_from_batch(batch)
latent, reconstructed = output
if isinstance(self.criterion, nn.BCEWithLogitsLoss):
reconstructed = reconstructed.sigmoid()
psnr = peak_to_signal_noise_ratio(input, reconstructed)
fold = 'train' if train else 'test'
if torch.isfinite(psnr):
self.online[f'psnr-{fold}'].update(psnr.cpu())
super()._on_forward_pass_batch(batch, latent, train)
def _epoch_finished(self, loss):
self.plot_autoencoder()
for fold in ('train', 'test'):
self.monitor.plot_psnr(self.online[f'psnr-{fold}'].get_mean(),
mode=fold)
super()._epoch_finished(loss)
def plot_autoencoder(self):
batch = self.data_loader.sample()
batch = batch_to_cuda(batch)
mode_saved = self.model.training
self.model.train(False)
with torch.no_grad():
latent, reconstructed = self._forward(batch)
if isinstance(self.criterion, nn.BCEWithLogitsLoss):
reconstructed = reconstructed.sigmoid()
self._plot_autoencoder(batch, reconstructed)
self.model.train(mode_saved)
def _plot_autoencoder(self, batch, reconstructed, mode='train'):
input = input_from_batch(batch)
self.monitor.plot_autoencoder(input, reconstructed, mode=mode)
| true
| true
|
f7161b6e4e31964b3a9005f00d17ab5c36f84872
| 119
|
py
|
Python
|
managePHP/apps.py
|
uzairAK/serverom-panel
|
3dcde05ad618e6bef280db7d3180f926fe2ab1db
|
[
"MIT"
] | null | null | null |
managePHP/apps.py
|
uzairAK/serverom-panel
|
3dcde05ad618e6bef280db7d3180f926fe2ab1db
|
[
"MIT"
] | null | null | null |
managePHP/apps.py
|
uzairAK/serverom-panel
|
3dcde05ad618e6bef280db7d3180f926fe2ab1db
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class ManagephpConfig(AppConfig):
name = 'managePHP'
| 13.222222
| 33
| 0.680672
|
from django.apps import AppConfig
class ManagephpConfig(AppConfig):
name = 'managePHP'
| true
| true
|
f7161b7902bab32f86e6014239d17115293e71f9
| 472
|
py
|
Python
|
home/migrations/0009_userprofile_image.py
|
VSevagen/ProctOS
|
a34124b0a5d152e30c064c8ed801e7af894eb04a
|
[
"MIT"
] | null | null | null |
home/migrations/0009_userprofile_image.py
|
VSevagen/ProctOS
|
a34124b0a5d152e30c064c8ed801e7af894eb04a
|
[
"MIT"
] | null | null | null |
home/migrations/0009_userprofile_image.py
|
VSevagen/ProctOS
|
a34124b0a5d152e30c064c8ed801e7af894eb04a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-04-29 17:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0008_userprofile_job'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='image',
field=models.ImageField(blank=True, upload_to='profile_image'),
),
]
| 22.47619
| 75
| 0.622881
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0008_userprofile_job'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='image',
field=models.ImageField(blank=True, upload_to='profile_image'),
),
]
| true
| true
|
f7161c240b0c336a9c97d362b4c36a3bed371c38
| 741
|
py
|
Python
|
leetcode/26.remove-duplicates-from-sorted-array.py
|
geemaple/algorithm
|
68bc5032e1ee52c22ef2f2e608053484c487af54
|
[
"MIT"
] | 177
|
2017-08-21T08:57:43.000Z
|
2020-06-22T03:44:22.000Z
|
leetcode/26.remove-duplicates-from-sorted-array.py
|
geemaple/algorithm
|
68bc5032e1ee52c22ef2f2e608053484c487af54
|
[
"MIT"
] | 2
|
2018-09-06T13:39:12.000Z
|
2019-06-03T02:54:45.000Z
|
leetcode/26.remove-duplicates-from-sorted-array.py
|
geemaple/algorithm
|
68bc5032e1ee52c22ef2f2e608053484c487af54
|
[
"MIT"
] | 23
|
2017-08-23T06:01:28.000Z
|
2020-04-20T03:17:36.000Z
|
from typing import List
class Solution(object):
def removeDuplicates(self, nums: List[int]) -> int:
i = 0
for j in range(len(nums)):
if (i == 0 or nums[i - 1] < nums[j]):
nums[i] = nums[j]
i += 1
return i
class Solution2:
def removeDuplicates(self, nums: List[int]) -> int:
slow = fast = 0
while fast < len(nums):
while fast + 1 < len(nums) and nums[fast] == nums[fast + 1]:
fast += 1
if nums[slow] < nums[fast]:
nums[slow], nums[fast] = nums[fast], nums[slow]
slow += 1
fast += 1
return slow
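# Editor's sketch (not part of the original file): a quick self-check of the
# first solution above on a small hand-made input.
if __name__ == '__main__':
    nums = [1, 1, 2, 2, 3]
    k = Solution().removeDuplicates(nums)
    assert k == 3 and nums[:k] == [1, 2, 3]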
| 26.464286
| 72
| 0.404858
|
from typing import List
class Solution(object):
def removeDuplicates(self, nums: List[int]) -> int:
i = 0
for j in range(len(nums)):
if (i == 0 or nums[i - 1] < nums[j]):
nums[i] = nums[j]
i += 1
return i
class Solution2:
def removeDuplicates(self, nums: List[int]) -> int:
slow = fast = 0
while fast < len(nums):
while fast + 1 < len(nums) and nums[fast] == nums[fast + 1]:
fast += 1
if nums[slow] < nums[fast]:
nums[slow], nums[fast] = nums[fast], nums[slow]
slow += 1
fast += 1
return slow
| true
| true
|
f7161caf9548a67532b6022bdbc9d88b81af26b2
| 479
|
py
|
Python
|
code/proto/copyfiles.py
|
KasumiL5x/atom
|
90262f59e56a829017f95f297c1a6701fc4e200e
|
[
"MIT"
] | null | null | null |
code/proto/copyfiles.py
|
KasumiL5x/atom
|
90262f59e56a829017f95f297c1a6701fc4e200e
|
[
"MIT"
] | null | null | null |
code/proto/copyfiles.py
|
KasumiL5x/atom
|
90262f59e56a829017f95f297c1a6701fc4e200e
|
[
"MIT"
] | null | null | null |
from distutils.dir_util import copy_tree
import shutil
import os
def prepare_directory(path):
shutil.rmtree(path, ignore_errors=True) # remove existing folder
if not os.path.isdir(path): # create folder
os.mkdir(path)
# copy resulting files into correct folders
print 'Copying files...',
in_out_dirs = [['./cpp', '../atom/src/proto'], ['./cs', '../client/AtomClientDX/proto']]
for curr in in_out_dirs:
prepare_directory(curr[1])
copy_tree(curr[0], curr[1])
print 'done!'
| 28.176471
| 88
| 0.730689
|
from distutils.dir_util import copy_tree
import shutil
import os
def prepare_directory(path):
shutil.rmtree(path, ignore_errors=True)
if not os.path.isdir(path):
os.mkdir(path)
print 'Copying files...',
in_out_dirs = [['./cpp', '../atom/src/proto'], ['./cs', '../client/AtomClientDX/proto']]
for curr in in_out_dirs:
prepare_directory(curr[1])
copy_tree(curr[0], curr[1])
print 'done!'
| false
| true
|
f7161cda1bfd467e4b31401b02eeb1c3116488de
| 490
|
py
|
Python
|
UnitsOfWork/ConfusionMatrixUnitOfWork.py
|
tzouvanas/bio-informatics
|
f21d1786759fcdd03481f8ee8044130cf354ad7c
|
[
"MIT"
] | null | null | null |
UnitsOfWork/ConfusionMatrixUnitOfWork.py
|
tzouvanas/bio-informatics
|
f21d1786759fcdd03481f8ee8044130cf354ad7c
|
[
"MIT"
] | 1
|
2020-06-18T08:56:54.000Z
|
2020-06-24T22:50:25.000Z
|
UnitsOfWork/ConfusionMatrixUnitOfWork.py
|
tzouvanas/bio-informatics
|
f21d1786759fcdd03481f8ee8044130cf354ad7c
|
[
"MIT"
] | 1
|
2022-02-25T05:36:55.000Z
|
2022-02-25T05:36:55.000Z
|
import numpy as np
from matrices.ConfusionMatrix import ConfusionMatrix
class ConfusionMatrixUnitOfWork:
def go(self):
cm = ConfusionMatrix(4)
cm.loadRow([70, 10, 15, 5])
cm.loadRow([8, 67, 20, 5])
cm.loadRow([0, 11, 88, 1])
cm.loadRow([4, 10, 14, 72])
cm.printStatsOf(0)
cm.printStatsOf(1)
cm.printStatsOf(2)
cm.printStatsOf(3)
print(cm.totalSensitivity())
print(cm.totalSpecificity())
| 25.789474
| 53
| 0.587755
|
import numpy as np
from matrices.ConfusionMatrix import ConfusionMatrix
class ConfusionMatrixUnitOfWork:
def go(self):
cm = ConfusionMatrix(4)
cm.loadRow([70, 10, 15, 5])
cm.loadRow([8, 67, 20, 5])
cm.loadRow([0, 11, 88, 1])
cm.loadRow([4, 10, 14, 72])
cm.printStatsOf(0)
cm.printStatsOf(1)
cm.printStatsOf(2)
cm.printStatsOf(3)
print(cm.totalSensitivity())
print(cm.totalSpecificity())
| true
| true
|
f7161f0450fc301397ce147c5b5b2aada8108f6e
| 1,450
|
py
|
Python
|
apps/sushi/tests/conftest.py
|
techlib/czechelib-stats
|
ca132e326af0924740a525710474870b1fb5fd37
|
[
"MIT"
] | 1
|
2019-12-12T15:38:42.000Z
|
2019-12-12T15:38:42.000Z
|
apps/sushi/tests/conftest.py
|
techlib/czechelib-stats
|
ca132e326af0924740a525710474870b1fb5fd37
|
[
"MIT"
] | null | null | null |
apps/sushi/tests/conftest.py
|
techlib/czechelib-stats
|
ca132e326af0924740a525710474870b1fb5fd37
|
[
"MIT"
] | null | null | null |
import pytest
from core.models import UL_ORG_ADMIN
from sushi.models import CounterReportType, SushiCredentials
from organizations.tests.conftest import organizations # noqa
from publications.tests.conftest import platforms # noqa
from logs.tests.conftest import report_type_nd # noqa
@pytest.fixture()
def counter_report_type_named(report_type_nd):
def fn(name, version=5):
rt = report_type_nd(0, short_name=name + 'rt')
return CounterReportType.objects.create(
code=name, counter_version=version, name=name + ' title', report_type=rt
)
yield fn
@pytest.fixture()
def counter_report_type(report_type_nd):
report_type = report_type_nd(0)
yield CounterReportType.objects.create(
code='TR', counter_version=5, name='Title report', report_type=report_type
)
@pytest.fixture()
def counter_report_type_wrap_report_type(report_type_nd):
def fun(report_type, code='TR', counter_version=5, name='Title report'):
return CounterReportType.objects.create(
code=code, counter_version=counter_version, name=name, report_type=report_type
)
return fun
@pytest.fixture()
def credentials(organizations, platforms):
credentials = SushiCredentials.objects.create(
organization=organizations[0],
platform=platforms[0],
counter_version=5,
lock_level=UL_ORG_ADMIN,
url='http://a.b.c/',
)
yield credentials
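# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a hypothetical test that
# consumes the `credentials` fixture above. It would normally live in a test
# module rather than conftest.py, and it assumes pytest-django provides the
# django_db marker for database access.
@pytest.mark.django_db
def test_credentials_defaults(credentials):
    # values mirror those hard-coded in the fixture above
    assert credentials.url == 'http://a.b.c/'
    assert credentials.counter_version == 5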
| 29.591837
| 90
| 0.722069
|
import pytest
from core.models import UL_ORG_ADMIN
from sushi.models import CounterReportType, SushiCredentials
from organizations.tests.conftest import organizations
from publications.tests.conftest import platforms
from logs.tests.conftest import report_type_nd
@pytest.fixture()
def counter_report_type_named(report_type_nd):
def fn(name, version=5):
rt = report_type_nd(0, short_name=name + 'rt')
return CounterReportType.objects.create(
code=name, counter_version=version, name=name + ' title', report_type=rt
)
yield fn
@pytest.fixture()
def counter_report_type(report_type_nd):
report_type = report_type_nd(0)
yield CounterReportType.objects.create(
code='TR', counter_version=5, name='Title report', report_type=report_type
)
@pytest.fixture()
def counter_report_type_wrap_report_type(report_type_nd):
def fun(report_type, code='TR', counter_version=5, name='Title report'):
return CounterReportType.objects.create(
code=code, counter_version=counter_version, name=name, report_type=report_type
)
return fun
@pytest.fixture()
def credentials(organizations, platforms):
credentials = SushiCredentials.objects.create(
organization=organizations[0],
platform=platforms[0],
counter_version=5,
lock_level=UL_ORG_ADMIN,
url='http://a.b.c/',
)
yield credentials
| true
| true
|
f7161f4dccd9eaa9ca79cf77012d48452c1d866f
| 11,252
|
py
|
Python
|
chalice/deploy/swagger.py
|
devangmehta123/chalice
|
9cba1bff604871c03c179e0b4be94d59a93ba198
|
[
"Apache-2.0"
] | null | null | null |
chalice/deploy/swagger.py
|
devangmehta123/chalice
|
9cba1bff604871c03c179e0b4be94d59a93ba198
|
[
"Apache-2.0"
] | null | null | null |
chalice/deploy/swagger.py
|
devangmehta123/chalice
|
9cba1bff604871c03c179e0b4be94d59a93ba198
|
[
"Apache-2.0"
] | null | null | null |
import copy
import inspect
from typing import Any, List, Dict, Optional, Union # noqa
from chalice.app import Chalice, RouteEntry, Authorizer, CORSConfig # noqa
from chalice.app import ChaliceAuthorizer
from chalice.deploy.planner import StringFormat
from chalice.deploy.models import RestAPI # noqa
from chalice.utils import to_cfn_resource_name
class SwaggerGenerator(object):
_BASE_TEMPLATE = {
'swagger': '2.0',
'info': {
'version': '1.0',
'title': ''
},
'schemes': ['https'],
'paths': {},
'definitions': {
'Empty': {
'type': 'object',
'title': 'Empty Schema',
}
}
} # type: Dict[str, Any]
def __init__(self, region, deployed_resources):
# type: (str, Dict[str, Any]) -> None
self._region = region
self._deployed_resources = deployed_resources
def generate_swagger(self, app, rest_api=None):
# type: (Chalice, Optional[RestAPI]) -> Dict[str, Any]
api = copy.deepcopy(self._BASE_TEMPLATE)
api['info']['title'] = app.app_name
self._add_binary_types(api, app)
self._add_route_paths(api, app)
self._add_resource_policy(api, rest_api)
return api
def _add_resource_policy(self, api, rest_api):
# type: (Dict[str, Any], Optional[RestAPI]) -> None
if rest_api and rest_api.policy:
api['x-amazon-apigateway-policy'] = rest_api.policy.document
def _add_binary_types(self, api, app):
# type: (Dict[str, Any], Chalice) -> None
api['x-amazon-apigateway-binary-media-types'] = app.api.binary_types
def _add_route_paths(self, api, app):
# type: (Dict[str, Any], Chalice) -> None
for path, methods in app.routes.items():
swagger_for_path = {} # type: Dict[str, Any]
api['paths'][path] = swagger_for_path
cors_config = None
methods_with_cors = []
for http_method, view in methods.items():
current = self._generate_route_method(view)
if 'security' in current:
self._add_to_security_definition(
current['security'], api, view)
swagger_for_path[http_method.lower()] = current
if view.cors is not None:
cors_config = view.cors
methods_with_cors.append(http_method)
# Chalice ensures that routes with multiple views have the same
# CORS configuration. So if any entry has CORS enabled, use that
# entry's CORS configuration for the preflight setup.
if cors_config is not None:
self._add_preflight_request(
cors_config, methods_with_cors, swagger_for_path)
def _generate_security_from_auth_obj(self, api_config, authorizer):
# type: (Dict[str, Any], Authorizer) -> None
if isinstance(authorizer, ChaliceAuthorizer):
auth_config = authorizer.config
config = {
'in': 'header',
'type': 'apiKey',
'name': 'Authorization',
'x-amazon-apigateway-authtype': 'custom'
}
api_gateway_authorizer = {
'type': 'token',
'authorizerUri': self._auth_uri(authorizer)
}
if auth_config.execution_role is not None:
api_gateway_authorizer['authorizerCredentials'] = \
auth_config.execution_role
if auth_config.ttl_seconds is not None:
api_gateway_authorizer['authorizerResultTtlInSeconds'] = \
auth_config.ttl_seconds
config['x-amazon-apigateway-authorizer'] = api_gateway_authorizer
else:
config = authorizer.to_swagger()
api_config.setdefault(
'securityDefinitions', {})[authorizer.name] = config
def _auth_uri(self, authorizer):
# type: (ChaliceAuthorizer) -> str
function_name = '%s-%s' % (
self._deployed_resources['api_handler_name'],
authorizer.config.name
)
return self._uri(
self._deployed_resources['lambda_functions'][function_name]['arn'])
def _add_to_security_definition(self, security,
api_config, view):
# type: (Any, Dict[str, Any], RouteEntry) -> None
if view.authorizer is not None:
self._generate_security_from_auth_obj(api_config, view.authorizer)
for auth in security:
name = list(auth.keys())[0]
if name == 'api_key':
# This is just the api_key_required=True config
swagger_snippet = {
'type': 'apiKey',
'name': 'x-api-key',
'in': 'header',
} # type: Dict[str, Any]
api_config.setdefault(
'securityDefinitions', {})[name] = swagger_snippet
def _generate_route_method(self, view):
# type: (RouteEntry) -> Dict[str, Any]
current = {
'consumes': view.content_types,
'produces': ['application/json'],
'responses': self._generate_precanned_responses(),
'x-amazon-apigateway-integration': self._generate_apig_integ(
view),
} # type: Dict[str, Any]
docstring = inspect.getdoc(view.view_function)
if docstring:
doc_lines = docstring.splitlines()
current['summary'] = doc_lines[0]
if len(doc_lines) > 1:
current['description'] = '\n'.join(doc_lines[1:]).strip('\n')
if view.api_key_required:
# When this happens we also have to add the relevant portions
# to the security definitions. We have to somehow indicate
# this because this needs to be added to the global config
# file.
current.setdefault('security', []).append({'api_key': []})
if view.authorizer:
current.setdefault('security', []).append(
{view.authorizer.name: view.authorizer.scopes})
if view.view_args:
self._add_view_args(current, view.view_args)
return current
def _generate_precanned_responses(self):
# type: () -> Dict[str, Any]
responses = {
'200': {
'description': '200 response',
'schema': {
'$ref': '#/definitions/Empty',
}
}
}
return responses
def _uri(self, lambda_arn=None):
# type: (Optional[str]) -> Any
if lambda_arn is None:
lambda_arn = self._deployed_resources['api_handler_arn']
return ('arn:aws:apigateway:{region}:lambda:path/2015-03-31'
'/functions/{lambda_arn}/invocations').format(
region=self._region, lambda_arn=lambda_arn)
def _generate_apig_integ(self, view):
# type: (RouteEntry) -> Dict[str, Any]
apig_integ = {
'responses': {
'default': {
'statusCode': "200",
}
},
'uri': self._uri(),
'passthroughBehavior': 'when_no_match',
'httpMethod': 'POST',
'contentHandling': 'CONVERT_TO_TEXT',
'type': 'aws_proxy',
}
return apig_integ
def _add_view_args(self, single_method, view_args):
# type: (Dict[str, Any], List[str]) -> None
single_method['parameters'] = [
{'name': name, 'in': 'path', 'required': True, 'type': 'string'}
for name in view_args
]
def _add_preflight_request(self, cors, methods, swagger_for_path):
# type: (CORSConfig, List[str], Dict[str, Any]) -> None
methods = methods + ['OPTIONS']
allowed_methods = ','.join(methods)
response_params = {
'Access-Control-Allow-Methods': '%s' % allowed_methods
}
response_params.update(cors.get_access_control_headers())
headers = {k: {'type': 'string'} for k, _ in response_params.items()}
response_params = {'method.response.header.%s' % k: "'%s'" % v for k, v
in response_params.items()}
options_request = {
"consumes": ["application/json"],
"produces": ["application/json"],
"responses": {
"200": {
"description": "200 response",
"schema": {"$ref": "#/definitions/Empty"},
"headers": headers
}
},
"x-amazon-apigateway-integration": {
"responses": {
"default": {
"statusCode": "200",
"responseParameters": response_params,
}
},
"requestTemplates": {
"application/json": "{\"statusCode\": 200}"
},
"passthroughBehavior": "when_no_match",
"type": "mock",
"contentHandling": "CONVERT_TO_TEXT"
}
}
swagger_for_path['options'] = options_request
class CFNSwaggerGenerator(SwaggerGenerator):
def __init__(self):
# type: () -> None
pass
def _uri(self, lambda_arn=None):
# type: (Optional[str]) -> Any
return {
'Fn::Sub': (
'arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31'
'/functions/${APIHandler.Arn}/invocations'
)
}
def _auth_uri(self, authorizer):
# type: (ChaliceAuthorizer) -> Any
return {
'Fn::Sub': (
'arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31'
'/functions/${%s.Arn}/invocations' % to_cfn_resource_name(
authorizer.name)
)
}
class TemplatedSwaggerGenerator(SwaggerGenerator):
def __init__(self):
# type: () -> None
pass
def _uri(self, lambda_arn=None):
# type: (Optional[str]) -> Any
return StringFormat(
'arn:aws:apigateway:{region_name}:lambda:path/2015-03-31'
'/functions/{api_handler_lambda_arn}/invocations',
['region_name', 'api_handler_lambda_arn'],
)
def _auth_uri(self, authorizer):
# type: (ChaliceAuthorizer) -> Any
varname = '%s_lambda_arn' % authorizer.name
return StringFormat(
'arn:aws:apigateway:{region_name}:lambda:path/2015-03-31'
'/functions/{%s}/invocations' % varname,
['region_name', varname],
)
class TerraformSwaggerGenerator(SwaggerGenerator):
def __init__(self):
# type: () -> None
pass
def _uri(self, lambda_arn=None):
# type: (Optional[str]) -> Any
return '${aws_lambda_function.api_handler.invoke_arn}'
def _auth_uri(self, authorizer):
# type: (ChaliceAuthorizer) -> Any
return '${aws_lambda_function.%s.invoke_arn}' % (authorizer.name)
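# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file, untested): a minimal,
# hypothetical use of SwaggerGenerator above. The app and the deployed
# resource ARN are made up; only the 'api_handler_arn' key is exercised for a
# plain route with no authorizer or CORS.
def _example_generate_swagger():
    app = Chalice(app_name='demo')
    @app.route('/')
    def index():
        return {'hello': 'world'}
    deployed = {'api_handler_arn': 'arn:aws:lambda:us-east-1:123456789012:function:demo'}
    return SwaggerGenerator('us-east-1', deployed).generate_swagger(app)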
| 36.891803
| 79
| 0.546481
|
import copy
import inspect
from typing import Any, List, Dict, Optional, Union
from chalice.app import Chalice, RouteEntry, Authorizer, CORSConfig
from chalice.app import ChaliceAuthorizer
from chalice.deploy.planner import StringFormat
from chalice.deploy.models import RestAPI
from chalice.utils import to_cfn_resource_name
class SwaggerGenerator(object):
_BASE_TEMPLATE = {
'swagger': '2.0',
'info': {
'version': '1.0',
'title': ''
},
'schemes': ['https'],
'paths': {},
'definitions': {
'Empty': {
'type': 'object',
'title': 'Empty Schema',
}
}
}
def __init__(self, region, deployed_resources):
self._region = region
self._deployed_resources = deployed_resources
def generate_swagger(self, app, rest_api=None):
api = copy.deepcopy(self._BASE_TEMPLATE)
api['info']['title'] = app.app_name
self._add_binary_types(api, app)
self._add_route_paths(api, app)
self._add_resource_policy(api, rest_api)
return api
def _add_resource_policy(self, api, rest_api):
if rest_api and rest_api.policy:
api['x-amazon-apigateway-policy'] = rest_api.policy.document
def _add_binary_types(self, api, app):
api['x-amazon-apigateway-binary-media-types'] = app.api.binary_types
def _add_route_paths(self, api, app):
for path, methods in app.routes.items():
swagger_for_path = {}
api['paths'][path] = swagger_for_path
cors_config = None
methods_with_cors = []
for http_method, view in methods.items():
current = self._generate_route_method(view)
if 'security' in current:
self._add_to_security_definition(
current['security'], api, view)
swagger_for_path[http_method.lower()] = current
if view.cors is not None:
cors_config = view.cors
methods_with_cors.append(http_method)
if cors_config is not None:
self._add_preflight_request(
cors_config, methods_with_cors, swagger_for_path)
def _generate_security_from_auth_obj(self, api_config, authorizer):
# type: (Dict[str, Any], Authorizer) -> None
if isinstance(authorizer, ChaliceAuthorizer):
auth_config = authorizer.config
config = {
'in': 'header',
'type': 'apiKey',
'name': 'Authorization',
'x-amazon-apigateway-authtype': 'custom'
}
api_gateway_authorizer = {
'type': 'token',
'authorizerUri': self._auth_uri(authorizer)
}
if auth_config.execution_role is not None:
api_gateway_authorizer['authorizerCredentials'] = \
auth_config.execution_role
if auth_config.ttl_seconds is not None:
api_gateway_authorizer['authorizerResultTtlInSeconds'] = \
auth_config.ttl_seconds
config['x-amazon-apigateway-authorizer'] = api_gateway_authorizer
else:
config = authorizer.to_swagger()
api_config.setdefault(
'securityDefinitions', {})[authorizer.name] = config
def _auth_uri(self, authorizer):
# type: (ChaliceAuthorizer) -> str
function_name = '%s-%s' % (
self._deployed_resources['api_handler_name'],
authorizer.config.name
)
return self._uri(
self._deployed_resources['lambda_functions'][function_name]['arn'])
def _add_to_security_definition(self, security,
api_config, view):
# type: (Any, Dict[str, Any], RouteEntry) -> None
if view.authorizer is not None:
self._generate_security_from_auth_obj(api_config, view.authorizer)
for auth in security:
name = list(auth.keys())[0]
if name == 'api_key':
# This is just the api_key_required=True config
swagger_snippet = {
'type': 'apiKey',
'name': 'x-api-key',
'in': 'header',
} # type: Dict[str, Any]
api_config.setdefault(
'securityDefinitions', {})[name] = swagger_snippet
def _generate_route_method(self, view):
# type: (RouteEntry) -> Dict[str, Any]
current = {
'consumes': view.content_types,
'produces': ['application/json'],
'responses': self._generate_precanned_responses(),
'x-amazon-apigateway-integration': self._generate_apig_integ(
view),
} # type: Dict[str, Any]
docstring = inspect.getdoc(view.view_function)
if docstring:
doc_lines = docstring.splitlines()
current['summary'] = doc_lines[0]
if len(doc_lines) > 1:
current['description'] = '\n'.join(doc_lines[1:]).strip('\n')
if view.api_key_required:
# When this happens we also have to add the relevant portions
            # to the security definitions. We have to somehow indicate
            # this because this needs to be added to the global config
# file.
current.setdefault('security', []).append({'api_key': []})
if view.authorizer:
current.setdefault('security', []).append(
{view.authorizer.name: view.authorizer.scopes})
if view.view_args:
self._add_view_args(current, view.view_args)
return current
def _generate_precanned_responses(self):
# type: () -> Dict[str, Any]
responses = {
'200': {
'description': '200 response',
'schema': {
                    '$ref': '#/definitions/Empty'
}
}
}
return responses
def _uri(self, lambda_arn=None):
# type: (Optional[str]) -> Any
if lambda_arn is None:
lambda_arn = self._deployed_resources['api_handler_arn']
return ('arn:aws:apigateway:{region}:lambda:path/2015-03-31'
'/functions/{lambda_arn}/invocations').format(
region=self._region, lambda_arn=lambda_arn)
def _generate_apig_integ(self, view):
# type: (RouteEntry) -> Dict[str, Any]
apig_integ = {
'responses': {
'default': {
'statusCode': "200",
}
},
'uri': self._uri(),
'passthroughBehavior': 'when_no_match',
'httpMethod': 'POST',
'contentHandling': 'CONVERT_TO_TEXT',
'type': 'aws_proxy',
}
return apig_integ
def _add_view_args(self, single_method, view_args):
# type: (Dict[str, Any], List[str]) -> None
single_method['parameters'] = [
{'name': name, 'in': 'path', 'required': True, 'type': 'string'}
for name in view_args
]
def _add_preflight_request(self, cors, methods, swagger_for_path):
# type: (CORSConfig, List[str], Dict[str, Any]) -> None
methods = methods + ['OPTIONS']
allowed_methods = ','.join(methods)
response_params = {
'Access-Control-Allow-Methods': '%s' % allowed_methods
}
response_params.update(cors.get_access_control_headers())
headers = {k: {'type': 'string'} for k, _ in response_params.items()}
response_params = {'method.response.header.%s' % k: "'%s'" % v for k, v
in response_params.items()}
options_request = {
"consumes": ["application/json"],
"produces": ["application/json"],
"responses": {
"200": {
"description": "200 response",
"schema": {"$ref": "#/definitions/Empty"},
"headers": headers
}
},
"x-amazon-apigateway-integration": {
"responses": {
"default": {
"statusCode": "200",
"responseParameters": response_params,
}
},
"requestTemplates": {
"application/json": "{\"statusCode\": 200}"
},
"passthroughBehavior": "when_no_match",
"type": "mock",
"contentHandling": "CONVERT_TO_TEXT"
}
}
swagger_for_path['options'] = options_request
class CFNSwaggerGenerator(SwaggerGenerator):
def __init__(self):
# type: () -> None
pass
def _uri(self, lambda_arn=None):
# type: (Optional[str]) -> Any
return {
'Fn::Sub': (
'arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31'
'/functions/${APIHandler.Arn}/invocations'
)
}
def _auth_uri(self, authorizer):
# type: (ChaliceAuthorizer) -> Any
return {
'Fn::Sub': (
'arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31'
'/functions/${%s.Arn}/invocations' % to_cfn_resource_name(
authorizer.name)
)
}
class TemplatedSwaggerGenerator(SwaggerGenerator):
def __init__(self):
# type: () -> None
pass
def _uri(self, lambda_arn=None):
# type: (Optional[str]) -> Any
return StringFormat(
'arn:aws:apigateway:{region_name}:lambda:path/2015-03-31'
'/functions/{api_handler_lambda_arn}/invocations',
['region_name', 'api_handler_lambda_arn'],
)
def _auth_uri(self, authorizer):
# type: (ChaliceAuthorizer) -> Any
varname = '%s_lambda_arn' % authorizer.name
return StringFormat(
'arn:aws:apigateway:{region_name}:lambda:path/2015-03-31'
'/functions/{%s}/invocations' % varname,
['region_name', varname],
)
class TerraformSwaggerGenerator(SwaggerGenerator):
def __init__(self):
# type: () -> None
pass
def _uri(self, lambda_arn=None):
# type: (Optional[str]) -> Any
return '${aws_lambda_function.api_handler.invoke_arn}'
def _auth_uri(self, authorizer):
# type: (ChaliceAuthorizer) -> Any
return '${aws_lambda_function.%s.invoke_arn}' % (authorizer.name)
| true
| true
|
f7161f67b3ae378daf9562eda41ea0921d60fa10
| 1,605
|
py
|
Python
|
opentutorials_python2/opentutorials_python2/19_Override/2_Override_deepen.py
|
dongrami0425/Python_OpenCV-Study
|
c7faee4f63720659280c3222ba5abfe27740d1f4
|
[
"MIT"
] | null | null | null |
opentutorials_python2/opentutorials_python2/19_Override/2_Override_deepen.py
|
dongrami0425/Python_OpenCV-Study
|
c7faee4f63720659280c3222ba5abfe27740d1f4
|
[
"MIT"
] | null | null | null |
opentutorials_python2/opentutorials_python2/19_Override/2_Override_deepen.py
|
dongrami0425/Python_OpenCV-Study
|
c7faee4f63720659280c3222ba5abfe27740d1f4
|
[
"MIT"
] | null | null | null |
# Calculator example: putting method overriding to use.
class Cal(object):
_history = []
def __init__(self, v1, v2):
if isinstance(v1, int):
self.v1 = v1
if isinstance(v2, int):
self.v2 = v2
def add(self):
result = self.v1+self.v2
Cal._history.append("add : %d+%d=%d" % (self.v1, self.v2, result))
return result
def subtract(self):
result = self.v1-self.v2
Cal._history.append("subtract : %d-%d=%d" % (self.v1, self.v2, result))
return result
def setV1(self, v):
if isinstance(v, int):
self.v1 = v
def getV1(self):
return self.v1
@classmethod
def history(cls):
for item in Cal._history:
print(item)
    # Parent-class info method: reports the values stored in the instance.
def info(self):
return "Cal => v1 : %d, v2 : %d" % (self.v1, self.v2)
class CalMultiply(Cal):
def multiply(self):
result = self.v1*self.v2
Cal._history.append("multiply : %d*%d=%d" % (self.v1, self.v2, result))
return result
    # Overridden info method 1
def info(self):
return "CalMultiply => %s" % super().info() # 여기서 super는 Cal 의 info메소드
class CalDivide(CalMultiply):
def divide(self):
result = self.v1/self.v2
Cal._history.append("divide : %d/%d=%d" % (self.v1, self.v2, result))
return result
    # Overridden info method 2
def info(self):
return "CalDivide => %s" % super().info() # 여기서 super는 CalMultiply 의 info메소드
c0 = Cal(30, 60)
print(c0.info())
c1 = CalMultiply(10,10)
print(c1.info())
c2 = CalDivide(20,10)
print(c2.info())
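# Hypothetical addition (not part of the original example): because _history is a
# class attribute shared by Cal and all of its subclasses, the history() classmethod
# reports operations performed through any of the calculator objects above.
c1.multiply()    # records "multiply : 10*10=100"
c2.divide()      # records "divide : 20/10=2"
Cal.history()    # prints both entries in the order they were recorded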
| 24.318182
| 84
| 0.560125
|
class Cal(object):
_history = []
def __init__(self, v1, v2):
if isinstance(v1, int):
self.v1 = v1
if isinstance(v2, int):
self.v2 = v2
def add(self):
result = self.v1+self.v2
Cal._history.append("add : %d+%d=%d" % (self.v1, self.v2, result))
return result
def subtract(self):
result = self.v1-self.v2
Cal._history.append("subtract : %d-%d=%d" % (self.v1, self.v2, result))
return result
def setV1(self, v):
if isinstance(v, int):
self.v1 = v
def getV1(self):
return self.v1
@classmethod
def history(cls):
for item in Cal._history:
print(item)
def info(self):
return "Cal => v1 : %d, v2 : %d" % (self.v1, self.v2)
class CalMultiply(Cal):
def multiply(self):
result = self.v1*self.v2
Cal._history.append("multiply : %d*%d=%d" % (self.v1, self.v2, result))
return result
def info(self):
return "CalMultiply => %s" % super().info()
class CalDivide(CalMultiply):
def divide(self):
result = self.v1/self.v2
Cal._history.append("divide : %d/%d=%d" % (self.v1, self.v2, result))
return result
def info(self):
return "CalDivide => %s" % super().info()
c0 = Cal(30, 60)
print(c0.info())
c1 = CalMultiply(10,10)
print(c1.info())
c2 = CalDivide(20,10)
print(c2.info())
| true
| true
|
f7161fc6e8dcba67239d796b5f8323d0d179af8d
| 850
|
py
|
Python
|
scripts/filter_top.py
|
isabella232/azure-signalr-bench
|
99a5af8ac350282b78a3a06b7aadd786e7150244
|
[
"MIT"
] | null | null | null |
scripts/filter_top.py
|
isabella232/azure-signalr-bench
|
99a5af8ac350282b78a3a06b7aadd786e7150244
|
[
"MIT"
] | 1
|
2021-02-23T23:13:09.000Z
|
2021-02-23T23:13:09.000Z
|
scripts/filter_top.py
|
isabella232/azure-signalr-bench
|
99a5af8ac350282b78a3a06b7aadd786e7150244
|
[
"MIT"
] | null | null | null |
import argparse
import datetime
import glob, os, re
from filter_utils import *
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--startDate",
default=aWeekAgo(),
help="specify the starting date to check, default is a week ago")
parser.add_argument("-e", "--endDate",
help="specify the ending date to check, default is today",
default=today())
parser.add_argument("-w", "--wildcard",
type=str,
choices=["appserver_*top.txt", "appserver*.txt", "slave_*top.txt", "slave*.txt"],
help="specify the file prefix, default is appserver_*top.txt",
default="appserver_*top.txt")
args = parser.parse_args()
filterLog("/mnt/Data/NginxRoot",
args.wildcard,
args.startDate,
args.endDate)
| 34
| 91
| 0.621176
|
import argparse
import datetime
import glob, os, re
from filter_utils import *
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--startDate",
default=aWeekAgo(),
help="specify the starting date to check, default is a week ago")
parser.add_argument("-e", "--endDate",
help="specify the ending date to check, default is today",
default=today())
parser.add_argument("-w", "--wildcard",
type=str,
choices=["appserver_*top.txt", "appserver*.txt", "slave_*top.txt", "slave*.txt"],
help="specify the file prefix, default is appserver_*top.txt",
default="appserver_*top.txt")
args = parser.parse_args()
filterLog("/mnt/Data/NginxRoot",
args.wildcard,
args.startDate,
args.endDate)
| true
| true
|
f71620dd4dbcb8427eb3d4e5ea5b213670bdf5dd
| 1,482
|
py
|
Python
|
contrib/devtools/fix-copyright-headers.py
|
flirtcoin/flirtcoin
|
1bbaa7cef10b9db404095127fe6def859541b266
|
[
"MIT"
] | null | null | null |
contrib/devtools/fix-copyright-headers.py
|
flirtcoin/flirtcoin
|
1bbaa7cef10b9db404095127fe6def859541b266
|
[
"MIT"
] | null | null | null |
contrib/devtools/fix-copyright-headers.py
|
flirtcoin/flirtcoin
|
1bbaa7cef10b9db404095127fe6def859541b266
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Run this script inside of src/ and it will look for all the files
that were changed this year that still have the last year in the
copyright headers, and it will fix the headers on that file using
a perl regex one liner.
For example: if it finds something like this and we're in 2014
// Copyright (c) 2009-2013 The Flirtcoin developers
it will change it to
// Copyright (c) 2009-2014 The Flirtcoin developers
It will do this for all the files in the folder and its children.
Author: @gubatron
'''
import os
import time
year = time.gmtime()[0]
last_year = year - 1
command = "perl -pi -e 's/%s The Flirtcoin/%s The Flirtcoin/' %s"
listFilesCommand = "find . | grep %s"
extensions = [".cpp",".h"]
def getLastGitModifiedDate(filePath):
gitGetLastCommitDateCommand = "git log " + filePath +" | grep Date | head -n 1"
p = os.popen(gitGetLastCommitDateCommand)
result = ""
for l in p:
result = l
break
result = result.replace("\n","")
return result
n=1
for extension in extensions:
foundFiles = os.popen(listFilesCommand % extension)
for filePath in foundFiles:
filePath = filePath[1:-1]
if filePath.endswith(extension):
filePath = os.getcwd() + filePath
modifiedTime = getLastGitModifiedDate(filePath)
if len(modifiedTime) > 0 and str(year) in modifiedTime:
print n,"Last Git Modified: ", modifiedTime, " - ", filePath
os.popen(command % (last_year,year,filePath))
n = n + 1
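# Hypothetical illustration (not part of the original script): per matching line,
# the perl one-liner built above performs the same substitution as this pure-Python sketch.
import re
_sample = "// Copyright (c) 2009-2013 The Flirtcoin developers"
print(re.sub(r"2013 The Flirtcoin", "2014 The Flirtcoin", _sample))
# -> // Copyright (c) 2009-2014 The Flirtcoin developers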
| 27.444444
| 81
| 0.695007
|
'''
Run this script inside of src/ and it will look for all the files
that were changed this year that still have the last year in the
copyright headers, and it will fix the headers on that file using
a perl regex one liner.
For example: if it finds something like this and we're in 2014
// Copyright (c) 2009-2013 The Flirtcoin developers
it will change it to
// Copyright (c) 2009-2014 The Flirtcoin developers
It will do this for all the files in the folder and its children.
Author: @gubatron
'''
import os
import time
year = time.gmtime()[0]
last_year = year - 1
command = "perl -pi -e 's/%s The Flirtcoin/%s The Flirtcoin/' %s"
listFilesCommand = "find . | grep %s"
extensions = [".cpp",".h"]
def getLastGitModifiedDate(filePath):
gitGetLastCommitDateCommand = "git log " + filePath +" | grep Date | head -n 1"
p = os.popen(gitGetLastCommitDateCommand)
result = ""
for l in p:
result = l
break
result = result.replace("\n","")
return result
n=1
for extension in extensions:
foundFiles = os.popen(listFilesCommand % extension)
for filePath in foundFiles:
filePath = filePath[1:-1]
if filePath.endswith(extension):
filePath = os.getcwd() + filePath
modifiedTime = getLastGitModifiedDate(filePath)
if len(modifiedTime) > 0 and str(year) in modifiedTime:
print n,"Last Git Modified: ", modifiedTime, " - ", filePath
os.popen(command % (last_year,year,filePath))
n = n + 1
| false
| true
|
f71620ef373fd3749805cb5e901e1f1cc8895aef
| 159
|
py
|
Python
|
frimcla/StatisticalAnalysis/__init__.py
|
ManuGar/ObjectClassificationByTransferLearning
|
fc009fc5a71668355a94ea1a8f506fdde8e7bde0
|
[
"MIT"
] | 3
|
2021-04-22T09:15:34.000Z
|
2022-01-05T09:50:18.000Z
|
frimcla/StatisticalAnalysis/__init__.py
|
ManuGar/ObjectClassificationByTransferLearning
|
fc009fc5a71668355a94ea1a8f506fdde8e7bde0
|
[
"MIT"
] | 4
|
2020-09-25T22:46:39.000Z
|
2021-08-25T15:01:14.000Z
|
frimcla/StatisticalAnalysis/__init__.py
|
ManuGar/ObjectClassificationByTransferLearning
|
fc009fc5a71668355a94ea1a8f506fdde8e7bde0
|
[
"MIT"
] | 3
|
2020-07-31T14:11:26.000Z
|
2021-11-24T01:53:01.000Z
|
"""A pypi demonstration vehicle.
.. moduleauthor:: Andrew Carter <andrew@invalid.com>
"""
from .statisticalAnalysis import *
__all__ = ['compare_methods']
| 15.9
| 52
| 0.72956
|
from .statisticalAnalysis import *
__all__ = ['compare_methods']
| true
| true
|
f71622aea7d6b89a7c4742971cb49b5011e7e9cd
| 6,024
|
py
|
Python
|
src/oci/log_analytics/models/parser_test_result.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/log_analytics/models/parser_test_result.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/log_analytics/models/parser_test_result.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ParserTestResult(object):
"""
ParserTestResult
"""
def __init__(self, **kwargs):
"""
Initializes a new ParserTestResult object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param additional_info:
The value to assign to the additional_info property of this ParserTestResult.
:type additional_info: dict(str, str)
:param entries:
The value to assign to the entries property of this ParserTestResult.
:type entries: list[oci.log_analytics.models.AbstractParserTestResultLogEntry]
:param example_content:
The value to assign to the example_content property of this ParserTestResult.
:type example_content: str
:param lines:
The value to assign to the lines property of this ParserTestResult.
:type lines: list[oci.log_analytics.models.AbstractParserTestResultLogLine]
:param named_capture_groups:
The value to assign to the named_capture_groups property of this ParserTestResult.
:type named_capture_groups: list[str]
"""
self.swagger_types = {
'additional_info': 'dict(str, str)',
'entries': 'list[AbstractParserTestResultLogEntry]',
'example_content': 'str',
'lines': 'list[AbstractParserTestResultLogLine]',
'named_capture_groups': 'list[str]'
}
self.attribute_map = {
'additional_info': 'additionalInfo',
'entries': 'entries',
'example_content': 'exampleContent',
'lines': 'lines',
'named_capture_groups': 'namedCaptureGroups'
}
self._additional_info = None
self._entries = None
self._example_content = None
self._lines = None
self._named_capture_groups = None
@property
def additional_info(self):
"""
Gets the additional_info of this ParserTestResult.
Additional information for the test result.
:return: The additional_info of this ParserTestResult.
:rtype: dict(str, str)
"""
return self._additional_info
@additional_info.setter
def additional_info(self, additional_info):
"""
Sets the additional_info of this ParserTestResult.
Additional information for the test result.
:param additional_info: The additional_info of this ParserTestResult.
:type: dict(str, str)
"""
self._additional_info = additional_info
@property
def entries(self):
"""
Gets the entries of this ParserTestResult.
The test result log entries.
:return: The entries of this ParserTestResult.
:rtype: list[oci.log_analytics.models.AbstractParserTestResultLogEntry]
"""
return self._entries
@entries.setter
def entries(self, entries):
"""
Sets the entries of this ParserTestResult.
The test result log entries.
:param entries: The entries of this ParserTestResult.
:type: list[oci.log_analytics.models.AbstractParserTestResultLogEntry]
"""
self._entries = entries
@property
def example_content(self):
"""
Gets the example_content of this ParserTestResult.
The example content.
:return: The example_content of this ParserTestResult.
:rtype: str
"""
return self._example_content
@example_content.setter
def example_content(self, example_content):
"""
Sets the example_content of this ParserTestResult.
The example content.
:param example_content: The example_content of this ParserTestResult.
:type: str
"""
self._example_content = example_content
@property
def lines(self):
"""
Gets the lines of this ParserTestResult.
The test result log lines.
:return: The lines of this ParserTestResult.
:rtype: list[oci.log_analytics.models.AbstractParserTestResultLogLine]
"""
return self._lines
@lines.setter
def lines(self, lines):
"""
Sets the lines of this ParserTestResult.
The test result log lines.
:param lines: The lines of this ParserTestResult.
:type: list[oci.log_analytics.models.AbstractParserTestResultLogLine]
"""
self._lines = lines
@property
def named_capture_groups(self):
"""
Gets the named_capture_groups of this ParserTestResult.
The named capture groups.
:return: The named_capture_groups of this ParserTestResult.
:rtype: list[str]
"""
return self._named_capture_groups
@named_capture_groups.setter
def named_capture_groups(self, named_capture_groups):
"""
Sets the named_capture_groups of this ParserTestResult.
The named capture groups.
:param named_capture_groups: The named_capture_groups of this ParserTestResult.
:type: list[str]
"""
self._named_capture_groups = named_capture_groups
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 30.892308
| 245
| 0.65488
|
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ParserTestResult(object):
def __init__(self, **kwargs):
self.swagger_types = {
'additional_info': 'dict(str, str)',
'entries': 'list[AbstractParserTestResultLogEntry]',
'example_content': 'str',
'lines': 'list[AbstractParserTestResultLogLine]',
'named_capture_groups': 'list[str]'
}
self.attribute_map = {
'additional_info': 'additionalInfo',
'entries': 'entries',
'example_content': 'exampleContent',
'lines': 'lines',
'named_capture_groups': 'namedCaptureGroups'
}
self._additional_info = None
self._entries = None
self._example_content = None
self._lines = None
self._named_capture_groups = None
@property
def additional_info(self):
return self._additional_info
@additional_info.setter
def additional_info(self, additional_info):
self._additional_info = additional_info
@property
def entries(self):
return self._entries
@entries.setter
def entries(self, entries):
self._entries = entries
@property
def example_content(self):
return self._example_content
@example_content.setter
def example_content(self, example_content):
self._example_content = example_content
@property
def lines(self):
return self._lines
@lines.setter
def lines(self, lines):
self._lines = lines
@property
def named_capture_groups(self):
return self._named_capture_groups
@named_capture_groups.setter
def named_capture_groups(self, named_capture_groups):
self._named_capture_groups = named_capture_groups
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f71622f18cba8cd47f00c885daecfb96114e8221
| 806
|
py
|
Python
|
tests/utils.py
|
thiagopena/python-mcollective
|
77cac3e23e6a61542662be3b8f94ee54bbfea942
|
[
"BSD-3-Clause"
] | 1
|
2015-07-29T00:35:51.000Z
|
2015-07-29T00:35:51.000Z
|
tests/utils.py
|
jantman/python-mcollective
|
ceb8f362bc8a1981b42696889250bed1cce07fea
|
[
"BSD-3-Clause"
] | null | null | null |
tests/utils.py
|
jantman/python-mcollective
|
ceb8f362bc8a1981b42696889250bed1cce07fea
|
[
"BSD-3-Clause"
] | 1
|
2019-01-02T18:40:24.000Z
|
2019-01-02T18:40:24.000Z
|
# coding: utf-8
import os
import jinja2
ROOT = os.path.abspath(os.path.join(__file__, '..'))
DEFAULT_CTXT = {
'topic': 'topic',
'collectives': ['mcollective', 'sub1'],
'maincollective': 'mcollective',
'root': ROOT,
'loglevel': 'debug',
'daemonize': 0,
'identity': 'mco1',
'securityprovider': 'none',
'connector': {
'name': 'activemq',
'options': {
'pool.size': '1',
'pool.1.host': 'localhost',
'pool.1.port': '6163',
'pool.1.user': 'mcollective',
'pool.1.password': 'secret',
'pool.1.ssl': 'false',
},
},
}
def get_template(name, package=__package__):
env = jinja2.Environment(loader=jinja2.PackageLoader(package, 'templates'))
return env.get_template(name)
| 24.424242
| 79
| 0.550868
|
import os
import jinja2
ROOT = os.path.abspath(os.path.join(__file__, '..'))
DEFAULT_CTXT = {
'topic': 'topic',
'collectives': ['mcollective', 'sub1'],
'maincollective': 'mcollective',
'root': ROOT,
'loglevel': 'debug',
'daemonize': 0,
'identity': 'mco1',
'securityprovider': 'none',
'connector': {
'name': 'activemq',
'options': {
'pool.size': '1',
'pool.1.host': 'localhost',
'pool.1.port': '6163',
'pool.1.user': 'mcollective',
'pool.1.password': 'secret',
'pool.1.ssl': 'false',
},
},
}
def get_template(name, package=__package__):
env = jinja2.Environment(loader=jinja2.PackageLoader(package, 'templates'))
return env.get_template(name)
| true
| true
|
f71623279ac09811be88393ace0f3f65306bffca
| 3,425
|
py
|
Python
|
scripts/ancora.py
|
crscardellino/dnnvsd
|
2de14f05b71199be1b0ee601287243ea25f92cba
|
[
"BSD-3-Clause"
] | 3
|
2016-03-10T21:03:28.000Z
|
2018-04-09T03:53:58.000Z
|
scripts/ancora.py
|
crscardellino/dnnvsd
|
2de14f05b71199be1b0ee601287243ea25f92cba
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/ancora.py
|
crscardellino/dnnvsd
|
2de14f05b71199be1b0ee601287243ea25f92cba
|
[
"BSD-3-Clause"
] | null | null | null |
from nltk.corpus.reader.api import SyntaxCorpusReader
from nltk.corpus.reader import xmldocs
from nltk import tree
from nltk.util import LazyMap, LazyConcatenation
from nltk.corpus.reader.util import concat
def parsed(element):
"""Converts a 'sentence' XML element (xml.etree.ElementTree.Element) to
an NLTK tree.
element -- the XML sentence element (or a subelement)
"""
if element:
# element viewed as a list is non-empty (it has subelements)
subtrees = map(parsed, element)
subtrees = [t for t in subtrees if t is not None]
return tree.Tree(element.tag, subtrees)
else:
# element viewed as a list is empty. we are in a terminal.
if element.get('elliptic') == 'yes':
return None
else:
return tree.Tree(element.get('pos') or element.get('ne') or 'unk',
[element.get('wd')])
def tagged(element):
"""Converts a 'sentence' XML element (xml.etree.ElementTree.Element) to
a tagged sentence.
element -- the XML sentence element (or a subelement)
"""
# http://www.w3schools.com/xpath/xpath_syntax.asp
# XXX: XPath '//*[@wd]' not working
# return [(x.get('wd'), x.get('pos') or x.get('ne'))
# for x in element.findall('*//*[@wd]')] + [('.', 'fp')]
# convert to tree and get the tagged sent
pos = parsed(element).pos()
    # filter None words (may return an empty list)
return list(filter(lambda x: x[0] is not None, pos))
def untagged(element):
"""Converts a 'sentence' XML element (xml.etree.ElementTree.Element) to
a sentence.
element -- the XML sentence element (or a subelement)
"""
# http://www.w3schools.com/xpath/xpath_syntax.asp
# XXX: XPath '//*[@wd]' not working
# return [x.get('wd') for x in element.findall('*//*[@wd]')] + [('.', 'fp')]
# convert to tree and get the sent
sent = parsed(element).leaves()
    # filter None words (may return an empty list)
return list(filter(lambda x: x is not None, sent))
class AncoraCorpusReader(SyntaxCorpusReader):
def __init__(self, path, files=None):
if files is None:
files = '.*\.tbf\.xml'
self.xmlreader = xmldocs.XMLCorpusReader(path, files)
def parsed_sents(self, fileids=None):
return LazyMap(parsed, self.elements(fileids))
def tagged_sents(self, fileids=None):
return LazyMap(tagged, self.elements(fileids))
def sents(self, fileids=None):
return LazyMap(untagged, self.elements(fileids))
def elements(self, fileids=None):
# FIXME: skip sentence elements that will result in empty sentences!
if not fileids:
fileids = self.xmlreader.fileids()
# xml() returns a top element that is also a list of sentence elements
return LazyConcatenation(self.xmlreader.xml(f) for f in fileids)
def tagged_words(self, fileids=None):
# XXX: use LazyConcatenation?
return concat(self.tagged_sents(fileids))
def __repr__(self):
return '<AncoraCorpusReader>'
class SimpleAncoraCorpusReader(AncoraCorpusReader):
"""Ancora corpus with simplified POS tagset.
"""
def __init__(self, path, files=None):
super().__init__(path, files)
def tagged_sents(self, fileids=None):
f = lambda s: [(w, t[:2]) for w, t in s]
return LazyMap(f, super().tagged_sents(fileids))
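# Hypothetical usage sketch (the corpus path, file pattern and example tags below are
# assumptions, not part of the original module): SimpleAncoraCorpusReader truncates
# every POS tag to its first two characters.
#
#     corpus = SimpleAncoraCorpusReader('/path/to/ancora/', '.*\.tbf\.xml')
#     first_sent = corpus.tagged_sents()[0]
#     print(first_sent)   # e.g. [('El', 'da'), ('Gobierno', 'nc'), ...]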
| 33.578431
| 80
| 0.640584
|
from nltk.corpus.reader.api import SyntaxCorpusReader
from nltk.corpus.reader import xmldocs
from nltk import tree
from nltk.util import LazyMap, LazyConcatenation
from nltk.corpus.reader.util import concat
def parsed(element):
if element:
subtrees = map(parsed, element)
subtrees = [t for t in subtrees if t is not None]
return tree.Tree(element.tag, subtrees)
else:
if element.get('elliptic') == 'yes':
return None
else:
return tree.Tree(element.get('pos') or element.get('ne') or 'unk',
[element.get('wd')])
def tagged(element):
pos = parsed(element).pos()
return list(filter(lambda x: x[0] is not None, pos))
def untagged(element):
sent = parsed(element).leaves()
return list(filter(lambda x: x is not None, sent))
class AncoraCorpusReader(SyntaxCorpusReader):
def __init__(self, path, files=None):
if files is None:
files = '.*\.tbf\.xml'
self.xmlreader = xmldocs.XMLCorpusReader(path, files)
def parsed_sents(self, fileids=None):
return LazyMap(parsed, self.elements(fileids))
def tagged_sents(self, fileids=None):
return LazyMap(tagged, self.elements(fileids))
def sents(self, fileids=None):
return LazyMap(untagged, self.elements(fileids))
def elements(self, fileids=None):
if not fileids:
fileids = self.xmlreader.fileids()
return LazyConcatenation(self.xmlreader.xml(f) for f in fileids)
def tagged_words(self, fileids=None):
return concat(self.tagged_sents(fileids))
def __repr__(self):
return '<AncoraCorpusReader>'
class SimpleAncoraCorpusReader(AncoraCorpusReader):
def __init__(self, path, files=None):
super().__init__(path, files)
def tagged_sents(self, fileids=None):
f = lambda s: [(w, t[:2]) for w, t in s]
return LazyMap(f, super().tagged_sents(fileids))
| true
| true
|
f71624b25629b5f413869a0e9a164584fb6bbe16
| 54,615
|
py
|
Python
|
rllab/misc/instrument.py
|
RussellM2020/RoboticTasks
|
c7157c986cdbbf08cc0ea296205ef2dbcf6fc487
|
[
"MIT"
] | null | null | null |
rllab/misc/instrument.py
|
RussellM2020/RoboticTasks
|
c7157c986cdbbf08cc0ea296205ef2dbcf6fc487
|
[
"MIT"
] | null | null | null |
rllab/misc/instrument.py
|
RussellM2020/RoboticTasks
|
c7157c986cdbbf08cc0ea296205ef2dbcf6fc487
|
[
"MIT"
] | null | null | null |
import os
import re
import subprocess
import base64
import os.path as osp
import pickle as pickle
import inspect
import hashlib
import sys
from contextlib import contextmanager
import errno
from rllab.core.serializable import Serializable
from rllab import config
from rllab.misc.console import mkdir_p
from rllab.misc import ext
from io import StringIO
import datetime
import dateutil.tz
import json
import time
import numpy as np
from rllab.misc.ext import AttrDict
from rllab.viskit.core import flatten
import collections
class StubBase(object):
def __getitem__(self, item):
return StubMethodCall(self, "__getitem__", args=[item], kwargs=dict())
def __getattr__(self, item):
try:
return super(self.__class__, self).__getattribute__(item)
except AttributeError:
if item.startswith("__") and item.endswith("__"):
raise
return StubAttr(self, item)
def __pow__(self, power, modulo=None):
return StubMethodCall(self, "__pow__", [power, modulo], dict())
def __call__(self, *args, **kwargs):
return StubMethodCall(self.obj, self.attr_name, args, kwargs)
def __add__(self, other):
return StubMethodCall(self, "__add__", [other], dict())
def __rmul__(self, other):
return StubMethodCall(self, "__rmul__", [other], dict())
def __div__(self, other):
return StubMethodCall(self, "__div__", [other], dict())
def __rdiv__(self, other):
return StubMethodCall(BinaryOp(), "rdiv", [self, other], dict()) # self, "__rdiv__", [other], dict())
def __rpow__(self, power, modulo=None):
return StubMethodCall(self, "__rpow__", [power, modulo], dict())
class BinaryOp(Serializable):
def __init__(self):
Serializable.quick_init(self, locals())
def rdiv(self, a, b):
return b / a
# def __init__(self, opname, a, b):
# self.opname = opname
# self.a = a
# self.b = b
class StubAttr(StubBase):
def __init__(self, obj, attr_name):
self.__dict__["_obj"] = obj
self.__dict__["_attr_name"] = attr_name
@property
def obj(self):
return self.__dict__["_obj"]
@property
def attr_name(self):
return self.__dict__["_attr_name"]
def __str__(self):
return "StubAttr(%s, %s)" % (str(self.obj), str(self.attr_name))
class StubMethodCall(StubBase, Serializable):
def __init__(self, obj, method_name, args, kwargs):
self._serializable_initialized = False
Serializable.quick_init(self, locals())
self.obj = obj
self.method_name = method_name
self.args = args
self.kwargs = kwargs
def __str__(self):
return "StubMethodCall(%s, %s, %s, %s)" % (
str(self.obj), str(self.method_name), str(self.args), str(self.kwargs))
class StubClass(StubBase):
def __init__(self, proxy_class):
self.proxy_class = proxy_class
def __call__(self, *args, **kwargs):
if len(args) > 0:
# Convert the positional arguments to keyword arguments
spec = inspect.getargspec(self.proxy_class.__init__)
kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
args = tuple()
return StubObject(self.proxy_class, *args, **kwargs)
def __getstate__(self):
return dict(proxy_class=self.proxy_class)
def __setstate__(self, dict):
self.proxy_class = dict["proxy_class"]
def __getattr__(self, item):
if hasattr(self.proxy_class, item):
return StubAttr(self, item)
raise AttributeError
def __str__(self):
return "StubClass(%s)" % self.proxy_class
class StubObject(StubBase):
def __init__(self, __proxy_class, *args, **kwargs):
if len(args) > 0:
spec = inspect.getargspec(__proxy_class.__init__)
kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
args = tuple()
self.proxy_class = __proxy_class
self.args = args
self.kwargs = kwargs
def __getstate__(self):
return dict(args=self.args, kwargs=self.kwargs, proxy_class=self.proxy_class)
def __setstate__(self, dict):
self.args = dict["args"]
self.kwargs = dict["kwargs"]
self.proxy_class = dict["proxy_class"]
def __getattr__(self, item):
        # why doesn't the commented code work?
        # return StubAttr(self, item)
        # checks bypassed to allow for accessing instance fields
if hasattr(self.proxy_class, item):
return StubAttr(self, item)
raise AttributeError('Cannot get attribute %s from %s' % (item, self.proxy_class))
def __str__(self):
return "StubObject(%s, *%s, **%s)" % (str(self.proxy_class), str(self.args), str(self.kwargs))
class VariantDict(AttrDict):
def __init__(self, d, hidden_keys):
super(VariantDict, self).__init__(d)
self._hidden_keys = hidden_keys
def dump(self):
return {k: v for k, v in self.items() if k not in self._hidden_keys}
class VariantGenerator(object):
"""
Usage:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", ['x', 'y'])
vg.variants() => # all combinations of [1,2,3] x ['x','y']
Supports noncyclic dependency among parameters:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", lambda param1: [param1+1, param1+2])
vg.variants() => # ..
"""
def __init__(self):
self._variants = []
self._populate_variants()
self._hidden_keys = []
for k, vs, cfg in self._variants:
if cfg.get("hide", False):
self._hidden_keys.append(k)
def add(self, key, vals, **kwargs):
self._variants.append((key, vals, kwargs))
def _populate_variants(self):
methods = inspect.getmembers(
self.__class__, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
methods = [x[1].__get__(self, self.__class__)
for x in methods if getattr(x[1], '__is_variant', False)]
for m in methods:
self.add(m.__name__, m, **getattr(m, "__variant_config", dict()))
def variants(self, randomized=False):
ret = list(self.ivariants())
if randomized:
np.random.shuffle(ret)
return list(map(self.variant_dict, ret))
def variant_dict(self, variant):
return VariantDict(variant, self._hidden_keys)
def to_name_suffix(self, variant):
suffix = []
for k, vs, cfg in self._variants:
if not cfg.get("hide", False):
suffix.append(k + "_" + str(variant[k]))
return "_".join(suffix)
def ivariants(self):
dependencies = list()
for key, vals, _ in self._variants:
if hasattr(vals, "__call__"):
args = inspect.getargspec(vals).args
if hasattr(vals, 'im_self') or hasattr(vals, "__self__"):
# remove the first 'self' parameter
args = args[1:]
dependencies.append((key, set(args)))
else:
dependencies.append((key, set()))
sorted_keys = []
# topo sort all nodes
while len(sorted_keys) < len(self._variants):
# get all nodes with zero in-degree
free_nodes = [k for k, v in dependencies if len(v) == 0]
if len(free_nodes) == 0:
error_msg = "Invalid parameter dependency: \n"
for k, v in dependencies:
if len(v) > 0:
error_msg += k + " depends on " + " & ".join(v) + "\n"
raise ValueError(error_msg)
dependencies = [(k, v)
for k, v in dependencies if k not in free_nodes]
# remove the free nodes from the remaining dependencies
for _, v in dependencies:
v.difference_update(free_nodes)
sorted_keys += free_nodes
return self._ivariants_sorted(sorted_keys)
def _ivariants_sorted(self, sorted_keys):
if len(sorted_keys) == 0:
yield dict()
else:
first_keys = sorted_keys[:-1]
first_variants = self._ivariants_sorted(first_keys)
last_key = sorted_keys[-1]
last_vals = [v for k, v, _ in self._variants if k == last_key][0]
if hasattr(last_vals, "__call__"):
last_val_keys = inspect.getargspec(last_vals).args
if hasattr(last_vals, 'im_self') or hasattr(last_vals, '__self__'):
last_val_keys = last_val_keys[1:]
else:
last_val_keys = None
for variant in first_variants:
if hasattr(last_vals, "__call__"):
last_variants = last_vals(
**{k: variant[k] for k in last_val_keys})
for last_choice in last_variants:
yield AttrDict(variant, **{last_key: last_choice})
else:
for last_choice in last_vals:
yield AttrDict(variant, **{last_key: last_choice})
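def _variant_generator_demo():
    # Hypothetical demonstration (not part of the original module) of the dependency
    # handling described in the VariantGenerator docstring: param2's candidate values
    # are computed from the already-chosen value of param1.
    vg = VariantGenerator()
    vg.add("param1", [1, 2])
    vg.add("param2", lambda param1: [param1 + 1, param1 + 2])
    # vg.variants() yields, roughly:
    #   {'param1': 1, 'param2': 2}, {'param1': 1, 'param2': 3},
    #   {'param1': 2, 'param2': 3}, {'param1': 2, 'param2': 4}
    return vg.variants()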
def variant(*args, **kwargs):
def _variant(fn):
fn.__is_variant = True
fn.__variant_config = kwargs
return fn
if len(args) == 1 and isinstance(args[0], collections.Callable):
return _variant(args[0])
return _variant
def stub(glbs):
# replace the __init__ method in all classes
# hacky!!!
for k, v in list(glbs.items()):
# look at all variables that are instances of a class (not yet Stub)
if isinstance(v, type) and v != StubClass:
            glbs[k] = StubClass(v)  # and replace each of them with the same class wrapped in StubClass
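def _stub_demo():
    # Hypothetical illustration (not part of the original module): once stub() has
    # swapped a class for a StubClass, "instantiating" it and "calling" a method only
    # records what should happen; nothing is constructed or executed until the
    # launcher unpickles the resulting StubMethodCall.
    class _DemoAlgo(object):
        def train(self, n_itr):
            return n_itr

    demo_globals = {"_DemoAlgo": _DemoAlgo}
    stub(demo_globals)                        # _DemoAlgo is now wrapped in a StubClass
    algo = demo_globals["_DemoAlgo"]()        # StubObject: construction is deferred
    return algo.train(n_itr=100)              # StubMethodCall: the call is deferred too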
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
exp_count = 0
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
remote_confirmed = False
def run_experiment_lite(
stub_method_call=None,
batch_tasks=None,
exp_prefix="experiment",
exp_name=None,
log_dir=None,
script="scripts/run_experiment_lite.py",
python_command="python",
mode="local",
dry=False,
docker_image=None,
aws_config=None,
env=None,
variant=None,
use_gpu=False,
sync_s3_pkl=False,
sync_s3_png=False,
sync_s3_log=False,
sync_log_on_termination=True,
confirm_remote=True,
terminate_machine=True,
periodic_sync=True,
periodic_sync_interval=15,
sync_all_data_node_to_s3=True,
use_cloudpickle=None,
pre_commands=None,
added_project_directories=[],
**kwargs):
"""
Serialize the stubbed method call and run the experiment using the specified mode.
:param stub_method_call: A stubbed method call.
:param script: The name of the entrance point python script
:param mode: Where & how to run the experiment. Should be one of "local", "local_docker", "ec2",
and "lab_kube".
:param dry: Whether to do a dry-run, which only prints the commands without executing them.
:param exp_prefix: Name prefix for the experiments
:param docker_image: name of the docker image. Ignored if using local mode.
:param aws_config: configuration for AWS. Only used under EC2 mode
:param env: extra environment variables
:param kwargs: All other parameters will be passed directly to the entrance python script.
:param variant: If provided, should be a dictionary of parameters
:param use_gpu: Whether the launched task is running on GPU. This triggers a few configuration changes including
certain environment flags
:param sync_s3_pkl: Whether to sync pkl files during execution of the experiment (they will always be synced at
the end of the experiment)
:param sync_s3_png: Whether to sync png files during execution of the experiment (they will always be synced at
the end of the experiment)
:param sync_s3_log: Whether to sync log files during execution of the experiment (they will always be synced at
the end of the experiment)
:param confirm_remote: Whether to confirm before launching experiments remotely
:param terminate_machine: Whether to terminate machine after experiment finishes. Only used when using
mode="ec2". This is useful when one wants to debug after an experiment finishes abnormally.
:param periodic_sync: Whether to synchronize certain experiment files periodically during execution.
:param periodic_sync_interval: Time interval between each periodic sync, in seconds.
"""
assert stub_method_call is not None or batch_tasks is not None, "Must provide at least either stub_method_call or batch_tasks"
if use_cloudpickle is None:
for maybe_stub in (batch_tasks or [stub_method_call]):
# decide mode
if isinstance(maybe_stub, StubBase):
use_cloudpickle = False
else:
assert hasattr(maybe_stub, '__call__')
use_cloudpickle = True
# ensure variant exists
if variant is None:
variant = dict()
if batch_tasks is None:
batch_tasks = [
dict(
kwargs,
pre_commands=pre_commands,
stub_method_call=stub_method_call,
exp_name=exp_name,
log_dir=log_dir,
env=env,
variant=variant,
use_cloudpickle=use_cloudpickle
)
]
global exp_count
global remote_confirmed
config.USE_GPU = use_gpu
# params_list = []
for task in batch_tasks:
call = task.pop("stub_method_call")
if use_cloudpickle:
import cloudpickle
data = base64.b64encode(cloudpickle.dumps(call)).decode("utf-8")
else:
data = base64.b64encode(pickle.dumps(call)).decode("utf-8")
task["args_data"] = data
exp_count += 1
params = dict(kwargs)
if task.get("exp_name", None) is None:
task["exp_name"] = "%s_%s_%04d" % (
exp_prefix, timestamp, exp_count)
if task.get("log_dir", None) is None:
task["log_dir"] = config.LOG_DIR + "/local/" + \
exp_prefix.replace("_", "-") + "/" + task["exp_name"]
if task.get("variant", None) is not None:
variant = task.pop("variant")
if "exp_name" not in variant:
variant["exp_name"] = task["exp_name"]
task["variant_data"] = base64.b64encode(pickle.dumps(variant)).decode("utf-8")
elif "variant" in task:
del task["variant"]
task["remote_log_dir"] = osp.join(
config.AWS_S3_PATH, exp_prefix.replace("_", "-"), task["exp_name"])
task["env"] = task.get("env", dict()) or dict()
task["env"]["RLLAB_USE_GPU"] = str(use_gpu)
if mode not in ["local", "local_docker"] and not remote_confirmed and not dry and confirm_remote:
remote_confirmed = query_yes_no(
"Running in (non-dry) mode %s. Confirm?" % mode)
if not remote_confirmed:
sys.exit(1)
if hasattr(mode, "__call__"):
if docker_image is None:
docker_image = config.DOCKER_IMAGE
mode(
task,
docker_image=docker_image,
use_gpu=use_gpu,
exp_prefix=exp_prefix,
script=script,
python_command=python_command,
sync_s3_pkl=sync_s3_pkl,
sync_log_on_termination=sync_log_on_termination,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
sync_all_data_node_to_s3=sync_all_data_node_to_s3,
)
elif mode == "local":
for task in batch_tasks:
del task["remote_log_dir"]
env = task.pop("env", None)
command = to_local_command(
task,
python_command=python_command,
script=osp.join(config.PROJECT_PATH, script),
use_gpu=use_gpu
)
print(command)
if dry:
return
try:
if env is None:
env = dict()
subprocess.call(
command, shell=True, env=dict(os.environ, **env))
except Exception as e:
print(e)
if isinstance(e, KeyboardInterrupt):
raise
elif mode == "local_docker":
if docker_image is None:
docker_image = config.DOCKER_IMAGE
for task in batch_tasks:
del task["remote_log_dir"]
env = task.pop("env", None)
command = to_docker_command(
task, # these are the params. Pre and Post command can be here
docker_image=docker_image,
script=script,
env=env,
use_gpu=use_gpu,
use_tty=True,
python_command=python_command,
)
print(command)
if dry:
return
p = subprocess.Popen(command, shell=True)
try:
p.wait()
except KeyboardInterrupt:
try:
print("terminating")
p.terminate()
except OSError:
print("os error!")
pass
p.wait()
elif mode == "ec2":
if docker_image is None:
docker_image = config.DOCKER_IMAGE
s3_code_path = s3_sync_code(config, dry=dry, added_project_directories=added_project_directories)
launch_ec2(batch_tasks,
exp_prefix=exp_prefix,
docker_image=docker_image,
python_command=python_command,
script=script,
aws_config=aws_config,
dry=dry,
terminate_machine=terminate_machine,
use_gpu=use_gpu,
code_full_path=s3_code_path,
sync_s3_pkl=sync_s3_pkl,
sync_s3_png=sync_s3_png,
sync_s3_log=sync_s3_log,
sync_log_on_termination=sync_log_on_termination,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval)
elif mode == "lab_kube":
# assert env is None
# first send code folder to s3
s3_code_path = s3_sync_code(config, dry=dry)
if docker_image is None:
docker_image = config.DOCKER_IMAGE
for task in batch_tasks:
# if 'env' in task:
# assert task.pop('env') is None
# TODO: dangerous when there are multiple tasks?
task["resources"] = params.pop(
"resources", config.KUBE_DEFAULT_RESOURCES)
task["node_selector"] = params.pop(
"node_selector", config.KUBE_DEFAULT_NODE_SELECTOR)
task["exp_prefix"] = exp_prefix
pod_dict = to_lab_kube_pod(
task, code_full_path=s3_code_path, docker_image=docker_image, script=script, is_gpu=use_gpu,
python_command=python_command,
sync_s3_pkl=sync_s3_pkl, periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
sync_all_data_node_to_s3=sync_all_data_node_to_s3,
terminate_machine=terminate_machine,
)
pod_str = json.dumps(pod_dict, indent=1)
if dry:
print(pod_str)
dir = "{pod_dir}/{exp_prefix}".format(
pod_dir=config.POD_DIR, exp_prefix=exp_prefix)
ensure_dir(dir)
fname = "{dir}/{exp_name}.json".format(
dir=dir,
exp_name=task["exp_name"]
)
with open(fname, "w") as fh:
fh.write(pod_str)
kubecmd = "kubectl create -f %s" % fname
print(kubecmd)
if dry:
return
retry_count = 0
wait_interval = 1
while retry_count <= 5:
try:
return_code = subprocess.call(kubecmd, shell=True)
if return_code == 0:
break
retry_count += 1
print("trying again...")
time.sleep(wait_interval)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise
print(e)
else:
raise NotImplementedError
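# Hypothetical usage sketch (illustration only; the algorithm class and keyword
# arguments are assumptions, not part of this module): a stubbed method call is
# handed to run_experiment_lite, which serializes it into args_data and re-runs it
# through scripts/run_experiment_lite.py in the chosen mode. Any extra keyword
# arguments are forwarded to that entry-point script, as noted in the docstring.
#
#     stub(globals())
#     algo = SomeAlgorithm(env=env, policy=policy)   # StubObject; nothing is built yet
#     run_experiment_lite(
#         algo.train(),              # StubMethodCall, executed by the launcher
#         mode="local",
#         exp_prefix="demo_experiment",
#         seed=1,
#     )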
_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search
def ensure_dir(dirname):
"""
Ensure that a named directory exists; if it does not, attempt to create it.
"""
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _shellquote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
if v is None:
return ""
elif isinstance(v, list):
return " ".join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
def to_local_command(params, python_command="python", script=osp.join(config.PROJECT_PATH,
'scripts/run_experiment.py'),
use_gpu=False):
command = python_command + " " + script
if use_gpu and not config.USE_TF:
command = "THEANO_FLAGS='device=gpu,dnn.enabled=auto,floatX=float32' " + command
for k, v in config.ENV.items():
command = ("%s=%s " % (k, v)) + command
pre_commands = params.pop("pre_commands", None)
post_commands = params.pop("post_commands", None)
if pre_commands is not None or post_commands is not None:
print("Not executing the pre_commands: ", pre_commands, ", nor post_commands: ", post_commands)
for k, v in params.items():
if isinstance(v, dict):
for nk, nv in v.items():
if str(nk) == "_name":
command += " --%s %s" % (k, _to_param_val(nv))
else:
command += \
" --%s_%s %s" % (k, nk, _to_param_val(nv))
else:
command += " --%s %s" % (k, _to_param_val(v))
return command
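# Hypothetical illustration (not part of the original module): dictionary-valued
# parameters are flattened into prefixed flags, with the special "_name" key mapped
# to the bare flag, so a params dict such as
#
#     {"exp_name": "demo", "algo": {"_name": "trpo", "n_itr": 5}}
#
# roughly becomes
#
#     python .../scripts/run_experiment.py --exp_name demo --algo trpo --algo_n_itr 5
#
# preceded by any THEANO_FLAGS / config.ENV variable assignments prepended above.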
def to_docker_command(params, docker_image, python_command="python", script='scripts/run_experiment_lite.py',
pre_commands=None, use_tty=False,
mujoco_path=None,
post_commands=None, dry=False, use_gpu=False, env=None, local_code_dir=None):
"""
:param params: The parameters for the experiment. If logging directory parameters are provided, we will create
docker volume mapping to make sure that the logging files are created at the correct locations
:param docker_image: docker image to run the command on
:param script: script command for running experiment
:return:
"""
log_dir = params.get("log_dir")
docker_args = params.pop("docker_args", "")
if pre_commands is None:
pre_commands = params.pop("pre_commands", None)
if post_commands is None:
post_commands = params.pop("post_commands", None)
if mujoco_path is None:
mujoco_path = config.MUJOCO_KEY_PATH
# script = 'rllab/' + script
# if not dry:
# create volume for logging directory
if use_gpu:
command_prefix = "nvidia-docker run"
else:
command_prefix = "docker run"
docker_log_dir = config.DOCKER_LOG_DIR
if env is None:
env = dict()
env = dict(
env,
AWS_ACCESS_KEY_ID=config.AWS_ACCESS_KEY,
AWS_SECRET_ACCESS_KEY=config.AWS_ACCESS_SECRET,
)
if env is not None:
for k, v in env.items():
command_prefix += " -e \"{k}={v}\"".format(k=k, v=v)
command_prefix += " -v {local_mujoco_key_dir}:{docker_mujoco_key_dir}".format(
local_mujoco_key_dir=mujoco_path, docker_mujoco_key_dir='/root/.mujoco')
command_prefix += " -v {local_log_dir}:{docker_log_dir}".format(
local_log_dir=log_dir,
docker_log_dir=docker_log_dir
)
command_prefix += docker_args
if local_code_dir is None:
local_code_dir = config.PROJECT_PATH
command_prefix += " -v {local_code_dir}:{docker_code_dir}".format(
local_code_dir=local_code_dir,
docker_code_dir=config.DOCKER_CODE_DIR
)
params = dict(params, log_dir=docker_log_dir)
if use_tty:
command_prefix += " -ti " + docker_image + " /bin/bash -c "
else:
command_prefix += " -i " + docker_image + " /bin/bash -c "
command_list = list()
if pre_commands is not None:
command_list.extend(pre_commands)
command_list.append("echo \"Running in docker\"")
command_list.append(to_local_command(
params, python_command=python_command, script=osp.join(config.DOCKER_CODE_DIR, script), use_gpu=use_gpu))
    # We sleep for 2 min after termination to allow for last syncs.
if post_commands is None:
post_commands = ['sleep 120']
command_list.extend(post_commands)
return command_prefix + "'" + "; ".join(command_list) + "'"
def dedent(s):
lines = [l.strip() for l in s.split('\n')]
return '\n'.join(lines)
def launch_ec2(params_list, exp_prefix, docker_image, code_full_path,
python_command="python",
script='scripts/run_experiment.py',
aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False,
sync_s3_png=False,
sync_s3_log=False,
sync_log_on_termination=True,
periodic_sync=True, periodic_sync_interval=15):
if len(params_list) == 0:
return
default_config = dict(
image_id=config.AWS_IMAGE_ID,
instance_type=config.AWS_INSTANCE_TYPE,
key_name=config.AWS_KEY_NAME,
spot=config.AWS_SPOT,
spot_price=config.AWS_SPOT_PRICE,
iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME,
security_groups=config.AWS_SECURITY_GROUPS,
security_group_ids=config.AWS_SECURITY_GROUP_IDS,
network_interfaces=config.AWS_NETWORK_INTERFACES,
)
if aws_config is None:
aws_config = dict()
aws_config = dict(default_config, **aws_config)
sio = StringIO()
sio.write("#!/bin/bash\n")
sio.write("{\n")
sio.write("""
die() { status=$1; shift; echo "FATAL: $*"; exit $status; }
""")
sio.write("""
EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`"
""")
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
""".format(exp_name=params_list[0].get("exp_name"), aws_region=config.AWS_REGION_NAME))
if config.LABEL:
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}
""".format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}
""".format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))
sio.write("""
service docker start
""")
sio.write("""
docker --config /home/ubuntu/.docker pull {docker_image}
""".format(docker_image=docker_image))
sio.write("""
export AWS_DEFAULT_REGION={aws_region}
""".format(aws_region=config.AWS_REGION_NAME))
if config.FAST_CODE_SYNC:
# sio.write("""
# aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz --region {aws_region}
# """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
# aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
sio.write("""
mkdir -p {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
aws_region=config.AWS_REGION_NAME))
sio.write("""
tar -zxvf /tmp/rllab_code.tar.gz -C {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
aws_region=config.AWS_REGION_NAME))
else:
# sio.write("""
# aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}
# """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
# aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {code_full_path} {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'
# sio.write("""
# aws s3 cp --recursive {} {} --region {}
# """.format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH, config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {} {}
""".format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH))
sio.write("""
cd {local_code_path}
""".format(local_code_path=config.DOCKER_CODE_DIR))
for params in params_list:
log_dir = params.get("log_dir")
remote_log_dir = params.pop("remote_log_dir")
env = params.pop("env", None)
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
""".format(exp_name=params.get("exp_name"), aws_region=config.AWS_REGION_NAME))
sio.write("""
mkdir -p {log_dir}
""".format(log_dir=log_dir))
if periodic_sync:
include_png = " --include '*.png' " if sync_s3_png else " "
include_pkl = " --include '*.pkl' " if sync_s3_pkl else " "
include_log = " --include '*.log' " if sync_s3_log else " "
# sio.write("""
# while /bin/true; do
# aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region}
# sleep {periodic_sync_interval}
# done & echo sync initiated""".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,
# log_dir=log_dir, remote_log_dir=remote_log_dir,
# aws_region=config.AWS_REGION_NAME,
# periodic_sync_interval=periodic_sync_interval))
sio.write("""
while /bin/true; do
aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir}
sleep {periodic_sync_interval}
done & echo sync initiated""".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,
log_dir=log_dir, remote_log_dir=remote_log_dir,
periodic_sync_interval=periodic_sync_interval))
if sync_log_on_termination:
# sio.write("""
# while /bin/true; do
# if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \ -f 2) ]
# then
# logger "Running shutdown hook."
# aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
# aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
# break
# else
# # Spot instance not yet marked for termination.
# sleep 5
# fi
# done & echo log sync initiated
# """.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
while /bin/true; do
if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \ -f 2) ]
then
logger "Running shutdown hook."
aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log
aws s3 cp --recursive {log_dir} {remote_log_dir}
break
else
# Spot instance not yet marked for termination.
sleep 5
fi
done & echo log sync initiated
""".format(log_dir=log_dir, remote_log_dir=remote_log_dir))
if use_gpu:
sio.write("""
for i in {1..800}; do su -c "nvidia-modprobe -u -c=0" ubuntu && break || sleep 3; done
systemctl start nvidia-docker
""")
sio.write("""
{command}
""".format(command=to_docker_command(params, docker_image, python_command=python_command, script=script,
use_gpu=use_gpu, env=env,
local_code_dir=config.DOCKER_CODE_DIR)))
# sio.write("""
# aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
# """.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {log_dir} {remote_log_dir}
""".format(log_dir=log_dir, remote_log_dir=remote_log_dir))
# sio.write("""
# aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
# """.format(remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log
""".format(remote_log_dir=remote_log_dir))
if terminate_machine:
sio.write("""
EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id || die \"wget instance-id has failed: $?\"`"
aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}
""".format(aws_region=config.AWS_REGION_NAME))
sio.write("} >> /home/ubuntu/user_data.log 2>&1\n")
full_script = dedent(sio.getvalue())
import boto3
import botocore
if aws_config["spot"]:
ec2 = boto3.client(
"ec2",
region_name=config.AWS_REGION_NAME,
aws_access_key_id=config.AWS_ACCESS_KEY,
aws_secret_access_key=config.AWS_ACCESS_SECRET,
)
else:
ec2 = boto3.resource(
"ec2",
region_name=config.AWS_REGION_NAME,
aws_access_key_id=config.AWS_ACCESS_KEY,
aws_secret_access_key=config.AWS_ACCESS_SECRET,
)
if len(full_script) > 10000 or len(base64.b64encode(full_script.encode()).decode("utf-8")) > 10000:
# Script too long; need to upload script to s3 first.
# We're being conservative here since the actual limit is 16384 bytes
s3_path = upload_file_to_s3(full_script)
sio = StringIO()
sio.write("#!/bin/bash\n")
sio.write("""
aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\
chmod +x /home/ubuntu/remote_script.sh && \\
bash /home/ubuntu/remote_script.sh
""".format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))
user_data = dedent(sio.getvalue())
else:
user_data = full_script
print(full_script)
with open("/tmp/full_script", "w") as f:
f.write(full_script)
instance_args = dict(
ImageId=aws_config["image_id"],
KeyName=aws_config["key_name"],
UserData=user_data,
InstanceType=aws_config["instance_type"],
EbsOptimized=config.EBS_OPTIMIZED,
SecurityGroups=aws_config["security_groups"],
SecurityGroupIds=aws_config["security_group_ids"],
NetworkInterfaces=aws_config["network_interfaces"],
IamInstanceProfile=dict(
Name=aws_config["iam_instance_profile_name"],
),
**config.AWS_EXTRA_CONFIGS,
)
if len(instance_args["NetworkInterfaces"]) > 0:
# disable_security_group = query_yes_no(
# "Cannot provide both network interfaces and security groups info. Do you want to disable security group settings?",
# default="yes",
# )
disable_security_group = True
if disable_security_group:
instance_args.pop("SecurityGroups")
instance_args.pop("SecurityGroupIds")
if aws_config.get("placement", None) is not None:
instance_args["Placement"] = aws_config["placement"]
if not aws_config["spot"]:
instance_args["MinCount"] = 1
instance_args["MaxCount"] = 1
print("************************************************************")
print(instance_args["UserData"])
print("************************************************************")
if aws_config["spot"]:
instance_args["UserData"] = base64.b64encode(instance_args["UserData"].encode()).decode("utf-8")
spot_args = dict(
DryRun=dry,
InstanceCount=1,
LaunchSpecification=instance_args,
SpotPrice=aws_config["spot_price"],
# ClientToken=params_list[0]["exp_name"],
)
import pprint
pprint.pprint(spot_args)
if not dry:
response = ec2.request_spot_instances(**spot_args)
print(response)
spot_request_id = response['SpotInstanceRequests'][
0]['SpotInstanceRequestId']
for _ in range(10):
try:
ec2.create_tags(
Resources=[spot_request_id],
Tags=[
{'Key': 'Name', 'Value': params_list[0]["exp_name"]}
],
)
break
except botocore.exceptions.ClientError:
continue
else:
import pprint
pprint.pprint(instance_args)
ec2.create_instances(
DryRun=dry,
**instance_args
)
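# Hedged sketch: launch_ec2 is normally reached through run_experiment_lite(mode="ec2"),
# which serializes the (stubbed) experiment call, syncs the code to S3 and fills in the
# per-task fields consumed above (exp_name, log_dir, remote_log_dir, args_data).
# `my_algo` is hypothetical, and dry=True keeps this to printing the generated user-data
# script rather than launching instances:
#
#     run_experiment_lite(
#         my_algo.train(),
#         exp_prefix="example-exp",
#         mode="ec2",
#         dry=True,
#     )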
S3_CODE_PATH = None
def s3_sync_code(config, dry=False, added_project_directories=[]):
global S3_CODE_PATH
if S3_CODE_PATH is not None:
return S3_CODE_PATH
base = config.AWS_CODE_SYNC_S3_PATH
has_git = True
if config.FAST_CODE_SYNC:
try:
current_commit = subprocess.check_output(
["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
except subprocess.CalledProcessError as _:
print("Warning: failed to execute git commands")
current_commit = None
file_name = str(timestamp) + "_" + hashlib.sha224(
subprocess.check_output(["pwd"]) + str(current_commit).encode() + str(timestamp).encode()
).hexdigest() + ".tar.gz"
file_path = "/tmp/" + file_name
tar_cmd = ["tar", "-zcvf", file_path, "-C", config.PROJECT_PATH]
for pattern in config.FAST_CODE_SYNC_IGNORES:
tar_cmd += ["--exclude", pattern]
tar_cmd += ["-h", "."]
for path in added_project_directories:
tar_cmd.append("-C")
tar_cmd.append(path)
tar_cmd += ["."]
remote_path = "%s/%s" % (base, file_name)
upload_cmd = ["aws", "s3", "cp", file_path, remote_path]
mujoco_key_cmd = [
"aws", "s3", "sync", config.MUJOCO_KEY_PATH, "{}/.mujoco/".format(base)]
print(" ".join(tar_cmd))
print(" ".join(upload_cmd))
print(" ".join(mujoco_key_cmd))
if not dry:
subprocess.check_call(tar_cmd)
subprocess.check_call(upload_cmd)
try:
subprocess.check_call(mujoco_key_cmd)
except Exception as e:
print(e)
S3_CODE_PATH = remote_path
return remote_path
else:
try:
current_commit = subprocess.check_output(
["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
clean_state = len(
subprocess.check_output(["git", "status", "--porcelain"])) == 0
except subprocess.CalledProcessError as _:
print("Warning: failed to execute git commands")
has_git = False
dir_hash = base64.b64encode(subprocess.check_output(["pwd"])).decode("utf-8")
code_path = "%s_%s" % (
dir_hash,
(current_commit if clean_state else "%s_dirty_%s" % (current_commit, timestamp)) if
has_git else timestamp
)
full_path = "%s/%s" % (base, code_path)
cache_path = "%s/%s" % (base, dir_hash)
cache_cmds = ["aws", "s3", "cp", "--recursive"] + \
flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
[cache_path, full_path]
cmds = ["aws", "s3", "cp", "--recursive"] + \
flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
[".", full_path]
caching_cmds = ["aws", "s3", "cp", "--recursive"] + \
flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
[full_path, cache_path]
mujoco_key_cmd = [
"aws", "s3", "sync", config.MUJOCO_KEY_PATH, "{}/.mujoco/".format(base)]
print(cache_cmds, cmds, caching_cmds, mujoco_key_cmd)
if not dry:
subprocess.check_call(cache_cmds)
subprocess.check_call(cmds)
subprocess.check_call(caching_cmds)
try:
subprocess.check_call(mujoco_key_cmd)
except Exception:
print('Unable to sync mujoco keys!')
S3_CODE_PATH = full_path
return full_path
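# Hedged usage sketch for s3_sync_code: with dry=True the tar/aws commands are only printed
# (git and pwd are still invoked to derive the remote path), so this is safe to run locally.
# It assumes `config` (imported at the top of this module) defines AWS_CODE_SYNC_S3_PATH,
# PROJECT_PATH and the FAST_CODE_SYNC* settings referenced above.
def _example_dry_code_sync():
    remote_path = s3_sync_code(config, dry=True)
    print("code would be synced to: " + remote_path)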
def upload_file_to_s3(script_content):
import tempfile
import uuid
f = tempfile.NamedTemporaryFile(delete=False)
f.write(script_content.encode())
f.close()
remote_path = os.path.join(
config.AWS_CODE_SYNC_S3_PATH, "oversize_bash_scripts", str(uuid.uuid4()))
subprocess.check_call(["aws", "s3", "cp", f.name, remote_path])
os.unlink(f.name)
return remote_path
def to_lab_kube_pod(
params, docker_image, code_full_path,
python_command="python",
script='scripts/run_experiment.py',
is_gpu=False,
sync_s3_pkl=False,
periodic_sync=True,
periodic_sync_interval=15,
sync_all_data_node_to_s3=False,
terminate_machine=True
):
"""
:param params: The parameters for the experiment. If logging directory parameters are provided, we will create
docker volume mapping to make sure that the logging files are created at the correct locations
:param docker_image: docker image to run the command on
:param script: script command for running experiment
:return:
"""
log_dir = params.get("log_dir")
remote_log_dir = params.pop("remote_log_dir")
resources = params.pop("resources")
node_selector = params.pop("node_selector")
exp_prefix = params.pop("exp_prefix")
kube_env = [
{"name": k, "value": v}
for k, v in (params.pop("env", None) or dict()).items()
]
mkdir_p(log_dir)
pre_commands = list()
pre_commands.append('mkdir -p ~/.aws')
pre_commands.append('mkdir ~/.mujoco')
# fetch credentials from the kubernetes secret file
pre_commands.append('echo "[default]" >> ~/.aws/credentials')
pre_commands.append(
"echo \"aws_access_key_id = %s\" >> ~/.aws/credentials" % config.AWS_ACCESS_KEY)
pre_commands.append(
"echo \"aws_secret_access_key = %s\" >> ~/.aws/credentials" % config.AWS_ACCESS_SECRET)
s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'
pre_commands.append(
'aws s3 cp --recursive {} {}'.format(s3_mujoco_key_path, '~/.mujoco'))
if config.FAST_CODE_SYNC:
pre_commands.append('aws s3 cp %s /tmp/rllab_code.tar.gz' % code_full_path)
pre_commands.append('mkdir -p %s' % config.DOCKER_CODE_DIR)
pre_commands.append('tar -zxvf /tmp/rllab_code.tar.gz -C %s' % config.DOCKER_CODE_DIR)
else:
pre_commands.append('aws s3 cp --recursive %s %s' %
(code_full_path, config.DOCKER_CODE_DIR))
pre_commands.append('cd %s' % config.DOCKER_CODE_DIR)
pre_commands.append('mkdir -p %s' %
(log_dir))
if sync_all_data_node_to_s3:
print('Syncing all data from node to s3.')
if periodic_sync:
if sync_s3_pkl:
pre_commands.append("""
while /bin/true; do
aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet
sleep {periodic_sync_interval}
done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
aws_region=config.AWS_REGION_NAME,
periodic_sync_interval=periodic_sync_interval))
else:
pre_commands.append("""
while /bin/true; do
aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet
sleep {periodic_sync_interval}
done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
aws_region=config.AWS_REGION_NAME,
periodic_sync_interval=periodic_sync_interval))
else:
if periodic_sync:
if sync_s3_pkl:
pre_commands.append("""
while /bin/true; do
aws s3 sync --exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' {log_dir} {remote_log_dir} --region {aws_region} --quiet
sleep {periodic_sync_interval}
done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
aws_region=config.AWS_REGION_NAME,
periodic_sync_interval=periodic_sync_interval))
else:
pre_commands.append("""
while /bin/true; do
aws s3 sync --exclude '*' --include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region} --quiet
sleep {periodic_sync_interval}
done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
aws_region=config.AWS_REGION_NAME,
periodic_sync_interval=periodic_sync_interval))
# copy the file to s3 after execution
post_commands = list()
post_commands.append('aws s3 cp --recursive %s %s' %
(log_dir,
remote_log_dir))
if not terminate_machine:
post_commands.append('sleep infinity')
command_list = list()
if pre_commands is not None:
command_list.extend(pre_commands)
command_list.append("echo \"Running in docker\"")
command_list.append(
"%s 2>&1 | tee -a %s" % (
to_local_command(params, python_command=python_command, script=script),
"%s/stdouterr.log" % log_dir
)
)
if post_commands is not None:
command_list.extend(post_commands)
command = "; ".join(command_list)
pod_name = config.KUBE_PREFIX + params["exp_name"]
# underscore is not allowed in pod names
pod_name = pod_name.replace("_", "-")
print("Is gpu: ", is_gpu)
if not is_gpu:
return {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": pod_name,
"labels": {
"owner": config.LABEL,
"expt": pod_name,
"exp_time": timestamp,
"exp_prefix": exp_prefix,
},
},
"spec": {
"containers": [
{
"name": "foo",
"image": docker_image,
"command": [
"/bin/bash",
"-c",
"-li", # to load conda env file
command,
],
"resources": resources,
"imagePullPolicy": "Always",
}
],
"restartPolicy": "Never",
"nodeSelector": node_selector,
"dnsPolicy": "Default",
}
}
return {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": pod_name,
"labels": {
"owner": config.LABEL,
"expt": pod_name,
"exp_time": timestamp,
"exp_prefix": exp_prefix,
},
},
"spec": {
"containers": [
{
"name": "foo",
"image": docker_image,
"env": kube_env,
"command": [
"/bin/bash",
"-c",
"-li", # to load conda env file
command,
],
"resources": resources,
"imagePullPolicy": "Always",
# gpu specific
"volumeMounts": [
{
"name": "nvidia",
"mountPath": "/usr/local/nvidia",
"readOnly": True,
}
],
"securityContext": {
"privileged": True,
}
}
],
"volumes": [
{
"name": "nvidia",
"hostPath": {
"path": "/var/lib/docker/volumes/nvidia_driver_352.63/_data",
}
}
],
"restartPolicy": "Never",
"nodeSelector": node_selector,
"dnsPolicy": "Default",
}
}
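# Hedged sketch of how the pod spec returned above is typically consumed (mirroring the
# "lab_kube" mode of run_experiment_lite): serialize it to JSON and hand it to kubectl.
# The file path below is illustrative.
def _example_submit_pod(pod_dict):
    import json
    import subprocess
    with open("/tmp/example_pod.json", "w") as fh:
        fh.write(json.dumps(pod_dict, indent=1))
    subprocess.call("kubectl create -f /tmp/example_pod.json", shell=True)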
def concretize(maybe_stub):
if isinstance(maybe_stub, StubMethodCall):
obj = concretize(maybe_stub.obj)
method = getattr(obj, maybe_stub.method_name)
args = concretize(maybe_stub.args)
kwargs = concretize(maybe_stub.kwargs)
return method(*args, **kwargs)
elif isinstance(maybe_stub, StubClass):
return maybe_stub.proxy_class
elif isinstance(maybe_stub, StubAttr):
obj = concretize(maybe_stub.obj)
attr_name = maybe_stub.attr_name
attr_val = getattr(obj, attr_name)
return concretize(attr_val)
elif isinstance(maybe_stub, StubObject):
if not hasattr(maybe_stub, "__stub_cache"):
args = concretize(maybe_stub.args)
kwargs = concretize(maybe_stub.kwargs)
try:
maybe_stub.__stub_cache = maybe_stub.proxy_class(
*args, **kwargs)
except Exception as e:
print(("Error while instantiating %s" % maybe_stub.proxy_class))
import traceback
traceback.print_exc()
ret = maybe_stub.__stub_cache
return ret
elif isinstance(maybe_stub, dict):
# make sure that there's no hidden caveat
ret = dict()
for k, v in maybe_stub.items():
ret[concretize(k)] = concretize(v)
return ret
elif isinstance(maybe_stub, (list, tuple)):
return maybe_stub.__class__(list(map(concretize, maybe_stub)))
else:
return maybe_stub
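# Hedged round-trip sketch for the stub machinery defined earlier in this module: stubbed
# calls are only recorded until concretize() instantiates the proxied class and invokes the
# method. `Point` is a hypothetical stand-in class used purely for illustration.
def _example_concretize():
    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y
        def norm(self):
            return (self.x ** 2 + self.y ** 2) ** 0.5
    stubbed = StubClass(Point)(3, 4).norm()  # builds a StubMethodCall; nothing executes yet
    return concretize(stubbed)  # instantiates Point(x=3, y=4) and calls norm() -> 5.0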
| 39.576087
| 174
| 0.567555
|
| true
| true
|
f716261ac483bcf478965800be894dea21a24632
| 4,700
|
py
|
Python
|
openstates/openstates-master/openstates/ky/legislators.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/openstates/ky/legislators.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/openstates/ky/legislators.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
from collections import defaultdict
from functools import reduce  # not a builtin on Python 3; used for the e-mail lookup in scrape_member
from billy.scrape.legislators import Legislator, LegislatorScraper
import lxml.html
class KYLegislatorScraper(LegislatorScraper):
jurisdiction = 'ky'
latest_only = True
def scrape(self, chamber, year):
if chamber == 'upper':
leg_list_url = 'http://www.lrc.ky.gov/senate/senmembers.htm'
else:
leg_list_url = 'http://www.lrc.ky.gov/house/hsemembers.htm'
page = self.get(leg_list_url).text
page = lxml.html.fromstring(page)
for link in page.xpath('//a[@onmouseout="hidePicture();"]'):
self.scrape_member(chamber, year, link.get('href'))
def scrape_office_info(self, url):
ret = {}
legislator_page = self.get(url).text
legislator_page = lxml.html.fromstring(legislator_page)
legislator_page.make_links_absolute(url)
info = legislator_page.xpath("//table//span")
for span in info:
elements = span.xpath("./*")
if len(elements) < 1:
continue
if elements[0].tag != "b":
continue
txt = elements[0].text_content().strip()
if txt == "Bio" or \
"committees" in txt.lower() or \
"service" in txt.lower() or \
txt == "":
continue
def _handle_phone(obj):
ret = defaultdict(list)
for x in obj.xpath(".//*")[:-1]:
phone = x.tail.strip()
obj = phone.split(":", 1)
if len(obj) != 2:
continue
typ, number = obj
typ, number = typ.strip(), number.strip()
ret[typ].append(number)
return ret
def _handle_address(obj):
addr = " ".join([x.tail or "" for x in obj.xpath(".//*")[1:]])
return [addr.strip()]
def _handle_emails(obj):
ret = []
emails = obj.xpath(".//a[contains(@href, 'mailto')]")
if len(emails) < 1:
return []
for email in emails:
_, efax = email.attrib['href'].split(":", 1)
ret.append(efax)
return ret
handlers = {
"Mailing Address": _handle_address,
"Frankfort Address(es)": _handle_address,
"Phone Number(s)": _handle_phone,
"Email Address(es)": _handle_emails
}
try:
handler = handlers[txt]
ret[txt] = handler(span)
except KeyError:
pass
return ret
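    # The dict returned above maps the bolded labels to parsed values, roughly of this shape
    # (the values below are made up for illustration):
    #   {
    #       "Mailing Address": ["702 Capitol Ave, Frankfort, KY 40601"],
    #       "Phone Number(s)": {"Work": ["502-555-0100"], "Fax": ["502-555-0101"]},
    #       "Email Address(es)": ["jane.doe@lrc.ky.gov"],
    #   }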
def scrape_member(self, chamber, year, member_url):
member_page = self.get(member_url).text
doc = lxml.html.fromstring(member_page)
photo_url = doc.xpath('//div[@id="bioImage"]/img/@src')[0]
name_pieces = doc.xpath('//span[@id="name"]/text()')[0].split()
full_name = ' '.join(name_pieces[1:-1]).strip()
party = name_pieces[-1]
if party == '(R)':
party = 'Republican'
elif party == '(D)':
party = 'Democratic'
elif party == '(I)':
party = 'Independent'
district = doc.xpath('//span[@id="districtHeader"]/text()')[0].split()[-1]
leg = Legislator(year, chamber, district, full_name, party=party,
photo_url=photo_url, url=member_url)
leg.add_source(member_url)
address = '\n'.join(doc.xpath('//div[@id="FrankfortAddresses"]//span[@class="bioText"]/text()'))
phone = None
fax = None
phone_numbers = doc.xpath('//div[@id="PhoneNumbers"]//span[@class="bioText"]/text()')
for num in phone_numbers:
if num.startswith('Annex: '):
num = num.replace('Annex: ', '')
if num.endswith(' (fax)'):
fax = num.replace(' (fax)', '')
else:
phone = num
emails = doc.xpath(
'//div[@id="EmailAddresses"]//span[@class="bioText"]//a/text()'
)
email = reduce(
lambda match, address: address if '@lrc.ky.gov' in str(address) else match,
[None] + emails
)
if address.strip() == "":
self.warning("Missing Capitol Office!!")
else:
leg.add_office(
'capitol', 'Capitol Office',
address=address,
phone=phone,
fax=fax,
email=email
)
self.save_legislator(leg)
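# Hedged illustration of the e-mail selection used in scrape_member above: reduce() keeps the
# last address containing '@lrc.ky.gov' and otherwise falls back to None. Addresses are made up.
def _example_pick_lrc_email():
    emails = ['aide@example.com', 'jane.doe@lrc.ky.gov']
    return reduce(
        lambda match, address: address if '@lrc.ky.gov' in str(address) else match,
        [None] + emails
    )  # -> 'jane.doe@lrc.ky.gov'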
| 33.098592
| 104
| 0.491702
|
| true
| true
|
f7162748180db3d0c6d31d12bd970036ad3500b1
| 1,747
|
py
|
Python
|
python/training/ami2text.py
|
bmilde/ambientsearch
|
74bf83a313e19da54a4e44158063041f981424c9
|
[
"Apache-2.0"
] | 20
|
2016-04-30T11:24:45.000Z
|
2021-11-09T10:39:25.000Z
|
python/training/ami2text.py
|
bmilde/ambientsearch
|
74bf83a313e19da54a4e44158063041f981424c9
|
[
"Apache-2.0"
] | 1
|
2020-09-23T13:36:58.000Z
|
2020-09-23T13:36:58.000Z
|
python/training/ami2text.py
|
bmilde/ambientsearch
|
74bf83a313e19da54a4e44158063041f981424c9
|
[
"Apache-2.0"
] | 8
|
2015-10-07T13:40:36.000Z
|
2019-08-07T06:45:24.000Z
|
import xml.etree.ElementTree as ET
import os
import codecs
import logging
import sys
import argparse
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
def convert_ami(ami_root_dir, txt_output_dir):
logger.info('Starting conversion process...')
for myfile in os.listdir(ami_root_dir):
if myfile.endswith('.xml'):
with codecs.open(os.path.join(ami_root_dir, myfile), 'r', encoding='utf-8', errors='replace') as in_file:
raw = in_file.read()
tree = ET.fromstring(raw)
text = ET.tostring(tree, encoding='utf-8', method='text')
output = u' '.join(text.split())
filename = os.path.splitext(myfile)[0]
output_file = os.path.join(txt_output_dir, filename + '.txt')
with codecs.open(output_file, 'w', encoding='utf-8') as out_file:
out_file.write(output)
logger.info(output_file + ' written')
logger.info('Conversion done.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-a', '--ami-root-dir', dest='ami_root_dir', help='Ami root directory, corpus is read from this directory', type=str, default = './data/ami_raw/words/')
parser.add_argument('-t', '--txt-output-dir', dest='txt_output_dir', help='Txt output directory', type=str, default = './data/ami_transcripts/' )
args = parser.parse_args()
logger.info('Using ami directory:' + args.ami_root_dir)
logger.info('Output text is saved in:' + args.txt_output_dir)
convert_ami(args.ami_root_dir, args.txt_output_dir)
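# --- Editor's sketch (not part of the original script) --------------------
# Note: on Python 3, ET.tostring(..., encoding='utf-8') returns bytes, so the
# u' '.join(text.split()) above would raise a TypeError there. A minimal
# sketch of the same extraction step with an explicit decode; the inline XML
# is made up for illustration.
import xml.etree.ElementTree as ET

sample_xml = '<transcript><w>hello</w> <w>world</w></transcript>'
tree = ET.fromstring(sample_xml)
text = ET.tostring(tree, encoding='utf-8', method='text').decode('utf-8')
print(' '.join(text.split()))  # -> hello world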
| 41.595238
| 176
| 0.651975
|
import xml.etree.ElementTree as ET
import os
import codecs
import logging
import sys
import argparse
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
def convert_ami(ami_root_dir, txt_output_dir):
logger.info('Starting conversion process...')
for myfile in os.listdir(ami_root_dir):
if myfile.endswith('.xml'):
with codecs.open(os.path.join(ami_root_dir, myfile), 'r', encoding='utf-8', errors='replace') as in_file:
raw = in_file.read()
tree = ET.fromstring(raw)
text = ET.tostring(tree, encoding='utf-8', method='text')
output = u' '.join(text.split())
filename = os.path.splitext(myfile)[0]
output_file = os.path.join(txt_output_dir, filename + '.txt')
with codecs.open(output_file, 'w', encoding='utf-8') as out_file:
out_file.write(output)
logger.info(output_file + ' written')
logger.info('Conversion done.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-a', '--ami-root-dir', dest='ami_root_dir', help='Ami root directory, corpus is read from this directory', type=str, default = './data/ami_raw/words/')
parser.add_argument('-t', '--txt-output-dir', dest='txt_output_dir', help='Txt output directory', type=str, default = './data/ami_transcripts/' )
args = parser.parse_args()
logger.info('Using ami directory:' + args.ami_root_dir)
logger.info('Output text is saved in:' + args.txt_output_dir)
convert_ami(args.ami_root_dir, args.txt_output_dir)
| true
| true
|
f71628d61972f5279f3ebaa39213c9541c466954
| 2,296
|
py
|
Python
|
nc/binders/epub.py
|
masroore/novel_crawler
|
7c3c7affc4a177e7a5a308af5b48685ebb55ec9d
|
[
"Apache-2.0"
] | null | null | null |
nc/binders/epub.py
|
masroore/novel_crawler
|
7c3c7affc4a177e7a5a308af5b48685ebb55ec9d
|
[
"Apache-2.0"
] | null | null | null |
nc/binders/epub.py
|
masroore/novel_crawler
|
7c3c7affc4a177e7a5a308af5b48685ebb55ec9d
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
from ebooklib import epub
logger = logging.getLogger('EPUB_BINDER')
def make_intro_page(crawler):
html = '<div style="padding-top: 25%; text-align: center;">'
html += '<h1>%s</h1>' % (crawler.novel_title or 'N/A')
html += '<h3>%s</h3>' % (crawler.novel_author or 'N/A').replace(':', ': ')
html += '</div>'
return epub.EpubHtml(
uid='intro',
file_name='intro.xhtml',
title='Intro',
content=html,
)
def make_chapters(book, chapters):
book.toc = []
for i, chapter in enumerate(chapters):
xhtml_file = 'chap_%s.xhtml' % str(i + 1).rjust(5, '0')
content = epub.EpubHtml(
# uid=str(i + 1),
file_name=xhtml_file,
title=chapter['title'],
content=chapter['body'] or '',
)
book.add_item(content)
book.toc.append(content)
def bind_epub_book(app, chapters, volume=''):
book_title = (app.crawler.novel_title + ' ' + volume).strip()
logger.debug('Binding %s.epub', book_title)
# Create book
book = epub.EpubBook()
book.set_language('en')
book.set_title(book_title)
book.add_author(app.crawler.novel_author)
book.set_identifier(app.output_path + volume)
# Create intro page
intro_page = make_intro_page(app.crawler)
book.add_item(intro_page)
# Create book spine
if app.book_cover:
book.set_cover('image.jpg', open(app.book_cover, 'rb').read())
book.spine = ['cover', intro_page, 'nav']
else:
book.spine = [intro_page, 'nav']
# Create chapters
make_chapters(book, chapters)
book.spine += book.toc
book.add_item(epub.EpubNav())
book.add_item(epub.EpubNcx())
# Save epub file
epub_path = os.path.join(app.output_path, 'epub')
file_path = os.path.join(epub_path, book_title + '.epub')
logger.debug('Writing %s', file_path)
os.makedirs(epub_path, exist_ok=True)
epub.write_epub(file_path, book, {})
logger.warning('Created: %s.epub', book_title)
return file_path
def make_epubs(app, data):
epub_files = []
for vol in data:
if len(data[vol]) > 0:
book = bind_epub_book(app, volume=vol, chapters=data[vol])
epub_files.append(book)
return epub_files
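# --- Editor's sketch (not part of the original binder) --------------------
# A standalone illustration of the ebooklib wiring that bind_epub_book()
# depends on: book metadata, one chapter, toc/spine, and the Ncx/Nav items.
# The title, chapter text, and output file name are made up for the example.
from ebooklib import epub

book = epub.EpubBook()
book.set_identifier('example-id')
book.set_title('Example Novel')
book.set_language('en')
book.add_author('Unknown Author')

chapter = epub.EpubHtml(file_name='chap_00001.xhtml', title='Chapter 1',
                        content='<h1>Chapter 1</h1><p>Hello.</p>')
book.add_item(chapter)
book.toc = [chapter]
book.spine = ['nav', chapter]
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
epub.write_epub('example.epub', book, {})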
| 28
| 78
| 0.616725
|
import logging
import os
from ebooklib import epub
logger = logging.getLogger('EPUB_BINDER')
def make_intro_page(crawler):
html = '<div style="padding-top: 25%; text-align: center;">'
html += '<h1>%s</h1>' % (crawler.novel_title or 'N/A')
html += '<h3>%s</h3>' % (crawler.novel_author or 'N/A').replace(':', ': ')
html += '</div>'
return epub.EpubHtml(
uid='intro',
file_name='intro.xhtml',
title='Intro',
content=html,
)
def make_chapters(book, chapters):
book.toc = []
for i, chapter in enumerate(chapters):
xhtml_file = 'chap_%s.xhtml' % str(i + 1).rjust(5, '0')
content = epub.EpubHtml(
file_name=xhtml_file,
title=chapter['title'],
content=chapter['body'] or '',
)
book.add_item(content)
book.toc.append(content)
def bind_epub_book(app, chapters, volume=''):
book_title = (app.crawler.novel_title + ' ' + volume).strip()
logger.debug('Binding %s.epub', book_title)
book = epub.EpubBook()
book.set_language('en')
book.set_title(book_title)
book.add_author(app.crawler.novel_author)
book.set_identifier(app.output_path + volume)
intro_page = make_intro_page(app.crawler)
book.add_item(intro_page)
if app.book_cover:
book.set_cover('image.jpg', open(app.book_cover, 'rb').read())
book.spine = ['cover', intro_page, 'nav']
else:
book.spine = [intro_page, 'nav']
make_chapters(book, chapters)
book.spine += book.toc
book.add_item(epub.EpubNav())
book.add_item(epub.EpubNcx())
epub_path = os.path.join(app.output_path, 'epub')
file_path = os.path.join(epub_path, book_title + '.epub')
logger.debug('Writing %s', file_path)
os.makedirs(epub_path, exist_ok=True)
epub.write_epub(file_path, book, {})
logger.warning('Created: %s.epub', book_title)
return file_path
def make_epubs(app, data):
epub_files = []
for vol in data:
if len(data[vol]) > 0:
book = bind_epub_book(app, volume=vol, chapters=data[vol])
epub_files.append(book)
return epub_files
| true
| true
|
f71628d715ee90235618f6ae675b83c05225b297
| 1,284
|
py
|
Python
|
Q2.py
|
jlo118/DLlab2
|
01978907f48cfeb5cc406564a64454dc6b4f8485
|
[
"MIT"
] | null | null | null |
Q2.py
|
jlo118/DLlab2
|
01978907f48cfeb5cc406564a64454dc6b4f8485
|
[
"MIT"
] | null | null | null |
Q2.py
|
jlo118/DLlab2
|
01978907f48cfeb5cc406564a64454dc6b4f8485
|
[
"MIT"
] | null | null | null |
import pandas
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import TensorBoard
# load dataset
from sklearn.model_selection import train_test_split
import pandas as pd
dataset = pd.read_csv("framingham.csv", header=None).values
import numpy as np
X_train, X_test, Y_train, Y_test = train_test_split(dataset[:,0:15], dataset[:,15],
test_size=0.33, random_state=87)
np.random.seed(100)
nnokay = Sequential() # create model
nnokay.add(Dense(20, input_dim=15, activation='tanh')) # hidden layer
nnokay.add(Dense(30, activation='tanh')) # add another hidden layer
nnokay.add(Dense(60, activation='tanh'))
nnokay.add(Dense(20, activation='tanh'))
nnokay.add(Dense(15, activation='tanh'))
nnokay.add(Dense(60, activation='tanh'))
nnokay.add(Dense(1, activation='tanh')) # output layer
nnokay.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
nnokay.fit(X_train, Y_train, epochs=250, verbose=0,
callbacks=[TensorBoard(log_dir = '/tmp/auto')])
#print(nnokay.summary())
#print(nnokay.evaluate(X_test, Y_test, verbose=0))
score = nnokay.evaluate(X_test, Y_test)
print('test accuracy', score[1])
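# --- Editor's sketch (not part of the original experiment) ----------------
# binary_crossentropy expects outputs in [0, 1], while the tanh output layer
# above produces values in [-1, 1]. A commonly used alternative (assuming the
# same 15-feature input and a binary label) ends in a sigmoid unit:
from keras.models import Sequential
from keras.layers.core import Dense

model = Sequential()
model.add(Dense(20, input_dim=15, activation='tanh'))   # hidden layer
model.add(Dense(1, activation='sigmoid'))               # P(positive class)
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])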
| 37.764706
| 85
| 0.690031
|
import pandas
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.callbacks import TensorBoard
from sklearn.model_selection import train_test_split
import pandas as pd
dataset = pd.read_csv("framingham.csv", header=None).values
import numpy as np
X_train, X_test, Y_train, Y_test = train_test_split(dataset[:,0:15], dataset[:,15],
test_size=0.33, random_state=87)
np.random.seed(100)
nnokay = Sequential()
nnokay.add(Dense(20, input_dim=15, activation='tanh'))
nnokay.add(Dense(30, activation='tanh'))
nnokay.add(Dense(60, activation='tanh'))
nnokay.add(Dense(20, activation='tanh'))
nnokay.add(Dense(15, activation='tanh'))
nnokay.add(Dense(60, activation='tanh'))
nnokay.add(Dense(1, activation='tanh'))
nnokay.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
nnokay.fit(X_train, Y_train, epochs=250, verbose=0,
callbacks=[TensorBoard(log_dir = '/tmp/auto')])
score = nnokay.evaluate(X_test, Y_test)
print('test accuracy', score[1])
| true
| true
|
f71628db5d9203bb58bae7d94aa015efbb6d6e01
| 13,115
|
py
|
Python
|
saleor/graphql/account/mutations/staff.py
|
lov3stor3/lov3stor3
|
1a0d94da1ce61d35ba5efbadbe737b039fedfe87
|
[
"CC-BY-4.0"
] | 1
|
2020-09-30T19:33:43.000Z
|
2020-09-30T19:33:43.000Z
|
saleor/graphql/account/mutations/staff.py
|
lov3stor3/lov3stor3
|
1a0d94da1ce61d35ba5efbadbe737b039fedfe87
|
[
"CC-BY-4.0"
] | 2
|
2021-03-09T17:15:05.000Z
|
2022-02-10T19:15:11.000Z
|
saleor/graphql/account/mutations/staff.py
|
lov3stor3/lov3stor3
|
1a0d94da1ce61d35ba5efbadbe737b039fedfe87
|
[
"CC-BY-4.0"
] | 1
|
2019-12-04T22:24:13.000Z
|
2019-12-04T22:24:13.000Z
|
from copy import copy
import graphene
from django.core.exceptions import ValidationError
from graphql_jwt.decorators import staff_member_required
from graphql_jwt.exceptions import PermissionDenied
from ....account import events as account_events, models, utils
from ....account.thumbnails import create_user_avatar_thumbnails
from ....account.utils import get_random_avatar
from ....checkout import AddressType
from ....core.permissions import get_permissions
from ....core.utils.url import validate_storefront_url
from ....dashboard.emails import send_set_password_email_with_url
from ....dashboard.staff.utils import remove_staff_member
from ...account.enums import AddressTypeEnum
from ...account.types import Address, AddressInput, User
from ...core.enums import PermissionEnum
from ...core.mutations import (
BaseMutation,
ClearMetaBaseMutation,
ModelDeleteMutation,
ModelMutation,
UpdateMetaBaseMutation,
)
from ...core.types import Upload
from ...core.utils import validate_image_file
from ..utils import CustomerDeleteMixin, StaffDeleteMixin, UserDeleteMixin
from .base import (
BaseAddressDelete,
BaseAddressUpdate,
BaseCustomerCreate,
CustomerInput,
UserInput,
)
class StaffInput(UserInput):
permissions = graphene.List(
PermissionEnum,
description="List of permission code names to assign to this user.",
)
class StaffCreateInput(StaffInput):
send_password_email = graphene.Boolean(
description="Send an email with a link to set the password"
)
redirect_url = graphene.String(
description=(
"URL of a view where users should be redirected to "
"set the password. URL in RFC 1808 format.",
)
)
class CustomerCreate(BaseCustomerCreate):
class Meta:
description = "Creates a new customer."
exclude = ["password"]
model = models.User
permissions = ("account.manage_users",)
class CustomerUpdate(CustomerCreate):
class Arguments:
id = graphene.ID(description="ID of a customer to update.", required=True)
input = CustomerInput(
description="Fields required to update a customer.", required=True
)
class Meta:
description = "Updates an existing customer."
exclude = ["password"]
model = models.User
permissions = ("account.manage_users",)
@classmethod
def generate_events(
cls, info, old_instance: models.User, new_instance: models.User
):
# Retrieve the event base data
staff_user = info.context.user
new_email = new_instance.email
new_fullname = new_instance.get_full_name()
# Compare the data
has_new_name = old_instance.get_full_name() != new_fullname
has_new_email = old_instance.email != new_email
# Generate the events accordingly
if has_new_email:
account_events.staff_user_assigned_email_to_a_customer_event(
staff_user=staff_user, new_email=new_email
)
if has_new_name:
account_events.staff_user_assigned_name_to_a_customer_event(
staff_user=staff_user, new_name=new_fullname
)
@classmethod
def perform_mutation(cls, _root, info, **data):
"""Generate events by comparing the old instance with the new data.
It overrides the `perform_mutation` base method of ModelMutation.
"""
# Retrieve the data
original_instance = cls.get_instance(info, **data)
data = data.get("input")
# Clean the input and generate a new instance from the new data
cleaned_input = cls.clean_input(info, original_instance, data)
new_instance = cls.construct_instance(copy(original_instance), cleaned_input)
# Save the new instance data
cls.clean_instance(new_instance)
cls.save(info, new_instance, cleaned_input)
cls._save_m2m(info, new_instance, cleaned_input)
# Generate events by comparing the instances
cls.generate_events(info, original_instance, new_instance)
# Return the response
return cls.success_response(new_instance)
class UserDelete(UserDeleteMixin, ModelDeleteMutation):
class Meta:
abstract = True
class CustomerDelete(CustomerDeleteMixin, UserDelete):
class Meta:
description = "Deletes a customer."
model = models.User
permissions = ("account.manage_users",)
class Arguments:
id = graphene.ID(required=True, description="ID of a customer to delete.")
@classmethod
def perform_mutation(cls, root, info, **data):
results = super().perform_mutation(root, info, **data)
cls.post_process(info)
return results
class StaffCreate(ModelMutation):
class Arguments:
input = StaffCreateInput(
description="Fields required to create a staff user.", required=True
)
class Meta:
description = "Creates a new staff user."
exclude = ["password"]
model = models.User
permissions = ("account.manage_staff",)
@classmethod
def clean_input(cls, info, instance, data):
cleaned_input = super().clean_input(info, instance, data)
if cleaned_input.get("send_password_email"):
if not cleaned_input.get("redirect_url"):
raise ValidationError(
{"redirect_url": "Redirect url is required to send a password."}
)
validate_storefront_url(cleaned_input.get("redirect_url"))
# set is_staff to True to create a staff user
cleaned_input["is_staff"] = True
# clean and prepare permissions
if "permissions" in cleaned_input:
permissions = cleaned_input.pop("permissions")
cleaned_input["user_permissions"] = get_permissions(permissions)
return cleaned_input
@classmethod
def save(cls, info, user, cleaned_input):
create_avatar = not user.avatar
if create_avatar:
user.avatar = get_random_avatar()
user.save()
if create_avatar:
create_user_avatar_thumbnails.delay(user_id=user.pk)
if cleaned_input.get("send_password_email"):
send_set_password_email_with_url(
redirect_url=cleaned_input.get("redirect_url"), user=user, staff=True
)
class StaffUpdate(StaffCreate):
class Arguments:
id = graphene.ID(description="ID of a staff user to update.", required=True)
input = StaffInput(
description="Fields required to update a staff user.", required=True
)
class Meta:
description = "Updates an existing staff user."
exclude = ["password"]
model = models.User
permissions = ("account.manage_staff",)
@classmethod
def clean_is_active(cls, is_active, instance, user):
if not is_active:
if user == instance:
raise ValidationError(
{"is_active": "Cannot deactivate your own account."}
)
elif instance.is_superuser:
raise ValidationError(
{"is_active": "Cannot deactivate superuser's account."}
)
@classmethod
def clean_input(cls, info, instance, data):
cleaned_input = super().clean_input(info, instance, data)
is_active = cleaned_input.get("is_active")
if is_active is not None:
cls.clean_is_active(is_active, instance, info.context.user)
return cleaned_input
class StaffDelete(StaffDeleteMixin, UserDelete):
class Meta:
description = "Deletes a staff user."
model = models.User
permissions = ("account.manage_staff",)
class Arguments:
id = graphene.ID(required=True, description="ID of a staff user to delete.")
@classmethod
def perform_mutation(cls, _root, info, **data):
if not cls.check_permissions(info.context.user):
raise PermissionDenied()
user_id = data.get("id")
instance = cls.get_node_or_error(info, user_id, only_type=User)
cls.clean_instance(info, instance)
db_id = instance.id
remove_staff_member(instance)
# After the instance is deleted, set its ID to the original database's
# ID so that the success response contains ID of the deleted object.
instance.id = db_id
return cls.success_response(instance)
class AddressCreate(ModelMutation):
user = graphene.Field(
User, description="A user instance for which the address was created."
)
class Arguments:
user_id = graphene.ID(
description="ID of a user to create address for", required=True
)
input = AddressInput(
description="Fields required to create address", required=True
)
class Meta:
description = "Creates user address"
model = models.Address
permissions = ("account.manage_users",)
@classmethod
def perform_mutation(cls, root, info, **data):
user_id = data["user_id"]
user = cls.get_node_or_error(info, user_id, field="user_id", only_type=User)
response = super().perform_mutation(root, info, **data)
if not response.errors:
user.addresses.add(response.address)
response.user = user
return response
class AddressUpdate(BaseAddressUpdate):
class Meta:
description = "Updates an address"
model = models.Address
permissions = ("account.manage_users",)
class AddressDelete(BaseAddressDelete):
class Meta:
description = "Deletes an address"
model = models.Address
permissions = ("account.manage_users",)
class AddressSetDefault(BaseMutation):
user = graphene.Field(User, description="An updated user instance.")
class Arguments:
address_id = graphene.ID(required=True, description="ID of the address.")
user_id = graphene.ID(
required=True, description="ID of the user to change the address for."
)
type = AddressTypeEnum(required=True, description="The type of address.")
class Meta:
description = "Sets a default address for the given user."
permissions = ("account.manage_users",)
@classmethod
def perform_mutation(cls, _root, info, address_id, user_id, **data):
address = cls.get_node_or_error(
info, address_id, field="address_id", only_type=Address
)
user = cls.get_node_or_error(info, user_id, field="user_id", only_type=User)
if not user.addresses.filter(pk=address.pk).exists():
raise ValidationError(
{"address_id": "The address doesn't belong to that user."}
)
if data.get("type") == AddressTypeEnum.BILLING.value:
address_type = AddressType.BILLING
else:
address_type = AddressType.SHIPPING
utils.change_user_default_address(user, address, address_type)
return cls(user=user)
class UserAvatarUpdate(BaseMutation):
user = graphene.Field(User, description="An updated user instance.")
class Arguments:
image = Upload(
required=True,
description="Represents an image file in a multipart request.",
)
class Meta:
description = """
Create a user avatar. Only for staff members. This mutation must
be sent as a `multipart` request. More detailed specs of the
upload format can be found here:
https://github.com/jaydenseric/graphql-multipart-request-spec
"""
@classmethod
@staff_member_required
def perform_mutation(cls, _root, info, image):
user = info.context.user
image_data = info.context.FILES.get(image)
validate_image_file(image_data, "image")
if user.avatar:
user.avatar.delete_sized_images()
user.avatar.delete()
user.avatar = image_data
user.save()
create_user_avatar_thumbnails.delay(user_id=user.pk)
return UserAvatarUpdate(user=user)
class UserAvatarDelete(BaseMutation):
user = graphene.Field(User, description="An updated user instance.")
class Meta:
description = "Deletes a user avatar. Only for staff members."
@classmethod
@staff_member_required
def perform_mutation(cls, _root, info):
user = info.context.user
user.avatar.delete_sized_images()
user.avatar.delete()
return UserAvatarDelete(user=user)
class UserUpdatePrivateMeta(UpdateMetaBaseMutation):
class Meta:
description = "Updates private metadata for user."
permissions = ("account.manage_users",)
model = models.User
public = False
class UserClearStoredPrivateMeta(ClearMetaBaseMutation):
class Meta:
description = "Clear stored metadata value."
model = models.User
permissions = ("account.manage_users",)
public = False
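# --- Editor's sketch (not part of the original module) --------------------
# The StaffCreate.clean_input() rule above, reduced to a standalone check: a
# redirect_url is mandatory whenever send_password_email is requested. The
# helper name and the plain-dict inputs are made up for illustration.
from django.core.exceptions import ValidationError

def check_password_email_input(cleaned_input):
    if cleaned_input.get("send_password_email") and not cleaned_input.get("redirect_url"):
        raise ValidationError(
            {"redirect_url": "Redirect url is required to send a password."}
        )

check_password_email_input({"send_password_email": False})  # passes silently
check_password_email_input(
    {"send_password_email": True, "redirect_url": "https://shop.example/set-password"}
)  # passes silently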
| 33.118687
| 85
| 0.658025
|
from copy import copy
import graphene
from django.core.exceptions import ValidationError
from graphql_jwt.decorators import staff_member_required
from graphql_jwt.exceptions import PermissionDenied
from ....account import events as account_events, models, utils
from ....account.thumbnails import create_user_avatar_thumbnails
from ....account.utils import get_random_avatar
from ....checkout import AddressType
from ....core.permissions import get_permissions
from ....core.utils.url import validate_storefront_url
from ....dashboard.emails import send_set_password_email_with_url
from ....dashboard.staff.utils import remove_staff_member
from ...account.enums import AddressTypeEnum
from ...account.types import Address, AddressInput, User
from ...core.enums import PermissionEnum
from ...core.mutations import (
BaseMutation,
ClearMetaBaseMutation,
ModelDeleteMutation,
ModelMutation,
UpdateMetaBaseMutation,
)
from ...core.types import Upload
from ...core.utils import validate_image_file
from ..utils import CustomerDeleteMixin, StaffDeleteMixin, UserDeleteMixin
from .base import (
BaseAddressDelete,
BaseAddressUpdate,
BaseCustomerCreate,
CustomerInput,
UserInput,
)
class StaffInput(UserInput):
permissions = graphene.List(
PermissionEnum,
description="List of permission code names to assign to this user.",
)
class StaffCreateInput(StaffInput):
send_password_email = graphene.Boolean(
description="Send an email with a link to set the password"
)
redirect_url = graphene.String(
description=(
"URL of a view where users should be redirected to "
"set the password. URL in RFC 1808 format.",
)
)
class CustomerCreate(BaseCustomerCreate):
class Meta:
description = "Creates a new customer."
exclude = ["password"]
model = models.User
permissions = ("account.manage_users",)
class CustomerUpdate(CustomerCreate):
class Arguments:
id = graphene.ID(description="ID of a customer to update.", required=True)
input = CustomerInput(
description="Fields required to update a customer.", required=True
)
class Meta:
description = "Updates an existing customer."
exclude = ["password"]
model = models.User
permissions = ("account.manage_users",)
@classmethod
def generate_events(
cls, info, old_instance: models.User, new_instance: models.User
):
staff_user = info.context.user
new_email = new_instance.email
new_fullname = new_instance.get_full_name()
has_new_name = old_instance.get_full_name() != new_fullname
has_new_email = old_instance.email != new_email
if has_new_email:
account_events.staff_user_assigned_email_to_a_customer_event(
staff_user=staff_user, new_email=new_email
)
if has_new_name:
account_events.staff_user_assigned_name_to_a_customer_event(
staff_user=staff_user, new_name=new_fullname
)
@classmethod
def perform_mutation(cls, _root, info, **data):
original_instance = cls.get_instance(info, **data)
data = data.get("input")
cleaned_input = cls.clean_input(info, original_instance, data)
new_instance = cls.construct_instance(copy(original_instance), cleaned_input)
cls.clean_instance(new_instance)
cls.save(info, new_instance, cleaned_input)
cls._save_m2m(info, new_instance, cleaned_input)
cls.generate_events(info, original_instance, new_instance)
return cls.success_response(new_instance)
class UserDelete(UserDeleteMixin, ModelDeleteMutation):
class Meta:
abstract = True
class CustomerDelete(CustomerDeleteMixin, UserDelete):
class Meta:
description = "Deletes a customer."
model = models.User
permissions = ("account.manage_users",)
class Arguments:
id = graphene.ID(required=True, description="ID of a customer to delete.")
@classmethod
def perform_mutation(cls, root, info, **data):
results = super().perform_mutation(root, info, **data)
cls.post_process(info)
return results
class StaffCreate(ModelMutation):
class Arguments:
input = StaffCreateInput(
description="Fields required to create a staff user.", required=True
)
class Meta:
description = "Creates a new staff user."
exclude = ["password"]
model = models.User
permissions = ("account.manage_staff",)
@classmethod
def clean_input(cls, info, instance, data):
cleaned_input = super().clean_input(info, instance, data)
if cleaned_input.get("send_password_email"):
if not cleaned_input.get("redirect_url"):
raise ValidationError(
{"redirect_url": "Redirect url is required to send a password."}
)
validate_storefront_url(cleaned_input.get("redirect_url"))
cleaned_input["is_staff"] = True
if "permissions" in cleaned_input:
permissions = cleaned_input.pop("permissions")
cleaned_input["user_permissions"] = get_permissions(permissions)
return cleaned_input
@classmethod
def save(cls, info, user, cleaned_input):
create_avatar = not user.avatar
if create_avatar:
user.avatar = get_random_avatar()
user.save()
if create_avatar:
create_user_avatar_thumbnails.delay(user_id=user.pk)
if cleaned_input.get("send_password_email"):
send_set_password_email_with_url(
redirect_url=cleaned_input.get("redirect_url"), user=user, staff=True
)
class StaffUpdate(StaffCreate):
class Arguments:
id = graphene.ID(description="ID of a staff user to update.", required=True)
input = StaffInput(
description="Fields required to update a staff user.", required=True
)
class Meta:
description = "Updates an existing staff user."
exclude = ["password"]
model = models.User
permissions = ("account.manage_staff",)
@classmethod
def clean_is_active(cls, is_active, instance, user):
if not is_active:
if user == instance:
raise ValidationError(
{"is_active": "Cannot deactivate your own account."}
)
elif instance.is_superuser:
raise ValidationError(
{"is_active": "Cannot deactivate superuser's account."}
)
@classmethod
def clean_input(cls, info, instance, data):
cleaned_input = super().clean_input(info, instance, data)
is_active = cleaned_input.get("is_active")
if is_active is not None:
cls.clean_is_active(is_active, instance, info.context.user)
return cleaned_input
class StaffDelete(StaffDeleteMixin, UserDelete):
class Meta:
description = "Deletes a staff user."
model = models.User
permissions = ("account.manage_staff",)
class Arguments:
id = graphene.ID(required=True, description="ID of a staff user to delete.")
@classmethod
def perform_mutation(cls, _root, info, **data):
if not cls.check_permissions(info.context.user):
raise PermissionDenied()
user_id = data.get("id")
instance = cls.get_node_or_error(info, user_id, only_type=User)
cls.clean_instance(info, instance)
db_id = instance.id
remove_staff_member(instance)
# After the instance is deleted, set its ID to the original database's
instance.id = db_id
return cls.success_response(instance)
class AddressCreate(ModelMutation):
user = graphene.Field(
User, description="A user instance for which the address was created."
)
class Arguments:
user_id = graphene.ID(
description="ID of a user to create address for", required=True
)
input = AddressInput(
description="Fields required to create address", required=True
)
class Meta:
description = "Creates user address"
model = models.Address
permissions = ("account.manage_users",)
@classmethod
def perform_mutation(cls, root, info, **data):
user_id = data["user_id"]
user = cls.get_node_or_error(info, user_id, field="user_id", only_type=User)
response = super().perform_mutation(root, info, **data)
if not response.errors:
user.addresses.add(response.address)
response.user = user
return response
class AddressUpdate(BaseAddressUpdate):
class Meta:
description = "Updates an address"
model = models.Address
permissions = ("account.manage_users",)
class AddressDelete(BaseAddressDelete):
class Meta:
description = "Deletes an address"
model = models.Address
permissions = ("account.manage_users",)
class AddressSetDefault(BaseMutation):
user = graphene.Field(User, description="An updated user instance.")
class Arguments:
address_id = graphene.ID(required=True, description="ID of the address.")
user_id = graphene.ID(
required=True, description="ID of the user to change the address for."
)
type = AddressTypeEnum(required=True, description="The type of address.")
class Meta:
description = "Sets a default address for the given user."
permissions = ("account.manage_users",)
@classmethod
def perform_mutation(cls, _root, info, address_id, user_id, **data):
address = cls.get_node_or_error(
info, address_id, field="address_id", only_type=Address
)
user = cls.get_node_or_error(info, user_id, field="user_id", only_type=User)
if not user.addresses.filter(pk=address.pk).exists():
raise ValidationError(
{"address_id": "The address doesn't belong to that user."}
)
if data.get("type") == AddressTypeEnum.BILLING.value:
address_type = AddressType.BILLING
else:
address_type = AddressType.SHIPPING
utils.change_user_default_address(user, address, address_type)
return cls(user=user)
class UserAvatarUpdate(BaseMutation):
user = graphene.Field(User, description="An updated user instance.")
class Arguments:
image = Upload(
required=True,
description="Represents an image file in a multipart request.",
)
class Meta:
description = """
Create a user avatar. Only for staff members. This mutation must
be sent as a `multipart` request. More detailed specs of the
upload format can be found here:
https://github.com/jaydenseric/graphql-multipart-request-spec
"""
@classmethod
@staff_member_required
def perform_mutation(cls, _root, info, image):
user = info.context.user
image_data = info.context.FILES.get(image)
validate_image_file(image_data, "image")
if user.avatar:
user.avatar.delete_sized_images()
user.avatar.delete()
user.avatar = image_data
user.save()
create_user_avatar_thumbnails.delay(user_id=user.pk)
return UserAvatarUpdate(user=user)
class UserAvatarDelete(BaseMutation):
user = graphene.Field(User, description="An updated user instance.")
class Meta:
description = "Deletes a user avatar. Only for staff members."
@classmethod
@staff_member_required
def perform_mutation(cls, _root, info):
user = info.context.user
user.avatar.delete_sized_images()
user.avatar.delete()
return UserAvatarDelete(user=user)
class UserUpdatePrivateMeta(UpdateMetaBaseMutation):
class Meta:
description = "Updates private metadata for user."
permissions = ("account.manage_users",)
model = models.User
public = False
class UserClearStoredPrivateMeta(ClearMetaBaseMutation):
class Meta:
description = "Clear stored metadata value."
model = models.User
permissions = ("account.manage_users",)
public = False
| true
| true
|
f7162a0290f40dedd34ce67aa72eb82899a6ccca
| 1,107
|
py
|
Python
|
Modules/tobii/eye_tracking_io/utils/events.py
|
ATUAV/ATUAV_Experiment
|
d0c1c3e1ff790bffa37d404ec1f4d70b537cd7fb
|
[
"BSD-2-Clause"
] | 7
|
2019-04-20T05:38:05.000Z
|
2022-01-17T14:48:43.000Z
|
Modules/tobii/eye_tracking_io/utils/events.py
|
ATUAV/ATUAV_Experiment
|
d0c1c3e1ff790bffa37d404ec1f4d70b537cd7fb
|
[
"BSD-2-Clause"
] | 1
|
2021-04-04T01:50:09.000Z
|
2021-04-04T01:50:09.000Z
|
Modules/tobii/eye_tracking_io/utils/events.py
|
ATUAV/ATUAV_Experiment
|
d0c1c3e1ff790bffa37d404ec1f4d70b537cd7fb
|
[
"BSD-2-Clause"
] | 2
|
2020-06-22T03:04:26.000Z
|
2021-07-10T20:14:55.000Z
|
class Events:
def __getattr__(self, name):
if hasattr(self.__class__, '__events__'):
assert name in self.__class__.__events__, \
"Event '%s' is not declared" % name
self.__dict__[name] = ev = _EventSlot(name)
return ev
def __repr__(self):
return 'Events' + str(list(self))
__str__ = __repr__
def __len__(self):
return NotImplemented
def __iter__(self):
def gen(dictitems=self.__dict__.items()):
            for _, val in dictitems:
if isinstance(val, _EventSlot):
yield val
return gen()
class _EventSlot:
def __init__(self, name):
self.targets = []
self.__name__ = name
def __repr__(self):
return 'event ' + self.__name__
def __call__(self, *a, **kw):
for f in self.targets: f(*a, **kw)
def __iadd__(self, f):
self.targets.append(f)
return self
def __isub__(self, f):
while f in self.targets:
self.targets.remove(f)
return self
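# --- Editor's sketch (not part of the original module) --------------------
# How this Events pattern is typically used: a subclass may declare
# __events__ to whitelist event names, handlers attach with += and fire when
# the slot is called. The Robot class and handler below are made up.
def on_start_handler():
    print('started')

class Robot(Events):
    __events__ = ('on_start', 'on_stop')

robot = Robot()
robot.on_start += on_start_handler
robot.on_start()                    # fires every attached handler
robot.on_start -= on_start_handler  # detach again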
| 24.065217
| 56
| 0.551942
|
class Events:
def __getattr__(self, name):
if hasattr(self.__class__, '__events__'):
assert name in self.__class__.__events__, \
"Event '%s' is not declared" % name
self.__dict__[name] = ev = _EventSlot(name)
return ev
def __repr__(self):
return 'Events' + str(list(self))
__str__ = __repr__
def __len__(self):
return NotImplemented
def __iter__(self):
def gen(dictitems=self.__dict__.items()):
            for _, val in dictitems:
if isinstance(val, _EventSlot):
yield val
return gen()
class _EventSlot:
def __init__(self, name):
self.targets = []
self.__name__ = name
def __repr__(self):
return 'event ' + self.__name__
def __call__(self, *a, **kw):
for f in self.targets: f(*a, **kw)
def __iadd__(self, f):
self.targets.append(f)
return self
def __isub__(self, f):
while f in self.targets:
self.targets.remove(f)
return self
| true
| true
|
f7162a3a84cc9137e9125489d2dc95fb668b61c3
| 6,972
|
py
|
Python
|
lib/spack/spack/mixins.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2020-05-24T15:23:12.000Z
|
2020-05-24T15:23:12.000Z
|
lib/spack/spack/mixins.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 6
|
2022-02-26T11:44:34.000Z
|
2022-03-12T12:14:50.000Z
|
lib/spack/spack/mixins.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2020-09-15T02:37:59.000Z
|
2020-09-21T04:34:38.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This module contains additional behavior that can be attached to any given
package.
"""
import collections
import os
import llnl.util.filesystem
__all__ = [
'filter_compiler_wrappers'
]
class PackageMixinsMeta(type):
"""This metaclass serves the purpose of implementing a declarative syntax
for package mixins.
Mixins are implemented below in the form of a function. Each one of them
needs to register a callable that takes a single argument to be run
before or after a certain phase. This callable is basically a method that
gets implicitly attached to the package class by calling the mixin.
"""
_methods_to_be_added = {}
_add_method_before = collections.defaultdict(list)
_add_method_after = collections.defaultdict(list)
@staticmethod
def register_method_before(fn, phase):
"""Registers a method to be run before a certain phase.
Args:
fn: function taking a single argument (self)
phase (str): phase before which fn must run
"""
PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn
PackageMixinsMeta._add_method_before[phase].append(fn)
@staticmethod
def register_method_after(fn, phase):
"""Registers a method to be run after a certain phase.
Args:
fn: function taking a single argument (self)
phase (str): phase after which fn must run
"""
PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn
PackageMixinsMeta._add_method_after[phase].append(fn)
def __init__(cls, name, bases, attr_dict):
# Add the methods to the class being created
if PackageMixinsMeta._methods_to_be_added:
attr_dict.update(PackageMixinsMeta._methods_to_be_added)
PackageMixinsMeta._methods_to_be_added.clear()
attr_fmt = '_InstallPhase_{0}'
# Copy the phases that needs it to the most derived classes
# in order not to interfere with other packages in the hierarchy
phases_to_be_copied = list(
PackageMixinsMeta._add_method_before.keys()
)
phases_to_be_copied += list(
PackageMixinsMeta._add_method_after.keys()
)
for phase in phases_to_be_copied:
attr_name = attr_fmt.format(phase)
# Here we want to get the attribute directly from the class (not
# from the instance), so that we can modify it and add the mixin
# method to the pipeline.
phase = getattr(cls, attr_name)
# Due to MRO, we may have taken a method from a parent class
# and modifying it may influence other packages in unwanted
# manners. Solve the problem by copying the phase into the most
# derived class.
setattr(cls, attr_name, phase.copy())
# Insert the methods in the appropriate position
# in the installation pipeline.
for phase in PackageMixinsMeta._add_method_before:
attr_name = attr_fmt.format(phase)
phase_obj = getattr(cls, attr_name)
            fn_list = PackageMixinsMeta._add_method_before[phase]
for f in fn_list:
phase_obj.run_before.append(f)
# Flush the dictionary for the next class
PackageMixinsMeta._add_method_before.clear()
for phase in PackageMixinsMeta._add_method_after:
attr_name = attr_fmt.format(phase)
phase_obj = getattr(cls, attr_name)
fn_list = PackageMixinsMeta._add_method_after[phase]
for f in fn_list:
phase_obj.run_after.append(f)
# Flush the dictionary for the next class
PackageMixinsMeta._add_method_after.clear()
super(PackageMixinsMeta, cls).__init__(name, bases, attr_dict)
def filter_compiler_wrappers(*files, **kwargs):
"""Substitutes any path referring to a Spack compiler wrapper with the
path of the underlying compiler that has been used.
If this isn't done, the files will have CC, CXX, F77, and FC set to
Spack's generic cc, c++, f77, and f90. We want them to be bound to
whatever compiler they were built with.
Args:
*files: files to be filtered relative to the search root (which is,
by default, the installation prefix)
**kwargs: allowed keyword arguments
after
specifies after which phase the files should be
filtered (defaults to 'install')
relative_root
path relative to prefix where to start searching for
the files to be filtered. If not set the install prefix
wil be used as the search root. **It is highly recommended
to set this, as searching from the installation prefix may
affect performance severely in some cases**.
ignore_absent, backup
these two keyword arguments, if present, will be forwarded
to ``filter_file`` (see its documentation for more information
on their behavior)
recursive
this keyword argument, if present, will be forwarded to
``find`` (see its documentation for more information on the
behavior)
"""
after = kwargs.get('after', 'install')
relative_root = kwargs.get('relative_root', None)
filter_kwargs = {
'ignore_absent': kwargs.get('ignore_absent', True),
'backup': kwargs.get('backup', False),
'string': True
}
find_kwargs = {
'recursive': kwargs.get('recursive', False)
}
def _filter_compiler_wrappers_impl(self):
# Compute the absolute path of the search root
root = os.path.join(
self.prefix, relative_root
) if relative_root else self.prefix
# Compute the absolute path of the files to be filtered and
# remove links from the list.
abs_files = llnl.util.filesystem.find(root, files, **find_kwargs)
abs_files = [x for x in abs_files if not os.path.islink(x)]
x = llnl.util.filesystem.FileFilter(*abs_files)
replacements = [
('CC', self.compiler.cc),
('CXX', self.compiler.cxx),
('F77', self.compiler.f77),
('FC', self.compiler.fc)
]
for env_var, compiler_path in replacements:
if env_var in os.environ:
x.filter(os.environ[env_var], compiler_path, **filter_kwargs)
# Remove this linking flag if present (it turns RPATH into RUNPATH)
x.filter('-Wl,--enable-new-dtags', '', **filter_kwargs)
PackageMixinsMeta.register_method_after(
_filter_compiler_wrappers_impl, after
)
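# --- Editor's sketch (not part of the original module) --------------------
# How a package recipe opts in to this mixin: the class body calls
# filter_compiler_wrappers() with the wrapper scripts to patch. The package
# name and file list below are illustrative, not a real Spack recipe, and the
# snippet is meant to live in a package.py rather than run standalone.
from spack import *  # package.py preamble convention of this Spack era


class MyMpi(AutotoolsPackage):
    """Hypothetical MPI library whose wrapper scripts embed compiler paths."""

    filter_compiler_wrappers(
        'mpicc', 'mpicxx', 'mpif77', 'mpif90', relative_root='bin'
    )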
| 35.753846
| 78
| 0.647017
|
import collections
import os
import llnl.util.filesystem
__all__ = [
'filter_compiler_wrappers'
]
class PackageMixinsMeta(type):
_methods_to_be_added = {}
_add_method_before = collections.defaultdict(list)
_add_method_after = collections.defaultdict(list)
@staticmethod
def register_method_before(fn, phase):
PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn
PackageMixinsMeta._add_method_before[phase].append(fn)
@staticmethod
def register_method_after(fn, phase):
PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn
PackageMixinsMeta._add_method_after[phase].append(fn)
def __init__(cls, name, bases, attr_dict):
if PackageMixinsMeta._methods_to_be_added:
attr_dict.update(PackageMixinsMeta._methods_to_be_added)
PackageMixinsMeta._methods_to_be_added.clear()
attr_fmt = '_InstallPhase_{0}'
phases_to_be_copied = list(
PackageMixinsMeta._add_method_before.keys()
)
phases_to_be_copied += list(
PackageMixinsMeta._add_method_after.keys()
)
for phase in phases_to_be_copied:
attr_name = attr_fmt.format(phase)
phase = getattr(cls, attr_name)
setattr(cls, attr_name, phase.copy())
for phase in PackageMixinsMeta._add_method_before:
attr_name = attr_fmt.format(phase)
phase_obj = getattr(cls, attr_name)
            fn_list = PackageMixinsMeta._add_method_before[phase]
for f in fn_list:
phase_obj.run_before.append(f)
PackageMixinsMeta._add_method_before.clear()
for phase in PackageMixinsMeta._add_method_after:
attr_name = attr_fmt.format(phase)
phase_obj = getattr(cls, attr_name)
fn_list = PackageMixinsMeta._add_method_after[phase]
for f in fn_list:
phase_obj.run_after.append(f)
PackageMixinsMeta._add_method_after.clear()
super(PackageMixinsMeta, cls).__init__(name, bases, attr_dict)
def filter_compiler_wrappers(*files, **kwargs):
after = kwargs.get('after', 'install')
relative_root = kwargs.get('relative_root', None)
filter_kwargs = {
'ignore_absent': kwargs.get('ignore_absent', True),
'backup': kwargs.get('backup', False),
'string': True
}
find_kwargs = {
'recursive': kwargs.get('recursive', False)
}
def _filter_compiler_wrappers_impl(self):
root = os.path.join(
self.prefix, relative_root
) if relative_root else self.prefix
abs_files = llnl.util.filesystem.find(root, files, **find_kwargs)
abs_files = [x for x in abs_files if not os.path.islink(x)]
x = llnl.util.filesystem.FileFilter(*abs_files)
replacements = [
('CC', self.compiler.cc),
('CXX', self.compiler.cxx),
('F77', self.compiler.f77),
('FC', self.compiler.fc)
]
for env_var, compiler_path in replacements:
if env_var in os.environ:
x.filter(os.environ[env_var], compiler_path, **filter_kwargs)
x.filter('-Wl,--enable-new-dtags', '', **filter_kwargs)
PackageMixinsMeta.register_method_after(
_filter_compiler_wrappers_impl, after
)
| true
| true
|
f7162a97b20d3185af1c91ee93a27ee014a8082c
| 9,122
|
py
|
Python
|
tests/utils/test_requirements_utils.py
|
ericgosno91/mlflow
|
8d1a9e354b22919423e5295afd650e39191f701a
|
[
"Apache-2.0"
] | 2
|
2020-06-23T03:58:12.000Z
|
2020-11-26T13:59:10.000Z
|
tests/utils/test_requirements_utils.py
|
ericgosno91/mlflow
|
8d1a9e354b22919423e5295afd650e39191f701a
|
[
"Apache-2.0"
] | null | null | null |
tests/utils/test_requirements_utils.py
|
ericgosno91/mlflow
|
8d1a9e354b22919423e5295afd650e39191f701a
|
[
"Apache-2.0"
] | 1
|
2021-08-17T17:53:12.000Z
|
2021-08-17T17:53:12.000Z
|
import os
import sys
import importlib
from unittest import mock
import importlib_metadata
import pytest
import mlflow
from mlflow.utils.requirements_utils import (
_is_comment,
_is_empty,
_is_requirements_file,
_strip_inline_comment,
_join_continued_lines,
_parse_requirements,
_prune_packages,
_strip_local_version_label,
_get_installed_version,
_get_pinned_requirement,
_module_to_packages,
_infer_requirements,
)
def test_is_comment():
assert _is_comment("# comment")
assert _is_comment("#")
assert _is_comment("### comment ###")
assert not _is_comment("comment")
assert not _is_comment("")
def test_is_empty():
assert _is_empty("")
assert not _is_empty(" ")
assert not _is_empty("a")
def test_is_requirements_file():
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("--requirement req.txt")
assert _is_requirements_file("--requirement req.txt")
assert not _is_requirements_file("req")
def test_strip_inline_comment():
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # com1 # com2") == "aaa"
# Ensure a URI fragment is not stripped
assert (
_strip_inline_comment("git+https://git/repo.git#subdirectory=subdir")
== "git+https://git/repo.git#subdirectory=subdir"
)
def test_join_continued_lines():
assert list(_join_continued_lines(["a"])) == ["a"]
assert list(_join_continued_lines(["a\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b\\", "c"])) == ["abc"]
assert list(_join_continued_lines(["a\\", " b"])) == ["a b"]
assert list(_join_continued_lines(["a\\", " b\\", " c"])) == ["a b c"]
assert list(_join_continued_lines(["a\\", "\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b", "c\\", "d"])) == ["ab", "cd"]
assert list(_join_continued_lines(["a\\", "", "b"])) == ["a", "b"]
assert list(_join_continued_lines(["a\\"])) == ["a"]
assert list(_join_continued_lines(["\\", "a"])) == ["a"]
def test_parse_requirements(request, tmpdir):
"""
Ensures `_parse_requirements` returns the same result as `pip._internal.req.parse_requirements`
"""
from pip._internal.req import parse_requirements as pip_parse_requirements
from pip._internal.network.session import PipSession
root_req_src = """
# No version specifier
noverspec
no-ver-spec
# Version specifiers
verspec<1.0
ver-spec == 2.0
# Environment marker
env-marker; python_version < "3.8"
inline-comm # Inline comment
inlinecomm # Inline comment
# Git URIs
git+https://github.com/git/uri
git+https://github.com/sub/dir#subdirectory=subdir
# Requirements files
-r {relative_req}
--requirement {absolute_req}
# Constraints files
-c {relative_con}
--constraint {absolute_con}
# Line continuation
line-cont\
==\
1.0
# Line continuation with spaces
line-cont-space \
== \
1.0
# Line continuation with a blank line
line-cont-blank\
# Line continuation at EOF
line-cont-eof\
""".strip()
try:
os.chdir(tmpdir)
root_req = tmpdir.join("requirements.txt")
# Requirements files
rel_req = tmpdir.join("relative_req.txt")
abs_req = tmpdir.join("absolute_req.txt")
# Constraints files
rel_con = tmpdir.join("relative_con.txt")
abs_con = tmpdir.join("absolute_con.txt")
# pip's requirements parser collapses an absolute requirements file path:
# https://github.com/pypa/pip/issues/10121
# As a workaround, use a relative path on Windows.
absolute_req = abs_req.basename if os.name == "nt" else abs_req.strpath
absolute_con = abs_con.basename if os.name == "nt" else abs_con.strpath
root_req.write(
root_req_src.format(
relative_req=rel_req.basename,
absolute_req=absolute_req,
relative_con=rel_con.basename,
absolute_con=absolute_con,
)
)
rel_req.write("rel-req-xxx\nrel-req-yyy")
abs_req.write("abs-req-zzz")
rel_con.write("rel-con-xxx\nrel-con-yyy")
abs_con.write("abs-con-zzz")
expected_cons = [
"rel-con-xxx",
"rel-con-yyy",
"abs-con-zzz",
]
expected_reqs = [
"noverspec",
"no-ver-spec",
"verspec<1.0",
"ver-spec == 2.0",
'env-marker; python_version < "3.8"',
"inline-comm",
"inlinecomm",
"git+https://github.com/git/uri",
"git+https://github.com/sub/dir#subdirectory=subdir",
"rel-req-xxx",
"rel-req-yyy",
"abs-req-zzz",
"line-cont==1.0",
"line-cont-space == 1.0",
"line-cont-blank",
"line-cont-eof",
]
parsed_reqs = list(_parse_requirements(root_req.basename, is_constraint=False))
pip_reqs = list(pip_parse_requirements(root_req.basename, session=PipSession()))
# Requirements
assert [r.req_str for r in parsed_reqs if not r.is_constraint] == expected_reqs
assert [r.requirement for r in pip_reqs if not r.constraint] == expected_reqs
# Constraints
assert [r.req_str for r in parsed_reqs if r.is_constraint] == expected_cons
assert [r.requirement for r in pip_reqs if r.constraint] == expected_cons
finally:
os.chdir(request.config.invocation_dir)
def test_prune_packages():
assert _prune_packages(["mlflow"]) == {"mlflow"}
assert _prune_packages(["mlflow", "packaging"]) == {"mlflow"}
assert _prune_packages(["mlflow", "scikit-learn"]) == {"mlflow", "scikit-learn"}
def test_capture_imported_modules():
from mlflow.utils._capture_modules import _CaptureImportedModules
with _CaptureImportedModules() as cap:
# pylint: disable=unused-import,unused-variable
import math
__import__("pandas")
importlib.import_module("numpy")
assert "math" in cap.imported_modules
assert "pandas" in cap.imported_modules
assert "numpy" in cap.imported_modules
def test_strip_local_version_label():
assert _strip_local_version_label("1.2.3") == "1.2.3"
assert _strip_local_version_label("1.2.3+ab") == "1.2.3"
assert _strip_local_version_label("1.2.3rc0+ab") == "1.2.3rc0"
assert _strip_local_version_label("1.2.3.dev0+ab") == "1.2.3.dev0"
assert _strip_local_version_label("1.2.3.post0+ab") == "1.2.3.post0"
assert _strip_local_version_label("invalid") == "invalid"
def test_get_installed_version(tmpdir):
import numpy as np
import pandas as pd
import sklearn
assert _get_installed_version("mlflow") == mlflow.__version__
assert _get_installed_version("numpy") == np.__version__
assert _get_installed_version("pandas") == pd.__version__
assert _get_installed_version("scikit-learn", module="sklearn") == sklearn.__version__
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
sys.path.insert(0, tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError):
importlib_metadata.version("not_found")
assert _get_installed_version("not_found") == "1.2.3"
def test_get_pinned_requirement(tmpdir):
assert _get_pinned_requirement("mlflow") == f"mlflow=={mlflow.__version__}"
assert _get_pinned_requirement("mlflow", version="1.2.3") == "mlflow==1.2.3"
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
sys.path.insert(0, tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError):
importlib_metadata.version("not_found")
assert _get_pinned_requirement("not_found") == "not_found==1.2.3"
def test_get_pinned_requirement_local_version_label(tmpdir):
package = tmpdir.join("my_package.py")
lvl = "abc.def.ghi" # Local version label
package.write(f"__version__ = '1.2.3+{lvl}'")
sys.path.insert(0, tmpdir.strpath)
with mock.patch("mlflow.utils.requirements_utils._logger.warning") as mock_warning:
req = _get_pinned_requirement("my_package")
mock_warning.assert_called_once()
(first_pos_arg,) = mock_warning.call_args[0]
assert first_pos_arg.startswith(
f"Found my_package version (1.2.3+{lvl}) contains a local version label (+{lvl})."
)
assert req == "my_package==1.2.3"
def test_infer_requirements_excludes_mlflow():
with mock.patch(
"mlflow.utils.requirements_utils._capture_imported_modules",
return_value=["mlflow", "pytest"],
):
mlflow_package = "mlflow-skinny" if "MLFLOW_SKINNY" in os.environ else "mlflow"
assert mlflow_package in _module_to_packages("mlflow")
assert _infer_requirements("path/to/model", "sklearn") == [f"pytest=={pytest.__version__}"]
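# --- Editor's sketch (not part of the original tests) ---------------------
# The behaviour these tests pin down, reduced to its core: resolve the
# installed version and emit an exact pin, dropping any local version label
# (the '+...' suffix). A simplified model, not mlflow's own implementation.
import importlib_metadata

def pinned_requirement(package):
    version = importlib_metadata.version(package)
    public_version = version.split('+', 1)[0]  # strip local version label
    return "{}=={}".format(package, public_version)

print(pinned_requirement('pytest'))  # e.g. pytest==6.2.5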
| 33.413919
| 99
| 0.662684
|
import os
import sys
import importlib
from unittest import mock
import importlib_metadata
import pytest
import mlflow
from mlflow.utils.requirements_utils import (
_is_comment,
_is_empty,
_is_requirements_file,
_strip_inline_comment,
_join_continued_lines,
_parse_requirements,
_prune_packages,
_strip_local_version_label,
_get_installed_version,
_get_pinned_requirement,
_module_to_packages,
_infer_requirements,
)
def test_is_comment():
assert _is_comment("# comment")
assert _is_comment("#")
assert _is_comment("### comment ###")
assert not _is_comment("comment")
assert not _is_comment("")
def test_is_empty():
assert _is_empty("")
assert not _is_empty(" ")
assert not _is_empty("a")
def test_is_requirements_file():
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("-r req.txt")
assert _is_requirements_file("--requirement req.txt")
assert _is_requirements_file("--requirement req.txt")
assert not _is_requirements_file("req")
def test_strip_inline_comment():
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # comment") == "aaa"
assert _strip_inline_comment("aaa # com1 # com2") == "aaa"
assert (
_strip_inline_comment("git+https://git/repo.git#subdirectory=subdir")
== "git+https://git/repo.git#subdirectory=subdir"
)
def test_join_continued_lines():
assert list(_join_continued_lines(["a"])) == ["a"]
assert list(_join_continued_lines(["a\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b\\", "c"])) == ["abc"]
assert list(_join_continued_lines(["a\\", " b"])) == ["a b"]
assert list(_join_continued_lines(["a\\", " b\\", " c"])) == ["a b c"]
assert list(_join_continued_lines(["a\\", "\\", "b"])) == ["ab"]
assert list(_join_continued_lines(["a\\", "b", "c\\", "d"])) == ["ab", "cd"]
assert list(_join_continued_lines(["a\\", "", "b"])) == ["a", "b"]
assert list(_join_continued_lines(["a\\"])) == ["a"]
assert list(_join_continued_lines(["\\", "a"])) == ["a"]
def test_parse_requirements(request, tmpdir):
from pip._internal.req import parse_requirements as pip_parse_requirements
from pip._internal.network.session import PipSession
root_req_src = """
# No version specifier
noverspec
no-ver-spec
# Version specifiers
verspec<1.0
ver-spec == 2.0
# Environment marker
env-marker; python_version < "3.8"
inline-comm # Inline comment
inlinecomm # Inline comment
# Git URIs
git+https://github.com/git/uri
git+https://github.com/sub/dir#subdirectory=subdir
# Requirements files
-r {relative_req}
--requirement {absolute_req}
# Constraints files
-c {relative_con}
--constraint {absolute_con}
# Line continuation
line-cont\
==\
1.0
# Line continuation with spaces
line-cont-space \
== \
1.0
# Line continuation with a blank line
line-cont-blank\
# Line continuation at EOF
line-cont-eof\
""".strip()
try:
os.chdir(tmpdir)
root_req = tmpdir.join("requirements.txt")
rel_req = tmpdir.join("relative_req.txt")
abs_req = tmpdir.join("absolute_req.txt")
rel_con = tmpdir.join("relative_con.txt")
abs_con = tmpdir.join("absolute_con.txt")
# https://github.com/pypa/pip/issues/10121
# As a workaround, use a relative path on Windows.
absolute_req = abs_req.basename if os.name == "nt" else abs_req.strpath
absolute_con = abs_con.basename if os.name == "nt" else abs_con.strpath
root_req.write(
root_req_src.format(
relative_req=rel_req.basename,
absolute_req=absolute_req,
relative_con=rel_con.basename,
absolute_con=absolute_con,
)
)
rel_req.write("rel-req-xxx\nrel-req-yyy")
abs_req.write("abs-req-zzz")
rel_con.write("rel-con-xxx\nrel-con-yyy")
abs_con.write("abs-con-zzz")
expected_cons = [
"rel-con-xxx",
"rel-con-yyy",
"abs-con-zzz",
]
expected_reqs = [
"noverspec",
"no-ver-spec",
"verspec<1.0",
"ver-spec == 2.0",
'env-marker; python_version < "3.8"',
"inline-comm",
"inlinecomm",
"git+https://github.com/git/uri",
"git+https://github.com/sub/dir#subdirectory=subdir",
"rel-req-xxx",
"rel-req-yyy",
"abs-req-zzz",
"line-cont==1.0",
"line-cont-space == 1.0",
"line-cont-blank",
"line-cont-eof",
]
parsed_reqs = list(_parse_requirements(root_req.basename, is_constraint=False))
pip_reqs = list(pip_parse_requirements(root_req.basename, session=PipSession()))
# Requirements
assert [r.req_str for r in parsed_reqs if not r.is_constraint] == expected_reqs
assert [r.requirement for r in pip_reqs if not r.constraint] == expected_reqs
# Constraints
assert [r.req_str for r in parsed_reqs if r.is_constraint] == expected_cons
assert [r.requirement for r in pip_reqs if r.constraint] == expected_cons
finally:
os.chdir(request.config.invocation_dir)
def test_prune_packages():
assert _prune_packages(["mlflow"]) == {"mlflow"}
assert _prune_packages(["mlflow", "packaging"]) == {"mlflow"}
assert _prune_packages(["mlflow", "scikit-learn"]) == {"mlflow", "scikit-learn"}
def test_capture_imported_modules():
from mlflow.utils._capture_modules import _CaptureImportedModules
with _CaptureImportedModules() as cap:
# pylint: disable=unused-import,unused-variable
import math
__import__("pandas")
importlib.import_module("numpy")
assert "math" in cap.imported_modules
assert "pandas" in cap.imported_modules
assert "numpy" in cap.imported_modules
def test_strip_local_version_label():
assert _strip_local_version_label("1.2.3") == "1.2.3"
assert _strip_local_version_label("1.2.3+ab") == "1.2.3"
assert _strip_local_version_label("1.2.3rc0+ab") == "1.2.3rc0"
assert _strip_local_version_label("1.2.3.dev0+ab") == "1.2.3.dev0"
assert _strip_local_version_label("1.2.3.post0+ab") == "1.2.3.post0"
assert _strip_local_version_label("invalid") == "invalid"
def test_get_installed_version(tmpdir):
import numpy as np
import pandas as pd
import sklearn
assert _get_installed_version("mlflow") == mlflow.__version__
assert _get_installed_version("numpy") == np.__version__
assert _get_installed_version("pandas") == pd.__version__
assert _get_installed_version("scikit-learn", module="sklearn") == sklearn.__version__
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
sys.path.insert(0, tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError):
importlib_metadata.version("not_found")
assert _get_installed_version("not_found") == "1.2.3"
def test_get_pinned_requirement(tmpdir):
assert _get_pinned_requirement("mlflow") == f"mlflow=={mlflow.__version__}"
assert _get_pinned_requirement("mlflow", version="1.2.3") == "mlflow==1.2.3"
not_found_package = tmpdir.join("not_found.py")
not_found_package.write("__version__ = '1.2.3'")
sys.path.insert(0, tmpdir.strpath)
with pytest.raises(importlib_metadata.PackageNotFoundError):
importlib_metadata.version("not_found")
assert _get_pinned_requirement("not_found") == "not_found==1.2.3"
def test_get_pinned_requirement_local_version_label(tmpdir):
package = tmpdir.join("my_package.py")
lvl = "abc.def.ghi" # Local version label
package.write(f"__version__ = '1.2.3+{lvl}'")
sys.path.insert(0, tmpdir.strpath)
with mock.patch("mlflow.utils.requirements_utils._logger.warning") as mock_warning:
req = _get_pinned_requirement("my_package")
mock_warning.assert_called_once()
(first_pos_arg,) = mock_warning.call_args[0]
assert first_pos_arg.startswith(
f"Found my_package version (1.2.3+{lvl}) contains a local version label (+{lvl})."
)
assert req == "my_package==1.2.3"
def test_infer_requirements_excludes_mlflow():
with mock.patch(
"mlflow.utils.requirements_utils._capture_imported_modules",
return_value=["mlflow", "pytest"],
):
mlflow_package = "mlflow-skinny" if "MLFLOW_SKINNY" in os.environ else "mlflow"
assert mlflow_package in _module_to_packages("mlflow")
assert _infer_requirements("path/to/model", "sklearn") == [f"pytest=={pytest.__version__}"]
| true
| true
|
f7162cd39631cdb90524c08f6d65d11f9c020727
| 9,440
|
py
|
Python
|
reid/modeling/baseline.py
|
raoyongming/CAL
|
76475ff56e399b276630d8bf3a4f5594803609a6
|
[
"MIT"
] | 58
|
2021-08-19T16:18:41.000Z
|
2022-03-30T13:00:15.000Z
|
reid/modeling/baseline.py
|
raoyongming/CAL
|
76475ff56e399b276630d8bf3a4f5594803609a6
|
[
"MIT"
] | 9
|
2021-09-07T03:46:13.000Z
|
2022-03-24T07:22:41.000Z
|
reid/modeling/baseline.py
|
raoyongming/CAL
|
76475ff56e399b276630d8bf3a4f5594803609a6
|
[
"MIT"
] | 13
|
2021-08-20T05:08:09.000Z
|
2022-03-07T13:12:29.000Z
|
import torch
from torch import nn
import torch.nn.functional as F
import sys
from .backbones.resnet import ResNet
sys.path.append('.')
EPSILON = 1e-12
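# Kaiming initialization for Linear/Conv layers; affine BatchNorm layers get
# weight 1 and bias 0. Applied to modules via Module.apply() below.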
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return y
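# Bilinear Attention Pooling: pools backbone features under each attention map
# (GAP via einsum, or GMP per map), then applies sign-sqrt and L2 normalization.
# With counterfactual=True it also pools with random (train) / uniform (eval)
# fake attentions to produce the counterfactual feature.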
class BAP(nn.Module):
def __init__(self, pool='GAP'):
super(BAP, self).__init__()
assert pool in ['GAP', 'GMP']
if pool == 'GAP':
self.pool = None
else:
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, features, attentions, counterfactual=False):
B, C, H, W = features.size()
_, M, AH, AW = attentions.size()
# match size
if AH != H or AW != W:
attentions = F.upsample_bilinear(attentions, size=(H, W))
# feature_matrix: (B, M, C) -> (B, M * C)
if self.pool is None:
feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)
else:
feature_matrix = []
for i in range(M):
AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)
feature_matrix.append(AiF)
feature_matrix = torch.cat(feature_matrix, dim=1)
# sign-sqrt
feature_matrix_raw = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)
# l2 normalization along dimension M and C
feature_matrix = F.normalize(feature_matrix_raw, dim=-1)
if counterfactual:
if self.training:
fake_att = torch.zeros_like(attentions).uniform_(0, 2)
else:
fake_att = torch.ones_like(attentions)
# mean_feature = features.mean(3).mean(2).view(B, 1, C)
# counterfactual_feature = mean_feature.expand(B, M, C).contiguous().view(B, -1)
counterfactual_feature = (torch.einsum('imjk,injk->imn', (fake_att, features)) / float(H * W)).view(B, -1)
counterfactual_feature = torch.sign(counterfactual_feature) * torch.sqrt(torch.abs(counterfactual_feature) + EPSILON)
counterfactual_feature = F.normalize(counterfactual_feature, dim=-1)
return feature_matrix, counterfactual_feature
else:
return feature_matrix
class MultiHeadAtt(nn.Module):
"""
Extend the channel attention into MultiHeadAtt.
It is modified from "Zhang H, Wu C, Zhang Z, et al. Resnest: Split-attention networks."
"""
def __init__(self, in_channels, channels,
radix=4, reduction_factor=4,
rectify=False, norm_layer=nn.BatchNorm2d):
super(MultiHeadAtt, self).__init__()
inter_channels = max(in_channels*radix//reduction_factor, 32)
self.radix = radix
self.channels = channels
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Conv2d(channels, inter_channels, 1, groups=1)
self.bn1 = norm_layer(inter_channels)
self.fc2 = nn.Conv2d(inter_channels, channels*radix, 1, groups=1)
def forward(self, x):
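        # Split-attention: split channels into `radix` groups, pool the summed
        # groups globally, predict per-group weights with a softmax over the
        # radix axis, then re-weight each group and concatenate them back.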
batch, channel = x.shape[:2]
splited = torch.split(x, channel//self.radix, dim=1)
gap = sum(splited)
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap).view((batch, self.radix, self.channels))
atten = F.softmax(atten, dim=1).view(batch, -1, 1, 1)
atten = torch.split(atten, channel//self.radix, dim=1)
out= torch.cat([att*split for (att, split) in zip(atten, splited)],1)
return out.contiguous()
class BN2d(nn.Module):
def __init__(self, planes):
super(BN2d, self).__init__()
self.bottleneck2 = nn.BatchNorm2d(planes)
self.bottleneck2.bias.requires_grad_(False) # no shift
self.bottleneck2.apply(weights_init_kaiming)
def forward(self, x):
return self.bottleneck2(x)
class Baseline(nn.Module):
in_planes = 2048
def __init__(self, num_classes, last_stride, model_path, using_cal):
super(Baseline, self).__init__()
self.using_cal = using_cal
self.base = ResNet(last_stride)
self.base.load_param(model_path)
self.radix = 2
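        # Split the backbone into five stages; each stage output is refined in
        # forward() by split-attention (MultiHeadAtt), BatchNorm2d, and an SE channel gate.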
self.base_1 = nn.Sequential(*list(self.base.children())[0:3])
self.BN1 = BN2d(64)
self.att1 = SELayer(64,8)
self.att_s1=MultiHeadAtt(64,int(64/self.radix),radix=self.radix)
self.base_2 = nn.Sequential(*list(self.base.children())[3:4])
self.BN2 = BN2d(256)
self.att2 = SELayer(256,32)
self.att_s2=MultiHeadAtt(256,int(256/self.radix),radix=self.radix)
self.base_3 = nn.Sequential(*list(self.base.children())[4:5])
self.BN3 = BN2d(512)
self.att3 = SELayer(512,64)
self.att_s3 = MultiHeadAtt(512,int(512/self.radix),radix=self.radix)
self.base_4 = nn.Sequential(*list(self.base.children())[5:6])
self.BN4 = BN2d(1024)
self.att4 = SELayer(1024,128)
self.att_s4=MultiHeadAtt(1024,int(1024/self.radix),radix=self.radix)
self.base_5 = nn.Sequential(*list(self.base.children())[6:])
self.BN5 = BN2d(2048)
self.att5 = SELayer(2048,256)
self.att_s5=MultiHeadAtt(2048,int(2048/self.radix),radix=self.radix)
self.M = 8
self.attentions = BasicConv2d(2048, self.M, kernel_size=1)
self.bap = BAP(pool='GAP')
self.gap = nn.AdaptiveAvgPool2d(1)
self.num_classes = num_classes
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False) # no shift
self.bottleneck.apply(weights_init_kaiming)
self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.classifier_bap = nn.Linear(self.in_planes*self.M, self.in_planes, bias=False)
self.classifier.apply(weights_init_classifier)
self.classifier_bap.apply(weights_init_classifier)
def forward(self, x):
############
x_1 = self.base_1(x)
x_1 = self.att_s1(x_1)
x_1 = self.BN1(x_1)
y_1 = self.att1(x_1)
x_att1=x_1*y_1.expand_as(x_1)
x_2 = self.base_2(x_att1)
x_2 = self.att_s2(x_2)
x_2 = self.BN2(x_2)
y_2 = self.att2(x_2)
x_att2=x_2*y_2.expand_as(x_2)
x_3 = self.base_3(x_att2)
x_3 = self.att_s3(x_3)
x_3 = self.BN3(x_3)
y_3 = self.att3(x_3)
x_att3=x_3*y_3.expand_as(x_3)
x_4 = self.base_4(x_att3)
x_4 = self.att_s4(x_4)
x_4 = self.BN4(x_4)
y_4 = self.att4(x_4)
x_att4=x_4*y_4.expand_as(x_4)
x_5 = self.base_5(x_att4)
x_5 = self.att_s5(x_5)
x_5 = self.BN5(x_5)
y_5 = self.att5(x_5)
x=x_5*y_5.expand_as(x_5)
############
        # x = self.base(x)  # replace the staged forward above with this line to use the plain base network
attention_maps = self.attentions(x)
global_feat,global_feat_hat = self.bap(x, attention_maps,counterfactual=True)
global_feat = global_feat.view(global_feat.shape[0], -1)
global_feat_hat = global_feat_hat.view(global_feat.shape[0], -1)
global_feat = self.classifier_bap(global_feat)
global_feat_hat = self.classifier_bap(global_feat_hat)
feat_hat = self.bottleneck(global_feat_hat)
feat = self.bottleneck(global_feat) # normalize for angular softmax
cls_score = self.classifier(feat)
cls_score_hat = self.classifier(feat_hat)
if self.training:
if self.using_cal:
return cls_score, cls_score-cls_score_hat, global_feat # global feature for triplet loss
else:
return cls_score, global_feat
else:
return cls_score
| 33.835125
| 129
| 0.606144
|
import torch
from torch import nn
import torch.nn.functional as F
import sys
from .backbones.resnet import ResNet
sys.path.append('.')
EPSILON = 1e-12
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return y
class BAP(nn.Module):
def __init__(self, pool='GAP'):
super(BAP, self).__init__()
assert pool in ['GAP', 'GMP']
if pool == 'GAP':
self.pool = None
else:
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, features, attentions, counterfactual=False):
B, C, H, W = features.size()
_, M, AH, AW = attentions.size()
if AH != H or AW != W:
attentions = F.upsample_bilinear(attentions, size=(H, W))
if self.pool is None:
feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)
else:
feature_matrix = []
for i in range(M):
AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)
feature_matrix.append(AiF)
feature_matrix = torch.cat(feature_matrix, dim=1)
feature_matrix_raw = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)
feature_matrix = F.normalize(feature_matrix_raw, dim=-1)
if counterfactual:
if self.training:
fake_att = torch.zeros_like(attentions).uniform_(0, 2)
else:
fake_att = torch.ones_like(attentions)
counterfactual_feature = (torch.einsum('imjk,injk->imn', (fake_att, features)) / float(H * W)).view(B, -1)
counterfactual_feature = torch.sign(counterfactual_feature) * torch.sqrt(torch.abs(counterfactual_feature) + EPSILON)
counterfactual_feature = F.normalize(counterfactual_feature, dim=-1)
return feature_matrix, counterfactual_feature
else:
return feature_matrix
class MultiHeadAtt(nn.Module):
def __init__(self, in_channels, channels,
radix=4, reduction_factor=4,
rectify=False, norm_layer=nn.BatchNorm2d):
super(MultiHeadAtt, self).__init__()
inter_channels = max(in_channels*radix//reduction_factor, 32)
self.radix = radix
self.channels = channels
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Conv2d(channels, inter_channels, 1, groups=1)
self.bn1 = norm_layer(inter_channels)
self.fc2 = nn.Conv2d(inter_channels, channels*radix, 1, groups=1)
def forward(self, x):
batch, channel = x.shape[:2]
splited = torch.split(x, channel//self.radix, dim=1)
gap = sum(splited)
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap).view((batch, self.radix, self.channels))
atten = F.softmax(atten, dim=1).view(batch, -1, 1, 1)
atten = torch.split(atten, channel//self.radix, dim=1)
out= torch.cat([att*split for (att, split) in zip(atten, splited)],1)
return out.contiguous()
class BN2d(nn.Module):
def __init__(self, planes):
super(BN2d, self).__init__()
self.bottleneck2 = nn.BatchNorm2d(planes)
self.bottleneck2.bias.requires_grad_(False)
self.bottleneck2.apply(weights_init_kaiming)
def forward(self, x):
return self.bottleneck2(x)
class Baseline(nn.Module):
in_planes = 2048
def __init__(self, num_classes, last_stride, model_path, using_cal):
super(Baseline, self).__init__()
self.using_cal = using_cal
self.base = ResNet(last_stride)
self.base.load_param(model_path)
self.radix = 2
self.base_1 = nn.Sequential(*list(self.base.children())[0:3])
self.BN1 = BN2d(64)
self.att1 = SELayer(64,8)
self.att_s1=MultiHeadAtt(64,int(64/self.radix),radix=self.radix)
self.base_2 = nn.Sequential(*list(self.base.children())[3:4])
self.BN2 = BN2d(256)
self.att2 = SELayer(256,32)
self.att_s2=MultiHeadAtt(256,int(256/self.radix),radix=self.radix)
self.base_3 = nn.Sequential(*list(self.base.children())[4:5])
self.BN3 = BN2d(512)
self.att3 = SELayer(512,64)
self.att_s3 = MultiHeadAtt(512,int(512/self.radix),radix=self.radix)
self.base_4 = nn.Sequential(*list(self.base.children())[5:6])
self.BN4 = BN2d(1024)
self.att4 = SELayer(1024,128)
self.att_s4=MultiHeadAtt(1024,int(1024/self.radix),radix=self.radix)
self.base_5 = nn.Sequential(*list(self.base.children())[6:])
self.BN5 = BN2d(2048)
self.att5 = SELayer(2048,256)
self.att_s5=MultiHeadAtt(2048,int(2048/self.radix),radix=self.radix)
self.M = 8
self.attentions = BasicConv2d(2048, self.M, kernel_size=1)
self.bap = BAP(pool='GAP')
self.gap = nn.AdaptiveAvgPool2d(1)
self.num_classes = num_classes
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False)
self.bottleneck.apply(weights_init_kaiming)
self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.classifier_bap = nn.Linear(self.in_planes*self.M, self.in_planes, bias=False)
self.classifier.apply(weights_init_classifier)
self.classifier_bap.apply(weights_init_classifier)
def forward(self, x):
        x_1 = self.base_1(x)
        x_1 = self.att_s1(x_1)
        x_1 = self.BN1(x_1)
y_1 = self.att1(x_1)
x_att1=x_1*y_1.expand_as(x_1)
x_2 = self.base_2(x_att1)
x_2 = self.att_s2(x_2)
x_2 = self.BN2(x_2)
y_2 = self.att2(x_2)
x_att2=x_2*y_2.expand_as(x_2)
x_3 = self.base_3(x_att2)
x_3 = self.att_s3(x_3)
x_3 = self.BN3(x_3)
y_3 = self.att3(x_3)
x_att3=x_3*y_3.expand_as(x_3)
x_4 = self.base_4(x_att3)
x_4 = self.att_s4(x_4)
x_4 = self.BN4(x_4)
y_4 = self.att4(x_4)
x_att4=x_4*y_4.expand_as(x_4)
x_5 = self.base_5(x_att4)
x_5 = self.att_s5(x_5)
x_5 = self.BN5(x_5)
y_5 = self.att5(x_5)
x=x_5*y_5.expand_as(x_5)
        attention_maps = self.attentions(x)
        global_feat,global_feat_hat = self.bap(x, attention_maps,counterfactual=True)
global_feat = global_feat.view(global_feat.shape[0], -1)
global_feat_hat = global_feat_hat.view(global_feat.shape[0], -1)
global_feat = self.classifier_bap(global_feat)
global_feat_hat = self.classifier_bap(global_feat_hat)
feat_hat = self.bottleneck(global_feat_hat)
feat = self.bottleneck(global_feat)
cls_score = self.classifier(feat)
cls_score_hat = self.classifier(feat_hat)
if self.training:
if self.using_cal:
return cls_score, cls_score-cls_score_hat, global_feat
else:
return cls_score, global_feat
else:
return cls_score
| true
| true
|
f7162d7130bd56f5b174a8b6dfb6e67c7c002e00
| 1,034
|
py
|
Python
|
tests/test_runner.py
|
hwmrocker/boerewors
|
2e9b901debb105d9c10e78c8d6f33929aa743daa
|
[
"Apache-2.0"
] | 10
|
2017-10-16T10:59:17.000Z
|
2019-11-28T03:04:16.000Z
|
tests/test_runner.py
|
hwmrocker/boerewors
|
2e9b901debb105d9c10e78c8d6f33929aa743daa
|
[
"Apache-2.0"
] | 1
|
2017-10-27T02:32:59.000Z
|
2017-11-02T03:37:49.000Z
|
tests/test_runner.py
|
hwmrocker/boerewors
|
2e9b901debb105d9c10e78c8d6f33929aa743daa
|
[
"Apache-2.0"
] | 5
|
2017-10-16T11:08:20.000Z
|
2019-11-07T09:02:41.000Z
|
# Copyright 2017 trivago N.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from context import runners
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
class MyRunner(runners.Runner):
def get_stages(self):
return [1, 2, 3]
def test_stages():
runner = MyRunner()
assert list(runner.get_stages()) == [1, 2, 3]
assert list(runner.stages) == [1, 2, 3]
def test_setup_parser():
runner = MyRunner()
parser = Mock()
runner.setup_parser(parser)
assert True
| 27.210526
| 74
| 0.713733
|
from context import runners
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
class MyRunner(runners.Runner):
def get_stages(self):
return [1, 2, 3]
def test_stages():
runner = MyRunner()
assert list(runner.get_stages()) == [1, 2, 3]
assert list(runner.stages) == [1, 2, 3]
def test_setup_parser():
runner = MyRunner()
parser = Mock()
runner.setup_parser(parser)
assert True
| true
| true
|
f7162d7be06b9aa23574e675f52177aea117cc8e
| 401
|
py
|
Python
|
reddit_backend/reddit/migrations/0003_alter_userprofile_name.py
|
cursedclock/reddit-backend
|
fb5989c758f5459e510f6599c9b9798424c17ba9
|
[
"MIT"
] | 1
|
2022-01-30T17:27:44.000Z
|
2022-01-30T17:27:44.000Z
|
reddit_backend/reddit/migrations/0003_alter_userprofile_name.py
|
cursedclock/reddit-backend
|
fb5989c758f5459e510f6599c9b9798424c17ba9
|
[
"MIT"
] | null | null | null |
reddit_backend/reddit/migrations/0003_alter_userprofile_name.py
|
cursedclock/reddit-backend
|
fb5989c758f5459e510f6599c9b9798424c17ba9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2022-01-25 18:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit', '0002_auto_20220125_1727'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='name',
field=models.CharField(max_length=50, unique=True),
),
]
| 21.105263
| 63
| 0.605985
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit', '0002_auto_20220125_1727'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='name',
field=models.CharField(max_length=50, unique=True),
),
]
| true
| true
|
f7162de5e6c0ff9b0b6fde9307f43c043b924a3c
| 570
|
py
|
Python
|
void/serve.py
|
claymation/void
|
38055975a624dd9050f7604a73068c58f1185d01
|
[
"MIT"
] | null | null | null |
void/serve.py
|
claymation/void
|
38055975a624dd9050f7604a73068c58f1185d01
|
[
"MIT"
] | null | null | null |
void/serve.py
|
claymation/void
|
38055975a624dd9050f7604a73068c58f1185d01
|
[
"MIT"
] | null | null | null |
import http.server
import socketserver
class TCPServer(socketserver.TCPServer):
allow_reuse_address = True
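# Serve static files from `root` over HTTP on the given port until interrupted.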
def serve(root, port):
class Handler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=root, **kwargs)
print("Listening for requests on http://localhost:{}/".format(port))
with TCPServer(("", port), Handler) as httpd:
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
httpd.server_close()
| 25.909091
| 72
| 0.649123
|
import http.server
import socketserver
class TCPServer(socketserver.TCPServer):
allow_reuse_address = True
def serve(root, port):
class Handler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=root, **kwargs)
print("Listening for requests on http://localhost:{}/".format(port))
with TCPServer(("", port), Handler) as httpd:
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
httpd.server_close()
| true
| true
|
f7162e0882fce0df624513ea516d61c4ad04a16b
| 1,510
|
py
|
Python
|
conntestd/speed_test.py
|
robputt796/ConTestD
|
e9e0f6377520e699afb4e038f79b2bc3b5bbbf64
|
[
"Apache-2.0"
] | 5
|
2018-03-18T21:16:24.000Z
|
2019-05-23T16:30:18.000Z
|
conntestd/speed_test.py
|
robputt796/ConTestD
|
e9e0f6377520e699afb4e038f79b2bc3b5bbbf64
|
[
"Apache-2.0"
] | null | null | null |
conntestd/speed_test.py
|
robputt796/ConTestD
|
e9e0f6377520e699afb4e038f79b2bc3b5bbbf64
|
[
"Apache-2.0"
] | 1
|
2021-12-01T16:30:07.000Z
|
2021-12-01T16:30:07.000Z
|
import datetime
import logging
import sys
from speedtest import Speedtest
from conntestd.db import get_db_session
from conntestd.db import SpeedTestResult
from conntestd.config import DB_CONN
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO,
stream=sys.stdout)
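# Run one speedtest.net measurement and persist it: a result row is inserted
# with status 'running', then updated to 'complete' with the measured metrics,
# or marked 'error' if the test raises.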
def run_speedtest():
logging.info("Starting periodic connection test job.")
db = get_db_session(DB_CONN)
db_result = SpeedTestResult(dt=datetime.datetime.now(),
status='running')
db.add(db_result)
db.commit()
try:
s = Speedtest()
s.get_best_server()
s.download()
s.upload()
result = s.results.dict()
download = result['download']
upload = result['upload']
ping = result['ping']
country = result['server']['country']
town = result['server']['name']
sponsor = result['server']['sponsor']
db_result.status = 'complete'
db_result.download = download
db_result.upload = upload
db_result.ping = ping
db_result.country = country
db_result.town = town
db_result.sponsor = sponsor
db.commit()
logging.info("Periodic connection test job completed.")
except Exception as err:
        logging.error("Error occurred during periodic connection test job: %s" % str(err))
db_result.status = 'error'
db.commit()
finally:
db.close()
| 28.490566
| 89
| 0.613245
|
import datetime
import logging
import sys
from speedtest import Speedtest
from conntestd.db import get_db_session
from conntestd.db import SpeedTestResult
from conntestd.config import DB_CONN
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO,
stream=sys.stdout)
def run_speedtest():
logging.info("Starting periodic connection test job.")
db = get_db_session(DB_CONN)
db_result = SpeedTestResult(dt=datetime.datetime.now(),
status='running')
db.add(db_result)
db.commit()
try:
s = Speedtest()
s.get_best_server()
s.download()
s.upload()
result = s.results.dict()
download = result['download']
upload = result['upload']
ping = result['ping']
country = result['server']['country']
town = result['server']['name']
sponsor = result['server']['sponsor']
db_result.status = 'complete'
db_result.download = download
db_result.upload = upload
db_result.ping = ping
db_result.country = country
db_result.town = town
db_result.sponsor = sponsor
db.commit()
logging.info("Periodic connection test job completed.")
except Exception as err:
        logging.error("Error occurred during periodic connection test job: %s" % str(err))
db_result.status = 'error'
db.commit()
finally:
db.close()
| true
| true
|
f7162ec5ccfdab45c868e7d4895b0d5b40c0f2d9
| 33,222
|
py
|
Python
|
openstack_controller/tests/test_simple_api.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | null | null | null |
openstack_controller/tests/test_simple_api.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | null | null | null |
openstack_controller/tests/test_simple_api.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import mock
import logging
import copy
import pytest
import simplejson as json
import requests
from datadog_checks.openstack_controller.api import ApiFactory, SimpleApi, Authenticator, Credential
from datadog_checks.openstack_controller.exceptions import (
IncompleteIdentity,
MissingNovaEndpoint,
MissingNeutronEndpoint,
AuthenticationNeeded,
InstancePowerOffFailure,
RetryLimitExceeded,
)
from . import common
log = logging.getLogger('test_openstack_controller')
def test_get_endpoint():
authenticator = Authenticator()
assert authenticator._get_nova_endpoint(
common.EXAMPLE_AUTH_RESPONSE) == u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876'
with pytest.raises(MissingNovaEndpoint):
authenticator._get_nova_endpoint({})
assert authenticator._get_neutron_endpoint(common.EXAMPLE_AUTH_RESPONSE) == u'http://10.0.2.15:9292'
with pytest.raises(MissingNeutronEndpoint):
authenticator._get_neutron_endpoint({})
assert authenticator._get_valid_endpoint({}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": []}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": []}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{}]}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'type': u'compute',
u'name': u'nova'}]}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [],
u'type': u'compute',
u'name': u'nova'}]}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{u'url': u'dummy_url', u'interface': u'dummy'}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{u'url': u'dummy_url'}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{u'interface': u'public'}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{u'url': u'dummy_url', u'interface': u'internal'}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') == 'dummy_url'
BAD_USERS = [
{'user': {}},
{'user': {'name': ''}},
{'user': {'name': 'test_name', 'password': ''}},
{'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {}}},
{'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': ''}}},
]
GOOD_USERS = [
{'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}}},
]
def _test_bad_user(user):
authenticator = Authenticator()
with pytest.raises(IncompleteIdentity):
authenticator._get_user_identity(user['user'])
def test_get_user_identity():
authenticator = Authenticator()
for user in BAD_USERS:
_test_bad_user(user)
for user in GOOD_USERS:
parsed_user = authenticator._get_user_identity(user['user'])
assert parsed_user == {'methods': ['password'], 'password': user}
class MockHTTPResponse(object):
def __init__(self, response_dict, headers):
self.response_dict = response_dict
self.headers = headers
def json(self):
return self.response_dict
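# Project fixtures for the from_config tests below: entries range from empty
# to fully populated (domain_id, id, name).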
PROJECTS_RESPONSE = [
{},
{
"domain_id": "0000",
},
{
"domain_id": "1111",
"id": "0000",
},
{
"domain_id": "2222",
"id": "1111",
"name": "name 1"
},
{
"domain_id": "3333",
"id": "2222",
"name": "name 2"
},
]
PROJECT_RESPONSE = [
{
"domain_id": "1111",
"id": "3333",
"name": "name 1"
}
]
def test_from_config():
mock_http_response = copy.deepcopy(common.EXAMPLE_AUTH_RESPONSE)
mock_response = MockHTTPResponse(response_dict=mock_http_response, headers={'X-Subject-Token': 'fake_token'})
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._post_auth_token',
return_value=mock_response):
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._get_auth_projects',
return_value=PROJECTS_RESPONSE):
cred = Authenticator.from_config(log, 'http://10.0.2.15:5000', GOOD_USERS[0]['user'])
assert isinstance(cred, Credential)
assert cred.auth_token == "fake_token"
assert cred.name == "name 2"
assert cred.domain_id == "3333"
assert cred.tenant_id == "2222"
assert cred.nova_endpoint == "http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876"
assert cred.neutron_endpoint == "http://10.0.2.15:9292"
def test_from_config_with_missing_name():
mock_http_response = copy.deepcopy(common.EXAMPLE_AUTH_RESPONSE)
mock_response = MockHTTPResponse(response_dict=mock_http_response, headers={'X-Subject-Token': 'fake_token'})
project_response_without_name = copy.deepcopy(PROJECT_RESPONSE)
del project_response_without_name[0]["name"]
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._post_auth_token',
return_value=mock_response):
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._get_auth_projects',
return_value=project_response_without_name):
cred = Authenticator.from_config(log, 'http://10.0.2.15:5000', GOOD_USERS[0]['user'])
assert cred is None
def test_from_config_with_missing_id():
mock_http_response = copy.deepcopy(common.EXAMPLE_AUTH_RESPONSE)
mock_response = MockHTTPResponse(response_dict=mock_http_response, headers={'X-Subject-Token': 'fake_token'})
project_response_without_name = copy.deepcopy(PROJECT_RESPONSE)
del project_response_without_name[0]["id"]
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._post_auth_token',
return_value=mock_response):
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._get_auth_projects',
return_value=project_response_without_name):
cred = Authenticator.from_config(log, 'http://10.0.2.15:5000', GOOD_USERS[0]['user'])
assert cred is None
def get_os_hypervisor_uptime_pre_v2_52_response(url, header, params=None, timeout=None):
return json.loads("""{
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": 1,
"state": "up",
"status": "enabled",
"uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
}
}""")
def get_os_hypervisor_uptime_post_v2_53_response(url, header, params=None, timeout=None):
return json.loads("""{
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d",
"state": "up",
"status": "enabled",
"uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
}
}""")
def test_get_os_hypervisor_uptime(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_hypervisor_uptime_pre_v2_52_response):
api = SimpleApi(None, None)
assert api.get_os_hypervisor_uptime(1) == \
" 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_hypervisor_uptime_post_v2_53_response):
api = SimpleApi(None, None)
assert api.get_os_hypervisor_uptime(1) == \
" 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
def get_os_aggregates_response(url, headers, params=None, timeout=None):
return json.loads("""{
"aggregates": [
{
"availability_zone": "london",
"created_at": "2016-12-27T23:47:32.911515",
"deleted": false,
"deleted_at": null,
"hosts": [
"compute"
],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null,
"uuid": "6ba28ba7-f29b-45cc-a30b-6e3a40c2fb14"
}
]
}""")
def test_get_os_aggregates(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_aggregates_response):
api = SimpleApi(None, None)
aggregates = api.get_os_aggregates()
for i in range(len(aggregates)):
for key, value in common.EXAMPLE_GET_OS_AGGREGATES_RETURN_VALUE[i].items():
assert value == aggregates[i][key]
def get_os_hypervisors_detail_post_v2_33_response(url, headers, params=None, timeout=None):
return json.loads("""{
"hypervisors": [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": [
"pge",
"clflush"
],
"topology": {
"cores": 1,
"threads": 1,
"sockets": 4
}
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host1",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 2,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "host1",
"id": 7,
"disabled_reason": null
},
"vcpus": 2,
"vcpus_used": 0
}
],
"hypervisors_links": [
{
"href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=2",
"rel": "next"
}
]
}""") # noqa: E501
def get_os_hypervisors_detail_post_v2_53_response(url, headers, params=None, timeout=None):
return json.loads("""{
"hypervisors": [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": [
"pge",
"clflush"
],
"topology": {
"cores": 1,
"threads": 1,
"sockets": 4
}
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host2",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": "1bb62a04-c576-402c-8147-9e89757a09e3",
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "host1",
"id": "62f62f6e-a713-4cbe-87d3-3ecf8a1e0f8d",
"disabled_reason": null
},
"vcpus": 2,
"vcpus_used": 0
}
],
"hypervisors_links": [
{
"href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3",
"rel": "next"
}
]
}""") # noqa: E501
def test_get_os_hypervisors_detail(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_hypervisors_detail_post_v2_33_response):
api = SimpleApi(None, None)
assert api.get_os_hypervisors_detail() == common.EXAMPLE_GET_OS_HYPERVISORS_RETURN_VALUE
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_hypervisors_detail_post_v2_53_response):
api = SimpleApi(None, None)
assert api.get_os_hypervisors_detail() == [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": [
"pge",
"clflush"
],
"topology": {
"cores": 1,
"threads": 1,
"sockets": 4
}
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host2",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": "1bb62a04-c576-402c-8147-9e89757a09e3",
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "host1",
"id": "62f62f6e-a713-4cbe-87d3-3ecf8a1e0f8d",
"disabled_reason": None
},
"vcpus": 2,
"vcpus_used": 0
}]
def get_servers_detail_post_v2_63_response(url, headers, params=None, timeout=None):
return json.loads("""{
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-10-10T15:49:09.516729",
"OS-SRV-USG:terminated_at": null,
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"addresses": {
"private": [
{
"OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
"OS-EXT-IPS:type": "fixed",
"addr": "192.168.0.3",
"version": 4
}
]
},
"config_drive": "",
"created": "2017-10-10T15:49:08Z",
"description": null,
"flavor": {
"disk": 1,
"ephemeral": 0,
"extra_specs": {
"hw:cpu_policy": "dedicated",
"hw:mem_page_size": "2048"
},
"original_name": "m1.tiny.specs",
"ram": 512,
"swap": 0,
"vcpus": 1
},
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
},
"key_name": null,
"links": [
{
"href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"rel": "self"
},
{
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"rel": "bookmark"
}
],
"locked": false,
"metadata": {
"My Server Name": "Apache1"
},
"name": "new-server-test",
"os-extended-volumes:volumes_attached": [],
"progress": 0,
"security_groups": [
{
"name": "default"
}
],
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"trusted_image_certificates": [
"0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
"674736e3-f25c-405c-8362-bbf991e0ce0a"
],
"updated": "2017-10-10T15:49:09Z",
"user_id": "fake"
}
]
}""") # noqa: E501
def test_get_servers_detail(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_servers_detail_post_v2_63_response):
api = SimpleApi(None, None)
assert api.get_servers_detail(None) == [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-10-10T15:49:09.516729",
"OS-SRV-USG:terminated_at": None,
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"addresses": {
"private": [
{
"OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
"OS-EXT-IPS:type": "fixed",
"addr": "192.168.0.3",
"version": 4
}
]
},
"config_drive": "",
"created": "2017-10-10T15:49:08Z",
"description": None,
"flavor": {
"disk": 1,
"ephemeral": 0,
"extra_specs": {
"hw:cpu_policy": "dedicated",
"hw:mem_page_size": "2048"
},
"original_name": "m1.tiny.specs",
"ram": 512,
"swap": 0,
"vcpus": 1
},
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", # noqa: E501
"rel": "bookmark"
}
]
},
"key_name": None,
"links": [
{
"href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d", # noqa: E501
"rel": "self"
},
{
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d", # noqa: E501
"rel": "bookmark"
}
],
"locked": False,
"metadata": {
"My Server Name": "Apache1"
},
"name": "new-server-test",
"os-extended-volumes:volumes_attached": [],
"progress": 0,
"security_groups": [
{
"name": "default"
}
],
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"trusted_image_certificates": [
"0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
"674736e3-f25c-405c-8362-bbf991e0ce0a"
],
"updated": "2017-10-10T15:49:09Z",
"user_id": "fake"
}
]
def test__get_paginated_list():
log = mock.MagicMock()
instance = copy.deepcopy(common.MOCK_CONFIG["instances"][0])
instance["paginated_limit"] = 4
with mock.patch("datadog_checks.openstack_controller.api.SimpleApi.connect"):
api = ApiFactory.create(log, None, instance)
with mock.patch(
"datadog_checks.openstack_controller.api.SimpleApi._make_request",
side_effect=[
# First call: 3 exceptions -> failure
requests.exceptions.HTTPError,
requests.exceptions.HTTPError,
requests.exceptions.HTTPError,
]
):
# First call
with pytest.raises(RetryLimitExceeded):
api._get_paginated_list("url", "obj", {})
assert log.debug.call_count == 3
log.reset_mock()
with mock.patch(
"datadog_checks.openstack_controller.api.SimpleApi._make_request",
side_effect=[
# Second call: all good, 1 page with 4 results, one with 1
{
"obj": [{"id": 0}, {"id": 1}, {"id": 2}, {"id": 3}],
"obj_links": "test"
},
{
"obj": [{"id": 4}]
},
]
):
# Second call
assert api.paginated_limit == 4
result = api._get_paginated_list("url", "obj", {})
assert log.debug.call_count == 0
assert result == [{"id": 0}, {"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}]
with mock.patch(
"datadog_checks.openstack_controller.api.SimpleApi._make_request",
side_effect=[
# Third call: 1 exception, limit is divided once by 2
requests.exceptions.HTTPError,
{
"obj": [{"id": 0}, {"id": 1}],
"obj_links": "test"
},
{
"obj": [{"id": 2}, {"id": 3}],
"obj_links": "test"
},
{
"obj": [{"id": 4}]
}
]
):
# Third call
result = api._get_paginated_list("url", "obj", {})
assert log.debug.call_count == 1
assert result == [{"id": 0}, {"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}]
log.reset_mock()
with mock.patch(
"datadog_checks.openstack_controller.api.SimpleApi._make_request",
side_effect=[
# Fourth call: 1 AuthenticationNeeded exception -> no retries
AuthenticationNeeded,
# Fifth call: any other exception -> no retries
Exception,
]
):
with pytest.raises(AuthenticationNeeded):
api._get_paginated_list("url", "obj", {})
with pytest.raises(Exception):
api._get_paginated_list("url", "obj", {})
def test__make_request_failure():
log = mock.MagicMock()
instance = copy.deepcopy(common.MOCK_CONFIG["instances"][0])
instance["paginated_limit"] = 4
with mock.patch("datadog_checks.openstack_controller.api.SimpleApi.connect"):
api = ApiFactory.create(log, None, instance)
response_mock = mock.MagicMock()
with mock.patch(
"datadog_checks.openstack_controller.api.requests.get",
return_value=response_mock
):
response_mock.raise_for_status.side_effect = requests.exceptions.HTTPError
response_mock.status_code = 401
with pytest.raises(AuthenticationNeeded):
api._make_request("", {})
response_mock.status_code = 409
with pytest.raises(InstancePowerOffFailure):
api._make_request("", {})
response_mock.status_code = 500
with pytest.raises(requests.exceptions.HTTPError):
api._make_request("", {})
response_mock.raise_for_status.side_effect = Exception
with pytest.raises(Exception):
api._make_request("", {})
def get_server_diagnostics_post_v2_48_response(url, headers, params=None, timeout=None):
return json.loads("""{
"config_drive": true,
"cpu_details": [
{
"id": 0,
"time": 17300000000,
"utilisation": 15
}
],
"disk_details": [
{
"errors_count": 1,
"read_bytes": 262144,
"read_requests": 112,
"write_bytes": 5778432,
"write_requests": 488
}
],
"driver": "libvirt",
"hypervisor": "kvm",
"hypervisor_os": "ubuntu",
"memory_details": {
"maximum": 524288,
"used": 0
},
"nic_details": [
{
"mac_address": "01:23:45:67:89:ab",
"rx_drop": 200,
"rx_errors": 100,
"rx_octets": 2070139,
"rx_packets": 26701,
"rx_rate": 300,
"tx_drop": 500,
"tx_errors": 400,
"tx_octets": 140208,
"tx_packets": 662,
"tx_rate": 600
}
],
"num_cpus": 1,
"num_disks": 1,
"num_nics": 1,
"state": "running",
"uptime": 46664
}""")
def get_server_diagnostics_post_v2_1_response(url, headers, params=None, timeout=None):
return json.loads("""{
"cpu0_time": 17300000000,
"memory": 524288,
"vda_errors": -1,
"vda_read": 262144,
"vda_read_req": 112,
"vda_write": 5778432,
"vda_write_req": 488,
"vnet1_rx": 2070139,
"vnet1_rx_drop": 0,
"vnet1_rx_errors": 0,
"vnet1_rx_packets": 26701,
"vnet1_tx": 140208,
"vnet1_tx_drop": 0,
"vnet1_tx_errors": 0,
"vnet1_tx_packets": 662
}""")
def test_get_server_diagnostics(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_server_diagnostics_post_v2_48_response):
api = SimpleApi(None, None)
assert api.get_server_diagnostics(None) == {
"config_drive": True,
"cpu_details": [
{
"id": 0,
"time": 17300000000,
"utilisation": 15
}
],
"disk_details": [
{
"errors_count": 1,
"read_bytes": 262144,
"read_requests": 112,
"write_bytes": 5778432,
"write_requests": 488
}
],
"driver": "libvirt",
"hypervisor": "kvm",
"hypervisor_os": "ubuntu",
"memory_details": {
"maximum": 524288,
"used": 0
},
"nic_details": [
{
"mac_address": "01:23:45:67:89:ab",
"rx_drop": 200,
"rx_errors": 100,
"rx_octets": 2070139,
"rx_packets": 26701,
"rx_rate": 300,
"tx_drop": 500,
"tx_errors": 400,
"tx_octets": 140208,
"tx_packets": 662,
"tx_rate": 600
}
],
"num_cpus": 1,
"num_disks": 1,
"num_nics": 1,
"state": "running",
"uptime": 46664
}
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_server_diagnostics_post_v2_1_response):
api = SimpleApi(None, None)
assert api.get_server_diagnostics(None) == {
"cpu0_time": 17300000000,
"memory": 524288,
"vda_errors": -1,
"vda_read": 262144,
"vda_read_req": 112,
"vda_write": 5778432,
"vda_write_req": 488,
"vnet1_rx": 2070139,
"vnet1_rx_drop": 0,
"vnet1_rx_errors": 0,
"vnet1_rx_packets": 26701,
"vnet1_tx": 140208,
"vnet1_tx_drop": 0,
"vnet1_tx_errors": 0,
"vnet1_tx_packets": 662
}
def get_project_limits_response(url, headers, params=None, timeout=None):
return json.loads("""{
"limits": {
"absolute": {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0
},
"rate": []
}
}""")
def test_get_project_limits(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_project_limits_response):
api = SimpleApi(None, None)
assert api.get_project_limits(None) == common.EXAMPLE_GET_PROJECT_LIMITS_RETURN_VALUE
| 36.913333
| 164
| 0.498796
|
import mock
import logging
import copy
import pytest
import simplejson as json
import requests
from datadog_checks.openstack_controller.api import ApiFactory, SimpleApi, Authenticator, Credential
from datadog_checks.openstack_controller.exceptions import (
IncompleteIdentity,
MissingNovaEndpoint,
MissingNeutronEndpoint,
AuthenticationNeeded,
InstancePowerOffFailure,
RetryLimitExceeded,
)
from . import common
log = logging.getLogger('test_openstack_controller')
def test_get_endpoint():
authenticator = Authenticator()
assert authenticator._get_nova_endpoint(
common.EXAMPLE_AUTH_RESPONSE) == u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876'
with pytest.raises(MissingNovaEndpoint):
authenticator._get_nova_endpoint({})
assert authenticator._get_neutron_endpoint(common.EXAMPLE_AUTH_RESPONSE) == u'http://10.0.2.15:9292'
with pytest.raises(MissingNeutronEndpoint):
authenticator._get_neutron_endpoint({})
assert authenticator._get_valid_endpoint({}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": []}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": []}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{}]}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'type': u'compute',
u'name': u'nova'}]}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [],
u'type': u'compute',
u'name': u'nova'}]}}, None, None) is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{u'url': u'dummy_url', u'interface': u'dummy'}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{u'url': u'dummy_url'}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{u'interface': u'public'}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') is None
assert authenticator._get_valid_endpoint({'token': {"catalog": [{
u'endpoints': [{u'url': u'dummy_url', u'interface': u'internal'}],
u'type': u'compute',
u'name': u'nova'}]}}, 'nova', 'compute') == 'dummy_url'
BAD_USERS = [
{'user': {}},
{'user': {'name': ''}},
{'user': {'name': 'test_name', 'password': ''}},
{'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {}}},
{'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': ''}}},
]
GOOD_USERS = [
{'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}}},
]
def _test_bad_user(user):
authenticator = Authenticator()
with pytest.raises(IncompleteIdentity):
authenticator._get_user_identity(user['user'])
def test_get_user_identity():
authenticator = Authenticator()
for user in BAD_USERS:
_test_bad_user(user)
for user in GOOD_USERS:
parsed_user = authenticator._get_user_identity(user['user'])
assert parsed_user == {'methods': ['password'], 'password': user}
class MockHTTPResponse(object):
def __init__(self, response_dict, headers):
self.response_dict = response_dict
self.headers = headers
def json(self):
return self.response_dict
PROJECTS_RESPONSE = [
{},
{
"domain_id": "0000",
},
{
"domain_id": "1111",
"id": "0000",
},
{
"domain_id": "2222",
"id": "1111",
"name": "name 1"
},
{
"domain_id": "3333",
"id": "2222",
"name": "name 2"
},
]
PROJECT_RESPONSE = [
{
"domain_id": "1111",
"id": "3333",
"name": "name 1"
}
]
def test_from_config():
mock_http_response = copy.deepcopy(common.EXAMPLE_AUTH_RESPONSE)
mock_response = MockHTTPResponse(response_dict=mock_http_response, headers={'X-Subject-Token': 'fake_token'})
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._post_auth_token',
return_value=mock_response):
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._get_auth_projects',
return_value=PROJECTS_RESPONSE):
cred = Authenticator.from_config(log, 'http://10.0.2.15:5000', GOOD_USERS[0]['user'])
assert isinstance(cred, Credential)
assert cred.auth_token == "fake_token"
assert cred.name == "name 2"
assert cred.domain_id == "3333"
assert cred.tenant_id == "2222"
assert cred.nova_endpoint == "http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876"
assert cred.neutron_endpoint == "http://10.0.2.15:9292"
def test_from_config_with_missing_name():
mock_http_response = copy.deepcopy(common.EXAMPLE_AUTH_RESPONSE)
mock_response = MockHTTPResponse(response_dict=mock_http_response, headers={'X-Subject-Token': 'fake_token'})
project_response_without_name = copy.deepcopy(PROJECT_RESPONSE)
del project_response_without_name[0]["name"]
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._post_auth_token',
return_value=mock_response):
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._get_auth_projects',
return_value=project_response_without_name):
cred = Authenticator.from_config(log, 'http://10.0.2.15:5000', GOOD_USERS[0]['user'])
assert cred is None
def test_from_config_with_missing_id():
mock_http_response = copy.deepcopy(common.EXAMPLE_AUTH_RESPONSE)
mock_response = MockHTTPResponse(response_dict=mock_http_response, headers={'X-Subject-Token': 'fake_token'})
project_response_without_name = copy.deepcopy(PROJECT_RESPONSE)
del project_response_without_name[0]["id"]
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._post_auth_token',
return_value=mock_response):
with mock.patch('datadog_checks.openstack_controller.api.Authenticator._get_auth_projects',
return_value=project_response_without_name):
cred = Authenticator.from_config(log, 'http://10.0.2.15:5000', GOOD_USERS[0]['user'])
assert cred is None
def get_os_hypervisor_uptime_pre_v2_52_response(url, header, params=None, timeout=None):
return json.loads("""{
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": 1,
"state": "up",
"status": "enabled",
"uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
}
}""")
def get_os_hypervisor_uptime_post_v2_53_response(url, header, params=None, timeout=None):
return json.loads("""{
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d",
"state": "up",
"status": "enabled",
"uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
}
}""")
def test_get_os_hypervisor_uptime(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_hypervisor_uptime_pre_v2_52_response):
api = SimpleApi(None, None)
assert api.get_os_hypervisor_uptime(1) == \
" 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_hypervisor_uptime_post_v2_53_response):
api = SimpleApi(None, None)
assert api.get_os_hypervisor_uptime(1) == \
" 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
def get_os_aggregates_response(url, headers, params=None, timeout=None):
return json.loads("""{
"aggregates": [
{
"availability_zone": "london",
"created_at": "2016-12-27T23:47:32.911515",
"deleted": false,
"deleted_at": null,
"hosts": [
"compute"
],
"id": 1,
"metadata": {
"availability_zone": "london"
},
"name": "name",
"updated_at": null,
"uuid": "6ba28ba7-f29b-45cc-a30b-6e3a40c2fb14"
}
]
}""")
def test_get_os_aggregates(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_aggregates_response):
api = SimpleApi(None, None)
aggregates = api.get_os_aggregates()
for i in range(len(aggregates)):
for key, value in common.EXAMPLE_GET_OS_AGGREGATES_RETURN_VALUE[i].items():
assert value == aggregates[i][key]
def get_os_hypervisors_detail_post_v2_33_response(url, headers, params=None, timeout=None):
return json.loads("""{
"hypervisors": [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": [
"pge",
"clflush"
],
"topology": {
"cores": 1,
"threads": 1,
"sockets": 4
}
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host1",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 2,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "host1",
"id": 7,
"disabled_reason": null
},
"vcpus": 2,
"vcpus_used": 0
}
],
"hypervisors_links": [
{
"href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=2",
"rel": "next"
}
]
}""")
def get_os_hypervisors_detail_post_v2_53_response(url, headers, params=None, timeout=None):
return json.loads("""{
"hypervisors": [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": [
"pge",
"clflush"
],
"topology": {
"cores": 1,
"threads": 1,
"sockets": 4
}
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host2",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": "1bb62a04-c576-402c-8147-9e89757a09e3",
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "host1",
"id": "62f62f6e-a713-4cbe-87d3-3ecf8a1e0f8d",
"disabled_reason": null
},
"vcpus": 2,
"vcpus_used": 0
}
],
"hypervisors_links": [
{
"href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3",
"rel": "next"
}
]
}""")
def test_get_os_hypervisors_detail(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_hypervisors_detail_post_v2_33_response):
api = SimpleApi(None, None)
assert api.get_os_hypervisors_detail() == common.EXAMPLE_GET_OS_HYPERVISORS_RETURN_VALUE
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_os_hypervisors_detail_post_v2_53_response):
api = SimpleApi(None, None)
assert api.get_os_hypervisors_detail() == [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": [
"pge",
"clflush"
],
"topology": {
"cores": 1,
"threads": 1,
"sockets": 4
}
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host2",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": "1bb62a04-c576-402c-8147-9e89757a09e3",
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {
"host": "host1",
"id": "62f62f6e-a713-4cbe-87d3-3ecf8a1e0f8d",
"disabled_reason": None
},
"vcpus": 2,
"vcpus_used": 0
}]
def get_servers_detail_post_v2_63_response(url, headers, params=None, timeout=None):
return json.loads("""{
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-10-10T15:49:09.516729",
"OS-SRV-USG:terminated_at": null,
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"addresses": {
"private": [
{
"OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
"OS-EXT-IPS:type": "fixed",
"addr": "192.168.0.3",
"version": 4
}
]
},
"config_drive": "",
"created": "2017-10-10T15:49:08Z",
"description": null,
"flavor": {
"disk": 1,
"ephemeral": 0,
"extra_specs": {
"hw:cpu_policy": "dedicated",
"hw:mem_page_size": "2048"
},
"original_name": "m1.tiny.specs",
"ram": 512,
"swap": 0,
"vcpus": 1
},
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
},
"key_name": null,
"links": [
{
"href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"rel": "self"
},
{
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"rel": "bookmark"
}
],
"locked": false,
"metadata": {
"My Server Name": "Apache1"
},
"name": "new-server-test",
"os-extended-volumes:volumes_attached": [],
"progress": 0,
"security_groups": [
{
"name": "default"
}
],
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"trusted_image_certificates": [
"0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
"674736e3-f25c-405c-8362-bbf991e0ce0a"
],
"updated": "2017-10-10T15:49:09Z",
"user_id": "fake"
}
]
}""")
def test_get_servers_detail(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_servers_detail_post_v2_63_response):
api = SimpleApi(None, None)
assert api.get_servers_detail(None) == [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-10-10T15:49:09.516729",
"OS-SRV-USG:terminated_at": None,
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"addresses": {
"private": [
{
"OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
"OS-EXT-IPS:type": "fixed",
"addr": "192.168.0.3",
"version": 4
}
]
},
"config_drive": "",
"created": "2017-10-10T15:49:08Z",
"description": None,
"flavor": {
"disk": 1,
"ephemeral": 0,
"extra_specs": {
"hw:cpu_policy": "dedicated",
"hw:mem_page_size": "2048"
},
"original_name": "m1.tiny.specs",
"ram": 512,
"swap": 0,
"vcpus": 1
},
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
},
"key_name": None,
"links": [
{
"href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"rel": "self"
},
{
"href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
"rel": "bookmark"
}
],
"locked": False,
"metadata": {
"My Server Name": "Apache1"
},
"name": "new-server-test",
"os-extended-volumes:volumes_attached": [],
"progress": 0,
"security_groups": [
{
"name": "default"
}
],
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"trusted_image_certificates": [
"0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
"674736e3-f25c-405c-8362-bbf991e0ce0a"
],
"updated": "2017-10-10T15:49:09Z",
"user_id": "fake"
}
]
def test__get_paginated_list():
log = mock.MagicMock()
instance = copy.deepcopy(common.MOCK_CONFIG["instances"][0])
instance["paginated_limit"] = 4
with mock.patch("datadog_checks.openstack_controller.api.SimpleApi.connect"):
api = ApiFactory.create(log, None, instance)
with mock.patch(
"datadog_checks.openstack_controller.api.SimpleApi._make_request",
side_effect=[
requests.exceptions.HTTPError,
requests.exceptions.HTTPError,
requests.exceptions.HTTPError,
]
):
with pytest.raises(RetryLimitExceeded):
api._get_paginated_list("url", "obj", {})
assert log.debug.call_count == 3
log.reset_mock()
with mock.patch(
"datadog_checks.openstack_controller.api.SimpleApi._make_request",
side_effect=[
{
"obj": [{"id": 0}, {"id": 1}, {"id": 2}, {"id": 3}],
"obj_links": "test"
},
{
"obj": [{"id": 4}]
},
]
):
assert api.paginated_limit == 4
result = api._get_paginated_list("url", "obj", {})
assert log.debug.call_count == 0
assert result == [{"id": 0}, {"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}]
with mock.patch(
"datadog_checks.openstack_controller.api.SimpleApi._make_request",
side_effect=[
requests.exceptions.HTTPError,
{
"obj": [{"id": 0}, {"id": 1}],
"obj_links": "test"
},
{
"obj": [{"id": 2}, {"id": 3}],
"obj_links": "test"
},
{
"obj": [{"id": 4}]
}
]
):
result = api._get_paginated_list("url", "obj", {})
assert log.debug.call_count == 1
assert result == [{"id": 0}, {"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}]
log.reset_mock()
with mock.patch(
"datadog_checks.openstack_controller.api.SimpleApi._make_request",
side_effect=[
AuthenticationNeeded,
Exception,
]
):
with pytest.raises(AuthenticationNeeded):
api._get_paginated_list("url", "obj", {})
with pytest.raises(Exception):
api._get_paginated_list("url", "obj", {})
def test__make_request_failure():
log = mock.MagicMock()
instance = copy.deepcopy(common.MOCK_CONFIG["instances"][0])
instance["paginated_limit"] = 4
with mock.patch("datadog_checks.openstack_controller.api.SimpleApi.connect"):
api = ApiFactory.create(log, None, instance)
response_mock = mock.MagicMock()
with mock.patch(
"datadog_checks.openstack_controller.api.requests.get",
return_value=response_mock
):
response_mock.raise_for_status.side_effect = requests.exceptions.HTTPError
response_mock.status_code = 401
with pytest.raises(AuthenticationNeeded):
api._make_request("", {})
response_mock.status_code = 409
with pytest.raises(InstancePowerOffFailure):
api._make_request("", {})
response_mock.status_code = 500
with pytest.raises(requests.exceptions.HTTPError):
api._make_request("", {})
response_mock.raise_for_status.side_effect = Exception
with pytest.raises(Exception):
api._make_request("", {})
def get_server_diagnostics_post_v2_48_response(url, headers, params=None, timeout=None):
return json.loads("""{
"config_drive": true,
"cpu_details": [
{
"id": 0,
"time": 17300000000,
"utilisation": 15
}
],
"disk_details": [
{
"errors_count": 1,
"read_bytes": 262144,
"read_requests": 112,
"write_bytes": 5778432,
"write_requests": 488
}
],
"driver": "libvirt",
"hypervisor": "kvm",
"hypervisor_os": "ubuntu",
"memory_details": {
"maximum": 524288,
"used": 0
},
"nic_details": [
{
"mac_address": "01:23:45:67:89:ab",
"rx_drop": 200,
"rx_errors": 100,
"rx_octets": 2070139,
"rx_packets": 26701,
"rx_rate": 300,
"tx_drop": 500,
"tx_errors": 400,
"tx_octets": 140208,
"tx_packets": 662,
"tx_rate": 600
}
],
"num_cpus": 1,
"num_disks": 1,
"num_nics": 1,
"state": "running",
"uptime": 46664
}""")
def get_server_diagnostics_post_v2_1_response(url, headers, params=None, timeout=None):
return json.loads("""{
"cpu0_time": 17300000000,
"memory": 524288,
"vda_errors": -1,
"vda_read": 262144,
"vda_read_req": 112,
"vda_write": 5778432,
"vda_write_req": 488,
"vnet1_rx": 2070139,
"vnet1_rx_drop": 0,
"vnet1_rx_errors": 0,
"vnet1_rx_packets": 26701,
"vnet1_tx": 140208,
"vnet1_tx_drop": 0,
"vnet1_tx_errors": 0,
"vnet1_tx_packets": 662
}""")
def test_get_server_diagnostics(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_server_diagnostics_post_v2_48_response):
api = SimpleApi(None, None)
assert api.get_server_diagnostics(None) == {
"config_drive": True,
"cpu_details": [
{
"id": 0,
"time": 17300000000,
"utilisation": 15
}
],
"disk_details": [
{
"errors_count": 1,
"read_bytes": 262144,
"read_requests": 112,
"write_bytes": 5778432,
"write_requests": 488
}
],
"driver": "libvirt",
"hypervisor": "kvm",
"hypervisor_os": "ubuntu",
"memory_details": {
"maximum": 524288,
"used": 0
},
"nic_details": [
{
"mac_address": "01:23:45:67:89:ab",
"rx_drop": 200,
"rx_errors": 100,
"rx_octets": 2070139,
"rx_packets": 26701,
"rx_rate": 300,
"tx_drop": 500,
"tx_errors": 400,
"tx_octets": 140208,
"tx_packets": 662,
"tx_rate": 600
}
],
"num_cpus": 1,
"num_disks": 1,
"num_nics": 1,
"state": "running",
"uptime": 46664
}
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_server_diagnostics_post_v2_1_response):
api = SimpleApi(None, None)
assert api.get_server_diagnostics(None) == {
"cpu0_time": 17300000000,
"memory": 524288,
"vda_errors": -1,
"vda_read": 262144,
"vda_read_req": 112,
"vda_write": 5778432,
"vda_write_req": 488,
"vnet1_rx": 2070139,
"vnet1_rx_drop": 0,
"vnet1_rx_errors": 0,
"vnet1_rx_packets": 26701,
"vnet1_tx": 140208,
"vnet1_tx_drop": 0,
"vnet1_tx_errors": 0,
"vnet1_tx_packets": 662
}
def get_project_limits_response(url, headers, params=None, timeout=None):
return json.loads("""{
"limits": {
"absolute": {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0
},
"rate": []
}
}""")
def test_get_project_limits(aggregator):
with mock.patch('datadog_checks.openstack_controller.api.SimpleApi._make_request',
side_effect=get_project_limits_response):
api = SimpleApi(None, None)
assert api.get_project_limits(None) == common.EXAMPLE_GET_PROJECT_LIMITS_RETURN_VALUE
| true
| true
|
f716302a4a1b0faf4eeecb2c309b0e786e32105d
| 14,220
|
py
|
Python
|
sample_application/__init__.py
|
cheewoei1997/sentiment-analysis
|
e936824de57a8cd40586a1a19145c6205b6c0843
|
[
"MIT"
] | null | null | null |
sample_application/__init__.py
|
cheewoei1997/sentiment-analysis
|
e936824de57a8cd40586a1a19145c6205b6c0843
|
[
"MIT"
] | null | null | null |
sample_application/__init__.py
|
cheewoei1997/sentiment-analysis
|
e936824de57a8cd40586a1a19145c6205b6c0843
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, flash, request
from flask_bootstrap import Bootstrap
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
import nltk
from nltk.corpus import stopwords
# from nltk.classify import SklearnClassifier
from nltk.classify import NaiveBayesClassifier
from nltk.collocations import BigramCollocationFinder
import sklearn
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import os
from random import shuffle
nltk.download('punkt')
# from analyser import set_data
class SentimentForm(Form):
sentence = TextField('Type your sentence here', validators=[Required()])
classifier = RadioField('This is a radio field', choices=[
('lsvc', 'LinearSVC'),
('bernb', 'BernoulliNB'),
('multi', 'Multinomial'),
('logreg', 'Logistic Regression'),
('svc', 'SVC'),
])
submit_button = SubmitField('Submit')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommend =)
# https://github.com/mbr/flask-appconfig
Bootstrap(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
# form = ExampleForm()
form = SentimentForm()
form.validate_on_submit() # to get error messages to the browser
# flash('critical message', 'critical')
# flash('error message', 'error')
# flash('warning message', 'warning')
# flash('info message', 'info')
# flash('debug message', 'debug')
# flash('different message', 'different')
# flash('uncategorized message')
sentences = ['the show is not only great, but also fantastic and a masterpiece',
'today is definitely a day for walking the dog',]
if form.validate_on_submit():
if request.method == 'POST':
# switch out request.form with the 20 sentences
result = request.form
input_sentence = set_data(result)
train_data = get_dataset(input_sentence)
choice = result['classifier']
choice_dict = {
'bernb': 'Bernoulli Naive Bayes',
'multi': 'Multinomial Naive Bayes',
'logreg': 'Logistic Regression',
'svc': 'Support Vector Classifier',
'lsvc': 'Linear Support Vector Classifier',
}
if choice == 'bernb':
stats = set_classifier(BernoulliNB(), train_data, input_sentence)
elif choice == 'multi':
stats = set_classifier(MultinomialNB(), train_data, input_sentence)
elif choice == 'logreg':
stats = set_classifier(LogisticRegression(), train_data, input_sentence)
elif choice == 'svc':
stats = set_classifier(SVC(), train_data, input_sentence)
elif choice == 'lsvc':
stats = set_classifier(LinearSVC(), train_data, input_sentence)
else:
print('Something went terribly wrong')
stats_dict = {
'posPercent': stats[0],
'negPercent': stats[1],
'pos': stats[2],
'neg': stats[3],
'sentence': result['sentence'],
'train_data': train_data,
'choice': choice_dict[str(choice)],
}
return render_template('result.html', context=stats_dict)
else:
print('ELSEEEE')
print(request.form)
# print(form.csrf_token)
return render_template('error.html', form=form)
return render_template('index.html', form=form)
# @app.route('/result/')
# def result():
# print('Hola this is result')
# return render_template('result.html')
return app
def word_feats(words):
return dict([(words, True)])
def set_data(requested):
sentence = requested['sentence']
target = sentence.lower()
target = nltk.word_tokenize(target)
return target
def get_dataset(target):
# Loads the positive and negative words
pos_words = open(os.path.join('datasets', 'positive-words.txt'), 'r').read()
neg_words = open(os.path.join('datasets', 'negative-words.txt'), 'r').read()
# Tokenize the words
pos_words = nltk.word_tokenize(pos_words)
neg_words = nltk.word_tokenize(neg_words)
shuffle(pos_words)
shuffle(neg_words)
neg_words = neg_words[:2139]
# Keep both positive and negative into posneg
posneg = pos_words + neg_words
neu_words = []
[neu_words.append(neu) for neu in target if neu not in posneg]
positive_features = [(word_feats(pos), 'pos') for pos in pos_words]
negative_features = [(word_feats(neg), 'neg') for neg in neg_words]
neutral_features = [(word_feats(neu.lower()), 'neu') for neu in neu_words]
print('Positive feats:', len(positive_features))
print('Negative feats:', len(negative_features))
print('Neutral feats:', neutral_features)
train_set = positive_features + negative_features + neutral_features
return train_set
def set_classifier(chosen_classifier, train_set, sentence):
classifier = SklearnClassifier(chosen_classifier)
classifier.train(train_set)
neg = 0
pos = 0
print('set_classifier', sentence)
for word in sentence:
classResult = classifier.classify(word_feats(word))
print(word_feats(word))
print(classResult)
if classResult == 'neg':
neg = neg + 1
if classResult == 'pos':
pos = pos + 1
posPercent = str(float(pos)/len(sentence))
negPercent = str(float(neg)/len(sentence))
# print ('Accuracy:', nltk.classify.util.accuracy(classifier, sentence))
# classifier.show_most_informative_features()
# print('Score:', score)
print('Positive: ' + posPercent)
print('Negative: ' + negPercent)
print('Pos', pos)
print('Neg', neg)
return posPercent, negPercent, pos, neg
if __name__ == '__main__':
create_app().run(debug=True)
| 34.019139
| 94
| 0.600563
|
from flask import Flask, render_template, flash, request
from flask_bootstrap import Bootstrap
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
import nltk
from nltk.corpus import stopwords
from nltk.classify import NaiveBayesClassifier
from nltk.collocations import BigramCollocationFinder
import sklearn
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import os
from random import shuffle
nltk.download('punkt')
class SentimentForm(Form):
sentence = TextField('Type your sentence here', validators=[Required()])
classifier = RadioField('This is a radio field', choices=[
('lsvc', 'LinearSVC'),
('bernb', 'BernoulliNB'),
('multi', 'Multinomial'),
('logreg', 'Logistic Regression'),
('svc', 'SVC'),
])
submit_button = SubmitField('Submit')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile)
Bootstrap(app)
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = SentimentForm()
form.validate_on_submit()
sentences = ['the show is not only great, but also fantastic and a masterpiece',
'today is definitely a day for walking the dog',]
if form.validate_on_submit():
if request.method == 'POST':
result = request.form
input_sentence = set_data(result)
train_data = get_dataset(input_sentence)
choice = result['classifier']
choice_dict = {
'bernb': 'Bernoulli Naive Bayes',
'multi': 'Multinomial Naive Bayes',
'logreg': 'Logistic Regression',
'svc': 'Support Vector Classifier',
'lsvc': 'Linear Support Vector Classifier',
}
if choice == 'bernb':
stats = set_classifier(BernoulliNB(), train_data, input_sentence)
elif choice == 'multi':
stats = set_classifier(MultinomialNB(), train_data, input_sentence)
elif choice == 'logreg':
stats = set_classifier(LogisticRegression(), train_data, input_sentence)
elif choice == 'svc':
stats = set_classifier(SVC(), train_data, input_sentence)
elif choice == 'lsvc':
stats = set_classifier(LinearSVC(), train_data, input_sentence)
else:
print('Something went terribly wrong')
stats_dict = {
'posPercent': stats[0],
'negPercent': stats[1],
'pos': stats[2],
'neg': stats[3],
'sentence': result['sentence'],
'train_data': train_data,
'choice': choice_dict[str(choice)],
}
return render_template('result.html', context=stats_dict)
else:
print('ELSEEEE')
print(request.form)
return render_template('error.html', form=form)
return render_template('index.html', form=form)
return app
def word_feats(words):
return dict([(words, True)])
def set_data(requested):
sentence = requested['sentence']
target = sentence.lower()
target = nltk.word_tokenize(target)
return target
def get_dataset(target):
pos_words = open(os.path.join('datasets', 'positive-words.txt'), 'r').read()
neg_words = open(os.path.join('datasets', 'negative-words.txt'), 'r').read()
pos_words = nltk.word_tokenize(pos_words)
neg_words = nltk.word_tokenize(neg_words)
shuffle(pos_words)
shuffle(neg_words)
neg_words = neg_words[:2139]
posneg = pos_words + neg_words
neu_words = []
[neu_words.append(neu) for neu in target if neu not in posneg]
positive_features = [(word_feats(pos), 'pos') for pos in pos_words]
negative_features = [(word_feats(neg), 'neg') for neg in neg_words]
neutral_features = [(word_feats(neu.lower()), 'neu') for neu in neu_words]
print('Positive feats:', len(positive_features))
print('Negative feats:', len(negative_features))
print('Neutral feats:', neutral_features)
train_set = positive_features + negative_features + neutral_features
return train_set
def set_classifier(chosen_classifier, train_set, sentence):
classifier = SklearnClassifier(chosen_classifier)
classifier.train(train_set)
neg = 0
pos = 0
print('set_classifier', sentence)
for word in sentence:
classResult = classifier.classify(word_feats(word))
print(word_feats(word))
print(classResult)
if classResult == 'neg':
neg = neg + 1
if classResult == 'pos':
pos = pos + 1
posPercent = str(float(pos)/len(sentence))
negPercent = str(float(neg)/len(sentence))
print('Positive: ' + posPercent)
print('Negative: ' + negPercent)
print('Pos', pos)
print('Neg', neg)
return posPercent, negPercent, pos, neg
if __name__ == '__main__':
create_app().run(debug=True)
| true
| true
|
f71630796ef66bcc8f7e0ca2c0d9cde2f3b48935
| 20,454
|
py
|
Python
|
zerver/lib/test_runner.py
|
N-Shar-ma/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | 4
|
2021-09-16T16:46:55.000Z
|
2022-02-06T13:00:21.000Z
|
zerver/lib/test_runner.py
|
jai2201/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/test_runner.py
|
jai2201/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | null | null | null |
import multiprocessing
import os
import random
import shutil
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union, cast
from unittest import TestLoader, TestSuite, mock, runner
from unittest.result import TestResult
from django.conf import settings
from django.db import connections
from django.test import TestCase
from django.test import runner as django_runner
from django.test.runner import DiscoverRunner
from django.test.signals import template_rendered
from scripts.lib.zulip_tools import (
TEMPLATE_DATABASE_DIR,
get_dev_uuid_var_path,
get_or_create_dev_uuid_var_path,
)
from zerver.lib import test_helpers
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_helpers import append_instrumentation_data, write_instrumentation_reports
# We need to pick an ID for this test-backend invocation, and store it
# in this global so it can be used in init_worker; this is used to
# ensure the database IDs we select are unique for each `test-backend`
# run. This probably should use a locking mechanism rather than the
# below hack, which fails 1/10000000 of the time.
random_id_range_start = str(random.randint(1, 10000000))
def get_database_id(worker_id: Optional[int] = None) -> str:
if worker_id:
return f"{random_id_range_start}_{worker_id}"
return random_id_range_start
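# Hedged sketch (an assumption, not part of the original module): the comment
# above notes that a locking mechanism would be more robust than a random ID.
# One possible shape for that is reserving an ID under an exclusive file lock;
# _reserve_run_id_with_lock and its counter_path argument are illustrative
# only (Unix-only via fcntl) and are not used by test-backend.
def _reserve_run_id_with_lock(counter_path: str) -> str:
    import fcntl  # local import so the sketch only needs fcntl when called
    with open(counter_path, "a+") as f:
        fcntl.flock(f, fcntl.LOCK_EX)  # serialize concurrent test-backend runs
        f.seek(0)
        next_id = int(f.read() or "0") + 1
        f.seek(0)
        f.truncate()
        f.write(str(next_id))
        return str(next_id)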
# The root directory for this run of the test suite.
TEST_RUN_DIR = get_or_create_dev_uuid_var_path(
os.path.join("test-backend", f"run_{get_database_id()}")
)
_worker_id = 0 # Used to identify the worker process.
class TextTestResult(runner.TextTestResult):
"""
This class has unpythonic function names because base class follows
this style.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.failed_tests: List[str] = []
def addInfo(self, test: TestCase, msg: str) -> None:
self.stream.write(msg)
self.stream.flush()
def addInstrumentation(self, test: TestCase, data: Dict[str, Any]) -> None:
append_instrumentation_data(data)
def startTest(self, test: TestCase) -> None:
TestResult.startTest(self, test)
self.stream.writeln(f"Running {test.id()}") # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
self.stream.flush()
def addSuccess(self, *args: Any, **kwargs: Any) -> None:
TestResult.addSuccess(self, *args, **kwargs)
def addError(self, *args: Any, **kwargs: Any) -> None:
TestResult.addError(self, *args, **kwargs)
test_name = args[0].id()
self.failed_tests.append(test_name)
def addFailure(self, *args: Any, **kwargs: Any) -> None:
TestResult.addFailure(self, *args, **kwargs)
test_name = args[0].id()
self.failed_tests.append(test_name)
def addSkip(self, test: TestCase, reason: str) -> None:
TestResult.addSkip(self, test, reason)
self.stream.writeln( # type: ignore[attr-defined] # https://github.com/python/typeshed/issues/3139
f"** Skipping {test.id()}: {reason}"
)
self.stream.flush()
class RemoteTestResult(django_runner.RemoteTestResult):
"""
The class follows the unpythonic style of function names of the
base class.
"""
def addInfo(self, test: TestCase, msg: str) -> None:
self.events.append(("addInfo", self.test_index, msg))
def addInstrumentation(self, test: TestCase, data: Dict[str, Any]) -> None:
# Some elements of data['info'] cannot be serialized.
if "info" in data:
del data["info"]
self.events.append(("addInstrumentation", self.test_index, data))
def process_instrumented_calls(func: Callable[[Dict[str, Any]], None]) -> None:
for call in test_helpers.INSTRUMENTED_CALLS:
func(call)
SerializedSubsuite = Tuple[Type[TestSuite], List[str]]
SubsuiteArgs = Tuple[Type["RemoteTestRunner"], int, SerializedSubsuite, bool]
def run_subsuite(args: SubsuiteArgs) -> Tuple[int, Any]:
# Reset the accumulated INSTRUMENTED_CALLS before running this subsuite.
test_helpers.INSTRUMENTED_CALLS = []
# The first argument is the test runner class but we don't need it
# because we run our own version of the runner class.
_, subsuite_index, subsuite, failfast = args
runner = RemoteTestRunner(failfast=failfast)
result = runner.run(deserialize_suite(subsuite))
# Now we send instrumentation related events. This data will be
# appended to the data structure in the main thread. For Mypy,
# type of Partial is different from Callable. All the methods of
# TestResult are passed TestCase as the first argument but
# addInstrumentation does not need it.
process_instrumented_calls(partial(result.addInstrumentation, None))
return subsuite_index, result.events
# Monkey-patch django.test.runner to allow using multiprocessing
# inside tests without a “daemonic processes are not allowed to have
# children” error.
class NoDaemonContext(multiprocessing.context.ForkContext):
class Process(multiprocessing.context.ForkProcess):
daemon = cast(bool, property(lambda self: False, lambda self, value: None))
django_runner.multiprocessing = NoDaemonContext()
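# Hedged sketch (illustrative only, not part of the original module): with the
# parallel-runner workers made non-daemonic above, a test body executed in one
# of those workers can itself fan out child processes. The helper names below
# (_square_for_sketch, _sketch_pool_inside_worker) are assumptions made for
# this example and are never called by the runner.
def _square_for_sketch(n: int) -> int:
    return n * n
def _sketch_pool_inside_worker() -> List[int]:
    # Without the monkey-patch, creating a Pool inside a daemonic worker
    # process raises "daemonic processes are not allowed to have children".
    with multiprocessing.get_context("fork").Pool(2) as pool:
        return pool.map(_square_for_sketch, [1, 2, 3])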
def destroy_test_databases(worker_id: Optional[int] = None) -> None:
for alias in connections:
connection = connections[alias]
def monkey_patched_destroy_test_db(test_database_name: str, verbosity: Any) -> None:
"""
We need to monkey-patch connection.creation._destroy_test_db to
use the IF EXISTS parameter - we don't have a guarantee that the
database we're cleaning up actually exists and since Django 3.1 the original implementation
throws an ugly `RuntimeError: generator didn't stop after throw()` exception and triggers
a confusing warnings.warn inside the postgresql backend implementation in _nodb_cursor()
if the database doesn't exist.
https://code.djangoproject.com/ticket/32376
"""
with connection.creation._nodb_cursor() as cursor:
quoted_name = connection.creation.connection.ops.quote_name(test_database_name)
query = f"DROP DATABASE IF EXISTS {quoted_name}"
cursor.execute(query)
with mock.patch.object(
connection.creation, "_destroy_test_db", monkey_patched_destroy_test_db
):
# In the parallel mode, the test databases are created
# through the N=self.parallel child processes, and in the
# parent process (which calls `destroy_test_databases`),
# `settings_dict` remains unchanged, with the original
# template database name (zulip_test_template). So to
# delete the database zulip_test_template_<number>, we
# need to pass `number` to `destroy_test_db`.
#
# When we run in serial mode (self.parallel=1), we don't
# fork and thus both creation and destruction occur in the
# same process, which means `settings_dict` has been
# updated to have `zulip_test_template_<number>` as its
# database name by the creation code. As a result, to
# delete that database, we need to not pass a number
# argument to destroy_test_db.
if worker_id is not None:
"""Modified from the Django original to"""
database_id = get_database_id(worker_id)
connection.creation.destroy_test_db(suffix=database_id)
else:
connection.creation.destroy_test_db()
def create_test_databases(worker_id: int) -> None:
database_id = get_database_id(worker_id)
for alias in connections:
connection = connections[alias]
connection.creation.clone_test_db(
suffix=database_id,
keepdb=True,
)
settings_dict = connection.creation.get_test_db_clone_settings(database_id)
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
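# Hedged sketch (illustrative only, not part of the original module): the
# in-place .update() above matters because django.db.connections and this
# module hold references to the same settings dict; rebinding
# connection.settings_dict to a new dict would leave other holders pointing
# at the stale settings. The name _sketch_update_in_place is hypothetical.
def _sketch_update_in_place() -> None:
    shared = {"NAME": "zulip_test_template"}
    other_holder = shared  # a second reference to the same dict object
    shared.update({"NAME": "zulip_test_template_1"})
    assert other_holder["NAME"] == "zulip_test_template_1"  # both see the change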
def init_worker(counter: "multiprocessing.sharedctypes.Synchronized[int]") -> None:
"""
This function runs only under parallel mode. It initializes the
individual processes which are also called workers.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
"""
You can now use _worker_id.
"""
# Clear the cache
from zerver.lib.cache import get_cache_backend
cache = get_cache_backend(None)
cache.clear()
# Close all connections
connections.close_all()
destroy_test_databases(_worker_id)
create_test_databases(_worker_id)
initialize_worker_path(_worker_id)
# We manually update the upload directory path in the URL regex.
from zproject.dev_urls import avatars_url
assert settings.LOCAL_UPLOADS_DIR is not None
assert avatars_url.default_args is not None
new_root = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
avatars_url.default_args["document_root"] = new_root
class ParallelTestSuite(django_runner.ParallelTestSuite):
run_subsuite = run_subsuite
init_worker = init_worker
def __init__(self, suite: TestSuite, processes: int, failfast: bool) -> None:
super().__init__(suite, processes, failfast)
# We can't specify a consistent type for self.subsuites, since
# the whole idea here is to monkey-patch that so we can use
# most of django_runner.ParallelTestSuite with our own suite
# definitions.
assert not isinstance(self.subsuites, SubSuiteList)
self.subsuites: Union[SubSuiteList, List[TestSuite]] = SubSuiteList(self.subsuites)
def check_import_error(test_name: str) -> None:
try:
# Directly using __import__ is not recommended, but here it gives
# clearer traceback as compared to importlib.import_module.
__import__(test_name)
except ImportError as exc:
raise exc from exc # Disable exception chaining in Python 3.
def initialize_worker_path(worker_id: int) -> None:
# Allow each test worker process to write to a unique directory
# within `TEST_RUN_DIR`.
worker_path = os.path.join(TEST_RUN_DIR, f"worker_{_worker_id}")
os.makedirs(worker_path, exist_ok=True)
settings.TEST_WORKER_DIR = worker_path
# Every process should upload to a separate directory so that
# race conditions can be avoided.
settings.LOCAL_UPLOADS_DIR = get_or_create_dev_uuid_var_path(
os.path.join(
"test-backend",
os.path.basename(TEST_RUN_DIR),
os.path.basename(worker_path),
"test_uploads",
)
)
settings.SENDFILE_ROOT = os.path.join(settings.LOCAL_UPLOADS_DIR, "files")
class Runner(DiscoverRunner):
parallel_test_suite = ParallelTestSuite
def __init__(self, *args: Any, **kwargs: Any) -> None:
DiscoverRunner.__init__(self, *args, **kwargs)
# `templates_rendered` holds templates which were rendered
# in proper logical tests.
self.templates_rendered: Set[str] = set()
# `shallow_tested_templates` holds templates which were rendered
# in `zerver.tests.test_templates`.
self.shallow_tested_templates: Set[str] = set()
template_rendered.connect(self.on_template_rendered)
def get_resultclass(self) -> Optional[Type[TextTestResult]]:
return TextTestResult
def on_template_rendered(self, sender: Any, context: Dict[str, Any], **kwargs: Any) -> None:
if hasattr(sender, "template"):
template_name = sender.template.name
if template_name not in self.templates_rendered:
if context.get("shallow_tested") and template_name not in self.templates_rendered:
self.shallow_tested_templates.add(template_name)
else:
self.templates_rendered.add(template_name)
self.shallow_tested_templates.discard(template_name)
def get_shallow_tested_templates(self) -> Set[str]:
return self.shallow_tested_templates
def setup_test_environment(self, *args: Any, **kwargs: Any) -> Any:
settings.DATABASES["default"]["NAME"] = settings.BACKEND_DATABASE_TEMPLATE
# We create/destroy the test databases in run_tests to avoid
# duplicate work when running in parallel mode.
# Write the template database ids to a file that we can
# reference for cleaning them up if they leak.
filepath = os.path.join(get_dev_uuid_var_path(), TEMPLATE_DATABASE_DIR, get_database_id())
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as f:
if self.parallel > 1:
for index in range(self.parallel):
f.write(get_database_id(index + 1) + "\n")
else:
f.write(get_database_id() + "\n")
# Check if we are in serial mode to avoid unnecessarily making a directory.
# We add "worker_0" in the path for consistency with parallel mode.
if self.parallel == 1:
initialize_worker_path(0)
return super().setup_test_environment(*args, **kwargs)
def teardown_test_environment(self, *args: Any, **kwargs: Any) -> Any:
# The test environment setup clones the zulip_test_template
# database, creating databases with names:
# 'zulip_test_template_N_<worker_id>',
# where N is `random_id_range_start`, and `worker_id` is a
# value between <1, self.parallel>.
#
# We need to delete those databases to avoid leaking disk
# (Django is smart and calls this on SIGINT too).
if self.parallel > 1:
for index in range(self.parallel):
destroy_test_databases(index + 1)
else:
destroy_test_databases()
# Clean up our record of which databases this process created.
filepath = os.path.join(get_dev_uuid_var_path(), TEMPLATE_DATABASE_DIR, get_database_id())
os.remove(filepath)
# Clean up our test runs root directory.
try:
shutil.rmtree(TEST_RUN_DIR)
except OSError:
print("Unable to clean up the test run's directory.")
return super().teardown_test_environment(*args, **kwargs)
def test_imports(
self, test_labels: List[str], suite: Union[TestSuite, ParallelTestSuite]
) -> None:
prefix_old = "unittest.loader.ModuleImportFailure." # Python <= 3.4
prefix_new = "unittest.loader._FailedTest." # Python > 3.4
error_prefixes = [prefix_old, prefix_new]
for test_name in get_test_names(suite):
for prefix in error_prefixes:
if test_name.startswith(prefix):
test_name = test_name[len(prefix) :]
for label in test_labels:
# This code block is for Python 3.5 when test label is
# directly provided, for example:
# ./tools/test-backend zerver.tests.test_alert_words.py
#
# In this case, the test name is of this form:
# 'unittest.loader._FailedTest.test_alert_words'
#
# Whereas check_import_error requires test names of
# this form:
# 'unittest.loader._FailedTest.zerver.tests.test_alert_words'.
if test_name in label:
test_name = label
break
check_import_error(test_name)
def run_tests(
self,
test_labels: List[str],
extra_tests: Optional[List[TestCase]] = None,
full_suite: bool = False,
include_webhooks: bool = False,
**kwargs: Any,
) -> Tuple[bool, List[str]]:
self.setup_test_environment()
try:
suite = self.build_suite(test_labels, extra_tests)
except AttributeError:
# We are likely to get here only when running tests in serial
# mode on Python 3.4 or lower.
# test_labels are always normalized to include the correct prefix.
# If we run the command with ./tools/test-backend test_alert_words,
# test_labels will be equal to ['zerver.tests.test_alert_words'].
for test_label in test_labels:
check_import_error(test_label)
# I think we won't reach this line under normal circumstances, but
# for some unforeseen scenario in which the AttributeError was not
# caused by an import error, let's re-raise the exception for
# debugging purposes.
raise
self.test_imports(test_labels, suite)
if self.parallel == 1:
# We are running in serial mode so create the databases here.
# For parallel mode, the databases are created in init_worker.
# We don't want to create and destroy DB in setup_test_environment
# because it will be called for both serial and parallel modes.
# However, at this point we know in which mode we would be running
# since that decision has already been made in build_suite().
#
# We pass a _worker_id, which in this code path is always 0
destroy_test_databases(_worker_id)
create_test_databases(_worker_id)
# We have to do the next line to avoid flaky scenarios where we
# run a single test and getting an SA connection causes data from
# a Django connection to be rolled back mid-test.
with get_sqlalchemy_connection():
result = self.run_suite(suite)
self.teardown_test_environment()
failed = self.suite_result(suite, result)
if not failed:
write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks)
return failed, result.failed_tests
def get_test_names(suite: Union[TestSuite, ParallelTestSuite]) -> List[str]:
if isinstance(suite, ParallelTestSuite):
# suite is ParallelTestSuite. It will have a subsuites parameter of
# type SubSuiteList. Each element of a SubsuiteList is a tuple whose
# first element is the type of TestSuite and the second element is a
# list of test names in that test suite. See serialize_suite() for the
# implementation details.
assert isinstance(suite.subsuites, SubSuiteList)
return [name for subsuite in suite.subsuites for name in subsuite[1]]
else:
return [t.id() for t in get_tests_from_suite(suite)]
def get_tests_from_suite(suite: TestSuite) -> TestCase:
for test in suite:
if isinstance(test, TestSuite):
yield from get_tests_from_suite(test)
else:
yield test
def serialize_suite(suite: TestSuite) -> Tuple[Type[TestSuite], List[str]]:
return type(suite), get_test_names(suite)
def deserialize_suite(args: Tuple[Type[TestSuite], List[str]]) -> TestSuite:
suite_class, test_names = args
suite = suite_class()
tests = TestLoader().loadTestsFromNames(test_names)
for test in get_tests_from_suite(tests):
suite.addTest(test)
return suite
class RemoteTestRunner(django_runner.RemoteTestRunner):
resultclass = RemoteTestResult
class SubSuiteList(List[Tuple[Type[TestSuite], List[str]]]):
"""
This class allows us to avoid changing the main logic of
ParallelTestSuite and still make it serializable.
"""
def __init__(self, suites: List[TestSuite]) -> None:
serialized_suites = [serialize_suite(s) for s in suites]
super().__init__(serialized_suites)
def __getitem__(self, index: Any) -> Any:
suite = super().__getitem__(index)
return deserialize_suite(suite)
| 41.321212
| 130
| 0.669453
|
import multiprocessing
import os
import random
import shutil
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union, cast
from unittest import TestLoader, TestSuite, mock, runner
from unittest.result import TestResult
from django.conf import settings
from django.db import connections
from django.test import TestCase
from django.test import runner as django_runner
from django.test.runner import DiscoverRunner
from django.test.signals import template_rendered
from scripts.lib.zulip_tools import (
TEMPLATE_DATABASE_DIR,
get_dev_uuid_var_path,
get_or_create_dev_uuid_var_path,
)
from zerver.lib import test_helpers
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_helpers import append_instrumentation_data, write_instrumentation_reports
random_id_range_start = str(random.randint(1, 10000000))
def get_database_id(worker_id: Optional[int] = None) -> str:
if worker_id:
return f"{random_id_range_start}_{worker_id}"
return random_id_range_start
TEST_RUN_DIR = get_or_create_dev_uuid_var_path(
os.path.join("test-backend", f"run_{get_database_id()}")
)
_worker_id = 0
class TextTestResult(runner.TextTestResult):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.failed_tests: List[str] = []
def addInfo(self, test: TestCase, msg: str) -> None:
self.stream.write(msg)
self.stream.flush()
def addInstrumentation(self, test: TestCase, data: Dict[str, Any]) -> None:
append_instrumentation_data(data)
def startTest(self, test: TestCase) -> None:
TestResult.startTest(self, test)
self.stream.writeln(f"Running {test.id()}") (self, *args: Any, **kwargs: Any) -> None:
TestResult.addSuccess(self, *args, **kwargs)
def addError(self, *args: Any, **kwargs: Any) -> None:
TestResult.addError(self, *args, **kwargs)
test_name = args[0].id()
self.failed_tests.append(test_name)
def addFailure(self, *args: Any, **kwargs: Any) -> None:
TestResult.addFailure(self, *args, **kwargs)
test_name = args[0].id()
self.failed_tests.append(test_name)
def addSkip(self, test: TestCase, reason: str) -> None:
TestResult.addSkip(self, test, reason)
        self.stream.writeln(f"** Skipping {test.id()}: {reason}")
self.stream.flush()
class RemoteTestResult(django_runner.RemoteTestResult):
def addInfo(self, test: TestCase, msg: str) -> None:
self.events.append(("addInfo", self.test_index, msg))
def addInstrumentation(self, test: TestCase, data: Dict[str, Any]) -> None:
if "info" in data:
del data["info"]
self.events.append(("addInstrumentation", self.test_index, data))
def process_instrumented_calls(func: Callable[[Dict[str, Any]], None]) -> None:
for call in test_helpers.INSTRUMENTED_CALLS:
func(call)
SerializedSubsuite = Tuple[Type[TestSuite], List[str]]
SubsuiteArgs = Tuple[Type["RemoteTestRunner"], int, SerializedSubsuite, bool]
def run_subsuite(args: SubsuiteArgs) -> Tuple[int, Any]:
test_helpers.INSTRUMENTED_CALLS = []
# because we run our own version of the runner class.
_, subsuite_index, subsuite, failfast = args
runner = RemoteTestRunner(failfast=failfast)
result = runner.run(deserialize_suite(subsuite))
# Now we send instrumentation related events. This data will be
# appended to the data structure in the main thread. For Mypy,
# type of Partial is different from Callable. All the methods of
# TestResult are passed TestCase as the first argument but
# addInstrumentation does not need it.
process_instrumented_calls(partial(result.addInstrumentation, None))
return subsuite_index, result.events
# Monkey-patch django.test.runner to allow using multiprocessing
# inside tests without a “daemonic processes are not allowed to have
# children” error.
class NoDaemonContext(multiprocessing.context.ForkContext):
class Process(multiprocessing.context.ForkProcess):
daemon = cast(bool, property(lambda self: False, lambda self, value: None))
django_runner.multiprocessing = NoDaemonContext()
def destroy_test_databases(worker_id: Optional[int] = None) -> None:
for alias in connections:
connection = connections[alias]
def monkey_patched_destroy_test_db(test_database_name: str, verbosity: Any) -> None:
with connection.creation._nodb_cursor() as cursor:
quoted_name = connection.creation.connection.ops.quote_name(test_database_name)
query = f"DROP DATABASE IF EXISTS {quoted_name}"
cursor.execute(query)
with mock.patch.object(
connection.creation, "_destroy_test_db", monkey_patched_destroy_test_db
):
# In the parallel mode, the test databases are created
# through the N=self.parallel child processes, and in the
# parent process (which calls `destroy_test_databases`),
# `settings_dict` remains unchanged, with the original
# template database name (zulip_test_template). So to
# delete the database zulip_test_template_<number>, we
# need to pass `number` to `destroy_test_db`.
#
            # When we run in serial mode (self.parallel=1), we don't
            # fork and thus both creation and destruction occur in the
            # same process, which means `settings_dict` has been
            # updated to have `zulip_test_template_<number>` as its
            # database name by the creation code. As a result, to
            # delete that database, we need to not pass a number
            # argument to destroy_test_db.
if worker_id is not None:
database_id = get_database_id(worker_id)
connection.creation.destroy_test_db(suffix=database_id)
else:
connection.creation.destroy_test_db()
def create_test_databases(worker_id: int) -> None:
database_id = get_database_id(worker_id)
for alias in connections:
connection = connections[alias]
connection.creation.clone_test_db(
suffix=database_id,
keepdb=True,
)
settings_dict = connection.creation.get_test_db_clone_settings(database_id)
connection.settings_dict.update(settings_dict)
connection.close()
def init_worker(counter: "multiprocessing.sharedctypes.Synchronized[int]") -> None:
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
from zerver.lib.cache import get_cache_backend
cache = get_cache_backend(None)
cache.clear()
connections.close_all()
destroy_test_databases(_worker_id)
create_test_databases(_worker_id)
initialize_worker_path(_worker_id)
from zproject.dev_urls import avatars_url
assert settings.LOCAL_UPLOADS_DIR is not None
assert avatars_url.default_args is not None
new_root = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
avatars_url.default_args["document_root"] = new_root
class ParallelTestSuite(django_runner.ParallelTestSuite):
run_subsuite = run_subsuite
init_worker = init_worker
def __init__(self, suite: TestSuite, processes: int, failfast: bool) -> None:
super().__init__(suite, processes, failfast)
        # self.subsuites is typed as a plain list upstream, but
        # the whole idea here is to monkey-patch that so we can use
        # most of django_runner.ParallelTestSuite with our own suite
        # definitions.
assert not isinstance(self.subsuites, SubSuiteList)
self.subsuites: Union[SubSuiteList, List[TestSuite]] = SubSuiteList(self.subsuites)
def check_import_error(test_name: str) -> None:
try:
# Directly using __import__ is not recommended, but here it gives
# clearer traceback as compared to importlib.import_module.
__import__(test_name)
except ImportError as exc:
raise exc from exc # Disable exception chaining in Python 3.
def initialize_worker_path(worker_id: int) -> None:
# Allow each test worker process to write to a unique directory
# within `TEST_RUN_DIR`.
worker_path = os.path.join(TEST_RUN_DIR, f"worker_{_worker_id}")
os.makedirs(worker_path, exist_ok=True)
settings.TEST_WORKER_DIR = worker_path
# Every process should upload to a separate directory so that
# race conditions can be avoided.
settings.LOCAL_UPLOADS_DIR = get_or_create_dev_uuid_var_path(
os.path.join(
"test-backend",
os.path.basename(TEST_RUN_DIR),
os.path.basename(worker_path),
"test_uploads",
)
)
settings.SENDFILE_ROOT = os.path.join(settings.LOCAL_UPLOADS_DIR, "files")
class Runner(DiscoverRunner):
parallel_test_suite = ParallelTestSuite
def __init__(self, *args: Any, **kwargs: Any) -> None:
DiscoverRunner.__init__(self, *args, **kwargs)
# `templates_rendered` holds templates which were rendered
# in proper logical tests.
self.templates_rendered: Set[str] = set()
# `shallow_tested_templates` holds templates which were rendered
# in `zerver.tests.test_templates`.
self.shallow_tested_templates: Set[str] = set()
template_rendered.connect(self.on_template_rendered)
def get_resultclass(self) -> Optional[Type[TextTestResult]]:
return TextTestResult
def on_template_rendered(self, sender: Any, context: Dict[str, Any], **kwargs: Any) -> None:
if hasattr(sender, "template"):
template_name = sender.template.name
if template_name not in self.templates_rendered:
if context.get("shallow_tested") and template_name not in self.templates_rendered:
self.shallow_tested_templates.add(template_name)
else:
self.templates_rendered.add(template_name)
self.shallow_tested_templates.discard(template_name)
def get_shallow_tested_templates(self) -> Set[str]:
return self.shallow_tested_templates
def setup_test_environment(self, *args: Any, **kwargs: Any) -> Any:
settings.DATABASES["default"]["NAME"] = settings.BACKEND_DATABASE_TEMPLATE
# We create/destroy the test databases in run_tests to avoid
# duplicate work when running in parallel mode.
# Write the template database ids to a file that we can
# reference for cleaning them up if they leak.
filepath = os.path.join(get_dev_uuid_var_path(), TEMPLATE_DATABASE_DIR, get_database_id())
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as f:
if self.parallel > 1:
for index in range(self.parallel):
f.write(get_database_id(index + 1) + "\n")
else:
f.write(get_database_id() + "\n")
# Check if we are in serial mode to avoid unnecessarily making a directory.
# We add "worker_0" in the path for consistency with parallel mode.
if self.parallel == 1:
initialize_worker_path(0)
return super().setup_test_environment(*args, **kwargs)
def teardown_test_environment(self, *args: Any, **kwargs: Any) -> Any:
# The test environment setup clones the zulip_test_template
# database, creating databases with names:
# 'zulip_test_template_N_<worker_id>',
# where N is `random_id_range_start`, and `worker_id` is a
# value between <1, self.parallel>.
#
# We need to delete those databases to avoid leaking disk
# (Django is smart and calls this on SIGINT too).
if self.parallel > 1:
for index in range(self.parallel):
destroy_test_databases(index + 1)
else:
destroy_test_databases()
# Clean up our record of which databases this process created.
filepath = os.path.join(get_dev_uuid_var_path(), TEMPLATE_DATABASE_DIR, get_database_id())
os.remove(filepath)
# Clean up our test runs root directory.
try:
shutil.rmtree(TEST_RUN_DIR)
except OSError:
print("Unable to clean up the test run's directory.")
return super().teardown_test_environment(*args, **kwargs)
def test_imports(
self, test_labels: List[str], suite: Union[TestSuite, ParallelTestSuite]
) -> None:
prefix_old = "unittest.loader.ModuleImportFailure."
prefix_new = "unittest.loader._FailedTest."
error_prefixes = [prefix_old, prefix_new]
for test_name in get_test_names(suite):
for prefix in error_prefixes:
if test_name.startswith(prefix):
test_name = test_name[len(prefix) :]
for label in test_labels:
if test_name in label:
test_name = label
break
check_import_error(test_name)
def run_tests(
self,
test_labels: List[str],
extra_tests: Optional[List[TestCase]] = None,
full_suite: bool = False,
include_webhooks: bool = False,
**kwargs: Any,
) -> Tuple[bool, List[str]]:
self.setup_test_environment()
try:
suite = self.build_suite(test_labels, extra_tests)
except AttributeError:
for test_label in test_labels:
check_import_error(test_label)
            # For some unforeseen scenario in which the AttributeError was not
            # caused by an import error, re-raise the exception so the real
            # cause surfaces.
raise
self.test_imports(test_labels, suite)
if self.parallel == 1:
            # We avoid creating/destroying databases in setup_test_environment
            # because it will be called for both serial and parallel modes.
            # However, at this point we know in which mode we would be running
            # since that decision has already been made in build_suite().
            #
            # We pass a _worker_id, which in this code path is always 0.
destroy_test_databases(_worker_id)
create_test_databases(_worker_id)
# We have to do the next line to avoid flaky scenarios where we
# run a single test and getting an SA connection causes data from
# a Django connection to be rolled back mid-test.
with get_sqlalchemy_connection():
result = self.run_suite(suite)
self.teardown_test_environment()
failed = self.suite_result(suite, result)
if not failed:
write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks)
return failed, result.failed_tests
def get_test_names(suite: Union[TestSuite, ParallelTestSuite]) -> List[str]:
if isinstance(suite, ParallelTestSuite):
# suite is ParallelTestSuite. It will have a subsuites parameter of
# type SubSuiteList. Each element of a SubsuiteList is a tuple whose
# first element is the type of TestSuite and the second element is a
# list of test names in that test suite. See serialize_suite() for the
# implementation details.
assert isinstance(suite.subsuites, SubSuiteList)
return [name for subsuite in suite.subsuites for name in subsuite[1]]
else:
return [t.id() for t in get_tests_from_suite(suite)]
def get_tests_from_suite(suite: TestSuite) -> TestCase:
for test in suite:
if isinstance(test, TestSuite):
yield from get_tests_from_suite(test)
else:
yield test
def serialize_suite(suite: TestSuite) -> Tuple[Type[TestSuite], List[str]]:
return type(suite), get_test_names(suite)
def deserialize_suite(args: Tuple[Type[TestSuite], List[str]]) -> TestSuite:
suite_class, test_names = args
suite = suite_class()
tests = TestLoader().loadTestsFromNames(test_names)
for test in get_tests_from_suite(tests):
suite.addTest(test)
return suite
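# Illustrative sketch (not part of the original module): serialize_suite and
# deserialize_suite exist so a suite can be sent to worker processes; a round
# trip should preserve the ordered list of test names.
def _example_suite_round_trip(suite: TestSuite) -> bool:
    restored = deserialize_suite(serialize_suite(suite))
    return get_test_names(suite) == get_test_names(restored)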
class RemoteTestRunner(django_runner.RemoteTestRunner):
resultclass = RemoteTestResult
class SubSuiteList(List[Tuple[Type[TestSuite], List[str]]]):
def __init__(self, suites: List[TestSuite]) -> None:
serialized_suites = [serialize_suite(s) for s in suites]
super().__init__(serialized_suites)
def __getitem__(self, index: Any) -> Any:
suite = super().__getitem__(index)
return deserialize_suite(suite)
| true
| true
|
f71631e249536be2614c39b0ec54682cd0027c08
| 1,177
|
py
|
Python
|
setup.py
|
d-nery/nyuki
|
f185fababee380660930243515652093855acfe7
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
d-nery/nyuki
|
f185fababee380660930243515652093855acfe7
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
d-nery/nyuki
|
f185fababee380660930243515652093855acfe7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from pip.req import parse_requirements
from setuptools import setup, find_packages
try:
with open('VERSION.txt', 'r') as v:
version = v.read().strip()
except FileNotFoundError:
version = '0.0.0.dev0'
with open('DESCRIPTION', 'r') as d:
long_description = d.read()
# Requirements
install_reqs = parse_requirements('requirements.txt', session='dummy')
reqs = [str(ir.req) for ir in install_reqs]
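# Illustrative sketch (not part of the original setup.py): pip.req was removed
# in pip 10, so a dependency-free fallback for reading requirements.txt could
# look roughly like this.
def read_requirements(path='requirements.txt'):
    with open(path) as f:
        return [
            line.strip()
            for line in f
            if line.strip() and not line.strip().startswith('#')
        ]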
setup(
name='nyuki',
description='Allowing the creation of independent unit to deal with stream processing while exposing an MQTT and REST API.',
long_description=long_description,
url='http://www.surycat.com',
author='Optiflows R&D',
author_email='rand@surycat.com',
version=version,
install_requires=reqs,
packages=find_packages(exclude=['tests']),
license='Apache 2.0',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
],
)
| 29.425
| 128
| 0.6661
|
from pip.req import parse_requirements
from setuptools import setup, find_packages
try:
with open('VERSION.txt', 'r') as v:
version = v.read().strip()
except FileNotFoundError:
version = '0.0.0.dev0'
with open('DESCRIPTION', 'r') as d:
long_description = d.read()
install_reqs = parse_requirements('requirements.txt', session='dummy')
reqs = [str(ir.req) for ir in install_reqs]
setup(
name='nyuki',
description='Allowing the creation of independent unit to deal with stream processing while exposing an MQTT and REST API.',
long_description=long_description,
url='http://www.surycat.com',
author='Optiflows R&D',
author_email='rand@surycat.com',
version=version,
install_requires=reqs,
packages=find_packages(exclude=['tests']),
license='Apache 2.0',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3 :: Only',
],
)
| true
| true
|
f716332bfa3e033470b3ce76020eb7c792a7ea54
| 8,579
|
py
|
Python
|
doc/source/conf.py
|
josh-friedlander-kando/arviz
|
8bd1de30cbea184c1493f3272fdca8ec1e6bcc8e
|
[
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
josh-friedlander-kando/arviz
|
8bd1de30cbea184c1493f3272fdca8ec1e6bcc8e
|
[
"Apache-2.0"
] | 1
|
2021-07-23T19:32:21.000Z
|
2021-07-23T19:32:21.000Z
|
doc/source/conf.py
|
josh-friedlander-kando/arviz
|
8bd1de30cbea184c1493f3272fdca8ec1e6bcc8e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ArviZ documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 11 18:33:59 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import sys
from typing import Dict
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import arviz
arviz.rcParams["data.load"] = "eager"
arviz.Numba.disable_numba()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.abspath("../sphinxext"))
thumb_directory = "example_thumbs"
if not os.path.isdir(thumb_directory):
os.mkdir(thumb_directory)
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.coverage",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"matplotlib.sphinxext.plot_directive",
"bokeh.sphinxext.bokeh_plot",
"numpydoc",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"gallery_generator",
"myst_nb",
"sphinx_panels",
"notfound.extension",
]
# ipython directive configuration
ipython_warning_is_error = False
# Copy plot options from Seaborn
# Include the example source for plots in API docs
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# Generate API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["../_templates"]
#
# MyST related params
jupyter_execute_notebooks = "auto"
execution_excludepatterns = ["*.ipynb"]
myst_heading_anchors = 3
panels_add_bootstrap_css = False
# The base toctree document.
master_doc = "index"
# General information about the project.
project = "ArviZ"
copyright = "2018, ArviZ devs"
author = "ArviZ devs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
branch_name = os.environ.get("BUILD_SOURCEBRANCHNAME", "")
if branch_name == "main":
version = "dev"
else:
# The short X.Y version.
version = arviz.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "build", "Thumbs.db", ".DS_Store", "notebooks/.ipynb_checkpoints"]
# configure notfound extension to not add any prefix to the urls
notfound_urls_prefix = "/arviz/"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/arviz-devs/arviz",
"icon": "fab fa-github-square",
},
{
"name": "Twitter",
"url": "https://twitter.com/arviz_devs",
"icon": "fab fa-twitter-square",
},
],
"navbar_start": ["navbar-logo", "navbar-version"],
"use_edit_page_button": False, # TODO: see how to skip of fix for generated pages
"google_analytics_id": "G-W1G68W77YV",
}
html_context = {
"github_user": "arviz-devs",
"github_repo": "arviz",
"github_version": "main",
"doc_path": "doc/source/",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_static_path = ["_static", thumb_directory]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
# html_sidebars = {}
# use additional pages to add a 404 page
html_additional_pages = {
"404": "404.html",
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "ArviZdoc"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "ArviZ"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# -- Options for LaTeX output ---------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "ArviZ.tex", "ArviZ Documentation", "ArviZ devs", "manual")]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "arviz", "ArviZ Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"ArviZ",
"ArviZ Documentation",
author,
"ArviZ",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# Example configuration for intersphinx
intersphinx_mapping = {
"xarray": ("http://xarray.pydata.org/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"pymc3": ("https://docs.pymc.io/", None),
"mpl": ("https://matplotlib.org/", None),
"bokeh": ("https://docs.bokeh.org/en/latest/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"zarr": ("https://zarr.readthedocs.io/en/stable/", None),
}
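# Illustrative sketch (not part of the original conf.py): adding another
# project to the intersphinx mapping follows the same (base_url, inventory)
# pattern; None means the default objects.inv is fetched from that base URL.
intersphinx_mapping["numpy"] = ("https://numpy.org/doc/stable/", None)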
| 30.530249
| 96
| 0.681198
|
import os
import re
import sys
from typing import Dict
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import arviz
arviz.rcParams["data.load"] = "eager"
arviz.Numba.disable_numba()
sys.path.insert(0, os.path.abspath("../sphinxext"))
thumb_directory = "example_thumbs"
if not os.path.isdir(thumb_directory):
os.mkdir(thumb_directory)
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.coverage",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"matplotlib.sphinxext.plot_directive",
"bokeh.sphinxext.bokeh_plot",
"numpydoc",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"gallery_generator",
"myst_nb",
"sphinx_panels",
"notfound.extension",
]
ipython_warning_is_error = False
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
autosummary_generate = True
numpydoc_show_class_members = False
templates_path = ["../_templates"]
jupyter_execute_notebooks = "auto"
execution_excludepatterns = ["*.ipynb"]
myst_heading_anchors = 3
panels_add_bootstrap_css = False
master_doc = "index"
project = "ArviZ"
copyright = "2018, ArviZ devs"
author = "ArviZ devs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
branch_name = os.environ.get("BUILD_SOURCEBRANCHNAME", "")
if branch_name == "main":
version = "dev"
else:
# The short X.Y version.
version = arviz.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "build", "Thumbs.db", ".DS_Store", "notebooks/.ipynb_checkpoints"]
# configure notfound extension to not add any prefix to the urls
notfound_urls_prefix = "/arviz/"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/arviz-devs/arviz",
"icon": "fab fa-github-square",
},
{
"name": "Twitter",
"url": "https://twitter.com/arviz_devs",
"icon": "fab fa-twitter-square",
},
],
"navbar_start": ["navbar-logo", "navbar-version"],
"use_edit_page_button": False, # TODO: see how to skip of fix for generated pages
"google_analytics_id": "G-W1G68W77YV",
}
html_context = {
"github_user": "arviz-devs",
"github_repo": "arviz",
"github_version": "main",
"doc_path": "doc/source/",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_static_path = ["_static", thumb_directory]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
# html_sidebars = {}
# use additional pages to add a 404 page
html_additional_pages = {
"404": "404.html",
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "ArviZdoc"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "ArviZ"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# -- Options for LaTeX output ---------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "ArviZ.tex", "ArviZ Documentation", "ArviZ devs", "manual")]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "arviz", "ArviZ Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"ArviZ",
"ArviZ Documentation",
author,
"ArviZ",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# Example configuration for intersphinx
intersphinx_mapping = {
"xarray": ("http://xarray.pydata.org/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"pymc3": ("https://docs.pymc.io/", None),
"mpl": ("https://matplotlib.org/", None),
"bokeh": ("https://docs.bokeh.org/en/latest/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"zarr": ("https://zarr.readthedocs.io/en/stable/", None),
}
| true
| true
|
f716336d6299fcdb7bed0490151a1ca232af284a
| 290
|
py
|
Python
|
sklift/datasets/__init__.py
|
rishawsingh/scikit-uplift
|
a46f11d24025f8489577640271abfc4d847d0334
|
[
"MIT"
] | 403
|
2019-12-21T09:36:57.000Z
|
2022-03-30T09:36:56.000Z
|
sklift/datasets/__init__.py
|
fspofficial/scikit-uplift
|
c9dd56aa0277e81ef7c4be62bf2fd33432e46f36
|
[
"MIT"
] | 100
|
2020-02-29T11:52:21.000Z
|
2022-03-29T23:14:33.000Z
|
sklift/datasets/__init__.py
|
fspofficial/scikit-uplift
|
c9dd56aa0277e81ef7c4be62bf2fd33432e46f36
|
[
"MIT"
] | 81
|
2019-12-26T08:28:44.000Z
|
2022-03-22T09:08:54.000Z
|
from .datasets import (
get_data_dir,
clear_data_dir,
fetch_x5, fetch_lenta,
fetch_criteo, fetch_hillstrom,
fetch_megafon
)
__all__ = [
'get_data_dir',
'clear_data_dir',
'fetch_x5', 'fetch_lenta',
'fetch_criteo', 'fetch_hillstrom',
'fetch_megafon'
]
| 19.333333
| 38
| 0.672414
|
from .datasets import (
get_data_dir,
clear_data_dir,
fetch_x5, fetch_lenta,
fetch_criteo, fetch_hillstrom,
fetch_megafon
)
__all__ = [
'get_data_dir',
'clear_data_dir',
'fetch_x5', 'fetch_lenta',
'fetch_criteo', 'fetch_hillstrom',
'fetch_megafon'
]
| true
| true
|
f71633d94eec3d43c9c771dca70dfe474a05d300
| 491
|
py
|
Python
|
build/sensor_actuator/catkin_generated/pkg.installspace.context.pc.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
build/sensor_actuator/catkin_generated/pkg.installspace.context.pc.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
build/sensor_actuator/catkin_generated/pkg.installspace.context.pc.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kaiodt/kaio_ros_ws/install/include".split(';') if "/home/kaiodt/kaio_ros_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "sensor_actuator"
PROJECT_SPACE_DIR = "/home/kaiodt/kaio_ros_ws/install"
PROJECT_VERSION = "0.0.0"
| 54.555556
| 147
| 0.753564
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kaiodt/kaio_ros_ws/install/include".split(';') if "/home/kaiodt/kaio_ros_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "sensor_actuator"
PROJECT_SPACE_DIR = "/home/kaiodt/kaio_ros_ws/install"
PROJECT_VERSION = "0.0.0"
| true
| true
|
f71633e76aaac65d45ee61243fd61709c015ce9a
| 2,671
|
py
|
Python
|
download.py
|
HYUNMIN-HWANG/image-gpt
|
457bbb212d8435d4bb20a416120301359cb3686b
|
[
"MIT"
] | 1,641
|
2020-06-17T18:25:14.000Z
|
2022-03-29T08:04:07.000Z
|
download.py
|
HYUNMIN-HWANG/image-gpt
|
457bbb212d8435d4bb20a416120301359cb3686b
|
[
"MIT"
] | 16
|
2020-06-17T20:08:03.000Z
|
2021-12-06T03:18:33.000Z
|
download.py
|
HYUNMIN-HWANG/image-gpt
|
457bbb212d8435d4bb20a416120301359cb3686b
|
[
"MIT"
] | 263
|
2020-06-17T18:53:24.000Z
|
2022-03-27T11:39:04.000Z
|
import argparse
import json
import os
import sys
import requests
from tqdm import tqdm
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--download_dir", type=str, default="/root/downloads/")
parser.add_argument("--bert", action="store_true", help="download a bert model (default: ar)")
parser.add_argument("--model", type=str, choices=["s", "m", "l"], help="parameter counts are s:76M, m:455M, l:1362M")
parser.add_argument("--ckpt", type=str, choices=["131000", "262000", "524000", "1000000"])
parser.add_argument("--clusters", action="store_true", help="download the color clusters file")
parser.add_argument("--dataset", type=str, choices=["imagenet", "cifar10"])
args = parser.parse_args()
print("input args:\n", json.dumps(vars(args), indent=4, separators=(",", ":")))
return args
def main(args):
if not os.path.exists(args.download_dir):
os.makedirs(args.download_dir)
urls = []
# download the checkpoint
if args.model and args.ckpt:
base_url = f"https://openaipublic.blob.core.windows.net/image-gpt/checkpoints/igpt-{args.model}{'-bert' if args.bert else ''}/{args.ckpt}"
size_to_shards = {"s": 32, "m": 32, "l": 64}
shards = size_to_shards[args.model]
for filename in [f"model.ckpt-{args.ckpt}.data-{i:05d}-of-{shards:05d}" for i in range(shards)]:
urls.append(f"{base_url}/{filename}")
urls.append(f"{base_url}/model.ckpt-{args.ckpt}.index")
urls.append(f"{base_url}/model.ckpt-{args.ckpt}.meta")
# download the color clusters file
if args.clusters:
urls.append("https://openaipublic.blob.core.windows.net/image-gpt/color-clusters/kmeans_centers.npy")
# download color clustered dataset
if args.dataset:
for split in ["trX", "trY", "vaX", "vaY", "teX", "teY"]:
urls.append(f"https://openaipublic.blob.core.windows.net/image-gpt/datasets/{args.dataset}_{split}.npy")
# run the download
for url in urls:
filename = url.split("/")[-1]
r = requests.get(url, stream=True)
with open(f"{args.download_dir}/{filename}", "wb") as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=80, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
# 1k for chunk_size, since Ethernet packet size is around 1500 bytes
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size)
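# Illustrative sketch (not part of the original script): a quick check that a
# finished download matches the size advertised in the Content-Length header,
# which is the same value the progress bar above is sized from.
def verify_download_size(url: str, local_path: str) -> bool:
    expected = int(requests.head(url).headers["content-length"])
    return os.path.getsize(local_path) == expected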
if __name__ == "__main__":
args = parse_arguments()
main(args)
| 39.865672
| 146
| 0.639835
|
import argparse
import json
import os
import sys
import requests
from tqdm import tqdm
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--download_dir", type=str, default="/root/downloads/")
parser.add_argument("--bert", action="store_true", help="download a bert model (default: ar)")
parser.add_argument("--model", type=str, choices=["s", "m", "l"], help="parameter counts are s:76M, m:455M, l:1362M")
parser.add_argument("--ckpt", type=str, choices=["131000", "262000", "524000", "1000000"])
parser.add_argument("--clusters", action="store_true", help="download the color clusters file")
parser.add_argument("--dataset", type=str, choices=["imagenet", "cifar10"])
args = parser.parse_args()
print("input args:\n", json.dumps(vars(args), indent=4, separators=(",", ":")))
return args
def main(args):
if not os.path.exists(args.download_dir):
os.makedirs(args.download_dir)
urls = []
if args.model and args.ckpt:
base_url = f"https://openaipublic.blob.core.windows.net/image-gpt/checkpoints/igpt-{args.model}{'-bert' if args.bert else ''}/{args.ckpt}"
size_to_shards = {"s": 32, "m": 32, "l": 64}
shards = size_to_shards[args.model]
for filename in [f"model.ckpt-{args.ckpt}.data-{i:05d}-of-{shards:05d}" for i in range(shards)]:
urls.append(f"{base_url}/{filename}")
urls.append(f"{base_url}/model.ckpt-{args.ckpt}.index")
urls.append(f"{base_url}/model.ckpt-{args.ckpt}.meta")
if args.clusters:
urls.append("https://openaipublic.blob.core.windows.net/image-gpt/color-clusters/kmeans_centers.npy")
if args.dataset:
for split in ["trX", "trY", "vaX", "vaY", "teX", "teY"]:
urls.append(f"https://openaipublic.blob.core.windows.net/image-gpt/datasets/{args.dataset}_{split}.npy")
for url in urls:
filename = url.split("/")[-1]
r = requests.get(url, stream=True)
with open(f"{args.download_dir}/{filename}", "wb") as f:
file_size = int(r.headers["content-length"])
chunk_size = 1000
with tqdm(ncols=80, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
pbar.update(chunk_size)
if __name__ == "__main__":
args = parse_arguments()
main(args)
| true
| true
|
f716363d3776ba3009c25f34e002fe11df367a34
| 9,600
|
py
|
Python
|
openstack/tests/unit/test_connection.py
|
IamFive/sdk-python
|
223b04f90477f7de0f00b3e652d8672ba73271c8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/test_connection.py
|
IamFive/sdk-python
|
223b04f90477f7de0f00b3e652d8672ba73271c8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/test_connection.py
|
IamFive/sdk-python
|
223b04f90477f7de0f00b3e652d8672ba73271c8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from keystoneauth1 import session as ksa_session
import mock
import os_client_config
from openstack import connection
from openstack import exceptions
from openstack import profile
from openstack import session
from openstack.tests.unit import base
CONFIG_AUTH_URL = "http://127.0.0.1:5000/v2.0"
CONFIG_USERNAME = "BozoTheClown"
CONFIG_PASSWORD = "TopSecret"
CONFIG_PROJECT = "TheGrandPrizeGame"
CONFIG_CACERT = "TrustMe"
CLOUD_CONFIG = """
clouds:
sample:
region_name: RegionOne
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
insecure:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
cacert: {cacert}
insecure: True
cacert:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
cacert: {cacert}
insecure: False
""".format(auth_url=CONFIG_AUTH_URL, username=CONFIG_USERNAME,
password=CONFIG_PASSWORD, project=CONFIG_PROJECT,
cacert=CONFIG_CACERT)
class TestConnection(base.TestCase):
@mock.patch("openstack.session.Session")
def test_other_parameters(self, mock_session_init):
mock_session_init.return_value = mock_session_init
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
conn = connection.Connection(profile=mock_profile, authenticator='2',
verify=True, cert='cert', user_agent='1')
args = {'auth': '2', 'user_agent': '1', 'verify': True, 'cert': 'cert'}
mock_session_init.assert_called_with(mock_profile, **args)
self.assertEqual(mock_session_init, conn.session)
def test_session_provided(self):
mock_session = mock.Mock(spec=session.Session)
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
conn = connection.Connection(session=mock_session,
profile=mock_profile,
user_agent='1')
self.assertEqual(mock_session, conn.session)
def test_ksa_session_provided(self):
mock_session = mock.Mock(spec=ksa_session.Session)
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
self.assertRaises(exceptions.SDKException, connection.Connection,
session=mock_session, profile=mock_profile,
user_agent='1')
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_create_authenticator(self, mock_get_plugin):
mock_plugin = mock.Mock()
mock_loader = mock.Mock()
mock_options = [
mock.Mock(dest="auth_url"),
mock.Mock(dest="password"),
mock.Mock(dest="username"),
]
mock_loader.get_options = mock.Mock(return_value=mock_options)
mock_loader.load_from_options = mock.Mock(return_value=mock_plugin)
mock_get_plugin.return_value = mock_loader
auth_args = {
'auth_url': '0',
'username': '1',
'password': '2',
}
conn = connection.Connection(auth_plugin='v2password', **auth_args)
mock_get_plugin.assert_called_with('v2password')
mock_loader.load_from_options.assert_called_with(**auth_args)
self.assertEqual(mock_plugin, conn.authenticator)
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_default_plugin(self, mock_get_plugin):
connection.Connection()
self.assertTrue(mock_get_plugin.called)
self.assertEqual(mock_get_plugin.call_args, mock.call("password"))
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_pass_authenticator(self, mock_get_plugin):
mock_plugin = mock.Mock()
mock_get_plugin.return_value = None
conn = connection.Connection(authenticator=mock_plugin)
self.assertFalse(mock_get_plugin.called)
self.assertEqual(mock_plugin, conn.authenticator)
def test_create_session(self):
auth = mock.Mock()
prof = profile.Profile()
conn = connection.Connection(authenticator=auth, profile=prof)
self.assertEqual(auth, conn.authenticator)
self.assertEqual(prof, conn.profile)
self.assertEqual('openstack.telemetry.alarm.v2._proxy',
conn.alarm.__class__.__module__)
self.assertEqual('openstack.cluster.v1._proxy',
conn.cluster.__class__.__module__)
self.assertEqual('openstack.compute.v2._proxy',
conn.compute.__class__.__module__)
self.assertEqual('openstack.database.v1._proxy',
conn.database.__class__.__module__)
self.assertEqual('openstack.identity.v3._proxy',
conn.identity.__class__.__module__)
self.assertEqual('openstack.image.v2._proxy',
conn.image.__class__.__module__)
self.assertEqual('openstack.network.v2._proxy',
conn.network.__class__.__module__)
self.assertEqual('openstack.object_store.v1._proxy',
conn.object_store.__class__.__module__)
self.assertEqual('openstack.load_balancer.v1._proxy',
conn.load_balancer.__class__.__module__)
self.assertEqual('openstack.orchestration.v1._proxy',
conn.orchestration.__class__.__module__)
self.assertEqual('openstack.telemetry.v2._proxy',
conn.telemetry.__class__.__module__)
self.assertEqual('openstack.workflow.v2._proxy',
conn.workflow.__class__.__module__)
def _prepare_test_config(self):
# Create a temporary directory where our test config will live
# and insert it into the search path via OS_CLIENT_CONFIG_FILE.
config_dir = self.useFixture(fixtures.TempDir()).path
config_path = os.path.join(config_dir, "clouds.yaml")
with open(config_path, "w") as conf:
conf.write(CLOUD_CONFIG)
self.useFixture(fixtures.EnvironmentVariable(
"OS_CLIENT_CONFIG_FILE", config_path))
def test_from_config_given_data(self):
self._prepare_test_config()
data = os_client_config.OpenStackConfig().get_one_cloud("sample")
sot = connection.from_config(cloud_config=data)
self.assertEqual(CONFIG_USERNAME,
sot.authenticator._username)
self.assertEqual(CONFIG_PASSWORD,
sot.authenticator._password)
self.assertEqual(CONFIG_AUTH_URL,
sot.authenticator.auth_url)
self.assertEqual(CONFIG_PROJECT,
sot.authenticator._project_name)
def test_from_config_given_name(self):
self._prepare_test_config()
sot = connection.from_config(cloud_name="sample")
self.assertEqual(CONFIG_USERNAME,
sot.authenticator._username)
self.assertEqual(CONFIG_PASSWORD,
sot.authenticator._password)
self.assertEqual(CONFIG_AUTH_URL,
sot.authenticator.auth_url)
self.assertEqual(CONFIG_PROJECT,
sot.authenticator._project_name)
def test_from_config_given_options(self):
self._prepare_test_config()
version = "100"
class Opts(object):
compute_api_version = version
sot = connection.from_config(cloud_name="sample", options=Opts)
pref = sot.session.profile.get_filter("compute")
# NOTE: Along the way, the `v` prefix gets added so we can build
# up URLs with it.
self.assertEqual("v" + version, pref.version)
def test_from_config_verify(self):
self._prepare_test_config()
sot = connection.from_config(cloud_name="insecure")
self.assertFalse(sot.session.verify)
sot = connection.from_config(cloud_name="cacert")
self.assertEqual(CONFIG_CACERT, sot.session.verify)
def test_authorize_works(self):
fake_session = mock.Mock(spec=session.Session)
fake_headers = {'X-Auth-Token': 'FAKE_TOKEN'}
fake_session.get_auth_headers.return_value = fake_headers
sot = connection.Connection(session=fake_session,
authenticator=mock.Mock())
res = sot.authorize()
self.assertEqual('FAKE_TOKEN', res)
def test_authorize_silent_failure(self):
fake_session = mock.Mock(spec=session.Session)
fake_session.get_auth_headers.return_value = None
fake_session.__module__ = 'openstack.session'
sot = connection.Connection(session=fake_session,
authenticator=mock.Mock())
res = sot.authorize()
self.assertIsNone(res)
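# Illustrative sketch (not part of the original tests): the from_config path
# exercised above is how application code would typically obtain and verify a
# connection, assuming a "sample" entry exists in clouds.yaml.
def example_connect_from_clouds_yaml():
    conn = connection.from_config(cloud_name="sample")
    token = conn.authorize()
    return conn, token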
| 39.183673
| 79
| 0.655729
|
import os
import fixtures
from keystoneauth1 import session as ksa_session
import mock
import os_client_config
from openstack import connection
from openstack import exceptions
from openstack import profile
from openstack import session
from openstack.tests.unit import base
CONFIG_AUTH_URL = "http://127.0.0.1:5000/v2.0"
CONFIG_USERNAME = "BozoTheClown"
CONFIG_PASSWORD = "TopSecret"
CONFIG_PROJECT = "TheGrandPrizeGame"
CONFIG_CACERT = "TrustMe"
CLOUD_CONFIG = """
clouds:
sample:
region_name: RegionOne
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
insecure:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
cacert: {cacert}
insecure: True
cacert:
auth:
auth_url: {auth_url}
username: {username}
password: {password}
project_name: {project}
cacert: {cacert}
insecure: False
""".format(auth_url=CONFIG_AUTH_URL, username=CONFIG_USERNAME,
password=CONFIG_PASSWORD, project=CONFIG_PROJECT,
cacert=CONFIG_CACERT)
class TestConnection(base.TestCase):
@mock.patch("openstack.session.Session")
def test_other_parameters(self, mock_session_init):
mock_session_init.return_value = mock_session_init
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
conn = connection.Connection(profile=mock_profile, authenticator='2',
verify=True, cert='cert', user_agent='1')
args = {'auth': '2', 'user_agent': '1', 'verify': True, 'cert': 'cert'}
mock_session_init.assert_called_with(mock_profile, **args)
self.assertEqual(mock_session_init, conn.session)
def test_session_provided(self):
mock_session = mock.Mock(spec=session.Session)
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
conn = connection.Connection(session=mock_session,
profile=mock_profile,
user_agent='1')
self.assertEqual(mock_session, conn.session)
def test_ksa_session_provided(self):
mock_session = mock.Mock(spec=ksa_session.Session)
mock_profile = mock.Mock()
mock_profile.get_services = mock.Mock(return_value=[])
self.assertRaises(exceptions.SDKException, connection.Connection,
session=mock_session, profile=mock_profile,
user_agent='1')
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_create_authenticator(self, mock_get_plugin):
mock_plugin = mock.Mock()
mock_loader = mock.Mock()
mock_options = [
mock.Mock(dest="auth_url"),
mock.Mock(dest="password"),
mock.Mock(dest="username"),
]
mock_loader.get_options = mock.Mock(return_value=mock_options)
mock_loader.load_from_options = mock.Mock(return_value=mock_plugin)
mock_get_plugin.return_value = mock_loader
auth_args = {
'auth_url': '0',
'username': '1',
'password': '2',
}
conn = connection.Connection(auth_plugin='v2password', **auth_args)
mock_get_plugin.assert_called_with('v2password')
mock_loader.load_from_options.assert_called_with(**auth_args)
self.assertEqual(mock_plugin, conn.authenticator)
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_default_plugin(self, mock_get_plugin):
connection.Connection()
self.assertTrue(mock_get_plugin.called)
self.assertEqual(mock_get_plugin.call_args, mock.call("password"))
@mock.patch("keystoneauth1.loading.base.get_plugin_loader")
def test_pass_authenticator(self, mock_get_plugin):
mock_plugin = mock.Mock()
mock_get_plugin.return_value = None
conn = connection.Connection(authenticator=mock_plugin)
self.assertFalse(mock_get_plugin.called)
self.assertEqual(mock_plugin, conn.authenticator)
def test_create_session(self):
auth = mock.Mock()
prof = profile.Profile()
conn = connection.Connection(authenticator=auth, profile=prof)
self.assertEqual(auth, conn.authenticator)
self.assertEqual(prof, conn.profile)
self.assertEqual('openstack.telemetry.alarm.v2._proxy',
conn.alarm.__class__.__module__)
self.assertEqual('openstack.cluster.v1._proxy',
conn.cluster.__class__.__module__)
self.assertEqual('openstack.compute.v2._proxy',
conn.compute.__class__.__module__)
self.assertEqual('openstack.database.v1._proxy',
conn.database.__class__.__module__)
self.assertEqual('openstack.identity.v3._proxy',
conn.identity.__class__.__module__)
self.assertEqual('openstack.image.v2._proxy',
conn.image.__class__.__module__)
self.assertEqual('openstack.network.v2._proxy',
conn.network.__class__.__module__)
self.assertEqual('openstack.object_store.v1._proxy',
conn.object_store.__class__.__module__)
self.assertEqual('openstack.load_balancer.v1._proxy',
conn.load_balancer.__class__.__module__)
self.assertEqual('openstack.orchestration.v1._proxy',
conn.orchestration.__class__.__module__)
self.assertEqual('openstack.telemetry.v2._proxy',
conn.telemetry.__class__.__module__)
self.assertEqual('openstack.workflow.v2._proxy',
conn.workflow.__class__.__module__)
def _prepare_test_config(self):
config_dir = self.useFixture(fixtures.TempDir()).path
config_path = os.path.join(config_dir, "clouds.yaml")
with open(config_path, "w") as conf:
conf.write(CLOUD_CONFIG)
self.useFixture(fixtures.EnvironmentVariable(
"OS_CLIENT_CONFIG_FILE", config_path))
def test_from_config_given_data(self):
self._prepare_test_config()
data = os_client_config.OpenStackConfig().get_one_cloud("sample")
sot = connection.from_config(cloud_config=data)
self.assertEqual(CONFIG_USERNAME,
sot.authenticator._username)
self.assertEqual(CONFIG_PASSWORD,
sot.authenticator._password)
self.assertEqual(CONFIG_AUTH_URL,
sot.authenticator.auth_url)
self.assertEqual(CONFIG_PROJECT,
sot.authenticator._project_name)
def test_from_config_given_name(self):
self._prepare_test_config()
sot = connection.from_config(cloud_name="sample")
self.assertEqual(CONFIG_USERNAME,
sot.authenticator._username)
self.assertEqual(CONFIG_PASSWORD,
sot.authenticator._password)
self.assertEqual(CONFIG_AUTH_URL,
sot.authenticator.auth_url)
self.assertEqual(CONFIG_PROJECT,
sot.authenticator._project_name)
def test_from_config_given_options(self):
self._prepare_test_config()
version = "100"
class Opts(object):
compute_api_version = version
sot = connection.from_config(cloud_name="sample", options=Opts)
pref = sot.session.profile.get_filter("compute")
self.assertEqual("v" + version, pref.version)
def test_from_config_verify(self):
self._prepare_test_config()
sot = connection.from_config(cloud_name="insecure")
self.assertFalse(sot.session.verify)
sot = connection.from_config(cloud_name="cacert")
self.assertEqual(CONFIG_CACERT, sot.session.verify)
def test_authorize_works(self):
fake_session = mock.Mock(spec=session.Session)
fake_headers = {'X-Auth-Token': 'FAKE_TOKEN'}
fake_session.get_auth_headers.return_value = fake_headers
sot = connection.Connection(session=fake_session,
authenticator=mock.Mock())
res = sot.authorize()
self.assertEqual('FAKE_TOKEN', res)
def test_authorize_silent_failure(self):
fake_session = mock.Mock(spec=session.Session)
fake_session.get_auth_headers.return_value = None
fake_session.__module__ = 'openstack.session'
sot = connection.Connection(session=fake_session,
authenticator=mock.Mock())
res = sot.authorize()
self.assertIsNone(res)
| true
| true
|