hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72cd9912a6e17b5b29517176042c7b23c21a2e0 | 557 | py | Python | DiseaseIdentifier/DiseaseClassify/migrations/0001_initial.py | Rosan93/Disease-Identifier | 6bf311c833ecaa3769ebf09c6d752a9ec7ebfdb4 | [
"Apache-2.0"
] | null | null | null | DiseaseIdentifier/DiseaseClassify/migrations/0001_initial.py | Rosan93/Disease-Identifier | 6bf311c833ecaa3769ebf09c6d752a9ec7ebfdb4 | [
"Apache-2.0"
] | 18 | 2020-01-28T22:44:38.000Z | 2021-06-10T18:55:20.000Z | DiseaseIdentifier/DiseaseClassify/migrations/0001_initial.py | RoshanGurungSr/Disease-Identifier | 6bf311c833ecaa3769ebf09c6d752a9ec7ebfdb4 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-15 08:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial (auto-generated) migration: creates the UploadImage table."""

    # First migration of this app.
    initial = True

    # No dependencies on other apps' migrations.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UploadImage',
            fields=[
                # Standard auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                # Empty upload_to: uploaded files land directly in MEDIA_ROOT.
                ('predict_image', models.FileField(upload_to='')),
            ],
        ),
    ]
| 24.217391 | 114 | 0.574506 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UploadImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('predict_image', models.FileField(upload_to='')),
],
),
]
| true | true |
f72cdab5d2b024368267933b507634792f303004 | 472 | py | Python | venv/Scripts/easy_install-3.8-script.py | rushermonza/CoronavirusWebScraper | 4c7d31dbb51ae7d791c620673ca6f36d1ef43e3e | [
"MIT"
] | 1 | 2020-04-04T04:55:20.000Z | 2020-04-04T04:55:20.000Z | venv/Scripts/easy_install-3.8-script.py | AntoData/CoronavirusWebScraper | 4c7d31dbb51ae7d791c620673ca6f36d1ef43e3e | [
"MIT"
] | null | null | null | venv/Scripts/easy_install-3.8-script.py | AntoData/CoronavirusWebScraper | 4c7d31dbb51ae7d791c620673ca6f36d1ef43e3e | [
"MIT"
] | null | null | null | #!C:\Users\ingov\PycharmProjects\CoronavirusWebScraper\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| 36.307692 | 87 | 0.70339 |
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
| true | true |
f72cdb329eab3ceed1d52538e15d5d30dc8b84c0 | 9,798 | py | Python | sphinx-doc/conf.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | sphinx-doc/conf.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | sphinx-doc/conf.py | kmoskovtsev/HOOMD-Blue-fork | 99560563a5ba9e082b513764bae51a84f48fdc70 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# HOOMD-blue documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 13 13:14:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
]

# Cross-link references to the Python standard library documentation.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}

# autodoc: take signatures from the first docstring line, document inherited
# members, and mock numpy so modules import even when it is not installed at
# doc-build time.  (Stray trailing semicolons removed — they are no-ops.)
autodoc_docstring_signature = True
autodoc_default_flags = ['inherited-members']
autodoc_mock_imports = ['numpy']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
exclude_patterns = ['_build', '_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document (root of the documentation tree).
master_doc = 'index'

# General information about the project.
project = 'HOOMD-blue'
copyright = '2016, The Regents of the University of Michigan'
author = 'The Regents of the University of Michigan'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.9'
# The full version, including alpha/beta/rc tags.
release = '2.1.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# NOTE(review): this later assignment shadows an earlier one that also
# excluded '_templates'; it previously dropped '_templates' silently.
# Keeping both entries so the final (effective) value matches the intent.
exclude_patterns = ['_build', '_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'HOOMD-blue-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'HOOMD-blue.tex', 'HOOMD-blue Documentation',
'The Regents of the University of Michigan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hoomd-blue', 'HOOMD-blue Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'HOOMD-blue', 'HOOMD-blue Documentation',
author, 'HOOMD-blue', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Keep the IPython sphinx directive from selecting a matplotlib backend.
# (Stray trailing semicolon removed — it is a no-op in Python.)
ipython_mplbackend = None
# ipython_execlines = ['import gsd.fl', 'import gsd.hoomd', 'import gsd.pygsd', 'import numpy']
| 32.66 | 95 | 0.716881 |
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax'
]
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
autodoc_docstring_signature = True;
autodoc_default_flags = ['inherited-members'];
autodoc_mock_imports = ['numpy'];
templates_path = ['_templates']
exclude_patterns = ['_build', '_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'HOOMD-blue'
copyright = '2016, The Regents of the University of Michigan'
author = 'The Regents of the University of Michigan'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.9'
# The full version, including alpha/beta/rc tags.
release = '2.1.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'HOOMD-blue-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'HOOMD-blue.tex', 'HOOMD-blue Documentation',
'The Regents of the University of Michigan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hoomd-blue', 'HOOMD-blue Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'HOOMD-blue', 'HOOMD-blue Documentation',
author, 'HOOMD-blue', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
ipython_mplbackend = None;
| true | true |
f72cdbc3ff47642e64c41dd8ddf1126531344739 | 1,481 | py | Python | trac/upgrades/db45.py | NetSpida/trac | 6ad75b926591e114ba504f6a72a38fd305d77fb1 | [
"BSD-3-Clause"
] | null | null | null | trac/upgrades/db45.py | NetSpida/trac | 6ad75b926591e114ba504f6a72a38fd305d77fb1 | [
"BSD-3-Clause"
] | null | null | null | trac/upgrades/db45.py | NetSpida/trac | 6ad75b926591e114ba504f6a72a38fd305d77fb1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import re
from trac.upgrades import backup_config_file
def do_upgrade(env, version, cursor):
    """Change [notification] ticket_subject_template and [notification]
    batch_subject_template to use syntax compatible with Jinja2.

    Rewrites ``$name`` template variables as ``${name}`` and, when either
    option changed, backs up the config file and saves the new values.
    """
    config = env.config
    section = 'notification'
    var_pattern = re.compile(r'\$([\w.]+)')

    def convert(option):
        # Returns True when the option's value was rewritten.
        value = config.get(section, option)
        if not value or not var_pattern.match(value):
            return False
        converted = var_pattern.sub(r'${\1}', value)
        env.log.info("Replaced value of [%s] %s: %s -> %s",
                     section, option, value, converted)
        config.set(section, option, converted)
        return True

    changed = convert('ticket_subject_template')
    changed = convert('batch_subject_template') or changed
    if changed:
        backup_config_file(env, '.db45.bak')
        config.save()
| 33.659091 | 72 | 0.665766 |
import re
from trac.upgrades import backup_config_file
def do_upgrade(env, version, cursor):
config = env.config
section = 'notification'
re_template_var = re.compile(r'\$([\w.]+)')
def update_template(name):
old_value = config.get(section, name)
if old_value:
if re_template_var.match(old_value):
new_value = re_template_var.sub(r'${\1}', old_value)
env.log.info("Replaced value of [%s] %s: %s -> %s",
section, name, old_value, new_value)
config.set(section, name, new_value)
return True
return False
updated = update_template('ticket_subject_template')
updated |= update_template('batch_subject_template')
if updated:
backup_config_file(env, '.db45.bak')
config.save()
| true | true |
f72cdbeb98f60ad142bf63a1d749cae5063e128a | 13,596 | py | Python | train_shape.py | EXJUSTICE/pointnet2 | 749a38fde6370fc7dee535855008bc5bc8468c0e | [
"MIT"
] | null | null | null | train_shape.py | EXJUSTICE/pointnet2 | 749a38fde6370fc7dee535855008bc5bc8468c0e | [
"MIT"
] | null | null | null | train_shape.py | EXJUSTICE/pointnet2 | 749a38fde6370fc7dee535855008bc5bc8468c0e | [
"MIT"
] | null | null | null | '''
Single-GPU training.
Will use H5 dataset in default. If using normal, will shift to the normal dataset.
'''
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
import modelnet_dataset
import modelnet_h5_dataset
# Command-line configuration for single-GPU training.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet2_cls_ssg', help='Model name [default: pointnet2_cls_ssg]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=251, help='Epoch to run [default: 251]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
# Fixed copy-paste error: this help text previously read 'Initial learning rate'.
parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--normal', action='store_true', help='Whether to use normal information')
FLAGS = parser.parse_args()

EPOCH_CNT = 0

# Module-level shorthands mirroring the parsed flags.
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate

MODEL = importlib.import_module(FLAGS.model)  # import network module
MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
# Back up the model definition and training script into the log dir.
# NOTE(review): 'cp' is POSIX-only; fine on Linux clusters, breaks on Windows.
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR))  # bkp of model def
os.system('cp train.py %s' % (LOG_DIR))  # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

# Batch-norm decay schedule constants (consumed by get_bn_decay).
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

NUM_CLASSES = 40

# ModelNet40 official train/test split.  With --normal, use the resampled
# point clouds with normals; otherwise use the HDF5 dataset.
# Note: if there is a problem it probably stems from ModelNetDataset
# switching python versions.
if FLAGS.normal:
    assert(NUM_POINT<=10000)
    DATA_PATH = os.path.join(ROOT_DIR, 'data/modelnet40_normal_resampled')
    TRAIN_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='train', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
    TEST_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
else:
    assert(NUM_POINT<=2048)
    TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=True)
    TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False)
def log_string(out_str):
    """Write *out_str* to the training log file (flushed) and echo it to stdout."""
    print(out_str, file=LOG_FOUT, flush=True)
    print(out_str)
def get_learning_rate(batch):
    """Exponentially decayed learning rate for the given global-step tensor.

    Decays BASE_LEARNING_RATE by DECAY_RATE every DECAY_STEP processed
    samples (staircase), clipped below at 1e-5.
    """
    decayed = tf.train.exponential_decay(
        BASE_LEARNING_RATE,   # Base learning rate.
        batch * BATCH_SIZE,   # Samples processed so far.
        DECAY_STEP,           # Decay step.
        DECAY_RATE,           # Decay rate.
        staircase=True)
    # Never let the rate collapse all the way to zero.
    return tf.maximum(decayed, 0.00001)
def get_bn_decay(batch):
    """Batch-norm decay schedule: momentum decays over training, so the
    batch-norm decay is 1 - momentum, capped at BN_DECAY_CLIP."""
    momentum = tf.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    return tf.minimum(BN_DECAY_CLIP, 1 - momentum)
def train():
    """Build the classification graph, create a session, and run the epoch loop.

    NOTE(review): the per-epoch training/eval calls are commented out in the
    original (shape debugging in progress); that state is preserved here.
    Fixes applied: the original referenced pointclouds_pl before it was
    assigned (NameError) and the last debug print was a syntax error that
    also re-called placeholder_inputs (which would create duplicate
    placeholders).  The debug prints now run after the placeholders exist.
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
            # Debug output (moved after placeholder creation).
            print("batch size:" + str(BATCH_SIZE))
            print("num point:" + str(NUM_POINT))
            print("initial pointclouds shape:" + str(pointclouds_pl.shape))
            print("model placeholder inputs:" + str((pointclouds_pl, labels_pl)))
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to helpfully increment the 'batch'
            # parameter for you every time it trains.
            batch = tf.get_variable('batch', [],
                                    initializer=tf.constant_initializer(0),
                                    trainable=False)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model and loss
            pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl,
                                               bn_decay=bn_decay)
            MODEL.get_loss(pred, labels_pl, end_points)
            losses = tf.get_collection('losses')
            total_loss = tf.add_n(losses, name='total_loss')
            tf.summary.scalar('total_loss', total_loss)
            for l in losses + [total_loss]:
                tf.summary.scalar(l.op.name, l)

            # Per-batch accuracy summary.
            correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
            accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
            tf.summary.scalar('accuracy', accuracy)

            print("--- Get training operator")
            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(total_loss, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver()

        # Create a session that grows GPU memory on demand.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Add summary writers for TensorBoard.
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init)

        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred': pred,
               'loss': total_loss,
               'train_op': train_op,
               'merged': merged,
               'step': batch,
               'end_points': end_points}

        best_acc = -1  # retained for when the disabled eval below is re-enabled
        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()
            # Disabled while we check shapes above:
            # train_one_epoch(sess, ops, train_writer)
            """
            eval_one_epoch(sess, ops, test_writer)

            # Save the variables to disk.
            if epoch % 10 == 0:
                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)
            """
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
log_string(str(datetime.now()))
# Make sure batch data is of same size
cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TRAIN_DATASET.num_channel()))
cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx = 0
while TRAIN_DATASET.has_next_batch():
batch_data, batch_label = TRAIN_DATASET.next_batch(augment=True)
#batch_data = provider.random_point_dropout(batch_data)
bsize = batch_data.shape[0]
cur_batch_data[0:bsize,...] = batch_data
cur_batch_label[0:bsize] = batch_label
# So crucial step here is size analysis so we will go through all of it
print("---------------------------------- VALUES----------------------------------")
print("pointclouds_pl: " + str(cur_batch_data))
print("labels_pl: "+ str(cur_batch_label))
print("is_training_pl: "+ str(is_training))
print("merged: "+str(ops['merged']))
print("step: " + str(ops['step']))
print("train_op: "+str(ops['train_op']))
print("loss: " +str(ops['loss']))
print("pred: " + str(ops['pred']))
print("----------------------------------------------------------------------------")
print("----------------------------------SHAPES----------------------------------")
print("pointclouds_pl: " + str(cur_batch_data.shape))
print("labels_pl: "+ str(cur_batch_label.shape))
print("is_training_pl: "+ str(is_training.shape))
print("merged: "+str(ops['merged'].shape))
print("step: " + str(ops['step'].shape))
print("train_op: "+str(ops['train_op'].shape))
print("loss: " +str(ops['loss'].shape))
print("pred: " + str(ops['pred'].shape))
"""
feed_dict = {ops['pointclouds_pl']: cur_batch_data,
ops['labels_pl']: cur_batch_label,
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
loss_sum += loss_val
if (batch_idx+1)%50 == 0:
log_string(' ---- batch: %03d ----' % (batch_idx+1))
log_string('mean loss: %f' % (loss_sum / 50))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx += 1
"""
TRAIN_DATASET.reset()
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
# Make sure batch data is of same size
cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel()))
cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx = 0
shape_ious = []
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
while TEST_DATASET.has_next_batch():
batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
bsize = batch_data.shape[0]
# for the last batch in the epoch, the bsize:end are from last batch
cur_batch_data[0:bsize,...] = batch_data
cur_batch_label[0:bsize] = batch_label
feed_dict = {ops['pointclouds_pl']: cur_batch_data,
ops['labels_pl']: cur_batch_label,
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
loss_sum += loss_val
batch_idx += 1
for i in range(0, bsize):
l = batch_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i] == l)
log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
EPOCH_CNT += 1
TEST_DATASET.reset()
return total_correct/float(total_seen)
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
train()
LOG_FOUT.close()
| 42.62069 | 186 | 0.629009 | '''
Single-GPU training.
Will use H5 dataset in default. If using normal, will shift to the normal dataset.
'''
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
import modelnet_dataset
import modelnet_h5_dataset
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet2_cls_ssg', help='Model name [default: pointnet2_cls_ssg]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=251, help='Epoch to run [default: 251]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--normal', action='store_true', help='Whether to use normal information')
FLAGS = parser.parse_args()
EPOCH_CNT = 0
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL = importlib.import_module(FLAGS.model)
MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR))
os.system('cp train.py %s' % (LOG_DIR))
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
NUM_CLASSES = 40
if FLAGS.normal:
assert(NUM_POINT<=10000)
DATA_PATH = os.path.join(ROOT_DIR, 'data/modelnet40_normal_resampled')
TRAIN_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='train', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
TEST_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE)
else:
assert(NUM_POINT<=2048)
TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=True)
TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False)
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE,
batch * BATCH_SIZE,
DECAY_STEP,
DECAY_RATE,
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001)
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
print("batch size:" + str(BATCH_SIZE))
print("num point:" + str(NUM_POINT))
print("initial pointclouds shape:" +str(pointclouds_pl.shape))
print("model placeholder inputs": +str(MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT))
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
batch = tf.get_variable('batch', [],
initializer=tf.constant_initializer(0), trainable=False)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
MODEL.get_loss(pred, labels_pl, end_points)
losses = tf.get_collection('losses')
total_loss = tf.add_n(losses, name='total_loss')
tf.summary.scalar('total_loss', total_loss)
for l in losses + [total_loss]:
tf.summary.scalar(l.op.name, l)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
print("--- Get training operator")
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step=batch)
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': total_loss,
'train_op': train_op,
'merged': merged,
'step': batch,
'end_points': end_points}
best_acc = -1
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
"""
eval_one_epoch(sess, ops, test_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
"""
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
log_string(str(datetime.now()))
cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TRAIN_DATASET.num_channel()))
cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx = 0
while TRAIN_DATASET.has_next_batch():
batch_data, batch_label = TRAIN_DATASET.next_batch(augment=True)
bsize = batch_data.shape[0]
cur_batch_data[0:bsize,...] = batch_data
cur_batch_label[0:bsize] = batch_label
print("---------------------------------- VALUES----------------------------------")
print("pointclouds_pl: " + str(cur_batch_data))
print("labels_pl: "+ str(cur_batch_label))
print("is_training_pl: "+ str(is_training))
print("merged: "+str(ops['merged']))
print("step: " + str(ops['step']))
print("train_op: "+str(ops['train_op']))
print("loss: " +str(ops['loss']))
print("pred: " + str(ops['pred']))
print("----------------------------------------------------------------------------")
print("----------------------------------SHAPES----------------------------------")
print("pointclouds_pl: " + str(cur_batch_data.shape))
print("labels_pl: "+ str(cur_batch_label.shape))
print("is_training_pl: "+ str(is_training.shape))
print("merged: "+str(ops['merged'].shape))
print("step: " + str(ops['step'].shape))
print("train_op: "+str(ops['train_op'].shape))
print("loss: " +str(ops['loss'].shape))
print("pred: " + str(ops['pred'].shape))
"""
feed_dict = {ops['pointclouds_pl']: cur_batch_data,
ops['labels_pl']: cur_batch_label,
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
loss_sum += loss_val
if (batch_idx+1)%50 == 0:
log_string(' ---- batch: %03d ----' % (batch_idx+1))
log_string('mean loss: %f' % (loss_sum / 50))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx += 1
"""
TRAIN_DATASET.reset()
def eval_one_epoch(sess, ops, test_writer):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel()))
cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32)
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx = 0
shape_ious = []
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
log_string(str(datetime.now()))
log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT))
while TEST_DATASET.has_next_batch():
batch_data, batch_label = TEST_DATASET.next_batch(augment=False)
bsize = batch_data.shape[0]
cur_batch_data[0:bsize,...] = batch_data
cur_batch_label[0:bsize] = batch_label
feed_dict = {ops['pointclouds_pl']: cur_batch_data,
ops['labels_pl']: cur_batch_label,
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize])
total_correct += correct
total_seen += bsize
loss_sum += loss_val
batch_idx += 1
for i in range(0, bsize):
l = batch_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i] == l)
log_string('eval mean loss: %f' % (loss_sum / float(batch_idx)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
EPOCH_CNT += 1
TEST_DATASET.reset()
return total_correct/float(total_seen)
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
train()
LOG_FOUT.close()
| false | true |
f72cdbf3665a2f5be56dde69f5e801552be52144 | 4,726 | py | Python | Code/PostProcessing/Cloud.py | ChimieleCode/OpenSees_Script | 58dcd187e5eda1bf92f8f2c4fc83b74d9108372d | [
"MIT"
] | null | null | null | Code/PostProcessing/Cloud.py | ChimieleCode/OpenSees_Script | 58dcd187e5eda1bf92f8f2c4fc83b74d9108372d | [
"MIT"
] | null | null | null | Code/PostProcessing/Cloud.py | ChimieleCode/OpenSees_Script | 58dcd187e5eda1bf92f8f2c4fc83b74d9108372d | [
"MIT"
] | null | null | null | import csv
import math
from ModelOptions import compute_local_fragility
from PostProcessing.SectionGaps import global_DCR_DS1, global_DCR_DS2, global_DCR_DST, demand_capacity_ratio_DS1_matrix, demand_capacity_ratio_DS2_matrix, demand_capacity_ratio_DST_matrix
from AnalysisDefinition.TimeHistory import spectral_response
# global_DCR_DS1 = [[1, 0.27896174747804386], [2, 0.28126931389396786], [3, 0.44095115696216836], [4, 0.33864425806026355], [5, 0.7645643659027233], [6, 0.8373640081441925], [7, 0.6888659383862444]]
# global_DCR_DS2 = [[1, 0.12933171227895135], [2, 0.13040154181101768], [3, 0.18752803478204755], [4, 0.13911329867854114], [5, 0.31770212049497765], [6, 0.38821710128044673], [7, 0.3193707099542446]]
# spectral_response = [ [1, 0.01], [2, 0.02], [3, 0.03], [4, 0.04], [5, 0.05], [6, 0.06], [7, 0.07], [8, 0.08], [9, 0.09], [10, 0.1], [11, 0.11], [12, 0.12], [13, 0.13], [14, 0.14], [15, 0.15], [16, 0.16], [17, 0.17], [18, 0.18], [19, 0.19], [20, 0.2], [21, 0.21], [22, 0.22], [23, 0.23], [24, 0.24], [25, 0.25], [26, 0.26], [27, 0.27], [28, 0.28], [29, 0.29], [30, 0.3], [31, 0.31], [32, 0.32], [33, 0.33], [34, 0.34], [35, 0.35], [36, 0.36], [37, 0.37], [38, 0.38], [39, 0.39], [40, 0.4], [41, 0.41], [42, 0.42], [43, 0.43], [44, 0.44], [45, 0.45], [46, 0.46], [47, 0.47], [48, 0.48], [49, 0.49], [50, 0.5], [51, 0.51], [52, 0.52], [53, 0.53], [54, 0.54], [55, 0.55], [56, 0.56], [57, 0.57], [58, 0.58], [59, 0.59], [60, 0.6], [61, 0.61], [62, 0.62], [63, 0.63], [64, 0.64], [65, 0.65], [66, 0.66], [67, 0.67], [68, 0.68], [69, 0.69], [70, 0.7], [71, 0.71], [72, 0.72], [73, 0.73], [74, 0.74], [75, 0.75], [76, 0.76], [77, 0.77], [78, 0.78], [79, 0.79], [80, 0.8], [81, 0.81], [82, 0.82], [83, 0.83], [84, 0.84], [85, 0.85], [86, 0.86], [87, 0.87], [88, 0.88], [89, 0.89], [90, 0.9], [91, 0.91], [92, 0.92], [93, 0.93], [94, 0.94], [95, 0.95], [96, 0.96], [97, 0.97], [98, 0.98], [99, 0.99], [100, 1], [101, 1.01], [102, 1.02], [103, 1.03], [104, 1.04], [105, 1.05], [106, 1.06], [107, 1.07], [108, 1.08], [109, 1.09], [110, 1.1], [111, 1.11], [112, 1.12], [113, 1.13], [114, 1.14], [115, 1.15], [116, 1.16], [117, 1.17], [118, 1.18], [119, 1.19], [120, 1.2], [121, 1.21], [122, 1.22], [123, 1.23], [124, 1.24], [125, 1.25], [126, 1.26], [127, 1.27], [128, 1.28], [129, 1.29], [130, 1.3], [131, 1.31], [132, 1.32], [133, 1.33], [134, 1.34], [135, 1.35], [136, 1.36], [137, 1.37], [138, 1.38], [139, 1.39], [140, 1.4], [141, 1.41] ]
cloud_DST = []
cloud_DS1 = []
cloud_DS2 = []
# Header globale
header = ['Time History ID', 'DCR', 'Sa']
# Preparo gli array per scrivere
for i, point in enumerate(spectral_response):
cloud_DS1.append([point[0], global_DCR_DS1[i][1], point[1]])
cloud_DS2.append([point[0], global_DCR_DS2[i][1], point[1]])
cloud_DST.append([point[0], global_DCR_DST[i][1], point[1]])
# Scrivo DS1 globale
with open('Output\Cloud\cloud_DS1.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DS1:
writer.writerow(point)
# Scrivo DS2 globale
with open('Output\Cloud\cloud_DS2.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DS2:
writer.writerow(point)
# Scrivo DST globale
with open('Output\Cloud\cloud_DST.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DST:
writer.writerow(point)
# CLOUD points di singole connessioni
if compute_local_fragility:
# Header locale
header = ['Time History ID', 'Sa', 'DCR1', 'DCR2', 'DRCT']
# Definisco delle funzioni per identificare le connessioni
def floor(i):
return math.floor(i/2)
def vertical(i):
if (i % 2) == 0:
return 'ext'
else:
return 'int'
# Procedo a scrivere in file
for i in range(len(demand_capacity_ratio_DS1_matrix[0])):
with open(f'Output\Connection_Fragility\Data\Cloud\Cloud_{floor(i)}_{vertical(i)}.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for j in range(len(demand_capacity_ratio_DS1_matrix)):
row = []
row.append(spectral_response[j][0])
row.append(spectral_response[j][1])
row.append(demand_capacity_ratio_DS1_matrix[j][i])
row.append(demand_capacity_ratio_DS2_matrix[j][i])
row.append(demand_capacity_ratio_DST_matrix[j][i])
writer.writerow(row)
| 45.883495 | 1,734 | 0.566018 | import csv
import math
from ModelOptions import compute_local_fragility
from PostProcessing.SectionGaps import global_DCR_DS1, global_DCR_DS2, global_DCR_DST, demand_capacity_ratio_DS1_matrix, demand_capacity_ratio_DS2_matrix, demand_capacity_ratio_DST_matrix
from AnalysisDefinition.TimeHistory import spectral_response
cloud_DST = []
cloud_DS1 = []
cloud_DS2 = []
header = ['Time History ID', 'DCR', 'Sa']
for i, point in enumerate(spectral_response):
cloud_DS1.append([point[0], global_DCR_DS1[i][1], point[1]])
cloud_DS2.append([point[0], global_DCR_DS2[i][1], point[1]])
cloud_DST.append([point[0], global_DCR_DST[i][1], point[1]])
with open('Output\Cloud\cloud_DS1.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DS1:
writer.writerow(point)
with open('Output\Cloud\cloud_DS2.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DS2:
writer.writerow(point)
with open('Output\Cloud\cloud_DST.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for point in cloud_DST:
writer.writerow(point)
if compute_local_fragility:
header = ['Time History ID', 'Sa', 'DCR1', 'DCR2', 'DRCT']
def floor(i):
return math.floor(i/2)
def vertical(i):
if (i % 2) == 0:
return 'ext'
else:
return 'int'
for i in range(len(demand_capacity_ratio_DS1_matrix[0])):
with open(f'Output\Connection_Fragility\Data\Cloud\Cloud_{floor(i)}_{vertical(i)}.csv', 'w', newline = '') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
for j in range(len(demand_capacity_ratio_DS1_matrix)):
row = []
row.append(spectral_response[j][0])
row.append(spectral_response[j][1])
row.append(demand_capacity_ratio_DS1_matrix[j][i])
row.append(demand_capacity_ratio_DS2_matrix[j][i])
row.append(demand_capacity_ratio_DST_matrix[j][i])
writer.writerow(row)
| true | true |
f72cdcf0abad5566c247daf49ce163498192f41a | 727 | py | Python | stats/data.py | 1in1/Python-Baseball | 4c76d65330ff7eb88c87057be02bbddb50dd325b | [
"MIT"
] | null | null | null | stats/data.py | 1in1/Python-Baseball | 4c76d65330ff7eb88c87057be02bbddb50dd325b | [
"MIT"
] | null | null | null | stats/data.py | 1in1/Python-Baseball | 4c76d65330ff7eb88c87057be02bbddb50dd325b | [
"MIT"
] | null | null | null | import os
import glob
import pandas as pd
game_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))
game_files.sort()
game_frames = []
for game_file in game_files:
game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])
game_frames.append(game_frame)
games = pd.concat(game_frames)
games.loc[games['multi5'] == '??', ['multi5']] = ''
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
identifiers.columns = ['game_id', 'year']
games = pd.concat([games, identifiers], axis=1, sort=False)
games = games.fillna(' ')
games.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])
print(games.head())
| 30.291667 | 108 | 0.678129 | import os
import glob
import pandas as pd
game_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))
game_files.sort()
game_frames = []
for game_file in game_files:
game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])
game_frames.append(game_frame)
games = pd.concat(game_frames)
games.loc[games['multi5'] == '??', ['multi5']] = ''
identifiers = games['multi2'].str.extract(r'(.LS(\d{4})\d{5})')
identifiers = identifiers.fillna(method='ffill')
identifiers.columns = ['game_id', 'year']
games = pd.concat([games, identifiers], axis=1, sort=False)
games = games.fillna(' ')
games.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])
print(games.head())
| true | true |
f72cdd30f4d4087803dd4184985189860ea51326 | 2,545 | py | Python | examples/other_examples/PyFstat_example_twoF_cumulative.py | RobertRosca/PyFstat | 1c9568bb3dc87c3d33aeb41b3f572e9990665372 | [
"MIT"
] | 16 | 2020-01-28T08:40:02.000Z | 2022-03-02T05:26:50.000Z | examples/other_examples/PyFstat_example_twoF_cumulative.py | RobertRosca/PyFstat | 1c9568bb3dc87c3d33aeb41b3f572e9990665372 | [
"MIT"
] | 294 | 2020-02-04T17:15:26.000Z | 2022-03-30T13:53:48.000Z | examples/other_examples/PyFstat_example_twoF_cumulative.py | RobertRosca/PyFstat | 1c9568bb3dc87c3d33aeb41b3f572e9990665372 | [
"MIT"
] | 10 | 2020-02-04T16:57:55.000Z | 2022-02-03T00:12:25.000Z | """
Cumulative coherent 2F
======================
Compute the cumulative coherent F-statistic of a signal candidate.
"""
import os
import numpy as np
import pyfstat
from pyfstat.helper_functions import get_predict_fstat_parameters_from_dict
label = "PyFstat_example_twoF_cumulative"
outdir = os.path.join("PyFstat_example_data", label)
# Properties of the GW data
gw_data = {
"sqrtSX": 1e-23,
"tstart": 1000000000,
"duration": 100 * 86400,
"detectors": "H1,L1",
"Band": 4,
"Tsft": 1800,
}
# Properties of the signal
depth = 100
phase_parameters = {
"F0": 30.0,
"F1": -1e-10,
"F2": 0,
"Alpha": np.radians(83.6292),
"Delta": np.radians(22.0144),
"tref": gw_data["tstart"],
"asini": 10,
"period": 10 * 3600 * 24,
"tp": gw_data["tstart"] + gw_data["duration"] / 2.0,
"ecc": 0,
"argp": 0,
}
amplitude_parameters = {
"h0": gw_data["sqrtSX"] / depth,
"cosi": 1,
"phi": np.pi,
"psi": np.pi / 8,
}
PFS_input = get_predict_fstat_parameters_from_dict(
{**phase_parameters, **amplitude_parameters}
)
# Let me grab tref here, since it won't really be needed in phase_parameters
tref = phase_parameters.pop("tref")
data = pyfstat.BinaryModulatedWriter(
label=label,
outdir=outdir,
tref=tref,
**gw_data,
**phase_parameters,
**amplitude_parameters,
)
data.make_data()
# The predicted twoF, given by lalapps_predictFstat can be accessed by
twoF = data.predict_fstat()
print("Predicted twoF value: {}\n".format(twoF))
# Create a search object for each of the possible SFT combinations
# (H1 only, L1 only, H1 + L1).
ifo_constraints = ["L1", "H1", None]
compute_fstat_per_ifo = [
pyfstat.ComputeFstat(
sftfilepattern=os.path.join(
data.outdir,
(f"{ifo_constraint[0]}*.sft" if ifo_constraint is not None else "*.sft"),
),
tref=data.tref,
binary=phase_parameters.get("asini", 0),
minCoverFreq=-0.5,
maxCoverFreq=-0.5,
)
for ifo_constraint in ifo_constraints
]
for ind, compute_f_stat in enumerate(compute_fstat_per_ifo):
compute_f_stat.plot_twoF_cumulative(
label=label + (f"_{ifo_constraints[ind]}" if ind < 2 else "_H1L1"),
outdir=outdir,
savefig=True,
CFS_input=phase_parameters,
PFS_input=PFS_input,
custom_ax_kwargs={
"title": "How does 2F accumulate over time?",
"label": "Cumulative 2F"
+ (f" {ifo_constraints[ind]}" if ind < 2 else " H1 + L1"),
},
)
| 25.45 | 85 | 0.633792 |
import os
import numpy as np
import pyfstat
from pyfstat.helper_functions import get_predict_fstat_parameters_from_dict
label = "PyFstat_example_twoF_cumulative"
outdir = os.path.join("PyFstat_example_data", label)
gw_data = {
"sqrtSX": 1e-23,
"tstart": 1000000000,
"duration": 100 * 86400,
"detectors": "H1,L1",
"Band": 4,
"Tsft": 1800,
}
depth = 100
phase_parameters = {
"F0": 30.0,
"F1": -1e-10,
"F2": 0,
"Alpha": np.radians(83.6292),
"Delta": np.radians(22.0144),
"tref": gw_data["tstart"],
"asini": 10,
"period": 10 * 3600 * 24,
"tp": gw_data["tstart"] + gw_data["duration"] / 2.0,
"ecc": 0,
"argp": 0,
}
amplitude_parameters = {
"h0": gw_data["sqrtSX"] / depth,
"cosi": 1,
"phi": np.pi,
"psi": np.pi / 8,
}
PFS_input = get_predict_fstat_parameters_from_dict(
{**phase_parameters, **amplitude_parameters}
)
tref = phase_parameters.pop("tref")
data = pyfstat.BinaryModulatedWriter(
label=label,
outdir=outdir,
tref=tref,
**gw_data,
**phase_parameters,
**amplitude_parameters,
)
data.make_data()
# The predicted twoF, given by lalapps_predictFstat can be accessed by
twoF = data.predict_fstat()
print("Predicted twoF value: {}\n".format(twoF))
# Create a search object for each of the possible SFT combinations
# (H1 only, L1 only, H1 + L1).
ifo_constraints = ["L1", "H1", None]
compute_fstat_per_ifo = [
pyfstat.ComputeFstat(
sftfilepattern=os.path.join(
data.outdir,
(f"{ifo_constraint[0]}*.sft" if ifo_constraint is not None else "*.sft"),
),
tref=data.tref,
binary=phase_parameters.get("asini", 0),
minCoverFreq=-0.5,
maxCoverFreq=-0.5,
)
for ifo_constraint in ifo_constraints
]
for ind, compute_f_stat in enumerate(compute_fstat_per_ifo):
compute_f_stat.plot_twoF_cumulative(
label=label + (f"_{ifo_constraints[ind]}" if ind < 2 else "_H1L1"),
outdir=outdir,
savefig=True,
CFS_input=phase_parameters,
PFS_input=PFS_input,
custom_ax_kwargs={
"title": "How does 2F accumulate over time?",
"label": "Cumulative 2F"
+ (f" {ifo_constraints[ind]}" if ind < 2 else " H1 + L1"),
},
)
| true | true |
f72cdd942be71ea6f27d319f22d0edf089185019 | 69,689 | py | Python | src/transformers/models/unispeech/modeling_unispeech.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | 2 | 2022-01-12T13:10:05.000Z | 2022-01-12T13:10:28.000Z | src/transformers/models/unispeech/modeling_unispeech.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | 2 | 2022-03-08T04:58:59.000Z | 2022-03-19T03:45:14.000Z | src/transformers/models/unispeech/modeling_unispeech.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch UniSpeech model."""
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_unispeech import UniSpeechConfig
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
# General docstring
_CONFIG_FOR_DOC = "UniSpeechConfig"
_PROCESSOR_FOR_DOC = "Wav2Vec2Processor"
# Base docstring
_CHECKPOINT_FOR_DOC = "patrickvonplaten/unispeech-large-1500h-cv-timit"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]
# CTC docstring
_CTC_EXPECTED_OUTPUT = "'mister quilter is the apposl of the midle classes and weare glad to welcom his gosepl'"
_CTC_EXPECTED_LOSS = 17.17
# Audio class docstring
_FEAT_EXTRACTOR_FOR_DOC = "Wav2Vec2FeatureExtractor"
_SEQ_CLASS_CHECKPOINT = "hf-internal-testing/tiny-random-unispeech"
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'" # TODO(anton) - could you quickly fine-tune a KS WavLM Model
_SEQ_CLASS_EXPECTED_LOSS = 0.66 # TODO(anton) - could you quickly fine-tune a KS WavLM Model
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/unispeech-large-1500h-cv",
"microsoft/unispeech-large-multi-lingual-1500h-cv",
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
]
@dataclass
class UniSpeechForPreTrainingOutput(ModelOutput):
    """
    Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.

    Args:
        loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
            paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss.
        projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
            Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
            projected quantized states.
        projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
            Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
            target vectors for contrastive loss.
        codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
            The perplexity of the quantizer's codevector distribution, usable as a diversity measure.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    projected_states: torch.FloatTensor = None
    projected_quantized_states: torch.FloatTensor = None
    codevector_perplexity: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
# this case can only happen if `input_length` is strictly smaller then
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
# add offset to the starting indexes so that that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechNoLayerNormConvLayer(nn.Module):
    """Feature-extractor conv layer: 1d convolution followed by an activation,
    without any normalization."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # The first layer consumes the raw single-channel waveform.
        in_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        out_dim = config.conv_dim[layer_id]
        self.in_conv_dim = in_dim
        self.out_conv_dim = out_dim

        self.conv = nn.Conv1d(
            in_dim,
            out_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        return self.activation(self.conv(hidden_states))
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechLayerNormConvLayer(nn.Module):
    """Feature-extractor conv layer: 1d convolution, LayerNorm over channels,
    then an activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # The first layer consumes the raw single-channel waveform.
        in_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        out_dim = config.conv_dim[layer_id]
        self.in_conv_dim = in_dim
        self.out_conv_dim = out_dim

        self.conv = nn.Conv1d(
            in_dim,
            out_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(out_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        # LayerNorm normalizes the last dim, so move channels there and back.
        hidden_states = self.layer_norm(hidden_states.transpose(-2, -1)).transpose(-2, -1)
        return self.activation(hidden_states)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechGroupNormConvLayer(nn.Module):
    """Feature-extractor conv layer: 1d convolution, GroupNorm (one group per
    channel), then an activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # The first layer consumes the raw single-channel waveform.
        in_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        out_dim = config.conv_dim[layer_id]
        self.in_conv_dim = in_dim
        self.out_conv_dim = out_dim

        self.conv = nn.Conv1d(
            in_dim,
            out_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
        # num_groups == num_channels: per-channel normalization over time.
        self.layer_norm = nn.GroupNorm(num_groups=out_dim, num_channels=out_dim, affine=True)

    def forward(self, hidden_states):
        hidden_states = self.layer_norm(self.conv(hidden_states))
        return self.activation(hidden_states)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->UniSpeech
class UniSpeechPositionalConvEmbedding(nn.Module):
    """Convolutional (relative) positional embedding applied over the time axis.

    A grouped 1d convolution with weight normalization produces position-dependent
    features that are added to the hidden states by the encoder.
    """

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )

        if is_deepspeed_zero3_enabled():
            import deepspeed

            # Under ZeRO stage-3 the conv weight is partitioned; it must be
            # gathered before weight_norm splits it into weight_g / weight_v,
            # and the new parameters must be registered as external so that
            # deepspeed tracks them.
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
        else:
            self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)

        # Removes the extra frame produced by symmetric padding for even kernels.
        self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # Conv1d wants (batch, channels, time); the model works in (batch, time, channels).
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->UniSpeech
class UniSpeechSamePadLayer(nn.Module):
    """Trims the trailing timestep introduced by symmetric padding when the
    positional convolution kernel size is even."""

    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        # Even kernel sizes produce one surplus frame; odd kernel sizes do not.
        self.num_pad_remove = 0 if num_conv_pos_embeddings % 2 else 1

    def forward(self, hidden_states):
        if self.num_pad_remove:
            return hidden_states[:, :, : -self.num_pad_remove]
        return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->UniSpeech
class UniSpeechFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()
        norm_mode = config.feat_extract_norm
        if norm_mode == "group":
            # Group-norm on the first conv layer only, no norm afterwards.
            layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)]
            layers.extend(
                UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1)
                for i in range(config.num_feat_extract_layers - 1)
            )
        elif norm_mode == "layer":
            # Layer-norm on every conv layer.
            layers = [
                UniSpeechLayerNormConvLayer(config, layer_id=i)
                for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(layers)
        self.gradient_checkpointing = False
        self._requires_grad = True

    def _freeze_parameters(self):
        """Freeze all conv parameters (used when fine-tuning only the encoder)."""
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        # (batch, time) -> (batch, 1, time): a single input channel.
        hidden_states = input_values[:, None]

        # gradient checkpointing needs an input that requires grad
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        checkpointing = self._requires_grad and self.gradient_checkpointing and self.training
        for conv_layer in self.conv_layers:
            if checkpointing:

                def wrap(module):
                    def inner(*inputs):
                        return module(*inputs)

                    return inner

                hidden_states = torch.utils.checkpoint.checkpoint(wrap(conv_layer), hidden_states)
            else:
                hidden_states = conv_layer(hidden_states)

        return hidden_states
class UniSpeechFeatureExtractor(UniSpeechFeatureEncoder):
    """Deprecated alias of [`UniSpeechFeatureEncoder`], kept for backward compatibility."""

    def __init__(self, config):
        super().__init__(config)
        # Fix: the warning previously said "depreciated" (a finance term) instead
        # of "deprecated".
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->UniSpeech
class UniSpeechFeatureProjection(nn.Module):
    """Normalizes the extracted conv features and projects them to the
    transformer hidden size."""

    def __init__(self, config):
        super().__init__()
        feat_dim = config.conv_dim[-1]
        self.layer_norm = nn.LayerNorm(feat_dim, eps=config.layer_norm_eps)
        self.projection = nn.Linear(feat_dim, config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # The normalized (pre-projection) features are also returned: they feed
        # the quantizer during pre-training.
        norm_hidden_states = self.layer_norm(hidden_states)
        projected = self.dropout(self.projection(norm_hidden_states))
        return projected, norm_hidden_states
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->UniSpeech
class UniSpeechAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # 1/sqrt(head_dim), applied to the queries instead of the raw scores
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed_dim) -> (bsz, num_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # fold heads into the batch dimension for a single bmm per projection
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # additive mask: large negatives zero out pad positions after softmax
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned aross GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeech
class UniSpeechFeedForward(nn.Module):
    """Position-wise feed-forward block: dense -> activation -> dropout -> dense -> dropout."""

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)

        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be an activation name or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        intermediate = self.intermediate_act_fn(self.intermediate_dense(hidden_states))
        intermediate = self.intermediate_dropout(intermediate)
        return self.output_dropout(self.output_dense(intermediate))
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeech
class UniSpeechEncoderLayer(nn.Module):
    """Transformer encoder layer in the post-LayerNorm arrangement."""

    def __init__(self, config):
        super().__init__()
        self.attention = UniSpeechAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        attn_out, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        # residual connection around attention, then normalize
        hidden_states = self.layer_norm(residual + self.dropout(attn_out))
        # residual connection around the feed-forward block, then normalize
        hidden_states = self.final_layer_norm(hidden_states + self.feed_forward(hidden_states))

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderLayerStableLayerNorm(nn.Module):
    """Transformer encoder layer in the pre-LayerNorm (stable) arrangement."""

    def __init__(self, config):
        super().__init__()
        self.attention = UniSpeechAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        # pre-LN: normalize before attention
        attn_out, attn_weights, _ = self.attention(
            self.layer_norm(hidden_states), attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = residual + self.dropout(attn_out)
        # pre-LN feed-forward with its own residual connection
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeech
class UniSpeechEncoder(nn.Module):
    """Stack of post-LayerNorm encoder layers with a convolutional positional
    embedding added to the inputs, LayerDrop, and optional gradient checkpointing."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            # NOTE(review): the `~` inversion requires a boolean mask — confirm callers pass one
            hidden_states[~attention_mask] = 0.0

            # extend attention_mask: convert 1/0 mask into an additive 4d mask
            # (0 where attended, -10000 on padding) broadcast over heads/queries
            attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                # keep the attentions tuple aligned even when the layer is dropped
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderStableLayerNorm(nn.Module):
    """Stack of pre-LayerNorm encoder layers; the final LayerNorm is applied
    once after the whole stack instead of inside each layer's output."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens are not attended to
            # NOTE(review): the `~` inversion requires a boolean mask — confirm callers pass one
            hidden_states[~attention_mask] = 0

            # extend attention_mask: convert 1/0 mask into an additive 4d mask
            # (0 where attended, -10000 on padding) broadcast over heads/queries
            attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                # keep the attentions tuple aligned even when the layer is dropped
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        # final normalization after the whole pre-LN stack
        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class UniSpeechGumbelVectorQuantizer(nn.Module):
    """
    Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
    GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
    """

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group

        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`"
                f" {self.num_groups} for concatenation"
            )

        # Codebook: a flat table of num_groups * num_vars codewords, each of
        # dimension codevector_dim / num_groups (groups are concatenated later).
        codeword_dim = config.codevector_dim // self.num_groups
        self.codevectors = nn.Parameter(torch.FloatTensor(1, self.num_groups * self.num_vars, codeword_dim))
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)

        # gumbel temperature; can be decayed for training
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs):
        # exp of the entropy of the marginal code distribution, summed over groups
        marginal = probs.mean(dim=0)
        return torch.exp(-torch.sum(marginal * torch.log(marginal + 1e-7), dim=-1)).sum()

    def forward(self, hidden_states):
        batch_size, sequence_length, _ = hidden_states.shape

        # project features to per-group logits: (batch * seq * groups, num_vars)
        logits = self.weight_proj(hidden_states)
        logits = logits.view(batch_size * sequence_length * self.num_groups, -1)

        if self.training:
            # differentiable straight-through sampling via gumbel softmax
            codevector_probs = nn.functional.gumbel_softmax(
                logits.float(), tau=self.temperature, hard=True
            ).type_as(logits)

            # perplexity is measured on the soft distribution
            soft_dist = torch.softmax(
                logits.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(soft_dist)
        else:
            # non-differentiable hard selection: one-hot of the argmax code
            best_idx = logits.argmax(dim=-1)
            codevector_probs = logits.new_zeros(*logits.shape).scatter_(-1, best_idx.view(-1, 1), 1.0)
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
            perplexity = self._compute_perplexity(codevector_probs)

        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # weight every codeword by its (one-hot) probability, then sum each group
        weighted = codevector_probs.unsqueeze(-1) * self.codevectors
        weighted = weighted.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        codevectors = weighted.sum(-2).view(batch_size, sequence_length, -1)

        return codevectors, perplexity
class UniSpeechPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = UniSpeechConfig
    base_model_prefix = "unispeech"
    main_input_name = "input_values"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        # gumbel softmax requires special init
        if isinstance(module, UniSpeechGumbelVectorQuantizer):
            module.weight_proj.weight.data.normal_(mean=0.0, std=1)
            module.weight_proj.bias.data.zero_()
            nn.init.uniform_(module.codevectors)
        elif isinstance(module, UniSpeechPositionalConvEmbedding):
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, UniSpeechFeatureProjection):
            # uniform in [-k, k] with k = 1/sqrt(fan_in)
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)

            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)

            if module.bias is not None:
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch_int_div(input_length - kernel_size, stride) + 1

        # fold every conv layer's length reduction in sequence
        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        """Downsample a sample-level attention mask to the conv feature frame rate."""
        # Effectively attention_mask.sum(-1), but not inplace to be able to run
        # on inference mode.
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations makes sure that all values before the output lengths idxs are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask

    def _set_gradient_checkpointing(self, module, value=False):
        # only modules that actually implement checkpointing carry the flag
        if isinstance(module, (UniSpeechEncoder, UniSpeechEncoderStableLayerNorm, UniSpeechFeatureEncoder)):
            module.gradient_checkpointing = value
UNISPEECH_START_DOCSTRING = r"""
UniSpeech was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled
Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,
Michael Zeng, Xuedong Huang.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving etc.).
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`UniSpeechConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UNISPEECH_INPUTS_DOCSTRING = r"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
soundfile*). To prepare the array into *input_values*, the [`UniSpeechProcessor`] should be used for
padding and conversion into a tensor of type *torch.FloatTensor*. See [`UniSpeechProcessor.__call__`] for
details.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
<Tip warning={true}>
`attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
**not** be passed to avoid degraded performance when doing batched inference. For such models
`input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
models also yield slightly different results depending on whether `input_values` is padded or not.
</Tip>
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.",
    UNISPEECH_START_DOCSTRING,
)
class UniSpeechModel(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = UniSpeechFeatureEncoder(config)
        self.feature_projection = UniSpeechFeatureProjection(config)
        # learned vector used to replace masked frames during SpecAugment; only
        # needed when some masking probability is configured
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
        if config.do_stable_layer_norm:
            self.encoder = UniSpeechEncoderStableLayerNorm(config)
        else:
            self.encoder = UniSpeechEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()
    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).
        """
        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states
        # generate indices & apply SpecAugment along time axis
        batch_size, sequence_length, hidden_size = hidden_states.size()
        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            # broadcast the per-feature mask over all time steps
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0
        return hidden_states
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_PROCESSOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Wav2Vec2BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        extract_features = self.feature_extractor(input_values)
        # (batch, channels, frames) -> (batch, frames, channels)
        extract_features = extract_features.transpose(1, 2)
        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
        hidden_states, extract_features = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(
            hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
        )
        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = encoder_outputs[0]
        if not return_dict:
            return (hidden_states, extract_features) + encoder_outputs[1:]
        return Wav2Vec2BaseModelOutput(
            last_hidden_state=hidden_states,
            extract_features=extract_features,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """UniSpeech Model with a vector-quantization module and ctc loss for pre-training.""", UNISPEECH_START_DOCSTRING
)
class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
        self.quantizer = UniSpeechGumbelVectorQuantizer(config)
        # two projections map the quantized codevectors into the transformer's hidden space
        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
        self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)
        self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)
        self.dropout = nn.Dropout(config.final_dropout)
        # Initialize weights and apply final processing
        self.post_init()
    def set_gumbel_temperature(self, temperature: int):
        """
        Set the Gumbel softmax temperature to a given value. Only necessary for training
        """
        self.quantizer.temperature = temperature
    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()
    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()
    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 1,
    ):
        """
        Compute logits for contrastive loss based using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
        """
        target_features = torch.cat([target_features, negative_features], dim=0)
        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
        logits = logits.type_as(target_features)
        # apply temperature
        logits = logits / temperature
        return logits
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=UniSpeechForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, UniSpeechForPreTrainingOutput]:
        r"""
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.
        sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):
            Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.
            Required input for pre-training.
        Returns:
        Example:
        ```python
        >>> import torch
        >>> from transformers import Wav2Vec2FeatureExtractor, UniSpeechForPreTraining
        >>> from transformers.models.unispeech.modeling_unispeech import _compute_mask_indices
        >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        ...     "hf-internal-testing/tiny-random-unispeech-sat"
        ... )
        >>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv")
        >>> # TODO: Add full pretraining example
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        transformer_features = outputs[0]
        # quantize all (unmasked) extracted features and project to final vq dim
        extract_features = self.dropout_features(outputs[1])
        quantized_features, codevector_perplexity = self.quantizer(extract_features)
        # project quantized features twice
        quantized_features = self.project_q(quantized_features)
        quantized_features = self.project_hid(quantized_features)
        # sample a boolean (batch, time) matrix: True with probability `config.replace_prob`
        prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(
            self.config.replace_prob
        )
        prob_replace_matrix = prob_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)
        sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)
        # mix the two streams: sampled-True positions take the quantized vector,
        # all other positions keep the transformer feature
        logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (
            quantized_features.masked_fill(~sampled_replace_matrix, 0.0)
        )
        # project to ctc units
        logits = self.dropout(logits)
        logits = self.ctc_proj(logits)
        # TODO(PVP) - add negative sampling & loss computation
        loss = None
        if not return_dict:
            if loss is not None:
                return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
            return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
        return UniSpeechForPreTrainingOutput(
            loss=loss,
            projected_states=transformer_features,
            projected_quantized_states=quantized_features,
            codevector_perplexity=codevector_perplexity,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    UNISPEECH_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH
class UniSpeechForCTC(UniSpeechPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout = nn.Dropout(config.final_dropout)
        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that "
                "does not define the vocabulary size of the language model head. Please "
                "instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
                "or define `vocab_size` of your model's configuration."
            )
        # an adapter (if configured) changes the hidden size fed to the LM head
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
        # Initialize weights and apply final processing
        self.post_init()
    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()
    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_PROCESSOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CTC_EXPECTED_OUTPUT,
        expected_loss=_CTC_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            if labels.max() >= self.config.vocab_size:
                raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)
            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
            # cudnn is explicitly disabled for the CTC loss computation
            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
@add_start_docstrings(
    """
    UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    """,
    UNISPEECH_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH
class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)"
            )
        self.unispeech = UniSpeechModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            # learnable weights over all hidden layers, softmax-normalized in `forward`
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()
    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()
    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.unispeech.parameters():
            param.requires_grad = False
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_SEQ_CLASS_CHECKPOINT,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # weighted layer sum needs every layer's hidden states
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]
        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            # no padding: simple mean over the time axis
            pooled_output = hidden_states.mean(dim=1)
        else:
            # masked mean: zero out padded frames before averaging
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            hidden_states[~padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 43.967823 | 164 | 0.676606 |
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_unispeech import UniSpeechConfig
logger = logging.get_logger(__name__)
# index at which the optional hidden-states entries start in the base model's tuple output
# (positions 0 and 1 hold `last_hidden_state` and `extract_features`)
_HIDDEN_STATES_START_POSITION = 2
# General docstring
_CONFIG_FOR_DOC = "UniSpeechConfig"
_PROCESSOR_FOR_DOC = "Wav2Vec2Processor"
# Base docstring
_CHECKPOINT_FOR_DOC = "patrickvonplaten/unispeech-large-1500h-cv-timit"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]
# CTC docstring
_CTC_EXPECTED_OUTPUT = "'mister quilter is the apposl of the midle classes and weare glad to welcom his gosepl'"
_CTC_EXPECTED_LOSS = 17.17
# Audio classification docstring
_FEAT_EXTRACTOR_FOR_DOC = "Wav2Vec2FeatureExtractor"
_SEQ_CLASS_CHECKPOINT = "hf-internal-testing/tiny-random-unispeech"
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"
_SEQ_CLASS_EXPECTED_LOSS = 0.66
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/unispeech-large-1500h-cv",
    "microsoft/unispeech-large-multi-lingual-1500h-cv",
]
@dataclass
class UniSpeechForPreTrainingOutput(ModelOutput):
    """
    Output type of [`UniSpeechForPreTraining`].
    """
    # optional pre-training loss (the visible implementation leaves it as `None`)
    loss: Optional[torch.FloatTensor] = None
    # transformer output features (`transformer_features` in `UniSpeechForPreTraining.forward`)
    projected_states: torch.FloatTensor = None
    # quantized extracted features after the `project_q`/`project_hid` projections
    projected_quantized_states: torch.FloatTensor = None
    # perplexity of the codevector distribution, returned by the quantizer
    codevector_perplexity: torch.FloatTensor = None
    # per-layer hidden states (returned when `output_hidden_states=True`)
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # per-layer attention weights (returned when `output_attentions=True`)
    attentions: Optional[Tuple[torch.FloatTensor]] = None
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
num_masked_span = compute_num_masked_span(input_length)
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
if len(spec_aug_mask_idx) == 0:
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
class UniSpeechNoLayerNormConvLayer(nn.Module):
    """Feature-encoder conv sub-layer without normalization: Conv1d followed by the activation."""
    def __init__(self, config, layer_id=0):
        super().__init__()
        # the very first layer consumes the raw single-channel waveform
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        # conv then nonlinearity; no normalization in this variant
        return self.activation(self.conv(hidden_states))
class UniSpeechLayerNormConvLayer(nn.Module):
    """Feature-encoder conv sub-layer with per-channel LayerNorm between conv and activation."""
    def __init__(self, config, layer_id=0):
        super().__init__()
        # the very first layer consumes the raw single-channel waveform
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        # LayerNorm normalizes the last dim, so move channels last and back again
        hidden_states = self.layer_norm(hidden_states.transpose(-2, -1)).transpose(-2, -1)
        return self.activation(hidden_states)
class UniSpeechGroupNormConvLayer(nn.Module):
    """Feature-encoder conv sub-layer with a GroupNorm (one group per channel) between conv and activation."""
    def __init__(self, config, layer_id=0):
        super().__init__()
        # the very first layer consumes the raw single-channel waveform
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]
        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
        # num_groups == num_channels -> per-channel normalization
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
    def forward(self, hidden_states):
        return self.activation(self.layer_norm(self.conv(hidden_states)))
class UniSpeechPositionalConvEmbedding(nn.Module):
    """Positional embedding computed by a grouped, weight-normed 1D convolution over the time axis."""
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )
        if is_deepspeed_zero3_enabled():
            import deepspeed
            # under ZeRO-3 the conv weight is partitioned: gather it before
            # applying weight norm, then register the resulting weight_v/weight_g
            # as external parameters so deepspeed tracks them
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
        else:
            self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
        # removes the surplus frame produced by an even "same"-padded kernel
        self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]
    def forward(self, hidden_states):
        # Conv1d expects (batch, channels, time)
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
class UniSpeechSamePadLayer(nn.Module):
    """Trims the trailing frame produced by an even-kernel "same"-padded convolution.

    With an even kernel size and `kernel // 2` padding, a 1D convolution emits one
    extra output frame; this layer drops it so the temporal length is preserved.
    For odd kernel sizes nothing is removed.
    """
    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        # a surplus frame exists only when the kernel size is even
        self.num_pad_remove = 0 if num_conv_pos_embeddings % 2 else 1
    def forward(self, hidden_states):
        # slice the surplus frame(s) off the end of the time axis, if any
        if self.num_pad_remove:
            return hidden_states[:, :, : -self.num_pad_remove]
        return hidden_states
class UniSpeechFeatureEncoder(nn.Module):
    """Constructs the feature encoder: a stack of 1D-convolutional layers applied to the raw waveform."""
    def __init__(self, config):
        super().__init__()
        if config.feat_extract_norm == "group":
            # group norm only on the first layer; remaining layers have no normalization
            conv_layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)] + [
                UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1)
                for i in range(config.num_feat_extract_layers - 1)
            ]
        elif config.feat_extract_norm == "layer":
            conv_layers = [
                UniSpeechLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        self._requires_grad = True
    def _freeze_parameters(self):
        # disables training of the whole encoder (used by `freeze_feature_encoder` on the models)
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False
    def forward(self, input_values):
        # add a channel dimension: (batch, time) -> (batch, 1, time)
        hidden_states = input_values[:, None]
        # make sure the input itself requires grad so checkpointing can recompute through it
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True
        for conv_layer in self.conv_layers:
            if self._requires_grad and self.gradient_checkpointing and self.training:
                def create_custom_forward(module):
                    # closure capturing the current layer for torch.utils.checkpoint
                    def custom_forward(*inputs):
                        return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(conv_layer),
                    hidden_states,
                )
            else:
                hidden_states = conv_layer(hidden_states)
        return hidden_states
class UniSpeechFeatureExtractor(UniSpeechFeatureEncoder):
    """Deprecated alias of `UniSpeechFeatureEncoder`.

    Kept for backward compatibility; constructing it emits a `FutureWarning`
    pointing callers at the parent class.
    """

    def __init__(self, config):
        super().__init__(config)
        # Fix: "depreciated" -> "deprecated" in the user-facing warning text.
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )
class UniSpeechFeatureProjection(nn.Module):
    """Projects extracted conv features (`conv_dim[-1]`) into the transformer width (`hidden_size`).

    Returns both the projected features and the pre-projection layer-normed
    features; the caller forwards the latter on as `extract_features`.
    """

    def __init__(self, config):
        super().__init__()
        feature_dim = config.conv_dim[-1]
        self.layer_norm = nn.LayerNorm(feature_dim, eps=config.layer_norm_eps)
        self.projection = nn.Linear(feature_dim, config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # Normalize first and keep the normed tensor around for the second return value.
        norm_hidden_states = self.layer_norm(hidden_states)
        projected = self.dropout(self.projection(norm_hidden_states))
        return projected, norm_hidden_states
class UniSpeechAttention(nn.Module):
    """Multi-headed scaled dot-product attention with optional key/value caching."""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # Queries are pre-scaled by 1/sqrt(head_dim) so the matmul below needs no extra scaling.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel."""
        # if key_value_states are provided this layer is used as a cross-attention layer
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, _ = hidden_states.size()
        query_states = self.q_proj(hidden_states) * self.scaling
        if is_cross_attention and past_key_value is not None:
            # reuse cached cross-attention keys/values
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross-attention: keys/values are projected from the encoder states
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # cached self-attention: append this step's keys/values to the cache along time (dim 2)
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # plain self-attention, no cache
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # return the freshly computed keys/values so the caller can cache them
            past_key_value = (key_states, value_states)
        # fold the head dim into the batch dim for a single batched matmul
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # additive mask: masked positions carry large negative values before the softmax
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned aross GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
class UniSpeechFeedForward(nn.Module):
    """Position-wise feed-forward block: hidden -> intermediate -> hidden, with dropouts."""

    def __init__(self, config):
        super().__init__()
        hidden, intermediate = config.hidden_size, config.intermediate_size
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)
        self.intermediate_dense = nn.Linear(hidden, intermediate)
        # `hidden_act` may name an activation (looked up in ACT2FN) or already be a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.output_dense = nn.Linear(intermediate, hidden)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        x = self.intermediate_dense(hidden_states)
        x = self.intermediate_dropout(self.intermediate_act_fn(x))
        x = self.output_dense(x)
        return self.output_dropout(x)
class UniSpeechEncoderLayer(nn.Module):
    """Post-norm transformer encoder layer: self-attention then feed-forward, each with a residual."""

    def __init__(self, config):
        super().__init__()
        self.attention = UniSpeechAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        attn_out, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        # Residual around attention, then the post-attention LayerNorm.
        hidden_states = self.layer_norm(residual + self.dropout(attn_out))
        # Residual around the feed-forward block, then the final LayerNorm.
        hidden_states = self.final_layer_norm(hidden_states + self.feed_forward(hidden_states))
        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
class UniSpeechEncoderLayerStableLayerNorm(nn.Module):
    """Pre-norm ("stable layer norm") encoder layer: LayerNorm is applied before each sub-block."""

    def __init__(self, config):
        super().__init__()
        self.attention = UniSpeechAttention(
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = UniSpeechFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        # Normalize before attending; the residual is taken from the un-normalized input.
        attn_out, attn_weights, _ = self.attention(
            self.layer_norm(hidden_states), attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = residual + self.dropout(attn_out)
        # Feed-forward sub-block with its own pre-norm and residual.
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeech
class UniSpeechEncoder(nn.Module):
    """Transformer encoder (post-norm layers) with convolutional relative position embeddings."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if attention_mask is not None:
            # make sure padded tokens output 0
            hidden_states[~attention_mask] = 0.0
            # extend attention_mask
            # 2-D padding mask -> additive 4-D mask: 0 where attended, -10000 where masked
            attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )
        # add convolutional position embeddings, then norm + dropout before the layer stack
        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                # placeholder so the attentions tuple stays aligned with the layer index
                layer_outputs = (None, None)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderStableLayerNorm(nn.Module):
    """Transformer encoder with pre-norm layers; the final LayerNorm is applied after the stack."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if attention_mask is not None:
            # make sure padded tokens are not attended to
            hidden_states[~attention_mask] = 0
            # extend attention_mask
            # 2-D padding mask -> additive 4-D mask: 0 where attended, -10000 where masked
            attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
            )
        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        # pre-norm variant: no LayerNorm before the stack; `self.layer_norm` runs once after the loop
        hidden_states = self.dropout(hidden_states)
        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
                if self.gradient_checkpointing and self.training:

                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]
            if skip_the_layer:
                # placeholder so the attentions tuple stays aligned with the layer index
                layer_outputs = (None, None)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class UniSpeechGumbelVectorQuantizer(nn.Module):
    """Quantizes features into codevectors selected via Gumbel-softmax over `num_groups` codebooks."""

    def __init__(self, config):
        super().__init__()
        self.num_groups = config.num_codevector_groups
        self.num_vars = config.num_codevectors_per_group
        if config.codevector_dim % self.num_groups != 0:
            raise ValueError(
                f"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`"
                f" {self.num_groups} for concatenation"
            )
        # storage for the codebook variables (codewords)
        self.codevectors = nn.Parameter(
            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
        )
        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
        # Gumbel temperature; adjustable via `set_gumbel_temperature` on the wrapping model.
        self.temperature = 2

    @staticmethod
    def _compute_perplexity(probs):
        # Perplexity of the mean codevector distribution — a measure of codebook usage diversity.
        marginal_probs = probs.mean(dim=0)
        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
        return perplexity

    def forward(self, hidden_states):
        batch_size, sequence_length, hidden_size = hidden_states.shape
        # project features to codevector logits, one distribution per group
        hidden_states = self.weight_proj(hidden_states)
        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
        if self.training:
            # sample codevector probs via Gumbel-softmax in a differentiable way (hard one-hot forward)
            codevector_probs = nn.functional.gumbel_softmax(
                hidden_states.float(), tau=self.temperature, hard=True
            ).type_as(hidden_states)
            # compute perplexity on the soft distribution, not the hard samples
            codevector_soft_dist = torch.softmax(
                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
            )
            perplexity = self._compute_perplexity(codevector_soft_dist)
        else:
            # eval: non-differentiable argmax, scattered into a hard one-hot distribution
            codevector_idx = hidden_states.argmax(dim=-1)
            codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
                -1, codevector_idx.view(-1, 1), 1.0
            )
            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
            perplexity = self._compute_perplexity(codevector_probs)
        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
        # use probs to retrieve codevectors: the (hard) one-hot per group selects one codeword
        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
        # sum over codewords within each group, then concatenate the groups along the feature dim
        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
        return codevectors, perplexity
class UniSpeechPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and provide a simple interface for downloading and loading
    pretrained models.
    """

    config_class = UniSpeechConfig
    base_model_prefix = "unispeech"
    main_input_name = "input_values"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights, with module-specific schemes for the special submodules."""
        # gumbel softmax requires special init
        if isinstance(module, UniSpeechGumbelVectorQuantizer):
            module.weight_proj.weight.data.normal_(mean=0.0, std=1)
            module.weight_proj.bias.data.zero_()
            nn.init.uniform_(module.codevectors)
        elif isinstance(module, UniSpeechPositionalConvEmbedding):
            nn.init.normal_(
                module.conv.weight,
                mean=0,
                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
            )
            nn.init.constant_(module.conv.bias, 0)
        elif isinstance(module, UniSpeechFeatureProjection):
            k = math.sqrt(1 / module.projection.in_features)
            nn.init.uniform_(module.projection.weight, a=-k, b=k)
            nn.init.uniform_(module.projection.bias, a=-k, b=k)
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                # uniform bound matching PyTorch's default conv bias init
                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-k, b=k)

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """Computes the output length of the convolutional feature-extractor layers."""

        def _conv_out_length(input_length, kernel_size, stride):
            # 1-D conv output length formula (no padding/dilation), computed with integer division
            return torch_int_div(input_length - kernel_size, stride) + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        # Map the sample-level attention mask onto the (shorter) conv feature-frame axis.
        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
        batch_size = attention_mask.shape[0]
        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations make sure that all values before the output length indices are attended to:
        # mark the last valid frame, then back-fill with a reversed cumsum
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on every submodule type that supports it.
        if isinstance(module, (UniSpeechEncoder, UniSpeechEncoderStableLayerNorm, UniSpeechFeatureEncoder)):
            module.gradient_checkpointing = value
UNISPEECH_START_DOCSTRING = r"""
UniSpeech was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled
Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,
Michael Zeng, Xuedong Huang.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving etc.).
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`UniSpeechConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UNISPEECH_INPUTS_DOCSTRING = r"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
soundfile*). To prepare the array into *input_values*, the [`UniSpeechProcessor`] should be used for
padding and conversion into a tensor of type *torch.FloatTensor*. See [`UniSpeechProcessor.__call__`] for
details.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
<Tip warning={true}>
`attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
**not** be passed to avoid degraded performance when doing batched inference. For such models
`input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
models also yield slightly different results depending on whether `input_values` is padded or not.
</Tip>
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.",
    UNISPEECH_START_DOCSTRING,
)
class UniSpeechModel(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = UniSpeechFeatureEncoder(config)
        self.feature_projection = UniSpeechFeatureProjection(config)
        # learned embedding that replaces masked positions when SpecAugment-style masking is active
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
        if config.do_stable_layer_norm:
            self.encoder = UniSpeechEncoderStableLayerNorm(config)
        else:
            self.encoder = UniSpeechEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along the time axis and/or the feature axis
        ([SpecAugment](https://arxiv.org/abs/1904.08779)); mutates `hidden_states` in place.
        """
        # `config.apply_spec_augment` can turn masking off entirely
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states
        batch_size, sequence_length, hidden_size = hidden_states.size()
        if mask_time_indices is not None:
            # apply SpecAugment along time axis with the caller-provided indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            # generate indices & apply SpecAugment along time axis
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis (zeroing, broadcast over time)
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0
        return hidden_states

    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_PROCESSOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Wav2Vec2BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # raw waveform -> conv features; transpose to (batch, frames, channels)
        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)
        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
        hidden_states, extract_features = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(
            hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
        )
        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = encoder_outputs[0]
        if not return_dict:
            return (hidden_states, extract_features) + encoder_outputs[1:]
        return Wav2Vec2BaseModelOutput(
            last_hidden_state=hidden_states,
            extract_features=extract_features,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """UniSpeech Model with a vector-quantization module and ctc loss for pre-training.""", UNISPEECH_START_DOCSTRING
)
class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
    def __init__(self, config: UniSpeechConfig):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
        self.quantizer = UniSpeechGumbelVectorQuantizer(config)
        # project quantized codevectors back into the transformer width
        self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
        self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)
        self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)
        self.dropout = nn.Dropout(config.final_dropout)
        # Initialize weights and apply final processing
        self.post_init()

    def set_gumbel_temperature(self, temperature: int):
        """Set the Gumbel softmax temperature to a given value. Only necessary for training."""
        self.quantizer.temperature = temperature

    def freeze_feature_extractor(self):
        """Deprecated alias of [`~UniSpeechForPreTraining.freeze_feature_encoder`]."""
        warnings.warn(
            # Fix: restore the missing space between the two sentences of the warning message
            # (the original concatenation produced "...Transformers v5.Please use...").
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Disable gradient computation for the feature encoder so that its parameters are not updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()

    @staticmethod
    def compute_contrastive_logits(
        target_features: torch.FloatTensor,
        negative_features: torch.FloatTensor,
        predicted_features: torch.FloatTensor,
        temperature: int = 1,
    ):
        """
        Compute temperature-scaled cosine-similarity logits between the predicted features and the target features
        (with the negatives concatenated in front of the targets along dim 0).
        """
        target_features = torch.cat([target_features, negative_features], dim=0)
        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
        logits = logits.type_as(target_features)
        # apply temperature
        logits = logits / temperature
        return logits

    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=UniSpeechForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, UniSpeechForPreTrainingOutput]:
        r"""
        Returns:
            projected transformer states, projected quantized states and the codevector perplexity.
            NOTE(review): no pre-training loss is computed in this implementation — `loss` is always `None`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        transformer_features = outputs[0]
        # quantize all (unmasked) extracted features and project to final vq dim
        extract_features = self.dropout_features(outputs[1])
        quantized_features, codevector_perplexity = self.quantizer(extract_features)
        quantized_features = self.project_q(quantized_features)
        quantized_features = self.project_hid(quantized_features)
        # Randomly replace a `config.replace_prob` fraction of time steps of the transformer
        # features with their quantized counterparts before the CTC projection.
        prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(
            self.config.replace_prob
        )
        prob_replace_matrix = prob_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)
        sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)
        sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)
        logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (
            quantized_features.masked_fill(~sampled_replace_matrix, 0.0)
        )
        # project to ctc units
        logits = self.dropout(logits)
        logits = self.ctc_proj(logits)
        loss = None
        if not return_dict:
            if loss is not None:
                return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
            return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
        return UniSpeechForPreTrainingOutput(
            loss=loss,
            projected_states=transformer_features,
            projected_quantized_states=quantized_features,
            codevector_perplexity=codevector_perplexity,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    UNISPEECH_START_DOCSTRING,
)
class UniSpeechForCTC(UniSpeechPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.unispeech = UniSpeechModel(config)
        self.dropout = nn.Dropout(config.final_dropout)
        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that "
                "does not define the vocabulary size of the language model head. Please "
                "instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
                "or define `vocab_size` of your model's configuration."
            )
        # the head projects from the adapter width when an adapter is configured
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_extractor(self):
        """Deprecated alias of [`~UniSpeechForCTC.freeze_feature_encoder`]."""
        warnings.warn(
            # Fix: restore the missing space between the two sentences of the warning message
            # (the original concatenation produced "...Transformers v5.Please use...").
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Disable gradient computation for the feature encoder so that its parameters are not updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()

    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_PROCESSOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CTC_EXPECTED_OUTPUT,
        expected_loss=_CTC_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Indices are selected in
            `[-100, 0, ..., config.vocab_size - 1]`; all labels set to a negative value (e.g. `-100`) are treated as
            padding and excluded from the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            if labels.max() >= self.config.vocab_size:
                raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)
            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
@add_start_docstrings(
    """
    UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    """,
    UNISPEECH_START_DOCSTRING,
)
class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):
    # UniSpeech encoder followed by a projector and a linear classifier over a
    # time-pooled representation of the utterance.
    def __init__(self, config):
        """Build the encoder plus projector and classifier for utterance-level labels."""
        super().__init__(config)
        # Adapters change the encoder output width, which this head does not handle.
        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)"
            )
        self.unispeech = UniSpeechModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            # Learnable (softmax-normalised in forward) weights over all hidden layers.
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
        self.post_init()
    def freeze_feature_extractor(self):
        """Deprecated alias for :meth:`freeze_feature_encoder`."""
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()
    def freeze_feature_encoder(self):
        """Disable gradient computation for the convolutional feature encoder."""
        self.unispeech.feature_extractor._freeze_parameters()
    def freeze_base_model(self):
        """Freeze every encoder parameter; only the classification head keeps training."""
        for param in self.unispeech.parameters():
            param.requires_grad = False
    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_SEQ_CLASS_CHECKPOINT,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        """Encode audio, pool over time (mask-aware mean) and classify; with
        ``labels`` a cross-entropy loss is also returned."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Weighted layer-sum needs all hidden states regardless of the caller's flag.
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]
        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            # Zero out padded frames so the mean only covers real audio frames.
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            hidden_states[~padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| true | true |
f72cde285309d6d399c8446c716859a2af197049 | 294 | py | Python | product/serializers.py | RKatana/inventory-app-django | a31614237daa5a2d62e30e51b9e573968ef3f0c0 | [
"Apache-2.0"
] | null | null | null | product/serializers.py | RKatana/inventory-app-django | a31614237daa5a2d62e30e51b9e573968ef3f0c0 | [
"Apache-2.0"
] | null | null | null | product/serializers.py | RKatana/inventory-app-django | a31614237daa5a2d62e30e51b9e573968ef3f0c0 | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from product.models import Product
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__'
class ProductListSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ('store',)
| 21 | 57 | 0.785714 | from rest_framework import serializers
from product.models import Product
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__'
class ProductListSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ('store',)
| true | true |
f72cde9e2a18d5047df425b607f0e92be3a3846e | 1,685 | py | Python | example/app/odnoklassniki.py | NorthIsUp/django-social-auth | 9afedc8ea777b32611d43c1c367babe2e3b18a90 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 863 | 2015-01-01T00:42:07.000Z | 2022-03-30T02:47:18.000Z | example/app/odnoklassniki.py | JohnieLee/django-social-auth | de36265a4799c435751d9af42ddf6fe7e7a90e0a | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 101 | 2015-01-08T00:28:16.000Z | 2022-03-07T03:11:19.000Z | example/app/odnoklassniki.py | JohnieLee/django-social-auth | de36265a4799c435751d9af42ddf6fe7e7a90e0a | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 256 | 2015-01-02T16:55:36.000Z | 2022-03-04T11:10:47.000Z | # -*- coding:utf-8 -*-
from django.conf import settings
from django.contrib.auth import BACKEND_SESSION_KEY, logout
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views.generic.base import TemplateView
from social_auth.views import complete
SANDBOX_URL = 'http://api-sandbox.odnoklassniki.ru:8088/sandbox/protected/application/launch.do?appId={0:s}&userId=0'
class OdnoklassnikiInfo(TemplateView):
    """Landing view that jumps straight into the OK sandbox when an app id is configured."""
    template_name = 'odnoklassniki_info.html'
    def get(self, *args, **kwargs):
        # Without a configured application id, just render the static info page.
        if not hasattr(settings, 'ODNOKLASSNIKI_APP_ID'):
            return super(OdnoklassnikiInfo, self).get(*args, **kwargs)
        return redirect(SANDBOX_URL.format(settings.ODNOKLASSNIKI_APP_ID))
ok_app_info = OdnoklassnikiInfo.as_view()
class OdnoklassnikiApp(TemplateView):
    # Iframe application view: completes the social-auth flow when OK passes
    # an `apiconnection` parameter, otherwise renders the app template.
    template_name = 'odnoklassniki.html'
    def get(self, request, *args, **kwargs):
        """Handle an OK iframe request, re-authenticating through social_auth when needed."""
        result = None
        if request.GET.get('apiconnection', None):
            # A user logged in via a different backend must be logged out before
            # completing the Odnoklassniki app authentication.
            if request.user.is_authenticated() and 'OdnoklassnikiAppBackend' not in request.session.get(BACKEND_SESSION_KEY, ''):
                logout(request)
            result = complete(request, 'odnoklassnikiapp')
        if isinstance(result, HttpResponse):
            # complete() produced a full response (e.g. a redirect) -- pass it through.
            return result
        else:
            # Treat sessions from other backends as anonymous for this view.
            if not request.user.is_authenticated() or 'OdnoklassnikiAppBackend' not in request.session.get(BACKEND_SESSION_KEY, ''):
                request.user = AnonymousUser()
            context = self.get_context_data(params=kwargs)
            return self.render_to_response(context)
ok_app = OdnoklassnikiApp.as_view()
from django.conf import settings
from django.contrib.auth import BACKEND_SESSION_KEY, logout
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views.generic.base import TemplateView
from social_auth.views import complete
SANDBOX_URL = 'http://api-sandbox.odnoklassniki.ru:8088/sandbox/protected/application/launch.do?appId={0:s}&userId=0'
class OdnoklassnikiInfo(TemplateView):
template_name = 'odnoklassniki_info.html'
def get(self, *args, **kwargs):
if hasattr(settings, 'ODNOKLASSNIKI_APP_ID'):
return redirect(SANDBOX_URL.format(settings.ODNOKLASSNIKI_APP_ID))
else:
return super(OdnoklassnikiInfo, self).get(*args, **kwargs)
ok_app_info = OdnoklassnikiInfo.as_view()
class OdnoklassnikiApp(TemplateView):
template_name = 'odnoklassniki.html'
def get(self, request, *args, **kwargs):
result = None
if request.GET.get('apiconnection', None):
if request.user.is_authenticated() and 'OdnoklassnikiAppBackend' not in request.session.get(BACKEND_SESSION_KEY, ''):
logout(request)
result = complete(request, 'odnoklassnikiapp')
if isinstance(result, HttpResponse):
return result
else:
if not request.user.is_authenticated() or 'OdnoklassnikiAppBackend' not in request.session.get(BACKEND_SESSION_KEY, ''):
request.user = AnonymousUser()
context = self.get_context_data(params=kwargs)
return self.render_to_response(context)
ok_app = OdnoklassnikiApp.as_view() | true | true |
f72cdf8621755d98f90f575e4d4b84f0878e736f | 973 | py | Python | setup.py | tjsego/simservice | 1ca1df4c6644f22217645575719cfa72f5b9f895 | [
"MIT"
] | 1 | 2021-08-08T03:15:47.000Z | 2021-08-08T03:15:47.000Z | setup.py | tjsego/simservice | 1ca1df4c6644f22217645575719cfa72f5b9f895 | [
"MIT"
] | null | null | null | setup.py | tjsego/simservice | 1ca1df4c6644f22217645575719cfa72f5b9f895 | [
"MIT"
] | null | null | null | import os
from setuptools import setup
__version__ = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'VERSION.txt')).readline().strip()
setup(
name='simservice',
version=__version__,
description='A library for building simulation services in Python',
url='https://github.com/tjsego/simservice',
author='T.J. Sego',
author_email='tjsego@iu.edu',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
packages=['simservice'],
package_dir={'simservice': '.'},
python_requires='>=3.6',
package_data={'simservice': ['LICENSE', 'VERSION.txt']}
)
| 29.484848 | 110 | 0.626927 | import os
from setuptools import setup
__version__ = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'VERSION.txt')).readline().strip()
setup(
name='simservice',
version=__version__,
description='A library for building simulation services in Python',
url='https://github.com/tjsego/simservice',
author='T.J. Sego',
author_email='tjsego@iu.edu',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
packages=['simservice'],
package_dir={'simservice': '.'},
python_requires='>=3.6',
package_data={'simservice': ['LICENSE', 'VERSION.txt']}
)
| true | true |
f72cdfc48dda33940d0a57929d4878e17ec7d72c | 1,102 | py | Python | exercicios-com-listas/exercicio12.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | exercicios-com-listas/exercicio12.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | exercicios-com-listas/exercicio12.py | diegolinkk/exercicios-python-brasil | 3bf7bdf0e98cdd06c115eedae4fa01e0c25fdba5 | [
"MIT"
] | null | null | null | #Foram anotadas as idades e infos de 30 alunos. Faça um Programa que determine quantos alunos com mais de 13 anos
# possuem altura inferior à média de altura desses alunos.
from random import random,randint
alunos = []
for _ in range(30):
altura = 0 #zera porque senão altura não entra no laço novamente
idade = randint(1,90)
#não permite infos abaixo dos 1.40
while altura < 1.40:
altura = random() * 2
aluno = []
aluno.append(round(altura,2))
aluno.append(idade)
alunos.append(aluno)
media_alturas = 0
for altura,idade in alunos:
media_alturas += altura
media_alturas = round((media_alturas / len(alunos)),2)
print(media_alturas)
alunos_13_abaixo_da_media = 0
for altura,idade in alunos:
if idade <= 13 and altura < media_alturas:
alunos_13_abaixo_da_media +=1
print(f"Altura: {altura} - idade {idade} *") #sinaliza qual aluno da lista está na condição com *
else:
print(f"Altura: {altura} - idade {idade}")
print(f"Alunos com 13 ou menos e altura abaixo da média: {alunos_13_abaixo_da_media}") | 25.627907 | 114 | 0.686025 |
from random import random,randint
alunos = []
for _ in range(30):
altura = 0
idade = randint(1,90)
while altura < 1.40:
altura = random() * 2
aluno = []
aluno.append(round(altura,2))
aluno.append(idade)
alunos.append(aluno)
media_alturas = 0
for altura,idade in alunos:
media_alturas += altura
media_alturas = round((media_alturas / len(alunos)),2)
print(media_alturas)
alunos_13_abaixo_da_media = 0
for altura,idade in alunos:
if idade <= 13 and altura < media_alturas:
alunos_13_abaixo_da_media +=1
print(f"Altura: {altura} - idade {idade} *")
else:
print(f"Altura: {altura} - idade {idade}")
print(f"Alunos com 13 ou menos e altura abaixo da média: {alunos_13_abaixo_da_media}") | true | true |
f72ce0eb629e9199e4a7dbe67896f97abd42dbec | 4,636 | py | Python | python/src/di_bq.py | cedadev/ceda-di | 5d7e21f28ead02d226c19f2831bc261897300b0f | [
"BSD-3-Clause-Clear"
] | 5 | 2015-04-17T08:52:34.000Z | 2020-07-02T13:32:41.000Z | python/src/di_bq.py | cedadev/ceda-di | 5d7e21f28ead02d226c19f2831bc261897300b0f | [
"BSD-3-Clause-Clear"
] | 14 | 2015-01-07T10:30:34.000Z | 2020-08-13T11:04:00.000Z | python/src/di_bq.py | cedadev/ceda-di | 5d7e21f28ead02d226c19f2831bc261897300b0f | [
"BSD-3-Clause-Clear"
] | 2 | 2016-01-27T11:31:34.000Z | 2017-05-18T13:37:18.000Z | #!/usr/bin/env python
"""
`di_bq.py` is a wrapper around the standard ceda-di tools to help parallelise
the processing of files using a batch queue.
This tool has two main functions:
* Generate lists of files to be processed in individual jobs by the queue
* Dispatch archive processing jobs to the batch queue
Usage:
di_bq.py (--help | --version)
di_bq.py gen-list <input-dir> <file-list-output-dir> [--num=<num>]
di_bq.py submit-jobs <dir-containing-file-lists> [--delete-after]
di_bq.py process <individual-file-list> [--delete-after]
Options:
--help Show this screen.
--version Show version.
--num=<num> Number of paths to store in each file [default: 5000].
--delete-after Delete input files after job submission.
"""
import json
import os
from docopt import docopt
from ceda_di import __version__ # Grab version from package __init__.py
from ceda_di.extract import Extract
import ceda_di.util.cmd as cmd
def dump_to_json(output_directory, seq, file_list):
    """
    Serialise "file_list" to a numbered JSON file inside "output_directory".

    :param str output_directory: The directory to write all of the files into.
    :param int seq: The sequence number of the file.
    :param list file_list: The list of files to serialise to JSON.
    """
    destination = os.path.join(output_directory, "{seq}.json".format(seq=seq))
    with open(destination, "w") as handle:
        json.dump(file_list, handle)
def construct_bsub_command(path, params=None):
    """
    Build the "bsub" shell command that re-invokes this script in "process"
    mode on the given file list.

    Fixes: the mutable default argument `params={}` is replaced with None
    (shared-state anti-pattern), and the option string is assembled with a
    single join instead of repeated concatenation.

    :param str path: Path to the JSON file list to process.
    :param dict params: Optional bsub options (keys from `bsub_param` below).
    :return str: The complete shell command.
    """
    # Mapping of "bsub" command parameters to what they mean
    bsub_param = {
        "stdout": "-o",
        "stderr": "-e",
        "num-cores": "-n",
        "queue": "-q",
        "walltime": "-W",
        "jobname": "-J"
    }
    if params is None:
        params = {}
    parts = ["bsub"]
    for key, value in params.items():
        # Silently ignore keys that are not recognised bsub options.
        if key in bsub_param:
            parts.append("{option} {value}".format(option=bsub_param[key], value=value))
    command = " ".join(parts)
    command += "<<<"
    # Job script: activate the virtualenv one level up, then re-run this
    # module in "process" mode on the file list.
    srcdir = os.getcwd()
    cedadir = "/".join(srcdir.split("/")[:-1])  # Get dir one level up
    command += (
        "\'" +
        "cd {cedadir}\n".format(cedadir=cedadir) +
        "source bin/activate\n" +
        "cd {srcdir}\n".format(srcdir=srcdir) +
        "python {script} process {path}".format(script=__file__, path=path) +
        "\'"
    )
    return command
def bsub(path, config):
    """
    Submit a processing job for ``path`` to the LSF batch queue.
    """
    out = config["output-path"]
    options = {
        "stdout": os.path.join(out, "%J.o"),
        "stderr": os.path.join(out, "%J.e"),
        # One extra core for the main coordinating thread.
        "num-cores": int(config["num-cores"]) + 1,
        "queue": config["batch-queue"],
        "jobname": "ceda-di-{index}".format(index=config["es-index"]),
    }
    os.system(construct_bsub_command(path, options))
def main():
    """Entry point: parse CLI args and dispatch to list generation,
    job submission, or file-list processing."""
    # Get arguments from command line
    args = cmd.sanitise_args(docopt(__doc__, version=__version__))
    if 'config' not in args or not args["config"]:
        # Fall back to the default config shipped alongside the package.
        direc = os.path.dirname(__file__)
        conf_path = os.path.join(direc, "../config/ceda_di.json")
        args["config"] = conf_path
    config = cmd.get_settings(args["config"], args)
    if args["gen-list"]:
        # Set up variables from command-line parameters
        path = args["input-dir"]
        output_directory = args["file-list-output-dir"]
        max_files = int(args["num"])
        seq = 0
        # Begin sweeping for files
        flist = []
        for root, dirs, files in os.walk(path, followlinks=True):
            for f in files:
                fp = os.path.join(root, f)
                flist.append(fp)
                # Dump file paths to JSON document
                if len(flist) >= max_files:
                    dump_to_json(output_directory, seq, flist)
                    seq += 1  # Increment file sequence number
                    flist = []
        # Dump anything left over to JSON
        dump_to_json(output_directory, seq, flist)
    elif args["submit-jobs"]:
        # One batch job per previously generated file list.
        input_directory = args["dir-containing-file-lists"]
        for root, dirs, files in os.walk(input_directory):
            for f in files:
                fp = os.path.join(root, f)
                # Submit job to batch queue
                bsub(fp, config)
    elif args["process"]:
        # Worker mode: extract metadata from every file in the JSON list.
        file_list = args["individual-file-list"]
        with open(file_list, "r") as f:
            files = json.load(f)
        extract = Extract(config, files)
        extract.run()
if __name__ == "__main__":
    main()
| 30.906667 | 82 | 0.596204 |
import json
import os
from docopt import docopt
from ceda_di import __version__
from ceda_di.extract import Extract
import ceda_di.util.cmd as cmd
def dump_to_json(output_directory, seq, file_list):
out_name = "{seq}.json".format(seq=seq)
out_path = os.path.join(output_directory, out_name)
with open(out_path, "w") as out_f:
json.dump(file_list, out_f)
def construct_bsub_command(path, params={}):
bsub_param = {
"stdout": "-o",
"stderr": "-e",
"num-cores": "-n",
"queue": "-q",
"walltime": "-W",
"jobname": "-J"
}
command = "bsub"
for k, v in params.items():
if k in bsub_param:
opt = " {option} {value}".format(option=bsub_param[k], value=v)
command += opt
command += "<<<"
srcdir = os.getcwd()
cedadir = "/".join(srcdir.split("/")[:-1])
command += (
"\'" +
"cd {cedadir}\n".format(cedadir=cedadir) +
"source bin/activate\n" +
"cd {srcdir}\n".format(srcdir=srcdir) +
"python {script} process {path}".format(script=__file__, path=path) +
"\'"
)
return command
def bsub(path, config):
out = config["output-path"]
defaults = {
"stdout": os.path.join(out, "%J.o"),
"stderr": os.path.join(out, "%J.e"),
"num-cores": int(config["num-cores"]) + 1,
"queue": config["batch-queue"],
"jobname": "ceda-di-{index}".format(index=config["es-index"])
}
bsub_script = construct_bsub_command(path, defaults)
os.system(bsub_script)
def main():
args = cmd.sanitise_args(docopt(__doc__, version=__version__))
if 'config' not in args or not args["config"]:
direc = os.path.dirname(__file__)
conf_path = os.path.join(direc, "../config/ceda_di.json")
args["config"] = conf_path
config = cmd.get_settings(args["config"], args)
if args["gen-list"]:
path = args["input-dir"]
output_directory = args["file-list-output-dir"]
max_files = int(args["num"])
seq = 0
flist = []
for root, dirs, files in os.walk(path, followlinks=True):
for f in files:
fp = os.path.join(root, f)
flist.append(fp)
if len(flist) >= max_files:
dump_to_json(output_directory, seq, flist)
seq += 1
flist = []
dump_to_json(output_directory, seq, flist)
elif args["submit-jobs"]:
input_directory = args["dir-containing-file-lists"]
for root, dirs, files in os.walk(input_directory):
for f in files:
fp = os.path.join(root, f)
bsub(fp, config)
elif args["process"]:
file_list = args["individual-file-list"]
with open(file_list, "r") as f:
files = json.load(f)
extract = Extract(config, files)
extract.run()
if __name__ == "__main__":
main()
| true | true |
f72ce0efe3d0f872e2ed83a0a3cbfa7d5c6e9f5e | 9,361 | py | Python | layers/box_utils.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | 1 | 2019-04-03T16:48:43.000Z | 2019-04-03T16:48:43.000Z | layers/box_utils.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | null | null | null | layers/box_utils.py | sashuIya/ssd.pytorch | fe7d8722414fef4cce32f67422c896ef0c45d6bc | [
"MIT"
] | null | null | null | import torch
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin
boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax
def center_size(boxes):
    """ Convert point-form boxes to (cx, cy, w, h)
    representation for comparison to center-size form ground truth data.
    Args:
        boxes: (tensor) point_form boxes, Shape: [num_boxes, 4].
    Return:
        boxes: (tensor) Converted cx, cy, w, h form of boxes.
    """
    # Bug fix: the original passed three positional arguments to torch.cat
    # (tensor, tensor, int) instead of a tuple of tensors plus the dim,
    # which raises a TypeError at runtime.
    return torch.cat(((boxes[:, 2:] + boxes[:, :2]) / 2,  # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)    # w, h
def intersect(box_a, box_b):
    """ Compute the pairwise intersection area between two sets of boxes.
    Broadcasting [A,1,2] against [1,B,2] yields the [A,B,2] corner grids
    without materialising expanded tensors.
    Args:
        box_a: (tensor) bounding boxes, Shape: [A,4].
        box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
        (tensor) intersection area, Shape: [A,B].
    """
    lower = torch.max(box_a[:, None, :2], box_b[None, :, :2])
    upper = torch.min(box_a[:, None, 2:], box_b[None, :, 2:])
    wh = torch.clamp(upper - lower, min=0)
    return wh[..., 0] * wh[..., 1]
def jaccard(box_a, box_b):
    """Compute the jaccard overlap (IoU) of two sets of point-form boxes:
    A ∩ B / (area(A) + area(B) - A ∩ B).
    Args:
        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
    Return:
        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
    """
    inter = intersect(box_a, box_b)
    area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
    area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])
    # Broadcast [A,1] + [1,B] to the pairwise union grid.
    union = area_a[:, None] + area_b[None, :] - inter
    return inter / union
def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
    """Match each prior box with the ground truth box of the highest jaccard
    overlap, encode the bounding boxes, then return the matched indices
    corresponding to both confidence and location preds.
    Args:
        threshold: (float) The overlap threshold used when mathing boxes.
        truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].
        priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
        variances: (tensor) Variances corresponding to each prior coord,
            Shape: [num_priors, 4].
        labels: (tensor) All the class labels for the image, Shape: [num_obj].
        loc_t: (tensor) Tensor to be filled w/ endcoded location targets.
        conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
        idx: (int) current batch index
    Return:
        The matched indices corresponding to 1)location and 2)confidence preds.
    """
    # jaccard index
    overlaps = jaccard(
        truths,
        point_form(priors)
    )
    # (Bipartite Matching)
    # [1,num_objects] best prior for each ground truth
    best_prior_overlap, best_prior_idx = overlaps.max(1)
    # [1,num_priors] best ground truth for each prior
    best_truth_overlap, best_truth_idx = overlaps.max(0)
    best_truth_idx.squeeze_(0)
    best_truth_overlap.squeeze_(0)
    best_prior_idx.squeeze_(1)
    best_prior_overlap.squeeze_(1)
    # Fill value 2 exceeds any possible IoU (<= 1), so each ground truth's best
    # prior always survives the threshold test below.
    best_truth_overlap.index_fill_(0, best_prior_idx, 2)  # ensure best prior
    # TODO refactor: index best_prior_idx with long tensor
    # ensure every gt matches with its prior of max overlap
    for j in range(best_prior_idx.size(0)):
        best_truth_idx[best_prior_idx[j]] = j
    matches = truths[best_truth_idx]          # Shape: [num_priors,4]
    conf = labels[best_truth_idx] + 1         # Shape: [num_priors]
    conf[best_truth_overlap < threshold] = 0  # label as background
    loc = encode(matches, priors, variances)
    # Results are written in place into the batch-level target tensors.
    loc_t[idx] = loc    # [num_priors,4] encoded offsets to learn
    conf_t[idx] = conf  # [num_priors] top class label for each prior
def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """
    # Offset of the matched box centre from the prior centre, variance-scaled.
    matched_centers = (matched[:, :2] + matched[:, 2:]) / 2
    g_cxcy = (matched_centers - priors[:, :2]) / (variances[0] * priors[:, 2:])
    # Log-space ratio of matched size to prior size, variance-scaled.
    g_wh = torch.log((matched[:, 2:] - matched[:, :2]) / priors[:, 2:]) / variances[1]
    # target for smooth_l1_loss, Shape: [num_priors,4]
    return torch.cat([g_cxcy, g_wh], 1)
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions in point form, Shape: [num_priors,4]
    """
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
    mins = centers - sizes / 2
    maxs = mins + sizes
    return torch.cat((mins, maxs), 1)
def log_sum_exp(x):
    """Numerically stable log-sum-exp across dim 1, used to compute the
    unaveraged confidence loss across all examples in a batch.
    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    # Subtract the global max before exponentiating to avoid overflow.
    x_max = x.data.max()
    shifted = torch.exp(x - x_max)
    return torch.log(shifted.sum(1)) + x_max
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class predscores for the img, Shape:[num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The Maximum number of box preds to consider.
    Return:
        The indices of the kept boxes with respect to num_priors, and the
        number of valid entries at the front of that index tensor.
    """
    # Pre-allocated result; only the first `count` entries are meaningful.
    keep = scores.new(scores.size(0)).zero_().long()
    if boxes.numel() == 0:
        return keep
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    # I = I[v >= 0.01]
    idx = idx[-top_k:]  # indices of the top-k largest vals
    # Scratch tensors reused across iterations to avoid reallocation.
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()
    # keep = torch.Tensor()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        # keep.append(i)
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # store element-wise max with next highest score
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # check sizes of xx1 and xx2.. after each iteration
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w*h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas)
        union = (rem_areas - inter) + area[i]
        IoU = inter/union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
    return keep, count
| 39.167364 | 80 | 0.609443 | import torch
def point_form(boxes):
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2,
boxes[:, :2] + boxes[:, 2:]/2), 1)
def center_size(boxes):
return torch.cat((boxes[:, 2:] + boxes[:, :2])/2,
boxes[:, 2:] - boxes[:, :2], 1)
def intersect(box_a, box_b):
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def jaccard(box_a, box_b):
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)
area_b = ((box_b[:, 2]-box_b[:, 0]) *
(box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)
union = area_a + area_b - inter
return inter / union
def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
overlaps = jaccard(
truths,
point_form(priors)
)
best_prior_overlap, best_prior_idx = overlaps.max(1)
best_truth_overlap, best_truth_idx = overlaps.max(0)
best_truth_idx.squeeze_(0)
best_truth_overlap.squeeze_(0)
best_prior_idx.squeeze_(1)
best_prior_overlap.squeeze_(1)
best_truth_overlap.index_fill_(0, best_prior_idx, 2)
for j in range(best_prior_idx.size(0)):
best_truth_idx[best_prior_idx[j]] = j
matches = truths[best_truth_idx]
conf = labels[best_truth_idx] + 1
conf[best_truth_overlap < threshold] = 0
loc = encode(matches, priors, variances)
loc_t[idx] = loc
conf_t[idx] = conf
def encode(matched, priors, variances):
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, 2:])
# match wh / prior wh
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
# return target for smooth_l1_loss
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
def log_sum_exp(x):
    """Numerically stable log(sum(exp(x), dim=1)).

    Shifts by the global maximum before exponentiating so large logits do
    not overflow; the shift is added back after the log.
    """
    shift = x.data.max()
    return shift + torch.log(torch.exp(x - shift).sum(1))
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
    """Greedy non-maximum suppression.

    Keeps boxes in decreasing score order, discarding any box whose IoU
    with an already-kept box exceeds `overlap`. At most `top_k` of the
    highest-scoring candidates are considered.

    Args:
        boxes: (tensor) candidate boxes in corner form, [num_priors, 4].
        scores: (tensor) per-box confidence scores, [num_priors].
        overlap: (float) IoU threshold above which a box is suppressed.
        top_k: (int) maximum number of top-scoring boxes to examine.
    Returns:
        (keep, count): `keep` is a long tensor of retained box indices;
        only its first `count` entries are valid.
    """
    keep = scores.new(scores.size(0)).zero_().long()
    if boxes.numel() == 0:
        # Fix: previously returned bare `keep`, while the non-empty path
        # returns a (keep, count) tuple — callers unpacking two values
        # crashed on images with no detections.
        return keep, 0
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    idx = idx[-top_k:]  # indices of the top-k largest vals
    # Reusable buffers filled via index_select(out=...) each iteration.
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # Load coords of the remaining (lower-scoring) boxes.
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # Clip remaining boxes to the kept box to get the intersection extents.
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # Negative extents mean no overlap.
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w * h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter / union
        # Keep only elements with an IoU <= overlap for the next round.
        idx = idx[IoU.le(overlap)]
    return keep, count
| true | true |
f72ce180f1675a5345ae733b6da480edb5ada453 | 1,749 | py | Python | test_package/conanfile.py | Twon/units | 7f64e55d044c8a8d9a5c6d4e4f55167409910749 | [
"MIT"
] | null | null | null | test_package/conanfile.py | Twon/units | 7f64e55d044c8a8d9a5c6d4e4f55167409910749 | [
"MIT"
] | null | null | null | test_package/conanfile.py | Twon/units | 7f64e55d044c8a8d9a5c6d4e4f55167409910749 | [
"MIT"
] | null | null | null | # The MIT License (MIT)
#
# Copyright (c) 2018 Mateusz Pusz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from conans import ConanFile, tools, RunEnvironment
from conan.tools.cmake import CMakeToolchain, CMake, CMakeDeps
import os
class TestPackageConan(ConanFile):
    """Conan test-package recipe: builds a small consumer project against the
    exported package to verify it can be found, configured and linked."""
    # Settings the test package build is sensitive to.
    settings = "os", "compiler", "build_type", "arch"
    def generate(self):
        # Honour the generator chosen by CI via CONAN_CMAKE_GENERATOR; when the
        # variable is unset, CMakeToolchain falls back to its default generator.
        tc = CMakeToolchain(self, generator=os.getenv("CONAN_CMAKE_GENERATOR"))
        tc.generate()
        # Emit CMake config files for the package's dependencies.
        deps = CMakeDeps(self)
        deps.generate()
    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
    def test(self):
        # The produced binary can only run on the build host, so skip
        # execution when cross-building.
        if not tools.cross_building(self.settings):
            self.run("test_package", run_environment=True)
| 39.75 | 80 | 0.732419 |
from conans import ConanFile, tools, RunEnvironment
from conan.tools.cmake import CMakeToolchain, CMake, CMakeDeps
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
def generate(self):
tc = CMakeToolchain(self, generator=os.getenv("CONAN_CMAKE_GENERATOR"))
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
self.run("test_package", run_environment=True)
| true | true |
f72ce1a65a7b1d2ab102ec79f285ac7ea3eedaeb | 49,449 | py | Python | coremltools/converters/mil/frontend/torch/test/test_torch_ops.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | 1 | 2020-12-23T15:42:01.000Z | 2020-12-23T15:42:01.000Z | coremltools/converters/mil/frontend/torch/test/test_torch_ops.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | null | null | null | coremltools/converters/mil/frontend/torch/test/test_torch_ops.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import sys
from coremltools.models.utils import _python_version
from coremltools.models.utils import _macos_version
from coremltools.converters.mil import testing_reqs
from coremltools.converters.mil.testing_reqs import *
from .testing_utils import *
from coremltools import TensorType, ImageType, RangeDim
backends = testing_reqs.backends
torch = pytest.importorskip("torch")
pytestmark = pytest.mark.skipif(
sys.version_info >= (3, 8), reason="Segfault with Python 3.8+"
) # rdar://problem/65730375
class TestArgSort:
    """Conversion tests for torch.argsort across ranks, axes and sort orders."""
    @pytest.mark.parametrize(
        "rank, axis, descending, backend",
        itertools.product(
            [rank for rank in range(1, 6)],
            [-1, 0],
            [True, False],
            backends
        )
    )
    def test_argsort(self, rank, axis, descending, backend):
        # Random shape of the requested rank; dims kept small (1-3) for speed.
        shape = tuple(np.random.randint(low=1, high=4, size=rank))
        model = ModuleWrapper(
            function=torch.argsort, kwargs={"dim": axis, "descending": descending}
        )
        run_compare_torch(shape, model, backend=backend)
class TestBatchNorm:
    """Conversion tests for BatchNorm2d and BatchNorm1d layers."""
    @pytest.mark.parametrize(
        "num_features, eps, backend",
        itertools.product([5, 3, 2, 1], [0.1, 1e-05, 1e-9], backends),
    )
    def test_batchnorm(self, num_features, eps, backend):
        model = nn.BatchNorm2d(num_features, eps)
        run_compare_torch((6, num_features, 5, 5), model, backend=backend)
    @pytest.mark.parametrize("backend", backends)
    def test_batchnorm_1d(self, backend):
        # Conv1d followed by BatchNorm1d, mirroring a CRNN building block.
        class CRNNBase(nn.Module):
            def __init__(self, ch_in, ch_out, kernel_size=3, use_bn=True):
                super(CRNNBase, self).__init__()
                self.conv = nn.Conv1d(ch_in, ch_out, kernel_size=kernel_size)
                self.norm = nn.BatchNorm1d(ch_out)
            def forward(self, x):
                x = self.conv(x)
                x = self.norm(x)
                return x
        model = CRNNBase(ch_in=6, ch_out=16)
        run_compare_torch((1, 6, 15), model, backend=backend)
class TestInstanceNorm:
    """Conversion tests for nn.InstanceNorm2d."""
    @pytest.mark.parametrize(
        "num_features, eps, backend",
        itertools.product([5, 3, 2, 1], [0.1, 1e-05, 1e-09], backends),
    )
    def test_instancenorm(self, num_features, eps, backend):
        # NOTE(review): eps=1e-9 is skipped for the nn_proto backend —
        # presumably below that backend's float precision; confirm.
        if backend == "nn_proto" and eps == 1e-09:
            return
        model = nn.InstanceNorm2d(num_features, eps)
        run_compare_torch((6, num_features, 5, 5), model, backend=backend)
class TestGroupNorm:
    """Conversion tests for nn.GroupNorm with and without affine parameters."""
    @pytest.mark.parametrize(
        "group_features, eps,affine, backend",
        itertools.product([(16,32), (32,64), (1,1)], [0.1, 1e-05, 1e-09],[True, False], backends),
    )
    def test_groupnorm(self, group_features, eps, affine, backend):
        # NOTE(review): eps=1e-9 is skipped for the nn_proto backend —
        # presumably below that backend's float precision; confirm.
        if backend == "nn_proto" and eps == 1e-09:
            return
        # group_features = (num_groups, num_channels)
        model = nn.GroupNorm(group_features[0],group_features[1], eps=eps, affine=affine)
        run_compare_torch((6, group_features[1], 5, 5), model, backend=backend)
class TestLinear:
    """Conversion tests for nn.Linear (addmm), including rank-1 inputs."""
    @pytest.mark.parametrize(
        "in_features, out_features, backend",
        itertools.product([10, 25, 100], [3, 6], backends),
    )
    def test_addmm(self, in_features, out_features, backend):
        model = nn.Linear(in_features, out_features)
        run_compare_torch((1, in_features), model, backend=backend)
    @pytest.mark.parametrize(
        "in_features, out_features, backend",
        itertools.product([5], [10], backends),
    )
    def test_linear_rank1_input(self, in_features, out_features, backend):
        # Rank-1 (unbatched) input exercises a different lowering path.
        model = nn.Linear(in_features, out_features)
        run_compare_torch((in_features,), model, backend=backend)
class TestConv:
    """Conversion tests for nn.Conv2d over spatial size, channel, kernel,
    stride, padding and dilation combinations."""
    @pytest.mark.parametrize(
        "height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
        itertools.product(
            [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [1, 3], [1, 3], [1, 3], backends
        ),
    )
    def test_convolution2d(
        self,
        height,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        backend,
        groups=1,
    ):
        model = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            # Fix: `groups` was accepted as a parameter but never forwarded,
            # so any caller passing groups != 1 was silently ignored.
            groups=groups,
        )
        run_compare_torch((1, in_channels, height, width), model, backend=backend)
class TestConvTranspose:
    """Conversion tests for 1-D/2-D/3-D transposed convolutions, including
    output_padding handling."""
    @pytest.mark.parametrize(
        "width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
        itertools.product(
            [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
        ),
    )
    def test_convolution_transpose1d(
        self,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        backend,
        groups=1,
    ):
        model = nn.ConvTranspose1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups
        )
        run_compare_torch((1, in_channels, width), model, backend=backend)
    @pytest.mark.parametrize(
        "height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
        itertools.product(
            [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
        ),
    )
    def test_convolution_transpose2d(
        self,
        height,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        backend,
        groups=1,
    ):
        # NOTE(review): `groups` is accepted here but not forwarded to
        # nn.ConvTranspose2d — callers passing groups != 1 are ignored.
        model = nn.ConvTranspose2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )
        run_compare_torch((1, in_channels, height, width), model, backend=backend)
    # TODO: rdar://65588783 ([PyTorch] Define and error out on unsupported configuration for output_padding)
    # TODO: rdar://65550420 (Add Image Resizing (crop, upsample, resize_bilinear) layers to the MIL backend)
    @pytest.mark.parametrize(
        "height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, output_padding, backend",
        list(
            itertools.product(
                [10],
                [10],
                [1, 3],
                [1, 3],
                [1, 3],
                [1, 2, 3],
                [1, 3],
                [1, 2],
                [1, 2, (1, 2)],
                ["nn_proto"],
            )
        )
        + [
            pytest.param(
                5, 5, 1, 1, 3, 4, 1, 1, 2, "nn_proto", marks=pytest.mark.xfail
            ),
            pytest.param(
                5, 5, 1, 1, 3, 2, 1, 3, 2, "nn_proto", marks=pytest.mark.xfail
            ),
        ],
    )
    def test_convolution_transpose2d_output_padding(
        self,
        height,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        output_padding,
        backend,
        groups=1,
    ):
        # Output padding must be less than either stride or dilation
        # Skip testing invalid combinations
        if isinstance(output_padding, int):
            if output_padding >= stride and output_padding >= dilation:
                return
        elif isinstance(output_padding, tuple):
            for _output_padding in output_padding:
                if _output_padding >= stride and _output_padding >= dilation:
                    return
        model = nn.ConvTranspose2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            output_padding=output_padding,
        )
        run_compare_torch((1, in_channels, height, width), model, backend=backend)
    @pytest.mark.parametrize(
        "depth, height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
        itertools.product(
            [3, 4], [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
        ),
    )
    @pytest.mark.skip(reason="rdar://65198011 (Re-enable Conv3dTranspose and DynamicTile unit tests)")
    def test_convolution_transpose3d(
        self,
        depth,
        height,
        width,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        backend,
    ):
        model = nn.ConvTranspose3d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )
        run_compare_torch((1, in_channels, depth, height, width), model, backend=backend)
class TestCond:
    """Conversion tests for scripted models with data-dependent branching."""
    @pytest.mark.parametrize("backend", backends)
    def test_cond(self, backend):
        # NOTE(review): in_features/out_features appear unused here.
        in_features = 1
        out_features = 2
        class TestNet(nn.Module):
            def forward(self, x):
                if torch.squeeze(x) < 10.:
                    return x*10.
                else:
                    return x*2.
        model = TestNet().eval()
        torch_model = torch.jit.script(model)
        # Exercise both branches of the conditional.
        run_compare_torch(torch.tensor([1.]), torch_model,
            input_as_shape=False, backend=backend)
        run_compare_torch(torch.tensor([11.]), torch_model,
            input_as_shape=False, backend=backend)
class TestLoop:
    """Conversion tests for scripted models containing for and while loops."""
    @pytest.mark.parametrize("backend", backends)
    def test_for_loop(self, backend):
        class TestLayer(nn.Module):
            def __init__(self):
                super(TestLayer, self).__init__()
            def forward(self, x):
                x = 2.0 * x
                return x
        class TestNet(nn.Module):
            input_size = (64,)
            def __init__(self):
                super(TestNet, self).__init__()
                # Trace the inner layer so the scripted outer loop calls a
                # traced submodule.
                layer = TestLayer()
                self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
            def forward(self, x):
                # Fixed trip-count loop.
                for _ in range(7):
                    x = self.layer(x)
                return x
        model = TestNet().eval()
        torch_model = torch.jit.script(model)
        run_compare_torch(model.input_size, torch_model, backend=backend)
    @pytest.mark.parametrize("backend", backends)
    def test_while_loop(self, backend):
        class TestLayer(nn.Module):
            def __init__(self):
                super(TestLayer, self).__init__()
            def forward(self, x):
                x = 0.5 * x
                return x
        class TestNet(nn.Module):
            input_size = (1,)
            def __init__(self):
                super(TestNet, self).__init__()
                layer = TestLayer()
                self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
            def forward(self, x):
                # Data-dependent loop: halve until below the threshold.
                while x > 0.01:
                    x = self.layer(x)
                return x
        model = TestNet().eval()
        torch_model = torch.jit.script(model)
        run_compare_torch(model.input_size, torch_model, backend=backend)
class TestUpsample:
    """Conversion tests for nn.functional.interpolate in bilinear and nearest
    modes, driven either by output size or by scale factors."""
    @pytest.mark.parametrize(
        "output_size, align_corners, backend",
        [
            x
            for x in itertools.product(
                [(10, 10), (1, 1), (20, 20), (2, 3), (190, 170)],
                [True, False],
                backends,
            )
        ],
    )
    def test_upsample_bilinear2d_with_output_size(
        self, output_size, align_corners, backend
    ):
        input_shape = (1, 3, 10, 10)
        model = ModuleWrapper(
            nn.functional.interpolate,
            {"size": output_size, "mode": "bilinear", "align_corners": align_corners,},
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "scales_h, scales_w, align_corners, backend",
        [
            x
            for x in itertools.product(
                [2, 3, 4.5], [4, 5, 5.5], [True, False], backends
            )
        ],
    )
    def test_upsample_bilinear2d_with_scales(
        self, scales_h, scales_w, align_corners, backend
    ):
        input_shape = (1, 3, 10, 10)
        model = ModuleWrapper(
            nn.functional.interpolate,
            {
                "scale_factor": (scales_h, scales_w),
                "mode": "bilinear",
                "align_corners": align_corners,
            },
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "output_size, backend",
        [
            x
            for x in itertools.product(
                [(10, 10), (30, 20), (20, 20), (20, 30), (190, 170)], backends
            )
        ],
    )
    def test_upsample_nearest2d_with_output_size(self, output_size, backend):
        input_shape = (1, 3, 10, 10)
        model = ModuleWrapper(
            nn.functional.interpolate, {"size": output_size, "mode": "nearest"},
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "scales_h, scales_w, backend",
        [x for x in itertools.product([2, 3, 5], [4, 5, 2], backends)],
    )
    def test_upsample_nearest2d_with_scales(self, scales_h, scales_w, backend):
        # Nearest mode only supports integer scale factors here.
        input_shape = (1, 3, 10, 10)
        model = ModuleWrapper(
            nn.functional.interpolate,
            {"scale_factor": (scales_h, scales_w), "mode": "nearest",},
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestBranch:
    """Conversion test for a scripted if/else whose condition depends on a
    traced submodule's output."""
    @pytest.mark.parametrize("backend", backends)
    def test_if(self, backend):
        class TestLayer(nn.Module):
            def __init__(self):
                super(TestLayer, self).__init__()
            def forward(self, x):
                x = torch.mean(x)
                return x
        class TestNet(nn.Module):
            input_size = (64,)
            def __init__(self):
                super(TestNet, self).__init__()
                layer = TestLayer()
                self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
            def forward(self, x):
                m = self.layer(x)
                # Branch on the (scalar) mean of the input.
                if m < 0:
                    scale = -2.0
                else:
                    scale = 2.0
                x = scale * x
                return x
        model = TestNet().eval()
        torch_model = torch.jit.script(model)
        run_compare_torch(model.input_size, torch_model, backend=backend)
class TestAvgPool:
    """Conversion tests for 1-D/2-D/3-D average pooling."""
    # rdar://66066001 (PyTorch converter: enable ceil_mode=True tests for pooling ops)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
        itertools.product(
            [(1, 3, 15), (1, 1, 7), (1, 3, 10)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            [True, False],
            backends,
        ),
    )
    def test_avg_pool1d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
    ):
        # PyTorch requires padding <= kernel_size / 2; skip invalid combos.
        if padding > kernel_size / 2:
            return
        model = nn.AvgPool1d(
            kernel_size,
            stride,
            padding,
            ceil_mode=ceil_mode,
            count_include_pad=include_pad,
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
        itertools.product(
            [(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            [True, False],
            backends,
        ),
    )
    def test_avg_pool2d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
    ):
        if padding > kernel_size / 2:
            return
        model = nn.AvgPool2d(
            kernel_size,
            stride,
            padding,
            ceil_mode=ceil_mode,
            count_include_pad=include_pad,
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
        itertools.product(
            [(1, 3, 11, 3, 11), (1, 1, 7, 4, 7), (1, 3, 6, 6, 3)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            [True, False],
            backends,
        ),
    )
    def test_avg_pool3d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
    ):
        if padding > kernel_size / 2:
            return
        model = nn.AvgPool3d(
            kernel_size,
            stride,
            padding,
            ceil_mode=ceil_mode,
            count_include_pad=include_pad,
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestAdaptiveMaxPool:
    """Conversion tests for nn.AdaptiveMaxPool2d over assorted output sizes."""
    @pytest.mark.parametrize(
        "output_size, magnification, delta, depth, backend",
        itertools.product(
            [(1,1), (3,2),(3,6),(32,32)],
            [1,2,4,5,6,7],
            [0,11],
            [1,2,3],
            backends,
        ),
    )
    def test_adaptive_max_pool2d(
        self, output_size, magnification, delta, depth, backend
    ):
        # input_size = output_size * magnification + delta
        input_size = (delta + magnification * output_size[0], delta + magnification * output_size[1])
        # since coremltools reproduces PyTorch's kernel sizes and
        # offsets for adaptive pooling layers only when input_size is
        # a multiple of output_size, we expect failures otherwise
        if not (input_size[0] % output_size[0] == 0 and input_size[1] % output_size[1] == 0):
            pytest.xfail("Test should fail because input_size is not a multiple of output_size")
        n = 1
        in_shape = (n,depth) + input_size
        model = nn.AdaptiveMaxPool2d(
            output_size
        )
        run_compare_torch(in_shape, model, backend=backend)
class TestMaxPool:
    """Conversion tests for 1-D/2-D/3-D max pooling."""
    # rdar://66066001 (PyTorch converter: enable ceil_mode=True tests for pooling ops)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, backend",
        itertools.product(
            [(1, 3, 15), (1, 1, 7), (1, 3, 10)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            backends,
        ),
    )
    def test_max_pool1d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, backend
    ):
        # PyTorch requires padding <= kernel_size / 2; skip invalid combos.
        if padding > kernel_size / 2:
            return
        model = nn.MaxPool1d(
            kernel_size,
            stride,
            padding,
            dilation=1,
            return_indices=False,
            ceil_mode=ceil_mode,
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, backend",
        itertools.product(
            [(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            backends,
        ),
    )
    def test_max_pool2d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, backend
    ):
        if padding > kernel_size / 2:
            return
        model = nn.MaxPool2d(
            kernel_size,
            stride,
            padding,
            dilation=1,
            return_indices=False,
            ceil_mode=ceil_mode,
        )
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "input_shape, kernel_size, stride, padding, ceil_mode, backend",
        itertools.product(
            [(1, 3, 11, 3, 11), (1, 1, 7, 4, 7), (1, 3, 6, 6, 3)],
            [1, 2, 3],
            [1, 2],
            [0, 1],
            [False],
            backends,
        ),
    )
    def test_max_pool3d(
        self, input_shape, kernel_size, stride, padding, ceil_mode, backend
    ):
        if padding > kernel_size / 2:
            return
        model = nn.MaxPool3d(
            kernel_size,
            stride,
            padding,
            dilation=1,
            return_indices=False,
            ceil_mode=ceil_mode,
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestLSTM:
    """Conversion tests for single-layer nn.LSTM, including bidirectional
    variants, plus expected-failure configurations."""
    def _pytorch_hidden_to_coreml(self, x):
        """Reshape PyTorch's bidirectional hidden state into the layout
        produced by the converted Core ML model."""
        # Split of Direction axis
        f, b = torch.split(x, [1] * x.shape[0], dim=0)
        # Concat on Hidden Size axis
        x = torch.cat((f, b), dim=2)
        # NOTE:
        # We are omitting a squeeze because the conversion
        # function for the mil op lstm unsqueezes the num_layers
        # dimension
        return x
    @pytest.mark.parametrize(
        "input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
        itertools.product(
            [7], [5], [1], [True, False], [False], [0.3], [True, False], backends
        ),
    )
    def test_lstm(
        self,
        input_size,
        hidden_size,
        num_layers,
        bias,
        batch_first,
        dropout,
        bidirectional,
        backend,
    ):
        model = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bias=bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
        )
        SEQUENCE_LENGTH = 3
        BATCH_SIZE = 2
        num_directions = int(bidirectional) + 1
        # (seq_len, batch, input_size)
        if batch_first:
            _input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
        else:
            _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
        h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
        c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
        inputs = (_input, (h0, c0))
        expected_results = model(*inputs)
        # Need to do some output reshaping if bidirectional
        if bidirectional:
            ex_hn = self._pytorch_hidden_to_coreml(expected_results[1][0])
            ex_cn = self._pytorch_hidden_to_coreml(expected_results[1][1])
            expected_results = (expected_results[0], (ex_hn, ex_cn))
        run_compare_torch(
            inputs, model, expected_results, input_as_shape=False, backend=backend
        )
    @pytest.mark.parametrize(
        "input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
        [
            (7, 3, 2, True, True, 0.3, True, list(backends)[-1]),
            (7, 3, 2, False, False, 0.3, False, list(backends)[0]),
        ],
    )
    def test_lstm_xexception(
        self,
        input_size,
        hidden_size,
        num_layers,
        bias,
        batch_first,
        dropout,
        bidirectional,
        backend,
    ):
        # Multi-layer LSTMs are unsupported: conversion must raise.
        with pytest.raises(ValueError):
            self.test_lstm(
                input_size,
                hidden_size,
                num_layers,
                bias,
                batch_first,
                dropout,
                bidirectional,
                backend=backend,
            )
# Workaround for GitHub Issue #824
# i.e. the return h_n/c_n for a converted BLSTM are mangled.
# Therefore, just look at output 'y' (for now) which is correct.
class StripCellAndHidden(nn.Module):
    """Drops the (h_n, c_n) state from an LSTM's output, keeping only 'y'.

    Workaround for GitHub Issue #824: h_n/c_n of a converted BLSTM are
    mangled, so downstream layers should only consume the output sequence.
    """
    def __init__(self, flagReturnTuple_):
        super(StripCellAndHidden, self).__init__()
        # When True, wrap the result in a tuple (required by the test harness);
        # when False, return the bare tensor so it can feed another LSTM.
        self.flagReturnTuple = flagReturnTuple_
    def forward(self, x):
        output_sequence = x[0]
        if self.flagReturnTuple:
            return tuple(output_sequence)
        return output_sequence
# Check GitHub Issue #810, assume num_layers == 2 and bidirectional == True
# Check GitHub Issue #810, assume num_layers == 2 and bidirectional == True
class TestStackedBLSTM:
    """Conversion test for two stacked bidirectional LSTMs expressed as a
    Sequential of single-layer BLSTMs (see GitHub Issue #810)."""
    @pytest.mark.parametrize(
        "input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
        itertools.product([7], [5], [2], [True, False], [True, False], [0.3], [True], backends),
    )
    def test_lstm(
        self,
        input_size,
        hidden_size,
        num_layers,
        bias,
        batch_first,
        dropout,
        bidirectional,
        backend,
    ):
        # Two single-layer BLSTMs chained via StripCellAndHidden, which
        # forwards only the output sequence between them (Issue #824).
        model = nn.Sequential(
            nn.LSTM(
                input_size=input_size,
                hidden_size=hidden_size,
                num_layers=1,
                bias=bias,
                batch_first=batch_first,
                dropout=dropout,
                bidirectional=True),
            StripCellAndHidden(False),
            nn.LSTM(
                input_size=2*hidden_size,
                hidden_size=hidden_size,
                num_layers=1,
                bias=bias,
                batch_first=batch_first,
                dropout=dropout,
                bidirectional=True),
            StripCellAndHidden(True)
        )
        SEQUENCE_LENGTH = 3
        BATCH_SIZE = 2
        num_directions = int(bidirectional) + 1
        # (seq_len, batch, input_size)
        if batch_first:
            _input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
        else:
            _input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
        # Do not use h_0/c_0 input and do not check h_n/c_n output, GitHub Issue #824
        expected_results = model(_input)
        run_compare_torch(_input, model, expected_results, input_as_shape=False, backend=backend)
class TestConcat:
    """Conversion test for torch.cat with a single-tensor input list."""
    # This tests an edge case where the list of tensors to concatenate only
    # has one item. NN throws an error for this case, hence why we have to
    # run through the full conversion process to test it.
    @pytest.mark.parametrize("backend", backends)
    def test_cat(self, backend):
        class TestNet(nn.Module):
            def __init__(self):
                super(TestNet, self).__init__()
            def forward(self, x):
                x = torch.cat((x,), axis=1)
                return x
        model = TestNet()
        run_compare_torch((1, 3, 16, 16), model, backend=backend)
class TestReduction:
    """Conversion tests for reduction ops that return (values, indices)."""
    @pytest.mark.parametrize(
        "input_shape, dim, keepdim, backend",
        itertools.product([(2, 2), (1, 1)], [0, 1], [True, False], backends),
    )
    def test_max(self, input_shape, dim, keepdim, backend):
        class TestMax(nn.Module):
            def __init__(self):
                super(TestMax, self).__init__()
            def forward(self, x):
                return torch.max(x, dim=dim, keepdim=keepdim)
        input_data = torch.rand(input_shape)
        model = TestMax()
        # TODO: Expected results are flipped due to naming issue:
        # rdar://62681982 (Determine the output names of MLModels)
        expected_results = model(input_data)[::-1]
        run_compare_torch(
            input_data,
            model,
            expected_results=expected_results,
            input_as_shape=False,
            backend=backend,
        )
class TestLayerNorm:
    """Conversion tests for nn.LayerNorm over full-tensor normalized shapes."""
    @pytest.mark.parametrize(
        "input_shape, eps, backend",
        itertools.product([(1, 3, 15, 15), (1, 1, 1, 1)], [1e-5, 1e-9], backends),
    )
    def test_layer_norm(self, input_shape, eps, backend):
        model = nn.LayerNorm(input_shape, eps=eps)
        run_compare_torch(input_shape, model, backend=backend)
class TestPixelShuffle:
    """Conversion tests for nn.PixelShuffle (depth-to-space)."""
    @pytest.mark.parametrize(
        "batch_size, CHW, r, backend",
        itertools.product([1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4], backends),
    )
    def test_pixel_shuffle(self, batch_size, CHW, r, backend):
        C, H, W = CHW
        # Input carries C * r^2 channels so the shuffle yields C channels.
        input_shape = (batch_size, C * r * r, H, W)
        model = nn.PixelShuffle(upscale_factor=r)
        run_compare_torch(input_shape, model, backend=backend)
class TestExpand:
    """Conversion tests for Tensor.expand and Tensor.expand_as."""
    @pytest.mark.parametrize(
        "backend, shapes",
        itertools.product(
            backends,
            [[(2, 1), (2, 2)], [(3, 1), (-1, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
        ),
    )
    def test_expand(self, backend, shapes):
        # shapes = (input shape, target expand shape); -1 keeps that dim.
        input_shape, output_shape = shapes
        class TestModel(torch.nn.Module):
            def forward(self, x):
                return x.expand(*output_shape)
        model = TestModel()
        run_compare_torch(input_shape, model, backend=backend)
    @pytest.mark.parametrize(
        "backend, input_shapes",
        itertools.product(
            backends,
            [[(2, 1), (2, 2)], [(3, 1), (3, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
        ),
    )
    def test_expand_as(self, backend, input_shapes):
        class TestModel(torch.nn.Module):
            def forward(self, x, y):
                return x.expand_as(y)
        model = TestModel()
        run_compare_torch(input_shapes, model, backend=backend)
class TestExpandDims:
    """Conversion tests for torch.unsqueeze over all valid ranks and axes."""
    @pytest.mark.parametrize(
        "backend, rank_and_axis",
        itertools.product(
            backends,
            [
                (rank, axis)
                for rank in range(1, 5)
                for axis in range(-rank - 1, rank + 1)
            ],
        ),
    )
    def test_unsqueeze(self, backend, rank_and_axis):
        rank, axis = rank_and_axis
        input_shape = tuple(np.random.randint(low=2, high=10, size=rank))
        model = ModuleWrapper(function=torch.unsqueeze, kwargs={"dim": axis})
        run_compare_torch(input_shape, model, backend=backend)
class TestSqueeze:
    """Conversion tests for torch.squeeze with and without an explicit dim."""
    @pytest.mark.parametrize(
        "backend, rank_and_axis",
        itertools.product(
            backends,
            [(2, 1), (2, 0), (3, 1), (3, None), (4, None), (4, 2), (5, None), (5, -1),],
        ),
    )
    def test_squeeze(self, backend, rank_and_axis):
        rank, axis = rank_and_axis
        input_shape = list(np.random.randint(low=2, high=10, size=rank))
        # Force a size-1 dim at `axis` (or at dim 0 when squeezing all dims)
        # so there is actually something to squeeze.
        if axis is not None:
            input_shape[axis] = 1
        else:
            input_shape[0] = 1
        input_shape = tuple(input_shape)
        model = ModuleWrapper(
            function=torch.squeeze, kwargs={"dim": axis} if axis else {}
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestCumSum:
    """Conversion tests for torch.cumsum along each axis of a rank-4 input."""
    @pytest.mark.parametrize(
        "backend, axis",
        itertools.product(
            backends,
            [-1, 0, 1, 2, 3],
        ),
    )
    def test_cumsum(self, backend, axis):
        input_shape = list(np.random.randint(low=2, high=10, size=4))
        input_shape = tuple(input_shape)
        model = ModuleWrapper(
            function=torch.cumsum, kwargs={"dim": axis}
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestReshape:
    """Conversion tests for torch.reshape with static target shapes."""
    # TODO: <rdar://66239973> Add dynamic & rank preserving reshape tests for pytorch
    @pytest.mark.parametrize(
        "backend, output_shape",
        itertools.product(backends, [(3, 2), (2, -1), (2, 1, 1, 3),],),
    )
    def test_reshape(self, backend, output_shape):
        input_shape = (2, 3)
        model = ModuleWrapper(function=torch.reshape, kwargs={"shape": output_shape})
        run_compare_torch(input_shape, model, backend=backend)
class TestFlatten:
    """Conversion tests for torch.flatten with positive and negative start dims."""
    @pytest.mark.parametrize(
        "backend, start_dim",
        itertools.product(backends, [2,-2],),
    )
    def test_flatten(self, backend, start_dim):
        # Fix: the method was named test_reshape (copy-paste from TestReshape),
        # which mislabeled flatten failures in test reports.
        input_shape = (2, 3, 4, 5)
        model = ModuleWrapper(function=torch.flatten, kwargs={"start_dim": start_dim})
        run_compare_torch(input_shape, model, backend=backend)
class TestGather:
    """Conversion tests for torch.gather along each axis (currently xfailed)."""
    @pytest.mark.xfail(
        reason="Load constant not copied properly for integer valued constants. Enable after eng/PR-65551506 is merged",
        run=False,
    )
    @pytest.mark.parametrize(
        "rank_and_axis, backend",
        itertools.product([(i, j) for i in range(1, 6) for j in range(0, i)], backends),
    )
    def test_gather_along_axis(self, rank_and_axis, backend):
        rank, axis = rank_and_axis
        params_shape = np.random.randint(low=2, high=5, size=rank)
        # Indices match params everywhere except along the gather axis.
        indices_shape = np.copy(params_shape)
        indices_shape[axis] = np.random.randint(low=1, high=8)
        indices = np.random.randint(0, params_shape[axis], size=indices_shape)
        params_shape, indices_shape = tuple(params_shape), tuple(indices_shape)
        model = ModuleWrapper(
            function=torch.gather,
            kwargs={"dim": axis, "index": torch.from_numpy(indices)},
        )
        run_compare_torch([params_shape], model, backend=backend)
class TestActivation:
    """Conversion tests for activation layers.

    Each test builds a small torch module (or wraps the functional /
    in-place variant via ModuleWrapper), then round-trips it through
    ``run_compare_torch`` against every backend in ``backends``.
    Input shapes are drawn randomly for each rank 1..5 unless noted.
    """

    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_relu(self, backend, rank):
        """ReLU module plus the in-place functional variant (relu_)."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ReLU().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        # In-place op must convert the same way as the module form.
        model = ModuleWrapper(nn.functional.relu_)
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_relu6(self, backend, rank):
        """ReLU6 (ReLU clipped at 6)."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ReLU6().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, alpha", itertools.product(backends, [0.1, 0.25, 2.0]),
    )
    def test_prelu(self, backend, alpha):
        """PReLU with one learnable slope per channel; rank-4 input only."""
        input_shape = tuple(np.random.randint(low=5, high=10, size=4))
        C = input_shape[1]  # number of channels -> number of PReLU params
        model = nn.PReLU(C, alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, alpha",
        itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
    )
    def test_leaky_relu(self, backend, rank, alpha):
        """LeakyReLU module and the in-place functional variant."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.LeakyReLU(negative_slope=alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        model = ModuleWrapper(nn.functional.leaky_relu_, {'negative_slope': alpha})
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_softmax(self, backend, rank):
        """Softmax with torch's implicit (default) dim choice."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Softmax().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, range_val",
        itertools.product(
            backends, range(1, 6), [(-1.0, 1.0), (0.0, 0.1), (1.0, 3.0), (-1.0, 6.0)]
        ),
    )
    def test_hardtanh(self, backend, rank, range_val):
        """Hardtanh over several (min, max) clip windows, plus the in-place form."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Hardtanh(range_val[0], range_val[1]).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        model = ModuleWrapper(nn.functional.hardtanh_,
                       {'min_val': range_val[0], 'max_val': range_val[1]})
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, alpha",
        itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
    )
    def test_elu(self, backend, rank, alpha):
        """ELU with several alpha values."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ELU(alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    # rdar://problem/66557565
    # GELU is restricted to the nn_proto backend until the radar above is fixed.
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(['nn_proto'], range(1, 6)),
    )
    def test_gelu(self, backend, rank):
        """GELU (nn_proto backend only — see radar above)."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.GELU().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_erf(self, backend, rank):
        """torch.erf applied inside a minimal custom module."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))

        class ERFActivation(nn.Module):
            """Thin module wrapper around torch.erf."""
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return torch.erf(x)

        model = ERFActivation().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_sigmoid(self, backend, rank):
        """Sigmoid."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Sigmoid().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_sigmoid_hard(self, backend, rank):
        """Hardsigmoid (piecewise-linear sigmoid approximation)."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Hardsigmoid().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, beta, threshold", itertools.product(backends, [1, 2, 5], [5, 10, 20]),
    )
    @pytest.mark.skipif(
        _macos_version() <= (11,),
        reason="Parametric SoftPlus segfaults on macOS 10.15 and below. (rdar://problem/66555235)",
    )
    def test_softplus(self, backend, beta, threshold):
        """Softplus with explicit beta/threshold; fixed shape (see skipif)."""
        input_shape = (1, 10, 5, 15)
        model = nn.Softplus(beta, threshold).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    # rdar://problem/66557565
    # Softsign is restricted to the nn_proto backend until the radar above is fixed.
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(['nn_proto'], range(1, 6)),
    )
    def test_softsign(self, backend, rank):
        """Softsign (nn_proto backend only — see radar above)."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Softsign().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
class TestElementWiseUnary:
    """Conversion tests for unary elementwise ops.

    Ops are applied through ``ModuleWrapper`` so each test converts a
    minimal graph containing exactly one elementwise op, comparing the
    converted model's output against torch via ``run_compare_torch``.
    """

    @pytest.mark.parametrize(
        "backend, rank, op_string",
        itertools.product(
            backends,
            [4],
            [
                "abs",
                "acos",
                "asin",
                "atan",
                "ceil",
                "cos",
                "cosh",
                "exp",
                "floor",
                "round",
                "sin",
                "sinh",
                "sqrt",
                "square",
                "tan",
                "tanh",
                "sign",
            ],
        ),
    )
    def test_elementwise_no_params(self, backend, rank, op_string):
        """Ops that take no parameters beyond the input tensor."""
        if not contains_op(torch, op_string):
            # Skip (rather than silently return, which reports a pass) so
            # ops missing from the installed torch version are visible in
            # the test report.
            pytest.skip("torch has no op named '%s'" % op_string)
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        op_func = getattr(torch, op_string)
        model = ModuleWrapper(function=op_func)
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    ## TODO (rdar://66577921): Needs to move to test_elementwise_no_params after backend is added
    @pytest.mark.parametrize(
        "backend, rank",
        itertools.product(
            ['nn_proto'],
            [4],
        ),
    )
    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
    def test_square(self, backend, rank):
        """torch.square, currently nn_proto-only (see TODO above)."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = ModuleWrapper(function=torch.square)
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, clamp_range",
        itertools.product(
            backends,
            [4],
            [(0.0, 1.0), (-1.0, 0.5), (0.2, 0.7)],
        ),
    )
    def test_clamp(self, backend, rank, clamp_range):
        """torch.clamp over several (min, max) windows."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = ModuleWrapper(torch.clamp, {'min': clamp_range[0], 'max': clamp_range[1]})
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, threshold",
        itertools.product(
            ['nn_proto'],  # rdar://66597974 Renable for all backends due to missing cast
            [4],
            [(0.0, 0.0), (0.5, 0.5), (0.5, 10), (0.9, 0.0)]
        ),
    )
    def test_threshold(self, backend, rank, threshold):
        """nn.Threshold(cutoff, replacement_value)."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = torch.nn.Threshold(threshold[0], threshold[1]).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, op_string",
        itertools.product(
            backends,
            [4],
            [
                "log",
                "rsqrt",
                "reciprocal",
            ],
        ),
    )
    def test_elementwise_numerically_stable(self, backend, rank, op_string):
        """Ops that are unstable near zero; inputs drawn from (20, 100)."""
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        op_func = getattr(torch, op_string)
        model = ModuleWrapper(function=op_func)
        run_compare_torch(
            input_shape, model, backend=backend, rand_range=(20, 100)
        )
class TestMatMul:
    """Conversion test for batched matrix multiplication (torch.bmm)."""

    @pytest.mark.parametrize("backend", backends)
    def test_bmm(self, backend):
        """Multiply a (3, 4, 5) batch by a (3, 5, 6) batch."""
        lhs_shape = (3, 4, 5)
        rhs_shape = (3, 5, 6)
        wrapped = ModuleWrapper(function=torch.bmm)
        run_compare_torch([lhs_shape, rhs_shape], wrapped, backend=backend)
class TestSplit:
    """Conversion tests for torch.split and torch.split_with_sizes."""

    @pytest.mark.parametrize(
        "backend, split_size_or_sections, dim",
        itertools.product(backends, [1, 2, [1, 4]], [0, -2]),
    )
    def test_split(self, backend, split_size_or_sections, dim):
        """Split with either a uniform chunk size or an explicit section list."""
        split_kwargs = {
            "split_size_or_sections": split_size_or_sections,
            "dim": dim,
        }
        wrapped = ModuleWrapper(function=torch.split, kwargs=split_kwargs)
        run_compare_torch((5, 2), wrapped, backend=backend)

    @pytest.mark.parametrize(
        "backend, split_sizes, dim",
        itertools.product(backends, [[1, 4], [3, 2]], [-1, -2]),
    )
    def test_split_with_sizes(self, backend, split_sizes, dim):
        """split_with_sizes with explicit per-chunk sizes on negative dims."""
        wrapped = ModuleWrapper(
            function=torch.split_with_sizes,
            kwargs={"split_sizes": split_sizes, "dim": dim},
        )
        run_compare_torch((5, 5), wrapped, backend=backend)
class TestUnbind:
    """Conversion test for torch.unbind along each axis of a rank-3 input."""

    @pytest.mark.parametrize(
        "backend, dim",
        itertools.product(backends, [0, 1, 2]),
    )
    def test_unbind(self, backend, dim):
        """Unbind a (3, 3, 4) tensor along ``dim``."""
        wrapped = ModuleWrapper(function=torch.unbind, kwargs={"dim": dim})
        run_compare_torch((3, 3, 4), wrapped, backend=backend)
class TestTranspose:
    """Conversion tests for torch.transpose on ranks 2-5."""

    @pytest.mark.parametrize(
        "backend, rank, dims",
        itertools.product(backends, list(range(2, 6)),
                          [(0, 1), (-2, -1), (1, 0), (-1, -2)]),
    )
    def test(self, backend, rank, dims):
        """Swap a pair of axes (positive and negative indexing)."""
        shape = tuple(np.random.randint(low=1, high=4, size=rank))
        dim0, dim1 = dims
        wrapped = ModuleWrapper(
            function=torch.transpose, kwargs={"dim0": dim0, "dim1": dim1}
        )
        run_compare_torch(shape, wrapped, backend=backend)
class TestTo:
    """Regression test for dtype-cast handling in the converter."""

    @pytest.mark.parametrize(
        "backend", backends,
    )
    def test_cast_bug(self, backend):
        """Cast round-trip (int -> float -> int) followed by chained max reductions."""
        class TestModel(torch.nn.Module):
            def forward(self, spans, embedding):
                # Round-trip cast with a relu in the float domain.
                spans = spans.float().relu().int()
                # Collapse each input with two successive max reductions.
                spans_max, _ = torch.max(spans, dim=1, keepdim=False)
                spans_max, _ = torch.max(spans_max, dim=1, keepdim=False)
                emb_max, _ = torch.max(embedding, dim=1, keepdim=False)
                emb_max, _ = torch.max(emb_max, dim=1, keepdim=False)
                return spans_max + emb_max

        # Input shapes: [spans.shape, embedding.shape]
        run_compare_torch([(1, 21, 2), (1, 6, 384)], TestModel(), backend=backend)
class TestSlice:
    """Conversion test for slicing with a runtime (non-constant) start index."""

    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend", backends,
    )
    def test_dynamic_slice(self, backend):
        """Slice a concatenated embedding tensor from a scalar input index.

        The slice start (``context_length``) arrives as a model input, so
        the converter must emit a dynamic slice rather than folding it to
        a constant.
        """
        class DynamicSlicer(torch.nn.Module):
            """Drops the first ``context_length`` rows of a rank-3 tensor."""
            def __init__(self):
                super(DynamicSlicer, self).__init__()

            def forward(self, x, context_length):
                return x[context_length:, :, :]

        class Model(torch.nn.Module):
            """Embeds two token streams, concatenates them along the
            sequence axis, then dynamically slices off the context part."""
            def __init__(self):
                super(Model, self).__init__()
                # Embedding(num_embeddings=10, embedding_dim=10, padding_idx=0)
                self.tokens_embedding = torch.nn.Embedding(10, 10, 0)
                self.context_embedding = torch.nn.Embedding(10, 10, 0)
                self.dynamic_slicer = DynamicSlicer()

            def forward(self, tokens, context, context_length):
                tokens_embeddings = self.tokens_embedding(tokens)
                context_embeddings = self.context_embedding(context)
                # Concatenate along the sequence (first) axis: context first.
                embeddings = torch.cat((context_embeddings, tokens_embeddings), dim=0)
                embeddings = self.dynamic_slicer(embeddings, context_length)
                return embeddings

        model = Model()
        batch_size = 5
        # Integer inputs drawn from rand_range below; indices must stay
        # within the embedding table (num_embeddings=10).
        inputs = [ TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64),
                   TensorType(name="context", shape=(3, batch_size), dtype=np.int64),
                   TensorType(name="context_length", shape=(), dtype=np.int32),
                   ]
        run_compare_torch(inputs, model, rand_range=(0, 8), backend=backend, use_scripting=False)
class TestRepeat:
    """Conversion tests for Tensor.repeat with random repeat counts."""

    @pytest.mark.parametrize(
        "backend, rank",
        itertools.product(backends, list(range(1, 6))),
    )
    def test_repeat(self, backend, rank):
        """Repeat each axis of a random-shaped tensor a random number of times."""
        # Draw the shape first, then the repeat counts, to keep the RNG
        # consumption order identical across runs.
        shape = tuple(np.random.randint(low=2, high=6, size=rank))
        reps = np.random.randint(low=2, high=4, size=rank)
        wrapped = ModuleWrapper(function=lambda t: t.repeat(*reps))
        run_compare_torch(shape, wrapped, backend=backend)
class TestStd:
    """Conversion tests for torch.std (full and per-dim reductions)."""

    @pytest.mark.parametrize(
        "backend, unbiased",
        itertools.product(backends, [True, False]),
    )
    def test_std_2_inputs(self, backend, unbiased):
        """Full reduction; expected result computed directly with torch.std."""
        wrapped = ModuleWrapper(function=torch.std, kwargs={"unbiased": unbiased})
        sample = torch.randn(1, 5, 10) * 3
        expected = torch.std(sample, unbiased=unbiased).unsqueeze(0)
        run_compare_torch(sample, wrapped, expected_results=expected,
                          input_as_shape=False, backend=backend)

    @pytest.mark.parametrize(
        "backend, unbiased, dim, keepdim",
        itertools.product(backends, [True, False], [[0, 2], [1], [2]], [True, False]),
    )
    def test_std_4_inputs(self, backend, unbiased, dim, keepdim):
        """Reduction over explicit dims, with and without keepdim."""
        wrapped = ModuleWrapper(
            function=torch.std,
            kwargs={"unbiased": unbiased, "dim": dim, "keepdim": keepdim},
        )
        run_compare_torch((2, 5, 10), wrapped, backend=backend)
class TestTopk:
    """Conversion tests for torch.topk across shapes, dims, and k."""

    @pytest.mark.parametrize(
        "backend, largest, shape_dim_k",
        itertools.product(
            backends,
            [True, False],
            [
                ((4, 6, 7, 3), -1, 2),
                ((10, 3, 4), 2, 2),
                ((10, 5), -2, 3),
                ((5,), 0, 2)
            ],
        ),
    )
    def test_topk(self, backend, largest, shape_dim_k):
        """Compare both the values and the indices returned by topk."""
        input_shape, dim, k = shape_dim_k

        class TopkModel(nn.Module):
            def forward(self, x):
                return torch.topk(x, k, dim=dim, largest=largest)

        sample = torch.rand(input_shape)
        model = TopkModel()
        reference = model(sample)
        run_compare_torch(
            sample,
            model,
            expected_results=[reference.values, reference.indices],
            input_as_shape=False,
            backend=backend,
        )
import sys
from coremltools.models.utils import _python_version
from coremltools.models.utils import _macos_version
from coremltools.converters.mil import testing_reqs
from coremltools.converters.mil.testing_reqs import *
from .testing_utils import *
from coremltools import TensorType, ImageType, RangeDim
backends = testing_reqs.backends
torch = pytest.importorskip("torch")
pytestmark = pytest.mark.skipif(
sys.version_info >= (3, 8), reason="Segfault with Python 3.8+"
)
class TestArgSort:
@pytest.mark.parametrize(
"rank, axis, descending, backend",
itertools.product(
[rank for rank in range(1, 6)],
[-1, 0],
[True, False],
backends
)
)
def test_argsort(self, rank, axis, descending, backend):
shape = tuple(np.random.randint(low=1, high=4, size=rank))
model = ModuleWrapper(
function=torch.argsort, kwargs={"dim": axis, "descending": descending}
)
run_compare_torch(shape, model, backend=backend)
class TestBatchNorm:
@pytest.mark.parametrize(
"num_features, eps, backend",
itertools.product([5, 3, 2, 1], [0.1, 1e-05, 1e-9], backends),
)
def test_batchnorm(self, num_features, eps, backend):
model = nn.BatchNorm2d(num_features, eps)
run_compare_torch((6, num_features, 5, 5), model, backend=backend)
@pytest.mark.parametrize("backend", backends)
def test_batchnorm_1d(self, backend):
class CRNNBase(nn.Module):
def __init__(self, ch_in, ch_out, kernel_size=3, use_bn=True):
super(CRNNBase, self).__init__()
self.conv = nn.Conv1d(ch_in, ch_out, kernel_size=kernel_size)
self.norm = nn.BatchNorm1d(ch_out)
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
model = CRNNBase(ch_in=6, ch_out=16)
run_compare_torch((1, 6, 15), model, backend=backend)
class TestInstanceNorm:
@pytest.mark.parametrize(
"num_features, eps, backend",
itertools.product([5, 3, 2, 1], [0.1, 1e-05, 1e-09], backends),
)
def test_instancenorm(self, num_features, eps, backend):
if backend == "nn_proto" and eps == 1e-09:
return
model = nn.InstanceNorm2d(num_features, eps)
run_compare_torch((6, num_features, 5, 5), model, backend=backend)
class TestGroupNorm:
@pytest.mark.parametrize(
"group_features, eps,affine, backend",
itertools.product([(16,32), (32,64), (1,1)], [0.1, 1e-05, 1e-09],[True, False], backends),
)
def test_groupnorm(self, group_features, eps, affine, backend):
if backend == "nn_proto" and eps == 1e-09:
return
model = nn.GroupNorm(group_features[0],group_features[1], eps=eps, affine=affine)
run_compare_torch((6, group_features[1], 5, 5), model, backend=backend)
class TestLinear:
@pytest.mark.parametrize(
"in_features, out_features, backend",
itertools.product([10, 25, 100], [3, 6], backends),
)
def test_addmm(self, in_features, out_features, backend):
model = nn.Linear(in_features, out_features)
run_compare_torch((1, in_features), model, backend=backend)
@pytest.mark.parametrize(
"in_features, out_features, backend",
itertools.product([5], [10], backends),
)
def test_linear_rank1_input(self, in_features, out_features, backend):
model = nn.Linear(in_features, out_features)
run_compare_torch((in_features,), model, backend=backend)
class TestConv:
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
itertools.product(
[5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [1, 3], [1, 3], [1, 3], backends
),
)
def test_convolution2d(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
run_compare_torch((1, in_channels, height, width), model, backend=backend)
class TestConvTranspose:
@pytest.mark.parametrize(
"width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
itertools.product(
[5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
),
)
def test_convolution_transpose1d(
self,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.ConvTranspose1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups
)
run_compare_torch((1, in_channels, width), model, backend=backend)
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
itertools.product(
[5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
),
)
def test_convolution_transpose2d(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
groups=1,
):
model = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
run_compare_torch((1, in_channels, height, width), model, backend=backend)
@pytest.mark.parametrize(
"height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, output_padding, backend",
list(
itertools.product(
[10],
[10],
[1, 3],
[1, 3],
[1, 3],
[1, 2, 3],
[1, 3],
[1, 2],
[1, 2, (1, 2)],
["nn_proto"],
)
)
+ [
pytest.param(
5, 5, 1, 1, 3, 4, 1, 1, 2, "nn_proto", marks=pytest.mark.xfail
),
pytest.param(
5, 5, 1, 1, 3, 2, 1, 3, 2, "nn_proto", marks=pytest.mark.xfail
),
],
)
def test_convolution_transpose2d_output_padding(
self,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
output_padding,
backend,
groups=1,
):
if isinstance(output_padding, int):
if output_padding >= stride and output_padding >= dilation:
return
elif isinstance(output_padding, tuple):
for _output_padding in output_padding:
if _output_padding >= stride and _output_padding >= dilation:
return
model = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
output_padding=output_padding,
)
run_compare_torch((1, in_channels, height, width), model, backend=backend)
@pytest.mark.parametrize(
"depth, height, width, in_channels, out_channels, kernel_size, stride, padding, dilation, backend",
itertools.product(
[3, 4], [5, 6], [5, 7], [1, 3], [1, 3], [1, 3], [2, 3], [0, 1], [1, 3], backends
),
)
@pytest.mark.skip(reason="rdar://65198011 (Re-enable Conv3dTranspose and DynamicTile unit tests)")
def test_convolution_transpose3d(
self,
depth,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
backend,
):
model = nn.ConvTranspose3d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
)
run_compare_torch((1, in_channels, depth, height, width), model, backend=backend)
class TestCond:
@pytest.mark.parametrize("backend", backends)
def test_cond(self, backend):
in_features = 1
out_features = 2
class TestNet(nn.Module):
def forward(self, x):
if torch.squeeze(x) < 10.:
return x*10.
else:
return x*2.
model = TestNet().eval()
torch_model = torch.jit.script(model)
run_compare_torch(torch.tensor([1.]), torch_model,
input_as_shape=False, backend=backend)
run_compare_torch(torch.tensor([11.]), torch_model,
input_as_shape=False, backend=backend)
class TestLoop:
@pytest.mark.parametrize("backend", backends)
def test_for_loop(self, backend):
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = 2.0 * x
return x
class TestNet(nn.Module):
input_size = (64,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
for _ in range(7):
x = self.layer(x)
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
run_compare_torch(model.input_size, torch_model, backend=backend)
@pytest.mark.parametrize("backend", backends)
def test_while_loop(self, backend):
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = 0.5 * x
return x
class TestNet(nn.Module):
input_size = (1,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
while x > 0.01:
x = self.layer(x)
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
run_compare_torch(model.input_size, torch_model, backend=backend)
class TestUpsample:
@pytest.mark.parametrize(
"output_size, align_corners, backend",
[
x
for x in itertools.product(
[(10, 10), (1, 1), (20, 20), (2, 3), (190, 170)],
[True, False],
backends,
)
],
)
def test_upsample_bilinear2d_with_output_size(
self, output_size, align_corners, backend
):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{"size": output_size, "mode": "bilinear", "align_corners": align_corners,},
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"scales_h, scales_w, align_corners, backend",
[
x
for x in itertools.product(
[2, 3, 4.5], [4, 5, 5.5], [True, False], backends
)
],
)
def test_upsample_bilinear2d_with_scales(
self, scales_h, scales_w, align_corners, backend
):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{
"scale_factor": (scales_h, scales_w),
"mode": "bilinear",
"align_corners": align_corners,
},
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"output_size, backend",
[
x
for x in itertools.product(
[(10, 10), (30, 20), (20, 20), (20, 30), (190, 170)], backends
)
],
)
def test_upsample_nearest2d_with_output_size(self, output_size, backend):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate, {"size": output_size, "mode": "nearest"},
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"scales_h, scales_w, backend",
[x for x in itertools.product([2, 3, 5], [4, 5, 2], backends)],
)
def test_upsample_nearest2d_with_scales(self, scales_h, scales_w, backend):
input_shape = (1, 3, 10, 10)
model = ModuleWrapper(
nn.functional.interpolate,
{"scale_factor": (scales_h, scales_w), "mode": "nearest",},
)
run_compare_torch(input_shape, model, backend=backend)
class TestBranch:
@pytest.mark.parametrize("backend", backends)
def test_if(self, backend):
class TestLayer(nn.Module):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, x):
x = torch.mean(x)
return x
class TestNet(nn.Module):
input_size = (64,)
def __init__(self):
super(TestNet, self).__init__()
layer = TestLayer()
self.layer = torch.jit.trace(layer, torch.rand(self.input_size))
def forward(self, x):
m = self.layer(x)
if m < 0:
scale = -2.0
else:
scale = 2.0
x = scale * x
return x
model = TestNet().eval()
torch_model = torch.jit.script(model)
run_compare_torch(model.input_size, torch_model, backend=backend)
class TestAvgPool:
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
itertools.product(
[(1, 3, 15), (1, 1, 7), (1, 3, 10)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
[True, False],
backends,
),
)
def test_avg_pool1d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
model = nn.AvgPool1d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
itertools.product(
[(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
[True, False],
backends,
),
)
def test_avg_pool2d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
model = nn.AvgPool2d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend",
itertools.product(
[(1, 3, 11, 3, 11), (1, 1, 7, 4, 7), (1, 3, 6, 6, 3)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
[True, False],
backends,
),
)
def test_avg_pool3d(
self, input_shape, kernel_size, stride, padding, ceil_mode, include_pad, backend
):
if padding > kernel_size / 2:
return
model = nn.AvgPool3d(
kernel_size,
stride,
padding,
ceil_mode=ceil_mode,
count_include_pad=include_pad,
)
run_compare_torch(input_shape, model, backend=backend)
class TestAdaptiveMaxPool:
@pytest.mark.parametrize(
"output_size, magnification, delta, depth, backend",
itertools.product(
[(1,1), (3,2),(3,6),(32,32)],
[1,2,4,5,6,7],
[0,11],
[1,2,3],
backends,
),
)
def test_adaptive_max_pool2d(
self, output_size, magnification, delta, depth, backend
):
input_size = (delta + magnification * output_size[0], delta + magnification * output_size[1])
# offsets for adaptive pooling layers only when input_size is
# a multiple of output_size, we expect failures otherwise
if not (input_size[0] % output_size[0] == 0 and input_size[1] % output_size[1] == 0):
pytest.xfail("Test should fail because input_size is not a multiple of output_size")
n = 1
in_shape = (n,depth) + input_size
model = nn.AdaptiveMaxPool2d(
output_size
)
run_compare_torch(in_shape, model, backend=backend)
class TestMaxPool:
# rdar://66066001 (PyTorch converter: enable ceil_mode=True tests for pooling ops)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 15), (1, 1, 7), (1, 3, 10)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
backends,
),
)
def test_max_pool1d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
model = nn.MaxPool1d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 15, 15), (1, 1, 7, 7), (1, 3, 10, 10)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
backends,
),
)
def test_max_pool2d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
model = nn.MaxPool2d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"input_shape, kernel_size, stride, padding, ceil_mode, backend",
itertools.product(
[(1, 3, 11, 3, 11), (1, 1, 7, 4, 7), (1, 3, 6, 6, 3)],
[1, 2, 3],
[1, 2],
[0, 1],
[False],
backends,
),
)
def test_max_pool3d(
self, input_shape, kernel_size, stride, padding, ceil_mode, backend
):
if padding > kernel_size / 2:
return
model = nn.MaxPool3d(
kernel_size,
stride,
padding,
dilation=1,
return_indices=False,
ceil_mode=ceil_mode,
)
run_compare_torch(input_shape, model, backend=backend)
class TestLSTM:
def _pytorch_hidden_to_coreml(self, x):
# Split of Direction axis
f, b = torch.split(x, [1] * x.shape[0], dim=0)
# Concat on Hidden Size axis
x = torch.cat((f, b), dim=2)
# NOTE:
# We are omitting a squeeze because the conversion
# function for the mil op lstm unsqueezes the num_layers
# dimension
return x
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
itertools.product(
[7], [5], [1], [True, False], [False], [0.3], [True, False], backends
),
)
def test_lstm(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
model = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=bidirectional,
)
SEQUENCE_LENGTH = 3
BATCH_SIZE = 2
num_directions = int(bidirectional) + 1
# (seq_len, batch, input_size)
if batch_first:
_input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, hidden_size)
inputs = (_input, (h0, c0))
expected_results = model(*inputs)
# Need to do some output reshaping if bidirectional
if bidirectional:
ex_hn = self._pytorch_hidden_to_coreml(expected_results[1][0])
ex_cn = self._pytorch_hidden_to_coreml(expected_results[1][1])
expected_results = (expected_results[0], (ex_hn, ex_cn))
run_compare_torch(
inputs, model, expected_results, input_as_shape=False, backend=backend
)
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
[
(7, 3, 2, True, True, 0.3, True, list(backends)[-1]),
(7, 3, 2, False, False, 0.3, False, list(backends)[0]),
],
)
def test_lstm_xexception(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
with pytest.raises(ValueError):
self.test_lstm(
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend=backend,
)
# Workaround for GitHub Issue #824
# i.e. the return h_n/c_n for a converted BLSTM are mangled.
# Therefore, just look at output 'y' (for now) which is correct.
class StripCellAndHidden(nn.Module):
def __init__(self,flagReturnTuple_):
super(StripCellAndHidden, self).__init__()
self.flagReturnTuple = flagReturnTuple_
def forward(self,x):
# Pass tuple, not tensor, to avoid issue in coremltools/converters/mil/frontend/torch/test/testing_utils.py on "if not expected_results:"
# Pass tensor when we need input for LSTM #2 as part of nn.Sequential()
return tuple(x[0]) if self.flagReturnTuple else x[0]
# Check GitHub Issue #810, assume num_layers == 2 and bidirectional == True
class TestStackedBLSTM:
@pytest.mark.parametrize(
"input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, backend",
itertools.product([7], [5], [2], [True, False], [True, False], [0.3], [True], backends),
)
def test_lstm(
self,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
backend,
):
model = nn.Sequential(
nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=True),
StripCellAndHidden(False),
nn.LSTM(
input_size=2*hidden_size,
hidden_size=hidden_size,
num_layers=1,
bias=bias,
batch_first=batch_first,
dropout=dropout,
bidirectional=True),
StripCellAndHidden(True)
)
SEQUENCE_LENGTH = 3
BATCH_SIZE = 2
num_directions = int(bidirectional) + 1
# (seq_len, batch, input_size)
if batch_first:
_input = torch.rand(BATCH_SIZE, SEQUENCE_LENGTH, input_size)
else:
_input = torch.randn(SEQUENCE_LENGTH, BATCH_SIZE, input_size)
# Do not use h_0/c_0 input and do not check h_n/c_n output, GitHub Issue #824
expected_results = model(_input)
run_compare_torch(_input, model, expected_results, input_as_shape=False, backend=backend)
class TestConcat:
# This tests an edge case where the list of tensors to concatenate only
# has one item. NN throws an error for this case, hence why we have to
# run through the full conversion process to test it.
@pytest.mark.parametrize("backend", backends)
def test_cat(self, backend):
class TestNet(nn.Module):
def __init__(self):
super(TestNet, self).__init__()
def forward(self, x):
x = torch.cat((x,), axis=1)
return x
model = TestNet()
run_compare_torch((1, 3, 16, 16), model, backend=backend)
class TestReduction:
@pytest.mark.parametrize(
"input_shape, dim, keepdim, backend",
itertools.product([(2, 2), (1, 1)], [0, 1], [True, False], backends),
)
def test_max(self, input_shape, dim, keepdim, backend):
class TestMax(nn.Module):
def __init__(self):
super(TestMax, self).__init__()
def forward(self, x):
return torch.max(x, dim=dim, keepdim=keepdim)
input_data = torch.rand(input_shape)
model = TestMax()
# TODO: Expected results are flipped due to naming issue:
# rdar://62681982 (Determine the output names of MLModels)
expected_results = model(input_data)[::-1]
run_compare_torch(
input_data,
model,
expected_results=expected_results,
input_as_shape=False,
backend=backend,
)
class TestLayerNorm:
@pytest.mark.parametrize(
"input_shape, eps, backend",
itertools.product([(1, 3, 15, 15), (1, 1, 1, 1)], [1e-5, 1e-9], backends),
)
def test_layer_norm(self, input_shape, eps, backend):
model = nn.LayerNorm(input_shape, eps=eps)
run_compare_torch(input_shape, model, backend=backend)
class TestPixelShuffle:
@pytest.mark.parametrize(
"batch_size, CHW, r, backend",
itertools.product([1, 3], [(1, 4, 4), (3, 2, 3)], [2, 4], backends),
)
def test_pixel_shuffle(self, batch_size, CHW, r, backend):
C, H, W = CHW
input_shape = (batch_size, C * r * r, H, W)
model = nn.PixelShuffle(upscale_factor=r)
run_compare_torch(input_shape, model, backend=backend)
class TestExpand:
@pytest.mark.parametrize(
"backend, shapes",
itertools.product(
backends,
[[(2, 1), (2, 2)], [(3, 1), (-1, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
),
)
def test_expand(self, backend, shapes):
input_shape, output_shape = shapes
class TestModel(torch.nn.Module):
def forward(self, x):
return x.expand(*output_shape)
model = TestModel()
run_compare_torch(input_shape, model, backend=backend)
@pytest.mark.parametrize(
"backend, input_shapes",
itertools.product(
backends,
[[(2, 1), (2, 2)], [(3, 1), (3, 4)], [(1, 3, 4, 4), (3, 3, 4, 4)]]
),
)
def test_expand_as(self, backend, input_shapes):
class TestModel(torch.nn.Module):
def forward(self, x, y):
return x.expand_as(y)
model = TestModel()
run_compare_torch(input_shapes, model, backend=backend)
class TestExpandDims:
    """Conversion tests for ``torch.unsqueeze`` over every legal axis."""

    @pytest.mark.parametrize(
        "backend, rank_and_axis",
        itertools.product(
            backends,
            [
                (rank, axis)
                for rank in range(1, 5)
                for axis in range(-rank - 1, rank + 1)
            ],
        ),
    )
    def test_unsqueeze(self, backend, rank_and_axis):
        rank, axis = rank_and_axis
        # Random shape of the requested rank; unsqueeze inserts a new dim.
        shape = tuple(np.random.randint(low=2, high=10, size=rank))
        wrapper = ModuleWrapper(function=torch.unsqueeze, kwargs={"dim": axis})
        run_compare_torch(shape, wrapper, backend=backend)
class TestSqueeze:
    """Conversion tests for ``torch.squeeze``."""

    @pytest.mark.parametrize(
        "backend, rank_and_axis",
        itertools.product(
            backends,
            [(2, 1), (2, 0), (3, 1), (3, None), (4, None), (4, 2), (5, None), (5, -1),],
        ),
    )
    def test_squeeze(self, backend, rank_and_axis):
        """Compare torch.squeeze against the converted model.

        When ``axis`` is None, every singleton dim is squeezed; otherwise only
        the given dim (forced to size 1 below) should be removed.
        """
        rank, axis = rank_and_axis
        input_shape = list(np.random.randint(low=2, high=10, size=rank))
        if axis is not None:
            input_shape[axis] = 1
        else:
            input_shape[0] = 1
        input_shape = tuple(input_shape)
        # BUGFIX: the original `{"dim": axis} if axis else {}` treated
        # axis == 0 (the parametrized (2, 0) case) like axis is None and
        # silently dropped the "dim" kwarg. Test identity against None.
        model = ModuleWrapper(
            function=torch.squeeze, kwargs={} if axis is None else {"dim": axis}
        )
        run_compare_torch(input_shape, model, backend=backend)
class TestCumSum:
    """Conversion tests for ``torch.cumsum``."""

    @pytest.mark.parametrize(
        "backend, axis",
        itertools.product(
            backends,
            [-1, 0, 1, 2, 3],
        ),
    )
    def test_cumsum(self, backend, axis):
        # Random rank-4 shape; cumulative sum along the parametrized axis.
        shape = tuple(np.random.randint(low=2, high=10, size=4))
        wrapper = ModuleWrapper(function=torch.cumsum, kwargs={"dim": axis})
        run_compare_torch(shape, wrapper, backend=backend)
class TestReshape:
    """Conversion tests for ``torch.reshape``."""

    # TODO: <rdar://66239973> Add dynamic & rank preserving reshape tests for pytorch
    @pytest.mark.parametrize(
        "backend, output_shape",
        itertools.product(backends, [(3, 2), (2, -1), (2, 1, 1, 3),],),
    )
    def test_reshape(self, backend, output_shape):
        # A fixed 2x3 input reshaped into each parametrized target shape
        # (-1 lets torch infer that dim).
        wrapper = ModuleWrapper(function=torch.reshape, kwargs={"shape": output_shape})
        run_compare_torch((2, 3), wrapper, backend=backend)
class TestFlatten:
    """Conversion tests for ``torch.flatten``."""

    @pytest.mark.parametrize(
        "backend, start_dim",
        itertools.product(backends, [2,-2],),
    )
    def test_flatten(self, backend, start_dim):
        """Flatten a rank-4 input starting at a positive or negative dim.

        Renamed from the copy-pasted ``test_reshape`` so the pytest id
        names the op actually under test.
        """
        input_shape = (2, 3, 4, 5)
        model = ModuleWrapper(function=torch.flatten, kwargs={"start_dim": start_dim})
        run_compare_torch(input_shape, model, backend=backend)
class TestGather:
    # Conversion tests for torch.gather along each axis of ranks 1-5.
    @pytest.mark.xfail(
        reason="Load constant not copied properly for integer valued constants. Enable after eng/PR-65551506 is merged",
        run=False,
    )
    @pytest.mark.parametrize(
        "rank_and_axis, backend",
        itertools.product([(i, j) for i in range(1, 6) for j in range(0, i)], backends),
    )
    def test_gather_along_axis(self, rank_and_axis, backend):
        """Gather random indices along `axis` of a random-shaped input."""
        rank, axis = rank_and_axis
        params_shape = np.random.randint(low=2, high=5, size=rank)
        # Indices match the params shape except along the gather axis,
        # whose length is drawn independently.
        indices_shape = np.copy(params_shape)
        indices_shape[axis] = np.random.randint(low=1, high=8)
        # Index values must stay within the axis extent of params.
        indices = np.random.randint(0, params_shape[axis], size=indices_shape)
        params_shape, indices_shape = tuple(params_shape), tuple(indices_shape)
        # Indices are baked into the model as a constant kwarg.
        model = ModuleWrapper(
            function=torch.gather,
            kwargs={"dim": axis, "index": torch.from_numpy(indices)},
        )
        run_compare_torch([params_shape], model, backend=backend)
class TestActivation:
    # Conversion tests for unary activation modules/functionals.
    # Input shapes are randomized per parametrized rank; numerical
    # comparison against torch happens inside run_compare_torch.
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_relu(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ReLU().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        # Also exercise the in-place functional variant.
        model = ModuleWrapper(nn.functional.relu_)
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_relu6(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ReLU6().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, alpha", itertools.product(backends, [0.1, 0.25, 2.0]),
    )
    def test_prelu(self, backend, alpha):
        input_shape = tuple(np.random.randint(low=5, high=10, size=4))
        # One learnable slope per channel, all initialized to alpha.
        C = input_shape[1]
        model = nn.PReLU(C, alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, alpha",
        itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
    )
    def test_leaky_relu(self, backend, rank, alpha):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.LeakyReLU(negative_slope=alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        # In-place functional variant.
        model = ModuleWrapper(nn.functional.leaky_relu_, {'negative_slope': alpha})
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_softmax(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Softmax().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, range_val",
        itertools.product(
            backends, range(1, 6), [(-1.0, 1.0), (0.0, 0.1), (1.0, 3.0), (-1.0, 6.0)]
        ),
    )
    def test_hardtanh(self, backend, rank, range_val):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        # range_val is the (min, max) clipping interval.
        model = nn.Hardtanh(range_val[0], range_val[1]).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
        # In-place functional variant.
        model = ModuleWrapper(nn.functional.hardtanh_,
                   {'min_val': range_val[0], 'max_val': range_val[1]})
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, alpha",
        itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
    )
    def test_elu(self, backend, rank, alpha):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ELU(alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    # rdar://problem/66557565
    # NOTE(review): restricted to 'nn_proto' only — presumably other backends
    # lack a GELU op; confirm against the radar above.
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(['nn_proto'], range(1, 6)),
    )
    def test_gelu(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.GELU().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_erf(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        # torch.erf has no nn.Module wrapper, so define a throwaway module.
        class ERFActivation(nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x):
                return torch.erf(x)
        model = ERFActivation().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_sigmoid(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Sigmoid().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_sigmoid_hard(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Hardsigmoid().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, beta, threshold", itertools.product(backends, [1, 2, 5], [5, 10, 20]),
    )
    @pytest.mark.skipif(
        _macos_version() <= (11,),
        reason="Parametric SoftPlus segfaults on macOS 10.15 and below. (rdar://problem/66555235)",
    )
    def test_softplus(self, backend, beta, threshold):
        input_shape = (1, 10, 5, 15)
        model = nn.Softplus(beta, threshold).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    # rdar://problem/66557565
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(['nn_proto'], range(1, 6)),
    )
    def test_softsign(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Softsign().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
class TestElementWiseUnary:
    # Conversion tests for elementwise unary torch ops applied through
    # ModuleWrapper on rank-4 random inputs.
    @pytest.mark.parametrize(
        "backend, rank, op_string",
        itertools.product(
            backends,
            [4],
            [
                "abs",
                "acos",
                "asin",
                "atan",
                "ceil",
                "cos",
                "cosh",
                "exp",
                "floor",
                "round",
                "sin",
                "sinh",
                "sqrt",
                "square",
                "tan",
                "tanh",
                "sign",
            ],
        ),
    )
    def test_elementwise_no_params(self, backend, rank, op_string):
        # Skip ops this torch build does not provide.
        if not contains_op(torch, op_string):
            return
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        op_func = getattr(torch, op_string)
        model = ModuleWrapper(function=op_func)
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    ## TODO (rdar://66577921): Needs to move to test_elementwise_no_params after backend is added
    @pytest.mark.parametrize(
        "backend, rank",
        itertools.product(
            ['nn_proto'],
            [4],
        ),
    )
    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
    def test_square(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = ModuleWrapper(function=torch.square)
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, clamp_range",
        itertools.product(
            backends,
            [4],
            [(0.0, 1.0), (-1.0, 0.5), (0.2, 0.7)],
        ),
    )
    def test_clamp(self, backend, rank, clamp_range):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        # clamp_range is the (min, max) clipping interval.
        model = ModuleWrapper(torch.clamp, {'min': clamp_range[0], 'max': clamp_range[1]})
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, threshold",
        itertools.product(
            ['nn_proto'], # rdar://66597974 Renable for all backends due to missing cast
            [4],
            [(0.0, 0.0), (0.5, 0.5), (0.5, 10), (0.9, 0.0)]
        ),
    )
    def test_threshold(self, backend, rank, threshold):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        # threshold is (threshold, replacement value).
        model = torch.nn.Threshold(threshold[0], threshold[1]).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
    @pytest.mark.parametrize(
        "backend, rank, op_string",
        itertools.product(
            backends,
            [4],
            [
                "log",
                "rsqrt",
                "reciprocal",
            ],
        ),
    )
    def test_elementwise_numerically_stable(self, backend, rank, op_string):
        # Ops that blow up near zero get inputs drawn from (20, 100).
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        op_func = getattr(torch, op_string)
        model = ModuleWrapper(function=op_func)
        run_compare_torch(
            input_shape, model, backend=backend, rand_range=(20, 100)
        )
class TestMatMul:
    """Conversion tests for batched matrix multiplication."""

    @pytest.mark.parametrize("backend", backends)
    def test_bmm(self, backend):
        # (3, 4, 5) @ (3, 5, 6) -> (3, 4, 6)
        lhs_shape = (3, 4, 5)
        rhs_shape = (3, 5, 6)
        wrapper = ModuleWrapper(function=torch.bmm)
        run_compare_torch(
            [lhs_shape, rhs_shape], wrapper, backend=backend,
        )
class TestSplit:
    """Conversion tests for ``torch.split`` and ``torch.split_with_sizes``."""

    @pytest.mark.parametrize(
        "backend, split_size_or_sections, dim",
        itertools.product(backends, [1, 2, [1, 4]], [0, -2]),
    )
    def test_split(self, backend, split_size_or_sections, dim):
        # A (5, 2) input split into either equal chunks or explicit sections.
        wrapper = ModuleWrapper(
            function=torch.split,
            kwargs={"split_size_or_sections": split_size_or_sections, "dim": dim},
        )
        run_compare_torch((5, 2), wrapper, backend=backend)

    @pytest.mark.parametrize(
        "backend, split_sizes, dim",
        itertools.product(backends, [[1, 4], [3, 2]], [-1, -2]),
    )
    def test_split_with_sizes(self, backend, split_sizes, dim):
        # A (5, 5) input split into explicitly sized chunks along negative dims.
        wrapper = ModuleWrapper(
            function=torch.split_with_sizes,
            kwargs={"split_sizes": split_sizes, "dim": dim},
        )
        run_compare_torch((5, 5), wrapper, backend=backend)
class TestUnbind:
    """Conversion tests for ``torch.unbind``."""

    @pytest.mark.parametrize(
        "backend, dim",
        itertools.product(backends,[0,1,2]),
    )
    def test_unbind(self, backend, dim):
        # Unbind a (3, 3, 4) tensor along each of its three dims.
        wrapper = ModuleWrapper(function=torch.unbind, kwargs={"dim": dim})
        run_compare_torch((3, 3, 4), wrapper, backend=backend)
class TestTranspose:
    """Conversion tests for ``torch.transpose``."""

    @pytest.mark.parametrize(
        "backend, rank, dims",
        itertools.product(backends, list(range(2, 6)),
                          [(0, 1), (-2, -1), (1, 0), (-1, -2)]),
    )
    def test(self, backend, rank, dims):
        # Swap two dims (positive or negative indices) of a random-shape input.
        first, second = dims
        shape = tuple(np.random.randint(low=1, high=4, size=rank))
        wrapper = ModuleWrapper(
            function=torch.transpose, kwargs={"dim0": first, "dim1": second}
        )
        run_compare_torch(shape, wrapper, backend=backend)
class TestTo:
    # Regression test for dtype-cast handling (float() ... int() chains).
    @pytest.mark.parametrize(
        "backend", backends,
    )
    def test_cast_bug(self, backend):
        """Exercise a float->relu->int cast chain followed by reductions."""
        class TestModel(torch.nn.Module):
            def forward(self, spans, embedding):
                # The round-trip cast is the point of this regression test.
                spans = spans.float().relu().int()
                # Reduce each input twice along dim 1 to a per-batch scalar.
                max1, _ = torch.max(spans, dim=1, keepdim=False)
                max1, _ = torch.max(max1, dim=1, keepdim=False)
                max2, _ = torch.max(embedding, dim=1, keepdim=False)
                max2, _ = torch.max(max2, dim=1, keepdim=False)
                sigmoided_scores = max1 + max2
                return sigmoided_scores
        model = TestModel()
        run_compare_torch([(1, 21, 2), (1, 6, 384)], model, backend=backend)# [spans.shape, embedding.shape]
class TestSlice:
    # Conversion test for slicing with a runtime (non-constant) start index.
    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend", backends,
    )
    def test_dynamic_slice(self, backend):
        """Slice an embedding tensor by a scalar tensor input."""
        class DynamicSlicer(torch.nn.Module):
            def __init__(self):
                super(DynamicSlicer, self).__init__()
            def forward(self, x, context_length):
                # Start index comes from a model input, not a constant.
                return x[context_length:, :, :]
        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.tokens_embedding = torch.nn.Embedding(10, 10, 0)
                self.context_embedding = torch.nn.Embedding(10, 10, 0)
                self.dynamic_slicer = DynamicSlicer()
            def forward(self, tokens, context, context_length):
                # Embed both streams, concatenate along the sequence dim,
                # then drop the first `context_length` rows dynamically.
                tokens_embeddings = self.tokens_embedding(tokens)
                context_embeddings = self.context_embedding(context)
                embeddings = torch.cat((context_embeddings, tokens_embeddings), dim=0)
                embeddings = self.dynamic_slicer(embeddings, context_length)
                return embeddings
        model = Model()
        batch_size = 5
        # Integer inputs: embeddings need int64 ids; the slice index is int32.
        inputs = [ TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64),
                   TensorType(name="context", shape=(3, batch_size), dtype=np.int64),
                   TensorType(name="context_length", shape=(), dtype=np.int32),
                   ]
        run_compare_torch(inputs, model, rand_range=(0, 8), backend=backend, use_scripting=False)
class TestRepeat:
    """Conversion tests for ``Tensor.repeat``."""

    @pytest.mark.parametrize(
        "backend, rank",
        itertools.product(backends, list(range(1, 6))),
    )
    def test_repeat(self, backend, rank):
        # Random shape and per-dim repeat counts of matching rank.
        shape = tuple(np.random.randint(low=2, high=6, size=rank))
        reps = np.random.randint(low=2, high=4, size=rank)
        model = ModuleWrapper(function=lambda x: x.repeat(*reps))
        run_compare_torch(shape, model, backend=backend)
class TestStd:
    # Conversion tests for torch.std with and without dim/keepdim.
    @pytest.mark.parametrize(
        "backend, unbiased",
        itertools.product(backends, [True, False]),
    )
    def test_std_2_inputs(self, backend, unbiased):
        """Full-tensor std; expected result is precomputed with torch."""
        model = ModuleWrapper(function=torch.std,
                              kwargs={"unbiased": unbiased})
        x = torch.randn(1, 5, 10) * 3
        # Scalar std unsqueezed to rank 1 to match the converted output.
        out = torch.std(x, unbiased=unbiased).unsqueeze(0)
        run_compare_torch(x, model, expected_results=out, input_as_shape=False, backend=backend)
    @pytest.mark.parametrize(
        "backend, unbiased, dim, keepdim",
        itertools.product(backends, [True, False], [[0,2], [1], [2]], [True, False]),
    )
    def test_std_4_inputs(self, backend, unbiased, dim, keepdim):
        """std over one or more dims with every unbiased/keepdim combo."""
        model = ModuleWrapper(function=torch.std,
                              kwargs={"unbiased": unbiased, "dim" : dim, "keepdim": keepdim})
        input_shape = (2, 5, 10)
        run_compare_torch(input_shape, model, backend=backend)
class TestTopk:
    # Conversion tests for torch.topk (values and indices outputs).
    @pytest.mark.parametrize(
        "backend, largest, shape_dim_k",
        itertools.product(
            backends,
            [True, False],
            [
                ((4, 6, 7, 3), -1, 2),
                ((10, 3, 4), 2, 2),
                ((10, 5), -2, 3),
                ((5,), 0, 2)
            ],
        ),
    )
    def test_topk(self, backend, largest, shape_dim_k):
        """Compare topk values/indices for each (shape, dim, k) combo."""
        input_shape = shape_dim_k[0]
        dim = shape_dim_k[1]
        k = shape_dim_k[2]
        class TopkModel(nn.Module):
            def __init__(self):
                super(TopkModel, self).__init__()
            def forward(self, x):
                return torch.topk(x, k, dim=dim, largest=largest)
        input_data = torch.rand(input_shape)
        model = TopkModel()
        expected_results = model(input_data)
        # topk returns a named tuple; compare values and indices separately.
        expected_results = [expected_results.values, expected_results.indices]
        run_compare_torch(
            input_data,
            model,
            expected_results=expected_results,
            input_as_shape=False,
            backend=backend,
        )
f72ce252a89798bc51b81ba3b3a05a173b92e02c | 8,096 | py | Python | Natural Language Processing with Attention Models/Week 4 - Chatbot/w4_unittest.py | meet-seth/Coursera-Deep-Learning | 6fbf9d406468c825ffa1ff2e177dbfd43084bace | [
"MIT"
] | 362 | 2020-10-08T07:34:25.000Z | 2022-03-30T05:11:30.000Z | NLP/Learn_by_deeplearning.ai/Course 4 - Attention Models /Labs/Week 4/w4_unittest.py | abcd1758323829/skills | 195fad43e99de5efe6491817ad2b79e12665cc2a | [
"MIT"
] | 7 | 2020-07-07T16:10:23.000Z | 2021-06-04T08:17:55.000Z | NLP/Learn_by_deeplearning.ai/Course 4 - Attention Models /Labs/Week 4/w4_unittest.py | abcd1758323829/skills | 195fad43e99de5efe6491817ad2b79e12665cc2a | [
"MIT"
] | 238 | 2020-10-08T12:01:31.000Z | 2022-03-25T08:10:42.000Z | import numpy as np
import trax
#from trax import layers as tl
#from trax.fastmath import numpy as fastnp
#from trax.supervised import training
# UNIT TEST for UNQ_C1
def test_get_conversation(target):
data = {'file1.json': {'log':[{'text': 'hi'},
{'text': 'hello'},
{'text': 'nice'}]},
'file2.json':{'log':[{'text': 'a b'},
{'text': ''},
{'text': 'good '},
{'text': 'no?'}]}}
res1 = target('file1.json', data)
res2 = target('file2.json', data)
expected1 = ' Person 1: hi Person 2: hello Person 1: nice'
expected2 = ' Person 1: a b Person 2: Person 1: good Person 2: no?'
success = 0
fails = 0
try:
assert res1 == expected1
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res1, 'x \nExpected: ', expected1)
fails += 1
try:
assert res2 == expected2
success += 1
except:
print('Error in test 2 \nResult : ', res2, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C2
def test_reversible_layer_forward(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([8, 10, 12, 14, 29, 36, 43, 50])
input_vector2 = np.array([1] * 128)
expected2 = np.array([3] * 64 + [7] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C3
def test_reversible_layer_reverse(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([-3, 0, 3, 6, 2, 0, -2, -4])
input_vector2 = np.array([1] * 128)
expected2 = np.array([1] * 64 + [-1] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C4
def test_ReformerLM(target):
    """Unit test for UNQ_C4: check the structure of a candidate ReformerLM.

    target: a factory that, given a mode string ('train'), returns a trax
    model. The test checks the sublayer count and the flattened string
    representation of the architecture. Prints a pass/fail summary.
    """
    test_cases = [
        {
            "name":"layer_len_check",
            "expected":11,
            "error":"We found {} layers in your model. It should be 11.\nCheck the LSTM stack before the dense layer"
        },
        {
            # Whitespace-stripped repr of the expected architecture.
            "name":"simple_test_check",
            "expected":"Serial[ShiftRight(1)Embedding_train_512DropoutPositionalEncodingDup_out2ReversibleSerial_in2_out2[ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2]Concatenate_in2LayerNormDropoutDense_trainLogSoftmax]",
            "error":"The ReformerLM is not defined properly."
        }
    ]
    temp_model = target('train')
    success = 0
    fails = 0

    for test_case in test_cases:
        try:
            if test_case['name'] == "simple_test_check":
                # Compare architecture reprs ignoring spaces and newlines.
                assert test_case["expected"] == str(temp_model).replace(' ', '').replace('\n','')
                success += 1
            if test_case['name'] == "layer_len_check":
                if test_case["expected"] == len(temp_model.sublayers):
                    success += 1
                else:
                    print(test_case["error"].format(len(temp_model.sublayers)))
                    fails += 1
        except:
            print(test_case['error'])
            fails += 1

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
# UNIT TEST for UNQ_C5
def test_tasks(train_task, eval_task):
    """Unit test for UNQ_C5: validate trax TrainTask/EvalTask configuration.

    Checks each task's labeled-data generator, loss, optimizer, learning-rate
    schedule, checkpointing frequency, and eval metrics by inspecting the
    task objects' (private) attributes. Prints a pass/fail summary.
    """
    target = train_task
    success = 0
    fails = 0

    # Test the labeled data parameter for train_task
    try:
        strlabel = str(target._labeled_data)
        assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
        success += 1
    except:
        fails += 1
        print("Wrong labeled data parameter in train_task")

    # Test the cross entropy loss data parameter
    try:
        strlabel = str(target._loss_layer)
        assert(strlabel == "CrossEntropyLoss_in3")
        success += 1
    except:
        fails += 1
        print("Wrong loss functions. CrossEntropyLoss_in3 was expected")

    # Test the optimizer parameter
    try:
        assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))
        success += 1
    except:
        fails += 1
        print("Wrong optimizer")

    # Test the schedule parameter
    try:
        assert(isinstance(target._lr_schedule,trax.supervised.lr_schedules._BodyAndTail))
        success += 1
    except:
        fails += 1
        print("Wrong learning rate schedule type")

    # Test the _n_steps_per_checkpoint parameter
    try:
        assert(target._n_steps_per_checkpoint==10)
        success += 1
    except:
        fails += 1
        print("Wrong checkpoint step frequency")

    target = eval_task
    # Test the labeled data parameter for eval_task
    try:
        strlabel = str(target._labeled_data)
        assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
        success += 1
    except:
        fails += 1
        print("Wrong labeled data parameter in eval_task")

    # Test the metrics in eval_task
    # NOTE(review): if str(target._metrics) itself raises, the f-string below
    # reuses `strlabel` from the previous block — misleading but non-fatal.
    try:
        strlabel = str(target._metrics).replace(' ', '')
        assert(strlabel == "[CrossEntropyLoss_in3,Accuracy_in3]")
        success += 1
    except:
        fails += 1
        print(f"Wrong metrics. found {strlabel} but expected [CrossEntropyLoss_in3,Accuracy_in3]")

    if fails == 0:
        print("\033[92m All tests passed")
    else:
        print('\033[92m', success," Tests passed")
        print('\033[91m', fails, " Tests failed")
| 31.874016 | 580 | 0.561018 | import numpy as np
import trax
def test_get_conversation(target):
data = {'file1.json': {'log':[{'text': 'hi'},
{'text': 'hello'},
{'text': 'nice'}]},
'file2.json':{'log':[{'text': 'a b'},
{'text': ''},
{'text': 'good '},
{'text': 'no?'}]}}
res1 = target('file1.json', data)
res2 = target('file2.json', data)
expected1 = ' Person 1: hi Person 2: hello Person 1: nice'
expected2 = ' Person 1: a b Person 2: Person 1: good Person 2: no?'
success = 0
fails = 0
try:
assert res1 == expected1
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res1, 'x \nExpected: ', expected1)
fails += 1
try:
assert res2 == expected2
success += 1
except:
print('Error in test 2 \nResult : ', res2, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
def test_reversible_layer_forward(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([8, 10, 12, 14, 29, 36, 43, 50])
input_vector2 = np.array([1] * 128)
expected2 = np.array([3] * 64 + [7] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
def test_reversible_layer_reverse(target):
f1 = lambda x: x + 2
g1 = lambda x: x * 3
f2 = lambda x: x + 1
g2 = lambda x: x * 2
input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])
expected1 = np.array([-3, 0, 3, 6, 2, 0, -2, -4])
input_vector2 = np.array([1] * 128)
expected2 = np.array([1] * 64 + [-1] * 64)
success = 0
fails = 0
try:
res = target(input_vector1, f1, g1)
assert isinstance(res, np.ndarray)
success += 1
except:
print('Wrong type! Output is not of type np.ndarray')
fails += 1
try:
res = target(input_vector1, f1, g1)
assert np.allclose(res, expected1)
success += 1
except ValueError:
print('Error in test 1 \nResult : ', res, 'x \nExpected: ', expected1)
fails += 1
try:
res = target(input_vector2, f2, g2)
assert np.allclose(res, expected2)
success += 1
except:
print('Error in test 2 \nResult : ', res, ' \nExpected: ', expected2)
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
def test_ReformerLM(target):
test_cases = [
{
"name":"layer_len_check",
"expected":11,
"error":"We found {} layers in your model. It should be 11.\nCheck the LSTM stack before the dense layer"
},
{
"name":"simple_test_check",
"expected":"Serial[ShiftRight(1)Embedding_train_512DropoutPositionalEncodingDup_out2ReversibleSerial_in2_out2[ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2]Concatenate_in2LayerNormDropoutDense_trainLogSoftmax]",
"error":"The ReformerLM is not defined properly."
}
]
temp_model = target('train')
success = 0
fails = 0
for test_case in test_cases:
try:
if test_case['name'] == "simple_test_check":
assert test_case["expected"] == str(temp_model).replace(' ', '').replace('\n','')
success += 1
if test_case['name'] == "layer_len_check":
if test_case["expected"] == len(temp_model.sublayers):
success += 1
else:
print(test_case["error"].format(len(temp_model.sublayers)))
fails += 1
except:
print(test_case['error'])
fails += 1
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
def test_tasks(train_task, eval_task):
target = train_task
success = 0
fails = 0
try:
strlabel = str(target._labeled_data)
assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
success += 1
except:
fails += 1
print("Wrong labeled data parameter in train_task")
try:
strlabel = str(target._loss_layer)
assert(strlabel == "CrossEntropyLoss_in3")
success += 1
except:
fails += 1
print("Wrong loss functions. CrossEntropyLoss_in3 was expected")
try:
assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))
success += 1
except:
fails += 1
print("Wrong optimizer")
try:
assert(isinstance(target._lr_schedule,trax.supervised.lr_schedules._BodyAndTail))
success += 1
except:
fails += 1
print("Wrong learning rate schedule type")
try:
assert(target._n_steps_per_checkpoint==10)
success += 1
except:
fails += 1
print("Wrong checkpoint step frequency")
target = eval_task
try:
strlabel = str(target._labeled_data)
assert ("generator" in strlabel) and ("add_loss_weights" in strlabel)
success += 1
except:
fails += 1
print("Wrong labeled data parameter in eval_task")
try:
strlabel = str(target._metrics).replace(' ', '')
assert(strlabel == "[CrossEntropyLoss_in3,Accuracy_in3]")
success += 1
except:
fails += 1
print(f"Wrong metrics. found {strlabel} but expected [CrossEntropyLoss_in3,Accuracy_in3]")
if fails == 0:
print("\033[92m All tests passed")
else:
print('\033[92m', success," Tests passed")
print('\033[91m', fails, " Tests failed")
| true | true |
f72ce29ddb1dc2f405d1811a6553b8fcc76db122 | 7,445 | py | Python | many_requests/many_requests_.py | 0xflotus/many_requests | dab3963eff471669f7b372cf488a2d9623270fab | [
"MIT"
] | null | null | null | many_requests/many_requests_.py | 0xflotus/many_requests | dab3963eff471669f7b372cf488a2d9623270fab | [
"MIT"
] | null | null | null | many_requests/many_requests_.py | 0xflotus/many_requests | dab3963eff471669f7b372cf488a2d9623270fab | [
"MIT"
] | null | null | null |
import logging
from json import JSONDecodeError
from typing import List, Optional, Dict, Union
import asks
import trio
from asks.errors import BadHttpResponse
from asks.response_objects import Response
from h11 import RemoteProtocolError
from .easy_async import EasyAsync, delayed, zip_kw
from .common import BadResponse, N_WORKERS_DEFAULT, N_CONNECTIONS_DEFAULT, is_collection
class ManyRequests:
    def __init__(
        self,
        n_workers=N_WORKERS_DEFAULT,
        n_connections=N_CONNECTIONS_DEFAULT,
        retries=10,
        retry_sleep=3,
        ok_codes=(200,),
        ok_response_func=None,
        json=False,
    ):
        """
        Dead easy interface for executing many HTTP requests asynchronously.

        Args:
            n_workers: Max number of workers to use. Too many workers will use a lot of memory and increase startup
                time, too few can lead to slower execution.
            n_connections: Max number of open connections to have open at once. The number of connections is also
                limited by the OS. For example, by default MacOS has a limit of 256 and Ubuntu has ~66k. These limits
                can be changed with OS configuration.
            retries: Number of retries to attempt if a request fails
            retry_sleep: How long to wait in seconds before retrying a request
            ok_codes: A sequence of HTTP status codes to accept as ok. If `any`, all responses will be assumed to be ok
            ok_response_func: A function to apply to the response to determine if ok. Should return True/False.
            json: Parse response body as json and return instead of full Responce object

        Examples:
            Execute 10 GET requests to https://example.org

            >>> responses = ManyRequests(n_workers=5, n_connections=5)(
            >>>                 method='GET', url=[f'https://example.org' for i in range(10)])
        """
        self.n_workers = n_workers
        self.n_connections = n_connections
        # Session is created in __call__, not here, so each invocation gets
        # a fresh asks.Session with the configured connection limit.
        self.session = None
        self.retries = retries
        self.retry_sleep = retry_sleep
        self.ok_codes = ok_codes
        self.ok_response_func = ok_response_func
        self.json = json
        # Populated per invocation; kept as instance state for inspection.
        self.requests = None
        self.responses = None
def __call__(
self,
method: Union[str, List[str]],
url: Union[str, List[str]],
params=None,
data=None,
json=None,
headers=None,
cookies=None,
auth=None,
) -> List[Union[Response, BadResponse]]:
"""
Process asynchronously many requests, handling bad responses. Return the responses in the same order.
If no ok response was obtained after retires a `BadResponse` will be included in the corresponding position of
the output. A `BadResponse` will contain the last response and error reason.
Arguments mimic `asks.request`_, which in turn mimics `requests.request`_.
Each argument could be a single item or a list of items. When they are a single item, that attribute is
duplicated for every request.
Args:
method: HTTP method type `GET`, `OPTIONS`, `HEAD`, `POST`, `PUT`, `PATCH`, or `DELETE`.
url: URL of the Request
params: Dictionary, list of tuples or bytes to send in the query string of the Request
data: Dictionary, list of tuples, bytes, or file-like object to send in the body of the Request
json: A JSON serializable Python object to send in the body of the Request
headers: Dictionary of HTTP Headers to send with the Request
cookies: Dict or CookieJar object to send with the Request
auth: Auth tuple to enable Basic/Digest/Custom HTTP Auth
Returns:
responses: A list of responses in the same order of the requests. Will include a `BadResponse` in the
position of a request where no good response was obtained.
.. _asks.request:
https://asks.readthedocs.io/en/latest/overview-of-funcs-and-args.html
.. _requests.request:
https://2.python-requests.org/en/master/api/#requests.request
"""
length = None
for e in (method, url, params, data, json, headers, cookies, auth):
if not is_collection(e):
continue
try:
l = len(e)
if length is None or l < length:
length = l
except TypeError:
pass
self.session = asks.Session(connections=self.n_connections)
responses = EasyAsync(n_workers=self.n_workers)(
tasks=(
delayed(self._runner)(request_kwargs=kwargs)
for kwargs in zip_kw(
method=method,
url=url,
params=params,
data=data,
json=json,
headers=headers,
cookies=cookies,
auth=auth,
)
),
length=length,
)
return responses
async def _runner(self, request_kwargs):
"""Task which handles completing a HTTP request and errors that arise"""
last_error = None
for attempt_i in range(0, self.retries+1):
try:
try:
response = await self.session.request(**request_kwargs)
except RemoteProtocolError as e:
raise BadResponse('RemoteProtocolError', reason='RemoteProtocolError', attempt_num=attempt_i)
except BadHttpResponse as e:
raise BadResponse('BadHttpResponse', reason='BadHttpResponse', attempt_num=attempt_i)
if self.ok_codes != "any" and response.status_code not in self.ok_codes:
raise BadResponse(f"Bad response status code: {response.status_code}. Should be in {self.ok_codes}",
response=response, reason='bad_status_code', attempt_num=attempt_i)
if self.ok_response_func is not None and not self.ok_response_func(response):
raise BadResponse('Not OK response determined by `ok_response_func`', response=response,
reason='ok_response_func', attempt_num=attempt_i)
if self.json:
try:
response = response.json()
except JSONDecodeError as e:
raise BadResponse('Cannot decode JSON', response=response, reason='JSONDecodeError',
attempt_num=attempt_i)
logging.debug(f"OK Response {request_kwargs}")
return response
except BadResponse as e:
try:
code, text = response.status_code, response.text
except NameError:
code, text = None, None
logging.info(
f"BAD Response {request_kwargs}: Attempt {attempt_i}. Error {type(e).__name__}. Code: {code}. Body: {text}"
)
last_error = e
await trio.sleep(self.retry_sleep)
logging.warning(
f"FAILED Request {request_kwargs}: Permanently failed. Last error: {last_error.description}"
)
return last_error
| 41.361111 | 127 | 0.595567 |
import logging
from json import JSONDecodeError
from typing import List, Optional, Dict, Union
import asks
import trio
from asks.errors import BadHttpResponse
from asks.response_objects import Response
from h11 import RemoteProtocolError
from .easy_async import EasyAsync, delayed, zip_kw
from .common import BadResponse, N_WORKERS_DEFAULT, N_CONNECTIONS_DEFAULT, is_collection
class ManyRequests:
    """Dead easy interface for executing many HTTP requests asynchronously.

    Configure concurrency/retry behaviour once in the constructor, then call
    the instance with (possibly broadcast) request arguments.
    """
    def __init__(
        self,
        n_workers=N_WORKERS_DEFAULT,
        n_connections=N_CONNECTIONS_DEFAULT,
        retries=10,
        retry_sleep=3,
        ok_codes=(200,),
        ok_response_func=None,
        json=False,
    ):
        """
        Args:
            n_workers: Max number of concurrent worker tasks to use.
            n_connections: Max number of open connections at once.
            retries: Number of retries to attempt if a request fails.
            retry_sleep: Seconds to wait before retrying a failed request.
            ok_codes: Sequence of HTTP status codes accepted as ok; if the
                string `any`, every status code is accepted.
            ok_response_func: Optional predicate applied to each response;
                should return True/False.
            json: If True, parse the response body as JSON and return the
                parsed object instead of the full response.
        """
        self.n_workers = n_workers
        self.n_connections = n_connections
        # A fresh asks.Session is created on each __call__.
        self.session = None
        self.retries = retries
        self.retry_sleep = retry_sleep
        self.ok_codes = ok_codes
        self.ok_response_func = ok_response_func
        self.json = json
        self.requests = None
        self.responses = None
    def __call__(
        self,
        method: Union[str, List[str]],
        url: Union[str, List[str]],
        params=None,
        data=None,
        json=None,
        headers=None,
        cookies=None,
        auth=None,
    ) -> List[Union[Response, BadResponse]]:
        """Execute many requests asynchronously; return responses in order.

        Each argument may be a single item (broadcast to every request) or a
        collection of items. A request that never obtains an ok response
        yields a `BadResponse` in the corresponding output position.
        """
        # The number of requests is the length of the shortest sized
        # collection argument; scalars are broadcast by zip_kw.
        length = None
        for e in (method, url, params, data, json, headers, cookies, auth):
            if not is_collection(e):
                continue
            try:
                l = len(e)
                if length is None or l < length:
                    length = l
            except TypeError:
                # Collections without a length (e.g. generators) are left to zip_kw.
                pass
        self.session = asks.Session(connections=self.n_connections)
        responses = EasyAsync(n_workers=self.n_workers)(
            tasks=(
                delayed(self._runner)(request_kwargs=kwargs)
                for kwargs in zip_kw(
                    method=method,
                    url=url,
                    params=params,
                    data=data,
                    json=json,
                    headers=headers,
                    cookies=cookies,
                    auth=auth,
                )
            ),
            length=length,
        )
        return responses
    async def _runner(self, request_kwargs):
        """Complete one HTTP request with retries; return response or last error."""
        last_error = None
        for attempt_i in range(0, self.retries+1):
            try:
                try:
                    response = await self.session.request(**request_kwargs)
                except RemoteProtocolError as e:
                    raise BadResponse('RemoteProtocolError', reason='RemoteProtocolError', attempt_num=attempt_i)
                except BadHttpResponse as e:
                    raise BadResponse('BadHttpResponse', reason='BadHttpResponse', attempt_num=attempt_i)
                if self.ok_codes != "any" and response.status_code not in self.ok_codes:
                    raise BadResponse(f"Bad response status code: {response.status_code}. Should be in {self.ok_codes}",
                                      response=response, reason='bad_status_code', attempt_num=attempt_i)
                if self.ok_response_func is not None and not self.ok_response_func(response):
                    raise BadResponse('Not OK response determined by `ok_response_func`', response=response,
                                      reason='ok_response_func', attempt_num=attempt_i)
                if self.json:
                    try:
                        response = response.json()
                    except JSONDecodeError as e:
                        raise BadResponse('Cannot decode JSON', response=response, reason='JSONDecodeError',
                                          attempt_num=attempt_i)
                logging.debug(f"OK Response {request_kwargs}")
                return response
            except BadResponse as e:
                # NOTE(review): on attempt >= 2, `response` may still hold the
                # previous attempt's response when session.request() raised, so
                # the code/text logged here can be stale -- the NameError guard
                # only covers the first attempt. Confirm and consider resetting
                # `response = None` at the top of each attempt.
                try:
                    code, text = response.status_code, response.text
                except NameError:
                    code, text = None, None
                logging.info(
                    f"BAD Response {request_kwargs}: Attempt {attempt_i}. Error {type(e).__name__}. Code: {code}. Body: {text}"
                )
                last_error = e
                await trio.sleep(self.retry_sleep)
        logging.warning(
            f"FAILED Request {request_kwargs}: Permanently failed. Last error: {last_error.description}"
        )
        return last_error
| true | true |
f72ce2a2454d1eafcfe45c1437983329f74f1dde | 1,804 | py | Python | src/robot/parsing/restsupport.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-09-21T17:13:24.000Z | 2019-09-24T19:13:25.000Z | src/robot/parsing/restsupport.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robot/parsing/restsupport.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-12-30T14:05:02.000Z | 2019-12-30T14:05:02.000Z | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
try:
from docutils.core import publish_doctree
from docutils.parsers.rst.directives import register_directive
from docutils.parsers.rst.directives.body import CodeBlock
except ImportError:
raise DataError("Using reStructuredText test data requires having "
"'docutils' module version 0.9 or newer installed.")
class CaptureRobotData(CodeBlock):
    """Code-block directive that captures ``robotframework`` test data."""

    def run(self):
        """Stash 'robotframework' block content on the document; render nothing."""
        if 'robotframework' not in self.arguments:
            return []
        storage = RobotDataStorage(self.state_machine.document)
        storage.add_data(self.content)
        return []
register_directive('code', CaptureRobotData)
register_directive('code-block', CaptureRobotData)
register_directive('sourcecode', CaptureRobotData)
class RobotDataStorage(object):
    """Accumulates Robot Framework data rows on a docutils document tree."""

    def __init__(self, doctree):
        """Attach to the ``_robot_data`` list on `doctree`, creating it if absent."""
        try:
            self._robot_data = doctree._robot_data
        except AttributeError:
            doctree._robot_data = []
            self._robot_data = doctree._robot_data

    def add_data(self, rows):
        """Append every row from the given iterable to the stored data."""
        for row in rows:
            self._robot_data.append(row)

    def get_data(self):
        """Return all stored rows joined into one newline-separated string."""
        return '\n'.join(self._robot_data)

    def has_data(self):
        """Return True if any data rows have been stored on the document."""
        return len(self._robot_data) > 0
| 32.214286 | 75 | 0.720621 |
from robot.errors import DataError
try:
from docutils.core import publish_doctree
from docutils.parsers.rst.directives import register_directive
from docutils.parsers.rst.directives.body import CodeBlock
except ImportError:
raise DataError("Using reStructuredText test data requires having "
"'docutils' module version 0.9 or newer installed.")
class CaptureRobotData(CodeBlock):
    # Replacement for the docutils code-block directive: instead of rendering,
    # it captures 'robotframework' blocks onto the document for later parsing.
    def run(self):
        """Store ``robotframework`` block content on the document; emit no nodes."""
        if 'robotframework' in self.arguments:
            store = RobotDataStorage(self.state_machine.document)
            store.add_data(self.content)
        return []
register_directive('code', CaptureRobotData)
register_directive('code-block', CaptureRobotData)
register_directive('sourcecode', CaptureRobotData)
class RobotDataStorage(object):
    """Accumulates Robot Framework data rows on a docutils document tree."""
    def __init__(self, doctree):
        # The list lives on the doctree itself, so every storage instance
        # created for the same document shares the same data.
        if not hasattr(doctree, '_robot_data'):
            doctree._robot_data = []
        self._robot_data = doctree._robot_data
    def add_data(self, rows):
        """Append every row from the given iterable to the stored data."""
        self._robot_data.extend(rows)
    def get_data(self):
        """Return all stored rows joined into one newline-separated string."""
        return '\n'.join(self._robot_data)
    def has_data(self):
        """Return True if any data rows have been stored on the document."""
        return bool(self._robot_data)
| true | true |
f72ce2fc8328c2744c2230cbb122e8c573eb15fd | 3,512 | py | Python | app/app/settings.py | mlobina/recipe-app-API | 0ded3c37a84c109c469d1dd7db015e8d73d3e9f6 | [
"MIT"
] | null | null | null | app/app/settings.py | mlobina/recipe-app-API | 0ded3c37a84c109c469d1dd7db015e8d73d3e9f6 | [
"MIT"
] | null | null | null | app/app/settings.py | mlobina/recipe-app-API | 0ded3c37a84c109c469d1dd7db015e8d73d3e9f6 | [
"MIT"
] | 1 | 2021-08-25T06:29:11.000Z | 2021-08-25T06:29:11.000Z | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-sumebxzlqerp)6^8g!b%n-)r03)4pxwioril1^4igma-3_iw=c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
'HOST': os.environ.get('DB_HOST'),
"PORT": '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'core.User'
| 25.266187 | 91 | 0.691059 |
from pathlib import Path
import os
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-sumebxzlqerp)6^8g!b%n-)r03)4pxwioril1^4igma-3_iw=c'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
'HOST': os.environ.get('DB_HOST'),
"PORT": '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'core.User'
| true | true |
f72ce338215fc493ab34b010dba156b5b7042cc3 | 948 | py | Python | barbican/model/migration/alembic_migrations/versions/4ecde3a3a72a_add_cas_column_to_project_quotas_table.py | mail2nsrajesh/barbican | d16d932b77486e9b2f8c6d30e628a6e66517b1a6 | [
"Apache-2.0"
] | 1 | 2020-03-01T05:01:57.000Z | 2020-03-01T05:01:57.000Z | barbican/model/migration/alembic_migrations/versions/4ecde3a3a72a_add_cas_column_to_project_quotas_table.py | kkutysllb/barbican | 7b14d983e0dce6dcffe9781b05c52335b8203fc7 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | barbican/model/migration/alembic_migrations/versions/4ecde3a3a72a_add_cas_column_to_project_quotas_table.py | kkutysllb/barbican | 7b14d983e0dce6dcffe9781b05c52335b8203fc7 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add cas column to project quotas table
Revision ID: 4ecde3a3a72a
Revises: 10220ccbe7fa
Create Date: 2015-09-09 09:40:08.540064
"""
# revision identifiers, used by Alembic.
revision = '4ecde3a3a72a'
down_revision = '10220ccbe7fa'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable integer `cas` column to the `project_quotas` table."""
    op.add_column(
        'project_quotas',
        sa.Column('cas', sa.Integer(), nullable=True))
| 27.882353 | 75 | 0.741561 |
revision = '4ecde3a3a72a'
down_revision = '10220ccbe7fa'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable integer `cas` column to the `project_quotas` table."""
    op.add_column(
        'project_quotas',
        sa.Column('cas', sa.Integer(), nullable=True))
| true | true |
f72ce5c56cac97953d6d62c376de59376e33bee5 | 1,625 | py | Python | ParameterFiles/params_bootes_3as_ext.py | dunkenj/eazy-pype | 9cb8ac765d659ace36c00293a5809fc4a066e1ec | [
"MIT"
] | 1 | 2019-07-25T10:55:18.000Z | 2019-07-25T10:55:18.000Z | ParameterFiles/params_bootes_3as_ext.py | dunkenj/eazy-pype | 9cb8ac765d659ace36c00293a5809fc4a066e1ec | [
"MIT"
] | null | null | null | ParameterFiles/params_bootes_3as_ext.py | dunkenj/eazy-pype | 9cb8ac765d659ace36c00293a5809fc4a066e1ec | [
"MIT"
] | 1 | 2018-12-18T16:31:41.000Z | 2018-12-18T16:31:41.000Z | """
Main inputs:
(Change for all fields)
"""
eazypath = '/data2/ken/photoz/eazy-photoz/src/eazy'
working_folder = '/data2/ken/photoz/bootes_3as_ext'
photometry_catalog = 'Bootes_merged_Icorr_2014a_all_ap3_mags.zs.fits.mod'
photometry_format = 'fits'
filter_file = 'filter.bootes_mbrown_2014a.res'
translate_file = 'brown.zphot.2014.translate'
zspec_col = 'z_spec'
flux_col = 'flux'
fluxerr_col ='fluxerr'
do_zp = True
do_zp_tests = False
do_subcats = False
do_full = False
do_stellar = False
do_hb = False
do_merge = False
"""
Training parameters
"""
Ncrossval = 1
test_fraction = 0.2
process_outliers = False
correct_extinction = False
"""
Fitting Parameters
(Change only when needed)
"""
# Templates: Any combination of 'eazy', 'swire', 'atlas'
templates = ['eazy', 'atlas', 'cosmos']#, 'swire']#, 'cosmos', 'atlas'] #,'cosmos', 'atlas']
fitting_mode = ['a', '1', '1']
defaults = ['defaults/zphot.eazy',
'defaults/zphot.atlas_ext',
'defaults/zphot.cosmos']
#'defaults/zphot.eazy',
#'defaults/zphot.atlas',
#'defaults/zphot.swire']
stellar_params = 'defaults/zphot.pickles'
additional_errors = [0.0, 0.1, 0.1]
template_error_norm = [1., 0., 0.]
template_error_file = ''
lambda_fit_max = [5., 30., 30.]
"""
Combination Parameters
"""
include_prior = True
fbad_prior = 'mag' # 'flat', 'vol' or 'mag'
prior_parameter_path = 'bootes_I_prior_coeff.npz'
prior_fname = 'ch2_mag'
prior_colname = 'ch2_mag'
alpha_colname = 'I_mag'
gpz = False
"""
System Parameters
(Specific system only - fixed after installation)
"""
block_size = 1e4
ncpus = 10
| 18.895349 | 94 | 0.687385 | eazypath = '/data2/ken/photoz/eazy-photoz/src/eazy'
working_folder = '/data2/ken/photoz/bootes_3as_ext'
photometry_catalog = 'Bootes_merged_Icorr_2014a_all_ap3_mags.zs.fits.mod'
photometry_format = 'fits'
filter_file = 'filter.bootes_mbrown_2014a.res'
translate_file = 'brown.zphot.2014.translate'
zspec_col = 'z_spec'
flux_col = 'flux'
fluxerr_col ='fluxerr'
do_zp = True
do_zp_tests = False
do_subcats = False
do_full = False
do_stellar = False
do_hb = False
do_merge = False
Ncrossval = 1
test_fraction = 0.2
process_outliers = False
correct_extinction = False
templates = ['eazy', 'atlas', 'cosmos'],
'defaults/zphot.atlas_ext',
'defaults/zphot.cosmos']
stellar_params = 'defaults/zphot.pickles'
additional_errors = [0.0, 0.1, 0.1]
template_error_norm = [1., 0., 0.]
template_error_file = ''
lambda_fit_max = [5., 30., 30.]
include_prior = True
fbad_prior = 'mag'
prior_parameter_path = 'bootes_I_prior_coeff.npz'
prior_fname = 'ch2_mag'
prior_colname = 'ch2_mag'
alpha_colname = 'I_mag'
gpz = False
block_size = 1e4
ncpus = 10
| true | true |
f72ce61d9bb99f838eedbbc565639f110f2dfc86 | 14,763 | py | Python | tf_agents/bandits/agents/neural_linucb_agent_test.py | PeterDomanski/agents | 63c1c76f16f2068a637b26282c34a8825583e73e | [
"Apache-2.0"
] | 1 | 2021-07-16T04:44:19.000Z | 2021-07-16T04:44:19.000Z | tf_agents/bandits/agents/neural_linucb_agent_test.py | PeterDomanski/agents | 63c1c76f16f2068a637b26282c34a8825583e73e | [
"Apache-2.0"
] | null | null | null | tf_agents/bandits/agents/neural_linucb_agent_test.py | PeterDomanski/agents | 63c1c76f16f2068a637b26282c34a8825583e73e | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.agents.neural_linucb_agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.bandits.agents import neural_linucb_agent
from tf_agents.bandits.agents import utils as bandit_utils
from tf_agents.bandits.drivers import driver_utils
from tf_agents.bandits.policies import policy_utilities
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import # TF internal
tfd = tfp.distributions
class DummyNet(network.Network):
  """Minimal encoding network: one Dense layer with deterministic weights.

  The kernel is initialized to all ones and the bias to all zeros, so the
  encoder output is fully predictable in the tests below.
  """
  def __init__(self, observation_spec, encoding_dim=10):
    super(DummyNet, self).__init__(
        observation_spec, state_spec=(), name='DummyNet')
    context_dim = observation_spec.shape[0]
    # NOTE(review): appends directly to the private Keras `_layers` list;
    # relies on network/Keras internals -- confirm against the pinned TF version.
    self._layers.append(
        tf.keras.layers.Dense(
            encoding_dim,
            kernel_initializer=tf.compat.v1.initializers.constant(
                np.ones([context_dim, encoding_dim])),
            bias_initializer=tf.compat.v1.initializers.constant(
                np.zeros([encoding_dim]))))
  def call(self, inputs, step_type=None, network_state=()):
    """Cast inputs to float32 and apply each layer in order."""
    del step_type  # Unused by this stateless encoder.
    inputs = tf.cast(inputs, tf.float32)
    for layer in self.layers:
      inputs = layer(inputs)
    return inputs, network_state
def test_cases():
  """Shared (batch_size, context_dim) parameterizations for the tests below."""
  cases = [
      {
          'testcase_name': '_batch1_contextdim10',
          'batch_size': 1,
          'context_dim': 10,
      },
      {
          'testcase_name': '_batch4_contextdim5',
          'batch_size': 4,
          'context_dim': 5,
      },
  ]
  return parameterized.named_parameters(*cases)
def _get_initial_and_final_steps(batch_size, context_dim):
  """Builds a (FIRST, LAST) pair of batched TimeSteps.

  The initial observation is range(batch_size * context_dim) reshaped to
  [batch_size, context_dim]; the final observation is the same values + 100.
  Final-step rewards are drawn uniformly from [0, 1).
  """
  observation = np.array(range(batch_size * context_dim)).reshape(
      [batch_size, context_dim])
  reward = np.random.uniform(0.0, 1.0, [batch_size])
  initial_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.FIRST, dtype=tf.int32, shape=[batch_size],
          name='step_type'),
      tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      tf.constant(observation, dtype=tf.float32,
                  shape=[batch_size, context_dim], name='observation'))
  final_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.LAST, dtype=tf.int32, shape=[batch_size],
          name='step_type'),
      tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      tf.constant(observation + 100.0, dtype=tf.float32,
                  shape=[batch_size, context_dim], name='observation'))
  return initial_step, final_step
def _get_initial_and_final_steps_with_action_mask(batch_size,
                                                  context_dim,
                                                  num_actions=None):
  """Like `_get_initial_and_final_steps`, but observations are (context, mask).

  The mask is `1 - eye(batch_size, num_actions)`: entry [i, i] is zero, every
  other entry is one, so batch row i differs from the others in exactly one
  action entry.
  """
  observation = np.array(range(batch_size * context_dim)).reshape(
      [batch_size, context_dim])
  observation = tf.constant(observation, dtype=tf.float32)
  mask = 1 - tf.eye(batch_size, num_columns=num_actions, dtype=tf.int32)
  reward = np.random.uniform(0.0, 1.0, [batch_size])
  initial_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.FIRST,
          dtype=tf.int32,
          shape=[batch_size],
          name='step_type'),
      tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      (observation, mask))
  final_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.LAST,
          dtype=tf.int32,
          shape=[batch_size],
          name='step_type'),
      tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      (observation + 100.0, mask))
  return initial_step, final_step
def _get_action_step(action):
  """Wraps an action array in a PolicyStep with empty policy info."""
  return policy_step.PolicyStep(
      action=tf.convert_to_tensor(action),
      info=policy_utilities.PolicyInfo())
def _get_experience(initial_step, action_step, final_step):
  """Packs one bandit transition into a Trajectory with a time dimension."""
  single_experience = driver_utils.trajectory_for_bandit(
      initial_step, action_step, final_step)
  # Adds a 'time' dimension.
  return tf.nest.map_structure(
      lambda x: tf.expand_dims(tf.convert_to_tensor(x), 1),
      single_experience)
@test_util.run_all_in_graph_and_eager_modes
class NeuralLinUCBAgentTest(tf.test.TestCase, parameterized.TestCase):
  def setUp(self):
    super(NeuralLinUCBAgentTest, self).setUp()
    # Resource variables keep variable behavior consistent across the graph
    # and eager modes this test class runs in.
    tf.compat.v1.enable_resource_variables()
  @test_cases()
  def testInitializeAgentNumTrainSteps0(self, batch_size, context_dim):
    """Agent with zero encoder-training steps initializes cleanly."""
    # batch_size comes from the shared test_cases() decorator but is unused here.
    num_actions = 5
    observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
    time_step_spec = time_step.time_step_spec(observation_spec)
    action_spec = tensor_spec.BoundedTensorSpec(
        dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
    encoder = DummyNet(observation_spec)
    agent = neural_linucb_agent.NeuralLinUCBAgent(
        time_step_spec=time_step_spec,
        action_spec=action_spec,
        encoding_network=encoder,
        encoding_network_num_train_steps=0,
        encoding_dim=10,
        optimizer=None)
    self.evaluate(agent.initialize())
  @test_cases()
  def testInitializeAgentNumTrainSteps10(self, batch_size, context_dim):
    """Agent with ten encoder-training steps initializes cleanly."""
    # batch_size comes from the shared test_cases() decorator but is unused here.
    num_actions = 5
    observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
    time_step_spec = time_step.time_step_spec(observation_spec)
    action_spec = tensor_spec.BoundedTensorSpec(
        dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
    encoder = DummyNet(observation_spec)
    agent = neural_linucb_agent.NeuralLinUCBAgent(
        time_step_spec=time_step_spec,
        action_spec=action_spec,
        encoding_network=encoder,
        encoding_network_num_train_steps=10,
        encoding_dim=10,
        optimizer=None)
    self.evaluate(agent.initialize())
  @test_cases()
  def testNeuralLinUCBUpdateNumTrainSteps0(self, batch_size=1, context_dim=10):
    """Check NeuralLinUCBAgent updates when behaving like LinUCB."""
    # Construct a `Trajectory` for the given action, observation, reward.
    num_actions = 5
    initial_step, final_step = _get_initial_and_final_steps(
        batch_size, context_dim)
    action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
    action_step = _get_action_step(action)
    experience = _get_experience(initial_step, action_step, final_step)
    # Construct an agent and perform the update.
    observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
    time_step_spec = time_step.time_step_spec(observation_spec)
    action_spec = tensor_spec.BoundedTensorSpec(
        dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
    encoder = DummyNet(observation_spec)
    encoding_dim = 10
    agent = neural_linucb_agent.NeuralLinUCBAgent(
        time_step_spec=time_step_spec,
        action_spec=action_spec,
        encoding_network=encoder,
        encoding_network_num_train_steps=0,
        encoding_dim=encoding_dim,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2))
    loss_info = agent.train(experience)
    self.evaluate(agent.initialize())
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(loss_info)
    final_a = self.evaluate(agent.cov_matrix)
    final_b = self.evaluate(agent.data_vector)
    # Compute the expected updated estimates.
    # Partition observations and rewards by the action taken, so each arm's
    # statistics can be recomputed independently below.
    observations_list = tf.dynamic_partition(
        data=tf.reshape(tf.cast(experience.observation, tf.float64),
                        [batch_size, context_dim]),
        partitions=tf.convert_to_tensor(action),
        num_partitions=num_actions)
    rewards_list = tf.dynamic_partition(
        data=tf.reshape(tf.cast(experience.reward, tf.float64), [batch_size]),
        partitions=tf.convert_to_tensor(action),
        num_partitions=num_actions)
    expected_a_updated_list = []
    expected_b_updated_list = []
    for _, (observations_for_arm, rewards_for_arm) in enumerate(zip(
        observations_list, rewards_list)):
      encoded_observations_for_arm, _ = encoder(observations_for_arm)
      encoded_observations_for_arm = tf.cast(
          encoded_observations_for_arm, dtype=tf.float64)
      num_samples_for_arm_current = tf.cast(
          tf.shape(rewards_for_arm)[0], tf.float64)
      num_samples_for_arm_total = num_samples_for_arm_current
      # pylint: disable=cell-var-from-loop
      def true_fn():
        # Arm received data: A += X^T X and b += sum(r * x).
        a_new = tf.matmul(
            encoded_observations_for_arm,
            encoded_observations_for_arm,
            transpose_a=True)
        b_new = bandit_utils.sum_reward_weighted_observations(
            rewards_for_arm, encoded_observations_for_arm)
        return a_new, b_new
      def false_fn():
        # Arm received no data: statistics stay zero.
        return (tf.zeros([encoding_dim, encoding_dim], dtype=tf.float64),
                tf.zeros([encoding_dim], dtype=tf.float64))
      a_new, b_new = tf.cond(
          tf.squeeze(num_samples_for_arm_total) > 0,
          true_fn,
          false_fn)
      expected_a_updated_list.append(self.evaluate(a_new))
      expected_b_updated_list.append(self.evaluate(b_new))
    # Check that the actual updated estimates match the expectations.
    self.assertAllClose(expected_a_updated_list, final_a)
    self.assertAllClose(expected_b_updated_list, final_b)
  @test_cases()
  def testNeuralLinUCBUpdateNumTrainSteps10(self, batch_size=1, context_dim=10):
    """Check NeuralLinUCBAgent updates when behaving like eps-greedy."""
    # Construct a `Trajectory` for the given action, observation, reward.
    num_actions = 5
    initial_step, final_step = _get_initial_and_final_steps(
        batch_size, context_dim)
    action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
    action_step = _get_action_step(action)
    experience = _get_experience(initial_step, action_step, final_step)
    # Construct an agent and perform the update. An explicit variable
    # collection is passed in here (unlike the LinUCB-mode test above).
    observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
    time_step_spec = time_step.time_step_spec(observation_spec)
    action_spec = tensor_spec.BoundedTensorSpec(
        dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
    encoder = DummyNet(observation_spec)
    encoding_dim = 10
    variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
        num_actions, encoding_dim)
    agent = neural_linucb_agent.NeuralLinUCBAgent(
        time_step_spec=time_step_spec,
        action_spec=action_spec,
        encoding_network=encoder,
        encoding_network_num_train_steps=10,
        encoding_dim=encoding_dim,
        variable_collection=variable_collection,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))
    loss_info, _ = agent.train(experience)
    self.evaluate(agent.initialize())
    self.evaluate(tf.compat.v1.global_variables_initializer())
    loss_value = self.evaluate(loss_info)
    # Only sanity-check the loss is positive; the exact value depends on the
    # randomly drawn rewards.
    self.assertGreater(loss_value, 0.0)
  @test_cases()
  def testNeuralLinUCBUpdateNumTrainSteps10MaskedActions(
      self, batch_size=1, context_dim=10):
    """Check updates when behaving like eps-greedy and using masked actions."""
    # Construct a `Trajectory` for the given action, observation, reward.
    num_actions = 5
    initial_step, final_step = _get_initial_and_final_steps_with_action_mask(
        batch_size, context_dim, num_actions)
    action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
    action_step = _get_action_step(action)
    experience = _get_experience(initial_step, action_step, final_step)
    # Construct an agent and perform the update. The observation is a pair:
    # (context vector, per-action mask).
    observation_spec = (tensor_spec.TensorSpec([context_dim], tf.float32),
                        tensor_spec.TensorSpec([num_actions], tf.int32))
    time_step_spec = time_step.time_step_spec(observation_spec)
    action_spec = tensor_spec.BoundedTensorSpec(
        dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
    # The encoder only sees the context part of the observation tuple.
    encoder = DummyNet(observation_spec[0])
    encoding_dim = 10
    agent = neural_linucb_agent.NeuralLinUCBAgent(
        time_step_spec=time_step_spec,
        action_spec=action_spec,
        encoding_network=encoder,
        encoding_network_num_train_steps=10,
        encoding_dim=encoding_dim,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),
        # Splits the observation tuple into (context, action mask).
        observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))
    loss_info, _ = agent.train(experience)
    self.evaluate(agent.initialize())
    self.evaluate(tf.compat.v1.global_variables_initializer())
    loss_value = self.evaluate(loss_info)
    self.assertGreater(loss_value, 0.0)
  def testInitializeRestoreVariableCollection(self):
    """Check that a NeuralLinUCBVariableCollection round-trips a checkpoint."""
    if not tf.executing_eagerly():
      self.skipTest('Test only works in eager mode.')
    num_actions = 5
    encoding_dim = 7
    variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
        num_actions=num_actions, encoding_dim=encoding_dim)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(variable_collection.num_samples_list)
    # Save a checkpoint of the freshly initialized variables.
    checkpoint = tf.train.Checkpoint(variable_collection=variable_collection)
    checkpoint_dir = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_dir, 'checkpoint')
    checkpoint.save(file_prefix=checkpoint_prefix)
    # Mutate a variable, then restore; the restore must undo the mutation.
    variable_collection.actions_from_reward_layer.assign(False)
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    checkpoint_load_status = checkpoint.restore(latest_checkpoint)
    self.evaluate(checkpoint_load_status.initialize_or_restore())
    self.assertEqual(
        self.evaluate(variable_collection.actions_from_reward_layer), True)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 40.446575 | 110 | 0.721398 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.bandits.agents import neural_linucb_agent
from tf_agents.bandits.agents import utils as bandit_utils
from tf_agents.bandits.drivers import driver_utils
from tf_agents.bandits.policies import policy_utilities
from tf_agents.networks import network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step
from tensorflow.python.framework import test_util
class DummyNet(network.Network):
  """A deterministic single dense-layer encoder used as the test network.

  The layer is initialized with an all-ones kernel and a zero bias so that
  encodings are fully reproducible across runs.
  """

  def __init__(self, observation_spec, encoding_dim=10):
    super(DummyNet, self).__init__(
        observation_spec, state_spec=(), name='DummyNet')
    context_dim = observation_spec.shape[0]
    # Single dense layer with constant (deterministic) initializers.
    self._layers.append(
        tf.keras.layers.Dense(
            encoding_dim,
            kernel_initializer=tf.compat.v1.initializers.constant(
                np.ones([context_dim, encoding_dim])),
            bias_initializer=tf.compat.v1.initializers.constant(
                np.zeros([encoding_dim]))))

  def call(self, inputs, step_type=None, network_state=()):
    # step_type is part of the Network API but unused by this dummy encoder.
    del step_type
    inputs = tf.cast(inputs, tf.float32)
    for layer in self.layers:
      inputs = layer(inputs)
    return inputs, network_state
def test_cases():
  """Return parameterized cases covering two (batch_size, context_dim) pairs."""
  return parameterized.named_parameters(
      {
          'testcase_name': '_batch1_contextdim10',
          'batch_size': 1,
          'context_dim': 10,
      }, {
          'testcase_name': '_batch4_contextdim5',
          'batch_size': 4,
          'context_dim': 5,
      })
def _get_initial_and_final_steps(batch_size, context_dim):
  """Build a (FIRST, LAST) TimeStep pair with deterministic observations.

  Observations are consecutive integers reshaped to [batch_size, context_dim];
  the final step offsets them by 100 so initial and final observations differ.
  Rewards on the final step are drawn uniformly from [0, 1).
  """
  observation = np.array(range(batch_size * context_dim)).reshape(
      [batch_size, context_dim])
  reward = np.random.uniform(0.0, 1.0, [batch_size])
  initial_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.FIRST, dtype=tf.int32, shape=[batch_size],
          name='step_type'),
      tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      tf.constant(observation, dtype=tf.float32,
                  shape=[batch_size, context_dim], name='observation'))
  final_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.LAST, dtype=tf.int32, shape=[batch_size],
          name='step_type'),
      tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      # Offset by 100 to distinguish final from initial observations.
      tf.constant(observation + 100.0, dtype=tf.float32,
                  shape=[batch_size, context_dim], name='observation'))
  return initial_step, final_step
def _get_initial_and_final_steps_with_action_mask(batch_size,
                                                  context_dim,
                                                  num_actions=None):
  """Build a (FIRST, LAST) TimeStep pair whose observation includes a mask.

  The observation is a (context, mask) tuple. The mask is 1 - eye, i.e. row i
  zeroes out entry i; presumably 0 marks a disallowed action — confirm against
  the policy's mask convention.
  """
  observation = np.array(range(batch_size * context_dim)).reshape(
      [batch_size, context_dim])
  observation = tf.constant(observation, dtype=tf.float32)
  mask = 1 - tf.eye(batch_size, num_columns=num_actions, dtype=tf.int32)
  reward = np.random.uniform(0.0, 1.0, [batch_size])
  initial_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.FIRST,
          dtype=tf.int32,
          shape=[batch_size],
          name='step_type'),
      tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      (observation, mask))
  final_step = time_step.TimeStep(
      tf.constant(
          time_step.StepType.LAST,
          dtype=tf.int32,
          shape=[batch_size],
          name='step_type'),
      tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),
      tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),
      # Same 100-offset trick as _get_initial_and_final_steps; same mask.
      (observation + 100.0, mask))
  return initial_step, final_step
def _get_action_step(action):
  """Wrap an action array in a PolicyStep with empty policy info."""
  return policy_step.PolicyStep(
      action=tf.convert_to_tensor(action),
      info=policy_utilities.PolicyInfo())
def _get_experience(initial_step, action_step, final_step):
  """Build a single-step bandit Trajectory with an added time dimension."""
  single_experience = driver_utils.trajectory_for_bandit(
      initial_step, action_step, final_step)
  # All the data in the trajectory is in a batch of size 1; expand_dims on
  # axis 1 adds the time dimension expected by agent.train().
  return tf.nest.map_structure(
      lambda x: tf.expand_dims(tf.convert_to_tensor(x), 1),
      single_experience)
@test_util.run_all_in_graph_and_eager_modes
class NeuralLinUCBAgentTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(NeuralLinUCBAgentTest, self).setUp()
tf.compat.v1.enable_resource_variables()
@test_cases()
def testInitializeAgentNumTrainSteps0(self, batch_size, context_dim):
num_actions = 5
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=10,
optimizer=None)
self.evaluate(agent.initialize())
@test_cases()
def testInitializeAgentNumTrainSteps10(self, batch_size, context_dim):
num_actions = 5
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=10,
optimizer=None)
self.evaluate(agent.initialize())
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps0(self, batch_size=1, context_dim=10):
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=0,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2))
loss_info = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(loss_info)
final_a = self.evaluate(agent.cov_matrix)
final_b = self.evaluate(agent.data_vector)
observations_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.observation, tf.float64),
[batch_size, context_dim]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
rewards_list = tf.dynamic_partition(
data=tf.reshape(tf.cast(experience.reward, tf.float64), [batch_size]),
partitions=tf.convert_to_tensor(action),
num_partitions=num_actions)
expected_a_updated_list = []
expected_b_updated_list = []
for _, (observations_for_arm, rewards_for_arm) in enumerate(zip(
observations_list, rewards_list)):
encoded_observations_for_arm, _ = encoder(observations_for_arm)
encoded_observations_for_arm = tf.cast(
encoded_observations_for_arm, dtype=tf.float64)
num_samples_for_arm_current = tf.cast(
tf.shape(rewards_for_arm)[0], tf.float64)
num_samples_for_arm_total = num_samples_for_arm_current
def true_fn():
a_new = tf.matmul(
encoded_observations_for_arm,
encoded_observations_for_arm,
transpose_a=True)
b_new = bandit_utils.sum_reward_weighted_observations(
rewards_for_arm, encoded_observations_for_arm)
return a_new, b_new
def false_fn():
return (tf.zeros([encoding_dim, encoding_dim], dtype=tf.float64),
tf.zeros([encoding_dim], dtype=tf.float64))
a_new, b_new = tf.cond(
tf.squeeze(num_samples_for_arm_total) > 0,
true_fn,
false_fn)
expected_a_updated_list.append(self.evaluate(a_new))
expected_b_updated_list.append(self.evaluate(b_new))
self.assertAllClose(expected_a_updated_list, final_a)
self.assertAllClose(expected_b_updated_list, final_b)
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps10(self, batch_size=1, context_dim=10):
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps(
batch_size, context_dim)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec)
encoding_dim = 10
variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
num_actions, encoding_dim)
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
variable_collection=variable_collection,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))
loss_info, _ = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_value = self.evaluate(loss_info)
self.assertGreater(loss_value, 0.0)
@test_cases()
def testNeuralLinUCBUpdateNumTrainSteps10MaskedActions(
self, batch_size=1, context_dim=10):
num_actions = 5
initial_step, final_step = _get_initial_and_final_steps_with_action_mask(
batch_size, context_dim, num_actions)
action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)
action_step = _get_action_step(action)
experience = _get_experience(initial_step, action_step, final_step)
observation_spec = (tensor_spec.TensorSpec([context_dim], tf.float32),
tensor_spec.TensorSpec([num_actions], tf.int32))
time_step_spec = time_step.time_step_spec(observation_spec)
action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)
encoder = DummyNet(observation_spec[0])
encoding_dim = 10
agent = neural_linucb_agent.NeuralLinUCBAgent(
time_step_spec=time_step_spec,
action_spec=action_spec,
encoding_network=encoder,
encoding_network_num_train_steps=10,
encoding_dim=encoding_dim,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),
observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))
loss_info, _ = agent.train(experience)
self.evaluate(agent.initialize())
self.evaluate(tf.compat.v1.global_variables_initializer())
loss_value = self.evaluate(loss_info)
self.assertGreater(loss_value, 0.0)
def testInitializeRestoreVariableCollection(self):
if not tf.executing_eagerly():
self.skipTest('Test only works in eager mode.')
num_actions = 5
encoding_dim = 7
variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(
num_actions=num_actions, encoding_dim=encoding_dim)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(variable_collection.num_samples_list)
checkpoint = tf.train.Checkpoint(variable_collection=variable_collection)
checkpoint_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_dir, 'checkpoint')
checkpoint.save(file_prefix=checkpoint_prefix)
variable_collection.actions_from_reward_layer.assign(False)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint_load_status = checkpoint.restore(latest_checkpoint)
self.evaluate(checkpoint_load_status.initialize_or_restore())
self.assertEqual(
self.evaluate(variable_collection.actions_from_reward_layer), True)
if __name__ == '__main__':
tf.test.main()
| true | true |
f72ce6309a00519d759cb64bf82d33c3718dba6a | 2,373 | py | Python | src/fhir_types/FHIR_AdverseEvent_SuspectEntity.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | 2 | 2022-02-03T00:51:30.000Z | 2022-02-03T18:42:43.000Z | src/fhir_types/FHIR_AdverseEvent_SuspectEntity.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | src/fhir_types/FHIR_AdverseEvent_SuspectEntity.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | from typing import Any, List, Literal, TypedDict
from .FHIR_AdverseEvent_Causality import FHIR_AdverseEvent_Causality
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
# Actual or potential/avoided event causing unintended physical injury resulting from or contributed to by medical care, a research study or other healthcare setting factors that requires additional monitoring, treatment, or hospitalization, or that results in death.
# TypedDict describing the FHIR AdverseEvent.suspectEntity backbone element.
# total=False: every key is optional from the type checker's point of view.
FHIR_AdverseEvent_SuspectEntity = TypedDict(
    "FHIR_AdverseEvent_SuspectEntity",
    {
        # Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
        "id": FHIR_string,
        # May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
        "extension": List[Any],
        # May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
        "modifierExtension": List[Any],
        # Identifies the actual instance of what caused the adverse event. May be a substance, medication, medication administration, medication statement or a device.
        "instance": FHIR_Reference,
        # Information on the possible cause of the event.
        "causality": List[FHIR_AdverseEvent_Causality],
    },
    total=False,
)
| 98.875 | 836 | 0.785925 | from typing import Any, List, Literal, TypedDict
from .FHIR_AdverseEvent_Causality import FHIR_AdverseEvent_Causality
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
FHIR_AdverseEvent_SuspectEntity = TypedDict(
"FHIR_AdverseEvent_SuspectEntity",
{
"id": FHIR_string,
"extension": List[Any],
"modifierExtension": List[Any],
# Identifies the actual instance of what caused the adverse event. May be a substance, medication, medication administration, medication statement or a device.
"instance": FHIR_Reference,
# Information on the possible cause of the event.
"causality": List[FHIR_AdverseEvent_Causality],
},
total=False,
)
| true | true |
f72ce673fa0c764781f68e1466651c550042bccc | 13,709 | py | Python | src/attributecode/transform.py | oneforthidiot/aboutcode-toolkit | 666cc8857aadaeb4e07c540c817c831b0f3234e2 | [
"Apache-2.0"
] | 1 | 2021-01-02T08:16:55.000Z | 2021-01-02T08:16:55.000Z | src/attributecode/transform.py | oneforthidiot/aboutcode-toolkit | 666cc8857aadaeb4e07c540c817c831b0f3234e2 | [
"Apache-2.0"
] | null | null | null | src/attributecode/transform.py | oneforthidiot/aboutcode-toolkit | 666cc8857aadaeb4e07c540c817c831b0f3234e2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) 2013-2020 nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import Counter
from collections import OrderedDict
import io
import json
import attr
from attributecode import CRITICAL
from attributecode import Error
from attributecode import saneyaml
from attributecode.util import csv
from attributecode.util import python2
from attributecode.util import replace_tab_with_spaces
if python2: # pragma: nocover
from itertools import izip_longest as zip_longest # NOQA
else: # pragma: nocover
from itertools import zip_longest # NOQA
def transform_csv_to_csv(location, output, transformer):
    """
    Read a CSV file at `location`, apply transformations using the
    `transformer` Transformer, and write a new CSV file at `output`.
    Return a list of Error objects (empty on success).
    """
    if not transformer:
        raise ValueError('Cannot transform without Transformer')

    rows = read_csv_rows(location)

    errors = []
    # `rows` is a generator: consuming the first row leaves only data rows.
    data = iter(rows)
    names = next(data)
    field_names = strip_trailing_fields_csv(names)

    dupes = check_duplicate_fields(field_names)
    if dupes:
        msg = u'Duplicated field name: %(name)s'
        for name in dupes:
            errors.append(Error(CRITICAL, msg % locals()))
        # Bug fix: this exit used to return a (field_names, [], errors) tuple
        # while the documented contract and every other exit return a plain
        # list of Error objects.
        return errors

    # Convert each data row to an ordered dict keyed by field name; missing
    # trailing values are filled with None by zip_longest.
    new_data = [OrderedDict(zip_longest(field_names, item)) for item in data]
    field_names, updated_data, errors = transform_data(new_data, transformer)

    if errors:
        return errors
    write_csv(output, updated_data, field_names)
    return []
def transform_json_to_json(location, output, transformer):
    """
    Read a JSON file at `location`, apply transformations using the
    `transformer` Transformer, and write a new JSON file at `output`.
    Return a list of Error objects (empty on success).
    """
    if not transformer:
        raise ValueError('Cannot transform without Transformer')

    # Load, normalize field names, then coerce to a list of dicts.
    loaded = read_json(location)
    stripped = strip_trailing_fields_json(loaded)
    normalized = normalize_dict_data(stripped)

    field_names, updated_data, errors = transform_data(normalized, transformer)
    if errors:
        return errors
    write_json(output, updated_data)
    return []
def strip_trailing_fields_csv(names):
    """
    Return `names` with surrounding whitespace stripped from each name (#456).
    """
    return [name.strip() for name in names]
def strip_trailing_fields_json(items):
    """
    Return a list of OrderedDicts built from `items` where surrounding
    whitespace is stripped from each field name (#456).
    """
    data = []
    for item in items:
        # Bug fix: the OrderedDict was previously created once outside this
        # loop, so every entry of the result aliased the same dict with the
        # keys of all items merged together.
        od = OrderedDict()
        for field in item:
            stripped_field_name = field.strip()
            od[stripped_field_name] = item[field]
        data.append(od)
    return data
def normalize_dict_data(data):
    """
    Check if the input `data` is a JSON document from scancode-toolkit and
    normalize it to a plain list of dictionaries if it is.
    Return a list of dictionaries.
    """
    new_data = data
    try:
        # A scancode-toolkit document carries its payload under "files".
        if data["headers"][0]["tool_name"] == "scancode-toolkit":
            new_data = data["files"]
    except (TypeError, KeyError, IndexError):
        # Bug fix: a bare `except:` previously swallowed every exception, and
        # a document with headers but a different tool_name left `new_data`
        # unbound, crashing with NameError below. Not a scancode document:
        # keep the data as-is.
        pass

    if not isinstance(new_data, list):
        new_data = [new_data]
    return new_data
def transform_data(data, transformer):
    """
    Apply the `transformer` Transformer to `data`, a list of ordered dicts:
    renamings first, then field filtering and exclusion, then required-field
    checks.
    Return a tuple of:
        ([field names...], [transformed ordered dict...], [Error objects..])
    """
    if not transformer:
        # NOTE(review): both callers raise before getting here with a falsy
        # transformer, so this branch keeps its historical (untupled) return.
        return data

    renamed_field_data = transformer.apply_renamings(data)
    # Bug fix: an empty data list previously raised IndexError on [0] here.
    field_names = list(renamed_field_data[0].keys()) if renamed_field_data else []

    if transformer.field_filters:
        renamed_field_data = list(transformer.filter_fields(renamed_field_data))
        field_names = [c for c in field_names if c in transformer.field_filters]

    if transformer.exclude_fields:
        renamed_field_data = list(transformer.filter_excluded(renamed_field_data))
        field_names = [c for c in field_names if c not in transformer.exclude_fields]

    errors = transformer.check_required_fields(renamed_field_data)
    if errors:
        return field_names, data, errors

    return field_names, renamed_field_data, errors
tranformer_config_help = '''
A transform configuration file is used to describe which transformations and
validations to apply to a source CSV file. This is a simple text file using YAML
format, using the same format as an .ABOUT file.
The attributes that can be set in a configuration file are:
* field_renamings:
An optional map of source CSV or JSON field name to target CSV/JSON new field name that
is used to rename CSV fields.
For instance with this configuration the fields "Directory/Location" will be
renamed to "about_resource" and "foo" to "bar":
field_renamings:
about_resource : 'Directory/Location'
bar : foo
The renaming is always applied first before other transforms and checks. All
other field names referenced below are these that exist AFTER the renamings
have been applied to the existing field names.
* required_fields:
An optional list of required field names that must have a value, beyond the
standard fields names. If a source CSV/JSON does not have such a field or a row is
missing a value for a required field, an error is reported.
For instance with this configuration an error will be reported if the fields
"name" and "version" are missing or if any row does not have a value set for
these fields:
required_fields:
- name
- version
* field_filters:
An optional list of field names that should be kept in the transformed CSV/JSON. If
this list is provided, all the fields from the source CSV/JSON that should be kept
in the target CSV/JSON must be listed regardless of either standard or required
fields. If this list is not provided, all source CSV/JSON fields are kept in the
transformed target CSV/JSON.
For instance with this configuration the target CSV/JSON will only contains the "name"
and "version" fields and no other field:
field_filters:
- name
- version
* exclude_fields:
An optional list of field names that should be excluded in the transformed CSV/JSON. If
this list is provided, all the fields from the source CSV/JSON that should be excluded
in the target CSV/JSON must be listed. Excluding standard or required fields will cause
an error. If this list is not provided, all source CSV/JSON fields are kept in the
transformed target CSV/JSON.
For instance with this configuration the target CSV/JSON will not contain the "type"
and "temp" fields:
exclude_fields:
- type
- temp
'''
@attr.attributes
class Transformer(object):
    # The user-facing help text doubles as the class docstring.
    __doc__ = tranformer_config_help

    field_renamings = attr.attrib(default=attr.Factory(dict))
    required_fields = attr.attrib(default=attr.Factory(list))
    field_filters = attr.attrib(default=attr.Factory(list))
    exclude_fields = attr.attrib(default=attr.Factory(list))

    # a list of all the standard fields from AboutCode toolkit
    standard_fields = attr.attrib(default=attr.Factory(list), init=False)
    # a list of the subset of standard fields that are essential and MUST be
    # present for AboutCode toolkit to work
    essential_fields = attr.attrib(default=attr.Factory(list), init=False)

    # called by attr after the __init__()
    def __attrs_post_init__(self, *args, **kwargs):
        # NOTE(review): local import — presumably avoids a circular import
        # between this module and attributecode.model; confirm.
        from attributecode.model import About
        about = About()
        self.essential_fields = list(about.required_fields)
        self.standard_fields = [f.name for f in about.all_fields()]

    @classmethod
    def default(cls):
        """
        Return a default Transformer with built-in transforms.
        """
        return cls(
            field_renamings={},
            required_fields=[],
            field_filters=[],
            exclude_fields=[],
        )

    @classmethod
    def from_file(cls, location):
        """
        Load and return a Transformer instance from a YAML configuration file at
        `location`.
        """
        with io.open(location, encoding='utf-8') as conf:
            data = saneyaml.load(replace_tab_with_spaces(conf.read()))
        return cls(
            field_renamings=data.get('field_renamings', {}),
            required_fields=data.get('required_fields', []),
            field_filters=data.get('field_filters', []),
            exclude_fields=data.get('exclude_fields', []),
        )

    def check_required_fields(self, data):
        """
        Return a list of Error for a `data` list of ordered dict where a
        dict is missing a value for a required field name.
        """
        errors = []
        # Essential (toolkit-mandated) fields are always required on top of
        # the user-configured required_fields.
        required = set(self.essential_fields + self.required_fields)
        if not required:
            return []
        for rn, item in enumerate(data):
            missings = [rk for rk in required if not item.get(rk)]
            if not missings:
                continue
            missings = ', '.join(missings)
            msg = 'Row {rn} is missing required values for fields: {missings}'
            errors.append(Error(CRITICAL, msg.format(**locals())))
        return errors

    def apply_renamings(self, data):
        """
        Return a transformed list of `field_names` where fields are renamed
        based on this Transformer configuration.
        """
        renamings = self.field_renamings
        if not renamings:
            return data
        renamings = {n: rn for n, rn in renamings.items()}
        renamed_list = []
        for row in data:
            renamed = OrderedDict()
            for key in row:
                matched = False
                # The mapping is new_name -> old_name: a source key equal to
                # the old name is emitted under the new name.
                for renamed_key in renamings:
                    if key == renamings[renamed_key]:
                        renamed[renamed_key] = row[key]
                        matched = True
                if not matched:
                    renamed[key] = row[key]
            renamed_list.append(renamed)
        return renamed_list

    """
    def clean_fields(self, field_names):
        Apply standard cleanups to a list of fields and return these.
        if not field_names:
            return field_names
        return [c.strip().lower() for c in field_names]
    """

    def filter_fields(self, data):
        """
        Yield transformed dicts from a `data` list of dicts keeping only
        fields with a name in the `field_filters` of this Transformer.
        Return the data unchanged if no `field_filters` exists.
        """
        #field_filters = set(self.clean_fields(self.field_filters))
        field_filters = set(self.field_filters)
        for entry in data:
            items = ((k, v) for k, v in entry.items() if k in field_filters)
            yield OrderedDict(items)

    def filter_excluded(self, data):
        """
        Yield transformed dicts from a `data` list of dicts excluding
        fields with names in the `exclude_fields` of this Transformer.
        Return the data unchanged if no `exclude_fields` exists.
        """
        #exclude_fields = set(self.clean_fields(self.exclude_fields))
        exclude_fields = set(self.exclude_fields)
        for entry in data:
            items = ((k, v) for k, v in entry.items() if k not in exclude_fields)
            yield OrderedDict(items)
def check_duplicate_fields(field_names):
    """
    Check that the `field_names` list of field name strings contains no
    duplicates, ignoring case. Return a sorted list of the lowercased
    names that occur more than once.
    """
    counts = Counter(name.lower() for name in field_names)
    return sorted(name for name, seen in counts.items() if seen > 1)
def read_csv_rows(location):
    """
    Yield each row of the CSV file at `location` as a list of values.
    Undecodable bytes are replaced rather than raising.
    """
    with io.open(location, encoding='utf-8', errors='replace') as csvfile:
        for row in csv.reader(csvfile):
            yield row
def read_json(location):
    """
    Return the parsed content of the JSON file at `location`, preserving
    key order by loading objects as OrderedDicts.
    """
    with io.open(location, encoding='utf-8', errors='replace') as jsonfile:
        return json.load(jsonfile, object_pairs_hook=OrderedDict)
def write_csv(location, data, field_names):  # NOQA
    """
    Write the `data` list of ordered dicts as CSV to the file at `location`,
    using `field_names` for the header row and column order.
    """
    with io.open(location, 'w', encoding='utf-8', newline='\n') as out:
        dict_writer = csv.DictWriter(out, fieldnames=field_names)
        dict_writer.writeheader()
        for row in data:
            dict_writer.writerow(row)
def write_json(location, data):
    """
    Write the `data` list of ordered dicts as JSON to the file at `location`.
    """
    # Consistency/robustness fix: open with an explicit UTF-8 encoding like
    # every other file operation in this module, instead of relying on the
    # platform-default encoding of a bare open().
    with io.open(location, 'w', encoding='utf-8') as jsonfile:
        json.dump(data, jsonfile, indent=3)
| 34.358396 | 87 | 0.665183 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import Counter
from collections import OrderedDict
import io
import json
import attr
from attributecode import CRITICAL
from attributecode import Error
from attributecode import saneyaml
from attributecode.util import csv
from attributecode.util import python2
from attributecode.util import replace_tab_with_spaces
if python2:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
def transform_csv_to_csv(location, output, transformer):
if not transformer:
raise ValueError('Cannot transform without Transformer')
rows = read_csv_rows(location)
errors = []
data = iter(rows)
names = next(rows)
field_names = strip_trailing_fields_csv(names)
dupes = check_duplicate_fields(field_names)
if dupes:
msg = u'Duplicated field name: %(name)s'
for name in dupes:
errors.append(Error(CRITICAL, msg % locals()))
return field_names, [], errors
new_data = [OrderedDict(zip_longest(field_names, item)) for item in data]
field_names, updated_data, errors = transform_data(new_data, transformer)
if errors:
return errors
else:
write_csv(output, updated_data, field_names)
return []
def transform_json_to_json(location, output, transformer):
if not transformer:
raise ValueError('Cannot transform without Transformer')
items = read_json(location)
data = strip_trailing_fields_json(items)
new_data = normalize_dict_data(data)
field_names, updated_data, errors = transform_data(new_data, transformer)
if errors:
return errors
else:
write_json(output, updated_data)
return []
def strip_trailing_fields_csv(names):
field_names = []
for name in names:
field_names.append(name.strip())
return field_names
def strip_trailing_fields_json(items):
data = []
od = OrderedDict()
for item in items:
for field in item:
stripped_field_name = field.strip()
od[stripped_field_name] = item[field]
data.append(od)
return data
def normalize_dict_data(data):
    """Return *data* normalized to a list of mappings.

    scancode-toolkit JSON output keeps its file-level records under the
    'files' key; any other input is returned as-is, wrapped in a list when it
    is not one already.

    Bug fixes: when 'headers' existed but the tool was not scancode-toolkit,
    ``new_data`` was never assigned and the function raised
    UnboundLocalError; the bare ``except`` is also narrowed to the lookup
    errors actually expected here.
    """
    new_data = data
    try:
        if data["headers"][0]["tool_name"] == "scancode-toolkit":
            new_data = data["files"]
    except (KeyError, IndexError, TypeError):
        new_data = data
    if not isinstance(new_data, list):
        new_data = [new_data]
    return new_data
def transform_data(data, transformer):
    """Apply the transformer's renamings, filters and required-field checks.

    Always returns a 3-tuple ``(field_names, rows, errors)``.

    Bug fixes: the no-transformer path previously returned bare *data*,
    which broke callers that unpack three values; an empty *data* list also
    raised IndexError when reading the first row's keys.
    """
    if not transformer:
        field_names = list(data[0].keys()) if data else []
        return field_names, data, []
    renamed_field_data = transformer.apply_renamings(data)
    field_names = list(renamed_field_data[0].keys()) if renamed_field_data else []
    if transformer.field_filters:
        renamed_field_data = list(transformer.filter_fields(renamed_field_data))
        field_names = [c for c in field_names if c in transformer.field_filters]
    if transformer.exclude_fields:
        renamed_field_data = list(transformer.filter_excluded(renamed_field_data))
        field_names = [c for c in field_names if c not in transformer.exclude_fields]
    errors = transformer.check_required_fields(renamed_field_data)
    if errors:
        # On errors callers only consume the error list; the original
        # (pre-rename) rows are returned unchanged, as before.
        return field_names, data, errors
    return field_names, renamed_field_data, errors
# Help text surfaced as Transformer.__doc__ and in CLI help. The historical
# "tranformer" (sic) spelling of this name is kept because the class below
# references it.
tranformer_config_help = '''
A transform configuration file is used to describe which transformations and
validations to apply to a source CSV file. This is a simple text file using YAML
format, using the same format as an .ABOUT file.
The attributes that can be set in a configuration file are:
* field_renamings:
An optional map of source CSV or JSON field name to target CSV/JSON new field name that
is used to rename CSV fields.
For instance with this configuration the fields "Directory/Location" will be
renamed to "about_resource" and "foo" to "bar":
field_renamings:
about_resource : 'Directory/Location'
bar : foo
The renaming is always applied first before other transforms and checks. All
other field names referenced below are these that exist AFTER the renamings
have been applied to the existing field names.
* required_fields:
An optional list of required field names that must have a value, beyond the
standard fields names. If a source CSV/JSON does not have such a field or a row is
missing a value for a required field, an error is reported.
For instance with this configuration an error will be reported if the fields
"name" and "version" are missing or if any row does not have a value set for
these fields:
required_fields:
- name
- version
* field_filters:
An optional list of field names that should be kept in the transformed CSV/JSON. If
this list is provided, all the fields from the source CSV/JSON that should be kept
in the target CSV/JSON must be listed regardless of either standard or required
fields. If this list is not provided, all source CSV/JSON fields are kept in the
transformed target CSV/JSON.
For instance with this configuration the target CSV/JSON will only contains the "name"
and "version" fields and no other field:
field_filters:
- name
- version
* exclude_fields:
An optional list of field names that should be excluded in the transformed CSV/JSON. If
this list is provided, all the fields from the source CSV/JSON that should be excluded
in the target CSV/JSON must be listed. Excluding standard or required fields will cause
an error. If this list is not provided, all source CSV/JSON fields are kept in the
transformed target CSV/JSON.
For instance with this configuration the target CSV/JSON will not contain the "type"
and "temp" fields:
exclude_fields:
- type
- temp
'''
@attr.attributes
class Transformer(object):
    # The class docstring is the user-facing configuration help text.
    __doc__ = tranformer_config_help

    # Configuration loaded from the transform file.
    field_renamings = attr.attrib(default=attr.Factory(dict))
    required_fields = attr.attrib(default=attr.Factory(list))
    field_filters = attr.attrib(default=attr.Factory(list))
    exclude_fields = attr.attrib(default=attr.Factory(list))
    # Derived from the About model after init; not user-settable.
    standard_fields = attr.attrib(default=attr.Factory(list), init=False)
    essential_fields = attr.attrib(default=attr.Factory(list), init=False)

    def __attrs_post_init__(self, *args, **kwargs):
        # Imported here to avoid a circular import with attributecode.model.
        from attributecode.model import About
        about = About()
        self.essential_fields = list(about.required_fields)
        self.standard_fields = [f.name for f in about.all_fields()]

    @classmethod
    def default(cls):
        # A no-op transformer: no renamings, no required fields, no filters.
        return cls(
            field_renamings={},
            required_fields=[],
            field_filters=[],
            exclude_fields=[],
        )

    @classmethod
    def from_file(cls, location):
        # Build a Transformer from a YAML configuration file.
        with io.open(location, encoding='utf-8') as conf:
            data = saneyaml.load(replace_tab_with_spaces(conf.read()))
        return cls(
            field_renamings=data.get('field_renamings', {}),
            required_fields=data.get('required_fields', []),
            field_filters=data.get('field_filters', []),
            exclude_fields=data.get('exclude_fields', []),
        )

    def check_required_fields(self, data):
        # Return a list of CRITICAL Errors for rows missing a value for any
        # essential or configured required field.
        errors = []
        required = set(self.essential_fields + self.required_fields)
        if not required:
            return []
        for rn, item in enumerate(data):
            missings = [rk for rk in required if not item.get(rk)]
            if not missings:
                continue
            missings = ', '.join(missings)
            msg = 'Row {rn} is missing required values for fields: {missings}'
            errors.append(Error(CRITICAL, msg.format(**locals())))
        return errors

    def apply_renamings(self, data):
        # Return rows with field names renamed; the configured mapping is
        # new_name -> old_name, so we match each key against the old names.
        renamings = self.field_renamings
        if not renamings:
            return data
        renamings = {n: rn for n, rn in renamings.items()}
        renamed_list = []
        for row in data:
            renamed = OrderedDict()
            for key in row:
                matched = False
                for renamed_key in renamings:
                    if key == renamings[renamed_key]:
                        renamed[renamed_key] = row[key]
                        matched = True
                if not matched:
                    renamed[key] = row[key]
            renamed_list.append(renamed)
        return renamed_list

    def filter_fields(self, data):
        # Yield rows keeping only the configured field_filters keys.
        field_filters = set(self.field_filters)
        for entry in data:
            items = ((k, v) for k, v in entry.items() if k in field_filters)
            yield OrderedDict(items)

    def filter_excluded(self, data):
        # Yield rows dropping the configured exclude_fields keys.
        exclude_fields = set(self.exclude_fields)
        for entry in data:
            items = ((k, v) for k, v in entry.items() if k not in exclude_fields)
            yield OrderedDict(items)
def check_duplicate_fields(field_names):
    """Return the sorted lower-cased names that appear more than once
    (case-insensitively) in *field_names*."""
    frequencies = Counter(name.lower() for name in field_names)
    return sorted(name for name, seen in frequencies.items() if seen > 1)
def read_csv_rows(location):
    """Yield each row of the CSV file at *location* as a list of values.

    Undecodable bytes are replaced rather than raising.
    """
    with io.open(location, encoding='utf-8', errors='replace') as stream:
        for record in csv.reader(stream):
            yield record
def read_json(location):
    """Load and return the JSON document at *location*, preserving key order.

    Undecodable bytes are replaced rather than raising.
    """
    with io.open(location, encoding='utf-8', errors='replace') as stream:
        return json.load(stream, object_pairs_hook=OrderedDict)
def write_csv(location, data, field_names):
    """Write *data* (a sequence of row mappings) as UTF-8 CSV to *location*,
    with a header row built from *field_names*."""
    with io.open(location, 'w', encoding='utf-8', newline='\n') as stream:
        writer = csv.DictWriter(stream, fieldnames=field_names)
        writer.writeheader()
        for row in data:
            writer.writerow(row)
def write_json(location, data):
    """Serialize *data* to *location* as JSON with a 3-space indent.

    Fix: write through io.open with an explicit UTF-8 encoding, matching
    every other reader/writer in this module, instead of relying on the
    platform default encoding of the builtin open().
    """
    with io.open(location, 'w', encoding='utf-8') as jsonfile:
        # dumps + a text write keeps this working on both Python 2 and 3.
        jsonfile.write(u'%s' % json.dumps(data, indent=3))
| true | true |
f72ce7712b5c7dee583c54beb8325116fc9f9df8 | 1,742 | py | Python | util.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | util.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | util.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | # https://deeplearningcourses.com/c/data-science-supervised-machine-learning-in-python
# https://www.udemy.com/data-science-supervised-machine-learning-in-python
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import pandas as pd
def get_data(limit=None):
    """Read train.csv and return (X, Y): features scaled to [0, 1] and the
    label column (column 0). Rows are shuffled; *limit* optionally caps the
    number of rows returned.

    Fix: removed leftover debug prints of row 4000 (an IndexError on any
    dataset with fewer than 4001 rows) and the duplicate un-scaled slice.
    """
    print("Reading in and transforming data...")
    df = pd.read_csv('train.csv')
    data = df.values
    np.random.shuffle(data)
    X = data[:, 1:] / 255.0  # pixel data is 0..255
    Y = data[:, 0]
    if limit is not None:
        X, Y = X[:limit], Y[:limit]
    return X, Y
def get_xor():
    """Return the XOR toy dataset: 200 2-D points in four quadrant squares.

    Class 0 occupies the upper-right and lower-left quadrants, class 1 the
    upper-left and lower-right ones.
    """
    offsets = [
        (0.5, 0.5),  # class 0: upper-right
        (0.0, 0.0),  # class 0: lower-left
        (0.0, 0.5),  # class 1: upper-left
        (0.5, 0.0),  # class 1: lower-right
    ]
    X = np.vstack([np.random.random((50, 2)) / 2 + off for off in offsets])
    Y = np.array([0] * 100 + [1] * 100)
    return X, Y
def get_donut():
    """Return two concentric noisy rings: 100 inner points (label 0) around
    radius 5 and 100 outer points (label 1) around radius 10."""
    N = 200

    def ring(radius):
        # distance from origin is radius + gaussian noise;
        # angle is uniform in [0, 2*pi)
        r = np.random.randn(N // 2) + radius
        theta = 2 * np.pi * np.random.random(N // 2)
        return np.concatenate([[r * np.cos(theta)], [r * np.sin(theta)]]).T

    X = np.concatenate([ring(5), ring(10)])
    Y = np.array([0] * (N // 2) + [1] * (N // 2))
    return X, Y
get_data() | 32.259259 | 86 | 0.596441 |
from __future__ import print_function, division
from builtins import range, input
import numpy as np
import pandas as pd
def get_data(limit=None):
    """Read train.csv and return (X, Y): features scaled to [0, 1] and the
    label column (column 0). Rows are shuffled; *limit* optionally caps the
    number of rows returned.

    Fix: removed leftover debug prints of row 4000 (an IndexError on any
    dataset with fewer than 4001 rows) and the duplicate un-scaled slice.
    """
    print("Reading in and transforming data...")
    df = pd.read_csv('train.csv')
    data = df.values
    np.random.shuffle(data)
    X = data[:, 1:] / 255.0  # pixel data is 0..255
    Y = data[:, 0]
    if limit is not None:
        X, Y = X[:limit], Y[:limit]
    return X, Y
def get_xor():
    """Return the XOR toy dataset: 200 2-D points, labels 0/1 by quadrant."""
    X = np.zeros((200, 2))
    X[:50] = np.random.random((50, 2)) / 2 + 0.5  # class 0: upper-right
    X[50:100] = np.random.random((50, 2)) / 2  # class 0: lower-left
    X[100:150] = np.random.random((50, 2)) / 2 + np.array([[0, 0.5]])  # class 1: upper-left
    X[150:] = np.random.random((50, 2)) / 2 + np.array([[0.5, 0]])  # class 1: lower-right
    Y = np.array([0]*100 + [1]*100)
    return X, Y
def get_donut():
    """Return two concentric noisy rings: 100 inner points (label 0) around
    radius 5 and 100 outer points (label 1) around radius 10."""
    N = 200
    R_inner = 5
    R_outer = 10
    # distance from origin is radius + gaussian noise;
    # angle theta is uniform in [0, 2*pi)
    R1 = np.random.randn(N//2) + R_inner
    theta = 2*np.pi*np.random.random(N//2)
    X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T
    R2 = np.random.randn(N//2) + R_outer
    theta = 2*np.pi*np.random.random(N//2)
    X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T
    X = np.concatenate([ X_inner, X_outer ])
    Y = np.array([0]*(N//2) + [1]*(N//2))
    return X, Y
get_data() | true | true |
f72ce7ae5cf820ccf1451c8e5cde1d89a16c1e52 | 3,007 | py | Python | pbj/electrostatics/pb_formulation/formulations/direct_external.py | bem4solvation/pbj | 4fa9c111596359192539787ae241a79d4316b15b | [
"MIT"
] | null | null | null | pbj/electrostatics/pb_formulation/formulations/direct_external.py | bem4solvation/pbj | 4fa9c111596359192539787ae241a79d4316b15b | [
"MIT"
] | 1 | 2022-02-18T17:34:37.000Z | 2022-02-18T17:34:37.000Z | pbj/electrostatics/pb_formulation/formulations/direct_external.py | bem4solvation/pbj | 4fa9c111596359192539787ae241a79d4316b15b | [
"MIT"
] | null | null | null | import numpy as np
import bempp.api
import os
from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
from .common import calculate_potential_one_surface
# Module-level flag read by the formulation framework (sign convention of
# the computed potential for this formulation).
invert_potential = True
def verify_parameters(self):
    """This formulation imposes no extra parameter constraints."""
    return True
def lhs(self):
    """Assemble the 2x2 blocked boundary-operator matrix of the direct
    formulation (exterior equation first) and store it in
    ``self.matrices['A']``.
    """
    dirichl_space = self.dirichl_space
    neumann_space = self.neumann_space
    ep_in = self.ep_in
    ep_out = self.ep_ex
    kappa = self.kappa
    operator_assembler = self.operator_assembler
    identity = sparse.identity(dirichl_space, dirichl_space, dirichl_space)
    # Interior (solute) operators: Laplace kernel.
    slp_in = laplace.single_layer(
        neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler
    )
    dlp_in = laplace.double_layer(
        dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler
    )
    # Exterior (solvent) operators: modified Helmholtz kernel with
    # screening parameter kappa.
    slp_out = modified_helmholtz.single_layer(
        neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler
    )
    dlp_out = modified_helmholtz.double_layer(
        dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler
    )
    A = bempp.api.BlockedOperator(2, 2)
    # Row 0: exterior integral equation; row 1: interior one, with the
    # single-layer term scaled by the permittivity ratio ep_out/ep_in.
    A[0, 0] = 0.5 * identity - dlp_out
    A[0, 1] = slp_out
    A[1, 0] = 0.5 * identity + dlp_in
    A[1, 1] = -(ep_out / ep_in) * slp_in
    self.matrices["A"] = A
def rhs(self):
    """Build the source-term grid functions and store them as
    ``self.rhs['rhs_1']`` (identically zero, exterior equation) and
    ``self.rhs['rhs_2']`` (Coulomb potential of the point charges divided
    by ep_in), using exafmm when rhs_constructor == "fmm" and direct
    summation otherwise.
    """
    dirichl_space = self.dirichl_space
    neumann_space = self.neumann_space
    q = self.q
    x_q = self.x_q
    ep_in = self.ep_in
    rhs_constructor = self.rhs_constructor
    if rhs_constructor == "fmm":
        @bempp.api.callable(vectorized=True)
        def fmm_green_func(x, n, domain_index, result):
            import exafmm.laplace as _laplace
            sources = _laplace.init_sources(x_q, q)
            targets = _laplace.init_targets(x.T)
            # p/ncrit are exafmm accuracy/tree parameters; the API needs a
            # scratch file, removed right after evaluation.
            fmm = _laplace.LaplaceFmm(p=10, ncrit=500, filename=".rhs.tmp")
            tree = _laplace.setup(sources, targets, fmm)
            values = _laplace.evaluate(tree, fmm)
            os.remove(".rhs.tmp")
            result[:] = values[:, 0] / ep_in
        @bempp.api.real_callable
        def zero(x, n, domain_index, result):
            result[0] = 0
        rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)
        rhs_2 = bempp.api.GridFunction(dirichl_space, fun=fmm_green_func)
    else:
        @bempp.api.real_callable
        def charges_fun(x, n, domain_index, result):
            # Direct Coulomb sum over all point charges.
            nrm = np.sqrt(
                (x[0] - x_q[:, 0]) ** 2
                + (x[1] - x_q[:, 1]) ** 2
                + (x[2] - x_q[:, 2]) ** 2
            )
            aux = np.sum(q / nrm)
            result[0] = aux / (4 * np.pi * ep_in)
        @bempp.api.real_callable
        def zero(x, n, domain_index, result):
            result[0] = 0
        rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)
        rhs_2 = bempp.api.GridFunction(dirichl_space, fun=charges_fun)
    self.rhs["rhs_1"], self.rhs["rhs_2"] = rhs_1, rhs_2
def calculate_potential(self, rerun_all):
    """Delegate the solve/post-processing to the shared single-surface driver."""
    calculate_potential_one_surface(self, rerun_all)
| 30.373737 | 88 | 0.638178 | import numpy as np
import bempp.api
import os
from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
from .common import calculate_potential_one_surface
# Sign-convention flag read by the formulation framework.
invert_potential = True
def verify_parameters(self):
    """This formulation imposes no extra parameter constraints."""
    return True
def lhs(self):
    """Assemble the 2x2 blocked operator matrix (exterior-first ordering)
    and store it in ``self.matrices['A']``.
    """
    dirichl_space = self.dirichl_space
    neumann_space = self.neumann_space
    ep_in = self.ep_in
    ep_out = self.ep_ex
    kappa = self.kappa
    operator_assembler = self.operator_assembler
    identity = sparse.identity(dirichl_space, dirichl_space, dirichl_space)
    # Interior (Laplace) operators.
    slp_in = laplace.single_layer(
        neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler
    )
    dlp_in = laplace.double_layer(
        dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler
    )
    # Exterior (modified Helmholtz, screening parameter kappa) operators.
    slp_out = modified_helmholtz.single_layer(
        neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler
    )
    dlp_out = modified_helmholtz.double_layer(
        dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler
    )
    A = bempp.api.BlockedOperator(2, 2)
    # Row 0: exterior equation; row 1: interior, scaled by ep_out/ep_in.
    A[0, 0] = 0.5 * identity - dlp_out
    A[0, 1] = slp_out
    A[1, 0] = 0.5 * identity + dlp_in
    A[1, 1] = -(ep_out / ep_in) * slp_in
    self.matrices["A"] = A
def rhs(self):
    """Build the right-hand-side grid functions: ``rhs_1`` is zero and
    ``rhs_2`` the Coulomb potential of the charges over ep_in, via exafmm
    ("fmm") or direct summation.
    """
    dirichl_space = self.dirichl_space
    neumann_space = self.neumann_space
    q = self.q
    x_q = self.x_q
    ep_in = self.ep_in
    rhs_constructor = self.rhs_constructor
    if rhs_constructor == "fmm":
        @bempp.api.callable(vectorized=True)
        def fmm_green_func(x, n, domain_index, result):
            import exafmm.laplace as _laplace
            sources = _laplace.init_sources(x_q, q)
            targets = _laplace.init_targets(x.T)
            # exafmm requires a scratch file; removed after evaluation.
            fmm = _laplace.LaplaceFmm(p=10, ncrit=500, filename=".rhs.tmp")
            tree = _laplace.setup(sources, targets, fmm)
            values = _laplace.evaluate(tree, fmm)
            os.remove(".rhs.tmp")
            result[:] = values[:, 0] / ep_in
        @bempp.api.real_callable
        def zero(x, n, domain_index, result):
            result[0] = 0
        rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)
        rhs_2 = bempp.api.GridFunction(dirichl_space, fun=fmm_green_func)
    else:
        @bempp.api.real_callable
        def charges_fun(x, n, domain_index, result):
            # Direct Coulomb sum over all point charges.
            nrm = np.sqrt(
                (x[0] - x_q[:, 0]) ** 2
                + (x[1] - x_q[:, 1]) ** 2
                + (x[2] - x_q[:, 2]) ** 2
            )
            aux = np.sum(q / nrm)
            result[0] = aux / (4 * np.pi * ep_in)
        @bempp.api.real_callable
        def zero(x, n, domain_index, result):
            result[0] = 0
        rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)
        rhs_2 = bempp.api.GridFunction(dirichl_space, fun=charges_fun)
    self.rhs["rhs_1"], self.rhs["rhs_2"] = rhs_1, rhs_2
def calculate_potential(self, rerun_all):
    """Delegate the solve/post-processing to the shared single-surface driver."""
    calculate_potential_one_surface(self, rerun_all)
| true | true |
f72ce81173cc5c6016efe5504b76f86ecabf1edf | 245 | py | Python | python/merge.py | mannyrivera2010/rdf4j-learning | ef5bc6aeac0c16265605f4e7b577255fb48f96f7 | [
"Apache-2.0"
] | null | null | null | python/merge.py | mannyrivera2010/rdf4j-learning | ef5bc6aeac0c16265605f4e7b577255fb48f96f7 | [
"Apache-2.0"
] | null | null | null | python/merge.py | mannyrivera2010/rdf4j-learning | ef5bc6aeac0c16265605f4e7b577255fb48f96f7 | [
"Apache-2.0"
] | null | null | null | import glob
# Concatenate every CSV under data/ to stdout, dropping any line containing
# "gender" (presumably each file's header row -- a data row containing the
# word would also be dropped; TODO confirm).
for csv_path in glob.glob("data/*.csv"):
    with open(csv_path, 'r') as handle:
        for row in handle:
            if "gender" not in row:
                print(row.strip())
| 22.272727 | 43 | 0.62449 | import glob
# Concatenate every CSV under data/ to stdout, dropping any line containing
# "gender" (presumably each file's header row; TODO confirm data rows never
# contain the word).
file_list = glob.glob("data/*.csv")
for file_name in file_list:
    with open(file_name, 'r') as open_file:
        for inner_line in open_file:
            if "gender" not in inner_line:
                print(inner_line.strip())
| true | true |
f72ce8530a8392dc1dd22292d6d0dcfb86f65a5a | 2,885 | py | Python | pyramid_request_log/request_log.py | MoiTux/pyramid-request-log | 31852bbf09e4f416f93c7720ecd9eca8cfe32d38 | [
"MIT"
] | 1 | 2017-08-07T10:22:16.000Z | 2017-08-07T10:22:16.000Z | pyramid_request_log/request_log.py | MoiTux/pyramid-request-log | 31852bbf09e4f416f93c7720ecd9eca8cfe32d38 | [
"MIT"
] | null | null | null | pyramid_request_log/request_log.py | MoiTux/pyramid-request-log | 31852bbf09e4f416f93c7720ecd9eca8cfe32d38 | [
"MIT"
] | null | null | null | import logging
import sys
import time
from pyramid.events import NewResponse, NewRequest
from pyramid.events import subscriber
if sys.version_info[0] < 3:
    # Python 2 compatibility: rebind the builtin name so isinstance(x, str)
    # accepts both str and unicode.
    str = basestring
log = logging.getLogger(__name__)
# Module-level configuration, expected to be set by the including
# application at startup:
unlog_pattern = None  # compiled regex of field names whose values get masked
unlog_route = None  # compiled regex of routes that must not be logged
authenticated_id = ''  # attribute name read off request.authenticated_userid
@subscriber(NewRequest)
def log_request(event):
    """Log every incoming request (method, path, optional JSON body, user),
    unless the route is excluded via ``unlog_route``."""
    request = event.request
    if ignore_route(request.path):
        return
    # Stamp the start time so log_response can report the duration.
    request.pyramid_request_log_start = time.time()
    user = 'UnAuthenticatedUser'
    if request.authenticated_userid:
        user = getattr(request.authenticated_userid,
                       authenticated_id, 'AuthenticatedUser')
    if request.content_type == 'application/json' and request.body:
        try:
            body = request.json_body
            clean(body)  # mask sensitive fields in place
        except Exception:
            body = 'Json error'
        log.info('New request: %s %s (body: %s) (%s: %s)',
                 request.method, request.path_qs, body, authenticated_id, user)
    else:
        log.info('New request: %s %s (%s: %s)',
                 request.method, request.path_qs, authenticated_id, user)
@subscriber(NewResponse)
def log_response(event):
    """Log the response status, optional JSON body, user and elapsed time
    for every request not excluded via ``unlog_route``."""
    request = event.request
    response = event.response
    if ignore_route(request.path):
        return
    # Assumes log_request ran and stamped the request; TODO confirm this
    # holds for error/exception-view response paths.
    duration = '{:.3f}'.format(time.time() - request.pyramid_request_log_start)
    extra = {
        'method': request.method,
        'route_url': request.path_qs,
        'status': response.status,
        'duration': duration,
    }
    user = 'UnAuthenticatedUser'
    if request.authenticated_userid:
        user = getattr(request.authenticated_userid,
                       authenticated_id, 'AuthenticatedUser')
    if response.content_type == 'application/json' and response.body:
        try:
            body = response.json_body
            clean(body)  # mask sensitive fields in place
        except Exception:
            body = 'Json error'
        # "endded" is a typo but part of the emitted log text; left as-is.
        log.info(
            'Response for request: %s %s: HTTPCode: %s, (body: %s) '
            '(%s: %s) (endded in %ss)',
            request.method, request.path_qs, response.status, body,
            authenticated_id, user, duration,
            extra=extra,
        )
    else:
        log.info('Response for request: %s %s: HTTPCode: %s, (%s: %s) '
                 '(endded in %ss)',
                 request.method, request.path_qs, response.status,
                 authenticated_id, user, duration,
                 extra=extra)
def clean(body):
    """Recursively mask, in place, every value whose key matches the
    module-level ``unlog_pattern`` regex; nested dicts and lists are walked."""
    for entry in body:
        # Iterating a list yields its elements; iterating a dict yields keys.
        if isinstance(entry, (dict, list)):
            clean(entry)
        elif isinstance(body, dict):
            value = body[entry]
            if isinstance(value, (dict, list)):
                clean(value)
            elif unlog_pattern and unlog_pattern.match(entry):
                body[entry] = '*' * 6
def ignore_route(route):
    """Return True when *route* matches the module-level ``unlog_route``
    pattern, i.e. the request must not be logged."""
    return bool(unlog_route and unlog_route.match(route))
| 27.47619 | 79 | 0.597574 | import logging
import sys
import time
from pyramid.events import NewResponse, NewRequest
from pyramid.events import subscriber
if sys.version_info[0] < 3:
    # Python 2 compatibility: isinstance(x, str) then matches str/unicode.
    str = basestring
log = logging.getLogger(__name__)
# Module-level configuration, set by the including application:
unlog_pattern = None  # regex of field names whose values get masked
unlog_route = None  # regex of routes that must not be logged
authenticated_id = ''  # attribute read off request.authenticated_userid
@subscriber(NewRequest)
def log_request(event):
    """Log every incoming request (method, path, optional JSON body, user),
    unless the route is excluded via ``unlog_route``."""
    request = event.request
    if ignore_route(request.path):
        return
    # Stamp the start time so log_response can report the duration.
    request.pyramid_request_log_start = time.time()
    user = 'UnAuthenticatedUser'
    if request.authenticated_userid:
        user = getattr(request.authenticated_userid,
                       authenticated_id, 'AuthenticatedUser')
    if request.content_type == 'application/json' and request.body:
        try:
            body = request.json_body
            clean(body)  # mask sensitive fields in place
        except Exception:
            body = 'Json error'
        log.info('New request: %s %s (body: %s) (%s: %s)',
                 request.method, request.path_qs, body, authenticated_id, user)
    else:
        log.info('New request: %s %s (%s: %s)',
                 request.method, request.path_qs, authenticated_id, user)
@subscriber(NewResponse)
def log_response(event):
    """Log the response status, optional JSON body, user and elapsed time
    for every request not excluded via ``unlog_route``."""
    request = event.request
    response = event.response
    if ignore_route(request.path):
        return
    # Assumes log_request ran and stamped the request; TODO confirm for
    # error/exception-view response paths.
    duration = '{:.3f}'.format(time.time() - request.pyramid_request_log_start)
    extra = {
        'method': request.method,
        'route_url': request.path_qs,
        'status': response.status,
        'duration': duration,
    }
    user = 'UnAuthenticatedUser'
    if request.authenticated_userid:
        user = getattr(request.authenticated_userid,
                       authenticated_id, 'AuthenticatedUser')
    if response.content_type == 'application/json' and response.body:
        try:
            body = response.json_body
            clean(body)  # mask sensitive fields in place
        except Exception:
            body = 'Json error'
        # "endded" is a typo but part of the emitted log text; left as-is.
        log.info(
            'Response for request: %s %s: HTTPCode: %s, (body: %s) '
            '(%s: %s) (endded in %ss)',
            request.method, request.path_qs, response.status, body,
            authenticated_id, user, duration,
            extra=extra,
        )
    else:
        log.info('Response for request: %s %s: HTTPCode: %s, (%s: %s) '
                 '(endded in %ss)',
                 request.method, request.path_qs, response.status,
                 authenticated_id, user, duration,
                 extra=extra)
def clean(body):
    """Recursively mask, in place, values whose key matches ``unlog_pattern``."""
    for key in body:
        # list iteration yields elements; dict iteration yields keys
        if isinstance(key, (dict, list)):
            clean(key)
        elif isinstance(body, dict):
            if isinstance(body[key], (dict, list)):
                clean(body[key])
            elif unlog_pattern and unlog_pattern.match(key):
                body[key] = '*'*6
def ignore_route(route):
    """Return True when *route* matches the ``unlog_route`` exclusion pattern."""
    if unlog_route and unlog_route.match(route):
        return True
    return False
| true | true |
f72cea9211379eec544b1b493a257f6c5b6255c7 | 1,066 | py | Python | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 74 | 2020-03-08T15:29:00.000Z | 2022-03-05T14:57:33.000Z | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 19 | 2020-03-06T08:56:51.000Z | 2022-03-27T05:07:35.000Z | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 23 | 2020-03-20T08:19:55.000Z | 2022-03-16T17:40:09.000Z | from PIL import Image
import numbers
class RandomCrop(object):
    """Crop a PIL-style image to ``size``, with the crop window positioned by
    the fraction ``v`` in [0, 1] (0 = top-left corner, 1 = bottom-right).

    ``v`` is sampled once by the caller so that a whole group of images can
    share the same crop window.
    """

    def __init__(self, size, v):
        # A bare number requests a square crop.
        if isinstance(size, numbers.Number):
            side = int(size)
            self.size = (side, side)
        else:
            self.size = size
        self.v = v

    def __call__(self, img):
        width, height = img.size
        target_h, target_w = self.size
        left = int((width - target_w) * self.v)
        top = int((height - target_h) * self.v)
        assert img.size[0] == width and img.size[1] == height
        if width == target_w and height == target_h:
            return img
        # Same deterministic crop window for every image sharing this v.
        return img.crop((left, top, left + target_w, top + target_h))
class RandomHorizontalFlip(object):
    """Randomly horizontally flips the given PIL.Image with a probability of 0.5
    """
    def __init__(self, v):
        # v is sampled once by the caller so a whole group of images can
        # share the same flip decision.
        self.v = v
        return
    def __call__(self, img):
        # Flip only when the pre-sampled value falls below 0.5.
        if self.v < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        #print ("horiontal flip: ",self.v)
return img | 31.352941 | 115 | 0.545028 | from PIL import Image
import numbers
class RandomCrop(object):
    """Crop an image to ``size``; the window position is set by ``v`` in [0, 1]."""
    def __init__(self, size, v):
        # A bare number requests a square crop.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.v = v
    def __call__(self, img):
        w, h = img.size
        th, tw = self.size
        x1 = int(( w - tw)*self.v)
        y1 = int(( h - th)*self.v)
        assert(img.size[0] == w and img.size[1] == h)
        if w == tw and h == th:
            out_image = img
        else:
            # same deterministic crop window for all images in the group
            out_image = img.crop((x1, y1, x1 + tw, y1 + th))
        return out_image
class RandomHorizontalFlip(object):
    """Horizontally flip the given PIL.Image when the pre-sampled ``v`` < 0.5."""
    def __init__(self, v):
        # v is sampled once by the caller so a group shares the decision.
        self.v = v
        return
    def __call__(self, img):
        if self.v < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
return img | true | true |
f72ceaaf5ad093604d4d028c26cd964e13cd6018 | 9,644 | py | Python | tests/test_authority.py | scottp-dpaw/azure-activedirectory-library-for-python | 3305d666c064e62f8c15526fb82b5cba02a11b80 | [
"MIT"
] | 2 | 2018-03-05T07:54:23.000Z | 2018-07-10T14:53:32.000Z | tests/test_authority.py | scottp-dpaw/azure-activedirectory-library-for-python | 3305d666c064e62f8c15526fb82b5cba02a11b80 | [
"MIT"
] | null | null | null | tests/test_authority.py | scottp-dpaw/azure-activedirectory-library-for-python | 3305d666c064e62f8c15526fb82b5cba02a11b80 | [
"MIT"
] | 1 | 2020-10-26T20:07:07.000Z | 2020-10-26T20:07:07.000Z | #------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import sys
import requests
import httpretty
import six
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import adal
from adal.authority import Authority
from adal import log
from adal.authentication_context import AuthenticationContext
from tests import util
from tests.util import parameters as cp
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class TestAuthority(unittest.TestCase):
    """Authority validation and instance-discovery tests for ADAL.

    Fixes in this revision:
    * ``test_url_extra_slashes_change_authority_url`` used ``assertTrue``
      with two arguments -- the second argument is only the failure
      *message*, so the test could never fail; it now uses ``assertEqual``
      as intended.
    * ``test_dsts_authority`` used a bare ``except:``, which would also
      swallow KeyboardInterrupt/SystemExit; narrowed to ``Exception``.
    """

    # use this as authority to force dynamic as opposed to static instance
    # discovery.
    nonHardCodedAuthority = 'https://login.doesntexist.com/' + cp['tenant']
    nonHardCodedAuthorizeEndpoint = nonHardCodedAuthority + '/oauth2/authorize'
    dstsTestEndpoint = 'https://test-dsts.core.azure-test.net/dstsv2/common'

    def setUp(self):
        # Start each test from a clean logging config and instance cache.
        util.reset_logging()
        util.clear_static_cache()
        return super(TestAuthority, self).setUp()

    def tearDown(self):
        util.reset_logging()
        util.clear_static_cache()
        return super(TestAuthority, self).tearDown()

    def setupExpectedInstanceDiscoveryRequestRetries(self, requestParametersList, authority):
        # Placeholder hook; retry scenarios are not exercised in this suite.
        pass

    @httpretty.activate
    def test_success_dynamic_instance_discovery(self):
        instanceDiscoveryRequest = util.setup_expected_instance_discovery_request(
            200,
            cp['authorityHosts']['global'],
            {'tenant_discovery_endpoint' : 'http://foobar'},
            self.nonHardCodedAuthorizeEndpoint
        )
        responseOptions = { 'authority' : self.nonHardCodedAuthority }
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        util.setup_expected_client_cred_token_request_response(200, wireResponse, self.nonHardCodedAuthority)

        context = adal.AuthenticationContext(self.nonHardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    def performStaticInstanceDiscovery(self, authorityHost):
        # Shared driver: one token round-trip against a hard-coded authority.
        hardCodedAuthority = 'https://' + authorityHost + '/' + cp['tenant']
        responseOptions = {
            'authority' : hardCodedAuthority
        }
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        tokenRequest = util.setup_expected_client_cred_token_request_response(200, wireResponse, hardCodedAuthority)

        context = adal.AuthenticationContext(hardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    @httpretty.activate
    def test_success_static_instance_discovery(self):
        self.performStaticInstanceDiscovery('login.microsoftonline.com')
        self.performStaticInstanceDiscovery('login.windows.net')
        self.performStaticInstanceDiscovery('login.chinacloudapi.cn')
        self.performStaticInstanceDiscovery('login-us.microsoftonline.com')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.windows.net')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.chinacloudapi.cn')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.cloudapi.de')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.usgovcloudapi.net')
        self.performStaticInstanceDiscovery('test-dsts.core.azure-test.net')

    @httpretty.activate
    def test_http_error(self):
        util.setup_expected_instance_discovery_request(500, cp['authorityHosts']['global'], None, self.nonHardCodedAuthorizeEndpoint)

        with six.assertRaisesRegex(self, Exception, '500'):
            context = adal.AuthenticationContext(self.nonHardCodedAuthority)
            token_response = context.acquire_token_with_client_credentials(
                cp['resource'], cp['clientId'], cp['clientSecret'])

    @httpretty.activate
    def test_validation_error(self):
        returnDoc = { 'error' : 'invalid_instance', 'error_description' : 'the instance was invalid' }
        util.setup_expected_instance_discovery_request(400, cp['authorityHosts']['global'], returnDoc, self.nonHardCodedAuthorizeEndpoint)

        with six.assertRaisesRegex(self, Exception, 'instance was invalid'):
            context = adal.AuthenticationContext(self.nonHardCodedAuthority)
            token_response = context.acquire_token_with_client_credentials(
                cp['resource'], cp['clientId'], cp['clientSecret'])

    @httpretty.activate
    def test_validation_off(self):
        instanceDiscoveryRequest = util.setup_expected_instance_discovery_request(
            200,
            cp['authorityHosts']['global'],
            {'tenant_discovery_endpoint' : 'http://foobar'},
            self.nonHardCodedAuthorizeEndpoint
        )
        responseOptions = { 'authority' : self.nonHardCodedAuthority}
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        util.setup_expected_client_cred_token_request_response(200, wireResponse, self.nonHardCodedAuthority)

        context = adal.AuthenticationContext(self.nonHardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    @httpretty.activate
    def test_bad_url_not_https(self):
        with six.assertRaisesRegex(self, ValueError, "The authority url must be an https endpoint\."):
            context = AuthenticationContext('http://this.is.not.https.com/mytenant.com')

    @httpretty.activate
    def test_bad_url_has_query(self):
        with six.assertRaisesRegex(self, ValueError, "The authority url must not have a query string\."):
            context = AuthenticationContext(cp['authorityTenant'] + '?this=should&not=be&here=foo')

    @httpretty.activate
    def test_url_extra_path_elements(self):
        with six.assertRaisesRegex(self, ValueError, "tenant"): # Some tenant specific error message
            context = AuthenticationContext(self.nonHardCodedAuthority + '/extra/path')

    @httpretty.activate
    def test_dsts_authority(self):
        try:
            context = AuthenticationContext(self.dstsTestEndpoint)
        except Exception:  # narrowed from a bare except
            self.fail("AuthenticationContext() rased an exception on dstsTestEndpoint")

    @httpretty.activate
    def test_url_extra_slashes(self):
        util.setup_expected_instance_discovery_request(200,
            cp['authorityHosts']['global'],
            {
                'tenant_discovery_endpoint': 'http://foobar'
            },
            self.nonHardCodedAuthorizeEndpoint)
        authority_url = self.nonHardCodedAuthority + '/' # This should pass for one or more than one slashes
        authority = Authority(authority_url, True)
        obj = util.create_empty_adal_object()
        authority.validate(obj['call_context'])
        req = httpretty.last_request()
        util.match_standard_request_headers(req)

    @httpretty.activate
    def test_url_extra_slashes_change_authority_url(self):
        authority_url = self.nonHardCodedAuthority + '/' # This should pass for one or more than one slashes
        authority = Authority(authority_url, True)
        # Fixed: assertTrue(a, b) treats b as the failure message and always
        # passed for a truthy a; assertEqual performs the intended comparison
        # that the trailing slash was stripped from the stored URL.
        self.assertEqual(authority._url.geturl(), self.nonHardCodedAuthority)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 42.672566 | 138 | 0.687578 |
import sys
import requests
import httpretty
import six
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import adal
from adal.authority import Authority
from adal import log
from adal.authentication_context import AuthenticationContext
from tests import util
from tests.util import parameters as cp
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class TestAuthority(unittest.TestCase):
    """Tests for adal authority URL validation and instance discovery."""

    nonHardCodedAuthority = 'https://login.doesntexist.com/' + cp['tenant']
    nonHardCodedAuthorizeEndpoint = nonHardCodedAuthority + '/oauth2/authorize'
    dstsTestEndpoint = 'https://test-dsts.core.azure-test.net/dstsv2/common'

    def setUp(self):
        """Reset logging and adal's static caches before every test."""
        util.reset_logging()
        util.clear_static_cache()
        return super(TestAuthority, self).setUp()

    def tearDown(self):
        """Reset logging and adal's static caches after every test."""
        util.reset_logging()
        util.clear_static_cache()
        return super(TestAuthority, self).tearDown()

    def setupExpectedInstanceDiscoveryRequestRetries(self, requestParametersList, authority):
        # Stub kept for parity with the original test suite; intentionally unused.
        pass

    @httpretty.activate
    def test_success_dynamic_instance_discovery(self):
        """A token is acquired after dynamic instance discovery succeeds."""
        instanceDiscoveryRequest = util.setup_expected_instance_discovery_request(
            200,
            cp['authorityHosts']['global'],
            {'tenant_discovery_endpoint' : 'http://foobar'},
            self.nonHardCodedAuthorizeEndpoint
        )
        responseOptions = { 'authority' : self.nonHardCodedAuthority }
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        util.setup_expected_client_cred_token_request_response(200, wireResponse, self.nonHardCodedAuthority)
        context = adal.AuthenticationContext(self.nonHardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    def performStaticInstanceDiscovery(self, authorityHost):
        """Acquire a token against one statically validated authority host."""
        hardCodedAuthority = 'https://' + authorityHost + '/' + cp['tenant']
        responseOptions = {
            'authority' : hardCodedAuthority
        }
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        tokenRequest = util.setup_expected_client_cred_token_request_response(200, wireResponse, hardCodedAuthority)
        context = adal.AuthenticationContext(hardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    @httpretty.activate
    def test_success_static_instance_discovery(self):
        """Every hard-coded authority host passes static instance discovery."""
        self.performStaticInstanceDiscovery('login.microsoftonline.com')
        self.performStaticInstanceDiscovery('login.windows.net')
        self.performStaticInstanceDiscovery('login.chinacloudapi.cn')
        self.performStaticInstanceDiscovery('login-us.microsoftonline.com')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.windows.net')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.chinacloudapi.cn')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.cloudapi.de')
        self.performStaticInstanceDiscovery('test-dsts.dsts.core.usgovcloudapi.net')
        self.performStaticInstanceDiscovery('test-dsts.core.azure-test.net')

    @httpretty.activate
    def test_http_error(self):
        """An HTTP 500 during instance discovery surfaces as an exception."""
        util.setup_expected_instance_discovery_request(500, cp['authorityHosts']['global'], None, self.nonHardCodedAuthorizeEndpoint)
        with six.assertRaisesRegex(self, Exception, '500'):
            context = adal.AuthenticationContext(self.nonHardCodedAuthority)
            token_response = context.acquire_token_with_client_credentials(
                cp['resource'], cp['clientId'], cp['clientSecret'])

    @httpretty.activate
    def test_validation_error(self):
        """A discovery error document surfaces its error_description text."""
        returnDoc = { 'error' : 'invalid_instance', 'error_description' : 'the instance was invalid' }
        util.setup_expected_instance_discovery_request(400, cp['authorityHosts']['global'], returnDoc, self.nonHardCodedAuthorizeEndpoint)
        with six.assertRaisesRegex(self, Exception, 'instance was invalid'):
            context = adal.AuthenticationContext(self.nonHardCodedAuthority)
            token_response = context.acquire_token_with_client_credentials(
                cp['resource'], cp['clientId'], cp['clientSecret'])

    @httpretty.activate
    def test_validation_off(self):
        """Token acquisition works when authority validation is disabled."""
        instanceDiscoveryRequest = util.setup_expected_instance_discovery_request(
            200,
            cp['authorityHosts']['global'],
            {'tenant_discovery_endpoint' : 'http://foobar'},
            self.nonHardCodedAuthorizeEndpoint
        )
        responseOptions = { 'authority' : self.nonHardCodedAuthority}
        response = util.create_response(responseOptions)
        wireResponse = response['wireResponse']
        util.setup_expected_client_cred_token_request_response(200, wireResponse, self.nonHardCodedAuthority)
        context = adal.AuthenticationContext(self.nonHardCodedAuthority)
        token_response = context.acquire_token_with_client_credentials(
            response['resource'], cp['clientId'], cp['clientSecret'])
        self.assertTrue(
            util.is_match_token_response(response['cachedResponse'], token_response),
            'The response does not match what was expected.: ' + str(token_response)
        )

    @httpretty.activate
    def test_bad_url_not_https(self):
        """A non-https authority URL is rejected with ValueError."""
        # Raw string: "\." in a plain literal is an invalid escape sequence.
        with six.assertRaisesRegex(self, ValueError, r"The authority url must be an https endpoint\."):
            context = AuthenticationContext('http://this.is.not.https.com/mytenant.com')

    @httpretty.activate
    def test_bad_url_has_query(self):
        """An authority URL carrying a query string is rejected."""
        with six.assertRaisesRegex(self, ValueError, r"The authority url must not have a query string\."):
            context = AuthenticationContext(cp['authorityTenant'] + '?this=should¬=be&here=foo')

    @httpretty.activate
    def test_url_extra_path_elements(self):
        """Extra path segments after the tenant are rejected."""
        # The exact message is tenant-specific; matching on "tenant" suffices.
        with six.assertRaisesRegex(self, ValueError, "tenant"):
            context = AuthenticationContext(self.nonHardCodedAuthority + '/extra/path')

    @httpretty.activate
    def test_dsts_authority(self):
        """A dSTS authority URL is accepted by AuthenticationContext."""
        try:
            context = AuthenticationContext(self.dstsTestEndpoint)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # still propagate; typo "rased" fixed in the failure message.
            self.fail("AuthenticationContext() raised an exception on dstsTestEndpoint")

    @httpretty.activate
    def test_url_extra_slashes(self):
        """Validation succeeds for an authority URL with a trailing slash."""
        util.setup_expected_instance_discovery_request(200,
            cp['authorityHosts']['global'],
            {
                'tenant_discovery_endpoint': 'http://foobar'
            },
            self.nonHardCodedAuthorizeEndpoint)
        # One (or more) trailing slashes should be tolerated.
        authority_url = self.nonHardCodedAuthority + '/'
        authority = Authority(authority_url, True)
        obj = util.create_empty_adal_object()
        authority.validate(obj['call_context'])
        req = httpretty.last_request()
        util.match_standard_request_headers(req)

    @httpretty.activate
    def test_url_extra_slashes_change_authority_url(self):
        """Trailing slashes are stripped from the stored authority URL."""
        authority_url = self.nonHardCodedAuthority + '/'
        authority = Authority(authority_url, True)
        # Bug fix: assertTrue(a, b) treats b as a failure message and passes
        # for any truthy a; assertEqual actually compares the two URLs.
        self.assertEqual(authority._url.geturl(), self.nonHardCodedAuthority)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| true | true |
f72ceae57491507ca3020763b8de106a6f696481 | 486 | py | Python | DjangoAPI/MyApi/migrations/0001_initial.py | sni710/Django_api | a40d049586d9396c3b1bea4cd82177c573b24c17 | [
"Apache-2.0"
] | 2 | 2020-08-27T11:26:35.000Z | 2021-03-20T16:27:20.000Z | DjangoAPI/MyApi/migrations/0001_initial.py | ankit98040/Django-ML-Project | 3e50f51e56aa34bb8a7ae31f4955a10e57176ea7 | [
"Apache-2.0"
] | null | null | null | DjangoAPI/MyApi/migrations/0001_initial.py | ankit98040/Django-ML-Project | 3e50f51e56aa34bb8a7ae31f4955a10e57176ea7 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.6 on 2020-08-26 04:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the MyFile model (auto id + image field)."""

    # First migration of the app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MyFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # upload_to='' stores uploaded images at the MEDIA_ROOT root.
                ('image', models.ImageField(upload_to='')),
            ],
        ),
    ]
| 22.090909 | 114 | 0.569959 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the MyFile model (auto id + image field)."""

    # First migration of the app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='MyFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # upload_to='' stores uploaded images at the MEDIA_ROOT root.
                ('image', models.ImageField(upload_to='')),
            ],
        ),
    ]
| true | true |
f72ceb176086bf9c83e429c918a55d39155ac895 | 3,556 | py | Python | spyder/plugins/tours/container.py | fumitoh/spyder | 12294fec88a2f61c756538ac38bd748d8e7b3f82 | [
"MIT"
] | 1 | 2021-07-08T01:27:25.000Z | 2021-07-08T01:27:25.000Z | spyder/plugins/tours/container.py | fumitoh/spyder | 12294fec88a2f61c756538ac38bd748d8e7b3f82 | [
"MIT"
] | null | null | null | spyder/plugins/tours/container.py | fumitoh/spyder | 12294fec88a2f61c756538ac38bd748d8e7b3f82 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tours Container.
"""
from collections import OrderedDict
# Local imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.translations import get_translation
from spyder.api.widgets.main_container import PluginMainContainer
from spyder.plugins.tours.tours import TourIdentifiers
from spyder.plugins.tours.widgets import AnimatedTour, OpenTourDialog
# Localization: "_" is the conventional alias for the translation function.
_ = get_translation('spyder')
# Set the index for the default tour (the introduction tour).
DEFAULT_TOUR = TourIdentifiers.IntroductionTour
class TourActions:
    """
    Identifiers for the actions provided by the Tours plugin.
    """
    # Action that launches the default (introduction) tour.
    ShowTour = "show tour"
# --- Plugin
# ----------------------------------------------------------------------------
class ToursContainer(PluginMainContainer):
    """
    Tours container.

    Owns the registered interactive tours, the widget that plays them,
    the "open tour" startup dialog and the actions/menu entries shown
    in the UI.
    """
    def __init__(self, name, plugin, parent=None):
        super().__init__(name, plugin, parent=parent)
        self._main = plugin.main
        # tour_id -> tour step data / localized title, in registration order.
        self._tours = OrderedDict()
        self._tour_titles = OrderedDict()
        self._tour_widget = AnimatedTour(self._main)
        self._tour_dialog = OpenTourDialog(
            self, lambda: self.show_tour(DEFAULT_TOUR))
        self.tour_action = self.create_action(
            TourActions.ShowTour,
            text=_("Show tour"),
            icon=self.create_icon('tour'),
            triggered=lambda: self.show_tour(DEFAULT_TOUR)
        )
    # --- PluginMainContainer API
    # ------------------------------------------------------------------------
    def setup(self):
        # Menu that lists every registered tour (filled by register_tour).
        self.tours_menu = self.create_menu(
            "tours_menu", _("Interactive tours"))
    def update_actions(self):
        # No actions need to be enabled/disabled dynamically.
        pass
    # --- Public API
    # ------------------------------------------------------------------------
    def register_tour(self, tour_id, title, tour_data):
        """
        Register a new interactive tour on spyder.

        Parameters
        ----------
        tour_id: str
            Unique tour string identifier.
        title: str
            Localized tour name.
        tour_data: dict
            The tour steps.
        """
        # Duplicate ids are a programming error; fail loudly.
        if tour_id in self._tours:
            raise SpyderAPIError(
                "Tour with id '{}' has already been registered!".format(
                    tour_id))
        self._tours[tour_id] = tour_data
        self._tour_titles[tour_id] = title
        # tour_id is a parameter, so the lambda binds it per registration.
        action = self.create_action(
            tour_id,
            text=title,
            triggered=lambda: self.show_tour(tour_id),
        )
        self.add_item_to_menu(action, menu=self.tours_menu)
    def show_tour(self, tour_id):
        """
        Show interactive tour.

        Parameters
        ----------
        tour_id: str
            Unique tour string identifier.
        """
        tour_data = self._tours[tour_id]
        dic = {'last': 0, 'tour': tour_data}
        self._tour_widget.set_tour(tour_id, dic, self._main)
        self._tour_widget.start_tour()
    def show_tour_message(self):
        """
        Show message about starting the tour the first time Spyder starts.
        """
        self._tour_dialog.show()
        self._tour_dialog.raise_()
| 30.135593 | 80 | 0.53009 |
from collections import OrderedDict
from spyder.api.exceptions import SpyderAPIError
from spyder.api.translations import get_translation
from spyder.api.widgets.main_container import PluginMainContainer
from spyder.plugins.tours.tours import TourIdentifiers
from spyder.plugins.tours.widgets import AnimatedTour, OpenTourDialog
# "_" is the conventional alias for the gettext translation function.
_ = get_translation('spyder')
# Tour shown by default (the introduction tour).
DEFAULT_TOUR = TourIdentifiers.IntroductionTour
class TourActions:
    """Identifiers for the actions provided by the Tours plugin."""
    ShowTour = "show tour"
class ToursContainer(PluginMainContainer):
    """
    Tours container: owns the registered interactive tours, the widget
    that plays them, the "open tour" dialog and the UI actions/menu.
    """
    def __init__(self, name, plugin, parent=None):
        super().__init__(name, plugin, parent=parent)
        self._main = plugin.main
        # tour_id -> tour step data / localized title, in registration order.
        self._tours = OrderedDict()
        self._tour_titles = OrderedDict()
        self._tour_widget = AnimatedTour(self._main)
        self._tour_dialog = OpenTourDialog(
            self, lambda: self.show_tour(DEFAULT_TOUR))
        self.tour_action = self.create_action(
            TourActions.ShowTour,
            text=_("Show tour"),
            icon=self.create_icon('tour'),
            triggered=lambda: self.show_tour(DEFAULT_TOUR)
        )
    def setup(self):
        """Create the menu that lists every registered tour."""
        self.tours_menu = self.create_menu(
            "tours_menu", _("Interactive tours"))
    def update_actions(self):
        """PluginMainContainer hook; no dynamic action state is needed."""
        pass
    def register_tour(self, tour_id, title, tour_data):
        """Register a tour (unique ``tour_id``, localized ``title``, step
        dict ``tour_data``) and add a menu entry that launches it."""
        if tour_id in self._tours:
            raise SpyderAPIError(
                "Tour with id '{}' has already been registered!".format(
                    tour_id))
        self._tours[tour_id] = tour_data
        self._tour_titles[tour_id] = title
        # tour_id is a parameter, so the lambda binds it per registration.
        action = self.create_action(
            tour_id,
            text=title,
            triggered=lambda: self.show_tour(tour_id),
        )
        self.add_item_to_menu(action, menu=self.tours_menu)
    def show_tour(self, tour_id):
        """Play the registered tour identified by ``tour_id`` from step 0."""
        tour_data = self._tours[tour_id]
        dic = {'last': 0, 'tour': tour_data}
        self._tour_widget.set_tour(tour_id, dic, self._main)
        self._tour_widget.start_tour()
    def show_tour_message(self):
        """Show the dialog offering the default tour on first start."""
        self._tour_dialog.show()
        self._tour_dialog.raise_()
| true | true |
f72ceb2273d8e23fd38d9723f4f39d414670b675 | 375 | py | Python | Python/Swap Case.py | BiswajitDeb/My_all_programs | 4717cfc0b3b1aeda75f8eec0b7ff643e8556d262 | [
"Unlicense"
] | null | null | null | Python/Swap Case.py | BiswajitDeb/My_all_programs | 4717cfc0b3b1aeda75f8eec0b7ff643e8556d262 | [
"Unlicense"
] | null | null | null | Python/Swap Case.py | BiswajitDeb/My_all_programs | 4717cfc0b3b1aeda75f8eec0b7ff643e8556d262 | [
"Unlicense"
def swap_case(s):
    """Return *s* with lowercase letters uppercased and vice versa.

    Characters that are neither upper- nor lowercase (digits, punctuation,
    whitespace) pass through unchanged — exactly the behavior of the
    original loop, without its redundant empty-string appends.
    """
    return ''.join(c.upper() if c.islower() else
                   c.lower() if c.isupper() else c
                   for c in s)
if __name__ == '__main__':
    s = input()
    result = swap_case(s)
    print(result)


def swap_case(s):
    """Return *s* with lowercase letters uppercased and vice versa.

    Non-alphabetic characters pass through unchanged — identical to the
    original character-by-character loop.
    """
    return ''.join(c.upper() if c.islower() else
                   c.lower() if c.isupper() else c
                   for c in s)


if __name__ == '__main__':
    s = input()
    result = swap_case(s)
    print(result)
f72ced6147cf08a6b779747e4e46d56e84081e4e | 1,139 | py | Python | plenum/test/view_change/test_start_view_change_ts_set.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | 1 | 2019-03-19T23:44:56.000Z | 2019-03-19T23:44:56.000Z | plenum/test/view_change/test_start_view_change_ts_set.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | null | null | null | plenum/test/view_change/test_start_view_change_ts_set.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | 2 | 2017-12-13T21:14:54.000Z | 2021-06-06T15:48:03.000Z | from contextlib import ExitStack
import pytest
from plenum.test.helper import create_new_test_node
@pytest.fixture(scope="module")
def create_node_and_not_start(testNodeClass,
                              node_config_helper_class,
                              tconf,
                              tdir,
                              allPluginsPath,
                              looper,
                              tdirWithPoolTxns,
                              tdirWithDomainTxns,
                              tdirWithNodeKeepInited):
    """Yield a freshly created (but not started) node "Alpha"; stop it on teardown."""
    with ExitStack() as stack:
        new_node = stack.enter_context(
            create_new_test_node(testNodeClass, node_config_helper_class,
                                 "Alpha", tconf, tdir, allPluginsPath))
        yield new_node
        new_node.stop()
def test_start_view_change_ts_set(looper, create_node_and_not_start):
    """Starting a view change must record a non-zero start timestamp."""
    node = create_node_and_not_start
    node.start(looper)
    node.on_view_change_start()
    assert node.view_changer.start_view_change_ts != 0
| 33.5 | 74 | 0.515364 | from contextlib import ExitStack
import pytest
from plenum.test.helper import create_new_test_node
@pytest.fixture(scope="module")
def create_node_and_not_start(testNodeClass,
                              node_config_helper_class,
                              tconf,
                              tdir,
                              allPluginsPath,
                              looper,
                              tdirWithPoolTxns,
                              tdirWithDomainTxns,
                              tdirWithNodeKeepInited):
    """Module-scoped fixture yielding a created (not started) node "Alpha".

    The ExitStack closes the node's context after the module's tests run,
    and node.stop() is called on fixture teardown.
    """
    with ExitStack() as exitStack:
        node = exitStack.enter_context(create_new_test_node(testNodeClass,
                                                            node_config_helper_class,
                                                            "Alpha",
                                                            tconf,
                                                            tdir,
                                                            allPluginsPath))
        yield node
        node.stop()
def test_start_view_change_ts_set(looper, create_node_and_not_start):
    """Starting a view change must record a non-zero start timestamp."""
    node = create_node_and_not_start
    node.start(looper)
    node.on_view_change_start()
    assert node.view_changer.start_view_change_ts != 0
| true | true |
f72ced80e9d31b06e29ceff3d3a4092de61f6141 | 8,950 | py | Python | test/test_cognon_extended.py | pauh/neuron | c08f7033f954373617d7a58eb1e5b88f91ac1a27 | [
"Apache-2.0"
] | 3 | 2018-08-25T22:03:37.000Z | 2019-04-15T10:59:14.000Z | test/test_cognon_extended.py | pauh/neuron | c08f7033f954373617d7a58eb1e5b88f91ac1a27 | [
"Apache-2.0"
] | null | null | null | test/test_cognon_extended.py | pauh/neuron | c08f7033f954373617d7a58eb1e5b88f91ac1a27 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Pau Haro Negre
# based on C++ code by Carl Staelin Copyright 2009-2011
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cognon_extended import Neuron
from cognon_extended import Synapse
from cognon_extended import Word
from cognon_extended import WordSet
from nose.tools import assert_false
from nose.tools import assert_greater_equal
from nose.tools import assert_in
from nose.tools import assert_is_none
from nose.tools import assert_less
from nose.tools import assert_less_equal
from nose.tools import assert_true
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
class TestSynapse:
    """Unit tests for Synapse construction and attribute access."""

    @raises(TypeError)
    def test_construct_requires_args(self):
        """Constructing a Synapse without arguments must fail."""
        Synapse()

    def test_named_attributes(self):
        """offset and delay are exposed as named attributes."""
        synapse = Synapse(1, 0)
        eq_(synapse.offset, 1)
        eq_(synapse.delay, 0)
class TestWord:
    """Unit tests for Word construction and its synapse list."""

    def test_empty(self):
        """A Word built with no arguments has no synapses."""
        eq_(len(Word().synapses), 0)

    @raises(ValueError)
    def test_negative_synapse_offset(self):
        """A negative synapse offset is rejected."""
        Word([(-1, 0)])

    def test_fire_1_3_8(self):
        """The constructor keeps exactly the synapses it was given."""
        word = Word([(1, 0), (3, 0), (8, 0)])
        eq_(len(word.synapses), 3)
        for synapse in ((1, 0), (3, 0), (8, 0)):
            assert_in(synapse, word.synapses)

    def test_delay_0(self):
        """All delays are zero when every synapse was given delay 0."""
        word = Word([(1, 0), (3, 0), (8, 0)])
        for _offset, delay in word.synapses:
            eq_(delay, 0)
class TestWordSet:
    """Unit tests for WordSet: generated words stay within the given bounds."""

    def test_small(self):
        """Each word has num_active synapses with offsets/delays in range."""
        num_words = 5
        word_length = 16
        num_delays = 4
        num_active = 4
        ws = WordSet(num_words, word_length, num_delays, num_active)
        eq_(len(ws.words), num_words)
        eq_(len(ws.delays), num_words)
        for word in ws.words:
            eq_(len(word.synapses), num_active)
            for synapse in word.synapses:
                assert_greater_equal(synapse.offset, 0)
                assert_less(synapse.offset, word_length)
                assert_greater_equal(synapse.delay, 0)
                assert_less(synapse.delay, num_delays)

    def test_refractory_period(self):
        """With num_active=None and a refractory period, bounds still hold."""
        num_words = 5
        word_length = 16
        num_delays = 4
        num_active = None
        refractory_period = 4
        ws = WordSet(num_words, word_length, num_delays, num_active,
                     refractory_period)
        eq_(len(ws.words), num_words)
        eq_(len(ws.delays), num_words)
        # Synapse count per word is not fixed here, only the value ranges.
        for word in ws.words:
            for synapse in word.synapses:
                assert_greater_equal(synapse.offset, 0)
                assert_less(synapse.offset, word_length)
                assert_greater_equal(synapse.delay, 0)
                assert_less(synapse.delay, num_delays)
class TestNeuron:
    """Unit tests for Neuron defaults, exposure and training behavior."""

    def test_defaults(self):
        """A default Neuron has the documented parameters and synapse table."""
        n = Neuron()
        eq_(n.S0, 200)
        eq_(n.H, 5.0)
        eq_(n.G, 2.0)
        eq_(n.C, 1)
        eq_(n.D1, 4)
        eq_(n.D2, 7)
        assert_false(n.training)
        eq_(len(n.synapses), n.S0)
        assert_true((n.synapses['strength'] == 1.0).all())
        assert_true((n.synapses['delay'] >= 0).all())
        assert_true((n.synapses['delay'] < n.D2).all())
        assert_true((n.synapses['container'] >= 0).all())
        assert_true((n.synapses['container'] < n.C).all())

    def test_attributes_in_range(self):
        """Neuron parameters respect their mutual constraints (H>=1, D1<=D2...)."""
        n = Neuron()
        assert_greater_equal(n.H, 1.0)
        assert_greater_equal(n.C, 1)
        assert_less_equal(n.D1, n.D2)
        assert_true((n.synapses['strength'] >= 0.0).all())

    def test_expose_not_training(self):
        """Outside training, expose() fires only when enough synapses match."""
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 1, D2 = 1)
        w = Word([(1,0), (6,0), (9,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (3,0), (4,0), (5,0), (6,0), (8,0), (9,0), (14,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)

    @raises(IndexError)
    def test_expose_index_error(self):
        """A synapse offset beyond S0 raises IndexError."""
        n = Neuron(S0 = 16)
        w = Word([(16,0)])
        n.expose(w)

    def test_expose_multiple_containers(self):
        """With C=3 containers, expose() reports which container fired."""
        n = Neuron(S0 = 16, H = 2.0, G = 2.0, C = 3, D1 = 1, D2 = 1)
        # Set container assignment manually to remove randomness
        n.synapses['container'][ 0:10] = 0
        n.synapses['container'][10:14] = 1
        n.synapses['container'][14:16] = 2
        w = Word([(1,0), (2,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (2,0), (3,0), (4,0), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)
        w = Word([(10,0), (11,0), (12,0), (13,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 1)
        w = Word([(14,0), (15,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)

    def test_expose_with_delays(self):
        """With D2=3, expose() reports the delay slot at which firing occurred."""
        n = Neuron(S0 = 16, H = 2.0, G = 2.0, C = 1, D1 = 2, D2 = 3)
        # Set delay assignment manually to remove randomness
        n.synapses['delay'][ 0:10] = 0
        n.synapses['delay'][10:14] = 1
        n.synapses['delay'][14:16] = 2
        w = Word([(1,0), (2,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (2,0), (3,0), (4,0), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)
        w = Word([(1,1), (2,1), (3,1), (4,1), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 1)
        eq_(container, 0)
        w = Word([(1,0), (2,0), (3,0), (4,1), (5,1), (6,1)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(10,1), (11,1), (12,1), (13,1)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 2)
        eq_(container, 0)
        w = Word([(12,0), (13,0), (14,0), (15,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)

    def test_train(self):
        """After training on two patterns, unrelated words do not fire."""
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 1, D2 = 1)
        # Train neuron with 2 patterns
        wA = Word([(1,0), (6,0), (9,0), (14,0)])
        wB = Word([(3,0), (4,0), (9,0), (13,0)])
        n.start_training()
        assert_true(n.train(wA))
        assert_true(n.train(wB))
        n.finish_training()
        # Test recognition
        wD = Word([(2,0), (6,0), (12,0), (14,0)])
        fired, delay, container = n.expose(wD)
        assert_false(fired)
        wE = Word([(3,0), (7,0), (9,0), (13,0)])
        fired, delay, container = n.expose(wE)
        assert_false(fired)
        wF = Word([(1,0), (4,0), (9,0), (14,0)])
        fired, delay, container = n.expose(wF)
        assert_true(fired) # False alarm

    def test_train_not_training(self):
        """train() is a no-op (returns False) unless start_training was called."""
        n = Neuron()
        w = Word()
        assert_false(n.train(w))

    def test_train_with_delays(self):
        """Training with per-synapse delays still recognizes trained patterns."""
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 2, D2 = 2)
        # Fix neuron delays manually for the test
        n.synapses['delay'] = 1
        n.synapses['delay'][1] = 0
        n.synapses['delay'][14] = 0
        # Train neuron with 2 patterns
        wA = Word([(1,1), (6,0), (9,0), (14,1)])
        wB = Word([(3,0), (4,0), (9,0), (13,0)])
        n.start_training()
        assert_true(n.train(wA))
        assert_true(n.train(wB))
        n.finish_training()
        # Recognize
        wD = Word([(2,0), (6,0), (12,0), (14,0)])
        fired, delay, container = n.expose(wD)
        assert_false(fired)
        wE = Word([(1,1), (3,0), (9,0), (13,0)])
        fired, delay, container = n.expose(wE)
        assert_true(fired) # False alarm
        wF = Word([(1,0), (4,1), (7,0), (9,0), (11,0), (14,0)])
        fired, delay, container = n.expose(wF)
        assert_false(fired)
| 30.13468 | 75 | 0.562682 |
from cognon_extended import Neuron
from cognon_extended import Synapse
from cognon_extended import Word
from cognon_extended import WordSet
from nose.tools import assert_false
from nose.tools import assert_greater_equal
from nose.tools import assert_in
from nose.tools import assert_is_none
from nose.tools import assert_less
from nose.tools import assert_less_equal
from nose.tools import assert_true
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
class TestSynapse:
    """Unit tests for Synapse construction and attribute access."""

    @raises(TypeError)
    def test_construct_requires_args(self):
        """Constructing a Synapse without arguments raises TypeError."""
        s = Synapse()

    def test_named_attributes(self):
        """offset and delay are exposed as named attributes."""
        s = Synapse(1, 0)
        eq_(s.offset, 1)
        eq_(s.delay, 0)
class TestWord:
    """Unit tests for Word construction and its synapse list."""

    def test_empty(self):
        """A Word built with no arguments has no synapses."""
        w = Word()
        eq_(len(w.synapses), 0)

    @raises(ValueError)
    def test_negative_synapse_offset(self):
        """A negative synapse offset raises ValueError."""
        w = Word([(-1, 0)])

    def test_fire_1_3_8(self):
        """The constructor keeps exactly the synapses it was given."""
        w = Word([(1,0),(3,0),(8,0)])
        eq_(len(w.synapses), 3)
        assert_in((1,0), w.synapses)
        assert_in((3,0), w.synapses)
        assert_in((8,0), w.synapses)

    def test_delay_0(self):
        """All delays are zero when every synapse was given delay 0."""
        w = Word([(1,0),(3,0),(8,0)])
        for offset, delay in w.synapses:
            eq_(delay, 0)
class TestWordSet:
    """Unit tests for WordSet: generated words stay within the given bounds."""

    def test_small(self):
        """Each word has num_active synapses with offsets/delays in range."""
        num_words = 5
        word_length = 16
        num_delays = 4
        num_active = 4
        ws = WordSet(num_words, word_length, num_delays, num_active)
        eq_(len(ws.words), num_words)
        eq_(len(ws.delays), num_words)
        for word in ws.words:
            eq_(len(word.synapses), num_active)
            for synapse in word.synapses:
                assert_greater_equal(synapse.offset, 0)
                assert_less(synapse.offset, word_length)
                assert_greater_equal(synapse.delay, 0)
                assert_less(synapse.delay, num_delays)

    def test_refractory_period(self):
        """With num_active=None and a refractory period, bounds still hold."""
        num_words = 5
        word_length = 16
        num_delays = 4
        num_active = None
        refractory_period = 4
        ws = WordSet(num_words, word_length, num_delays, num_active,
                     refractory_period)
        eq_(len(ws.words), num_words)
        eq_(len(ws.delays), num_words)
        # Synapse count per word is not fixed here, only the value ranges.
        for word in ws.words:
            for synapse in word.synapses:
                assert_greater_equal(synapse.offset, 0)
                assert_less(synapse.offset, word_length)
                assert_greater_equal(synapse.delay, 0)
                assert_less(synapse.delay, num_delays)
class TestNeuron:
    """Unit tests for Neuron defaults, exposure and training behavior."""

    def test_defaults(self):
        """A default Neuron has the documented parameters and synapse table."""
        n = Neuron()
        eq_(n.S0, 200)
        eq_(n.H, 5.0)
        eq_(n.G, 2.0)
        eq_(n.C, 1)
        eq_(n.D1, 4)
        eq_(n.D2, 7)
        assert_false(n.training)
        eq_(len(n.synapses), n.S0)
        assert_true((n.synapses['strength'] == 1.0).all())
        assert_true((n.synapses['delay'] >= 0).all())
        assert_true((n.synapses['delay'] < n.D2).all())
        assert_true((n.synapses['container'] >= 0).all())
        assert_true((n.synapses['container'] < n.C).all())

    def test_attributes_in_range(self):
        """Neuron parameters respect their mutual constraints (H>=1, D1<=D2...)."""
        n = Neuron()
        assert_greater_equal(n.H, 1.0)
        assert_greater_equal(n.C, 1)
        assert_less_equal(n.D1, n.D2)
        assert_true((n.synapses['strength'] >= 0.0).all())

    def test_expose_not_training(self):
        """Outside training, expose() fires only when enough synapses match."""
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 1, D2 = 1)
        w = Word([(1,0), (6,0), (9,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (3,0), (4,0), (5,0), (6,0), (8,0), (9,0), (14,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)

    @raises(IndexError)
    def test_expose_index_error(self):
        """A synapse offset beyond S0 raises IndexError."""
        n = Neuron(S0 = 16)
        w = Word([(16,0)])
        n.expose(w)

    def test_expose_multiple_containers(self):
        """With C=3 containers, expose() reports which container fired."""
        n = Neuron(S0 = 16, H = 2.0, G = 2.0, C = 3, D1 = 1, D2 = 1)
        # Deterministic container assignment (removes constructor randomness).
        n.synapses['container'][ 0:10] = 0
        n.synapses['container'][10:14] = 1
        n.synapses['container'][14:16] = 2
        w = Word([(1,0), (2,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (2,0), (3,0), (4,0), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)
        w = Word([(10,0), (11,0), (12,0), (13,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 1)
        w = Word([(14,0), (15,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)

    def test_expose_with_delays(self):
        """With D2=3, expose() reports the delay slot at which firing occurred."""
        n = Neuron(S0 = 16, H = 2.0, G = 2.0, C = 1, D1 = 2, D2 = 3)
        # Deterministic delay assignment (removes constructor randomness).
        n.synapses['delay'][ 0:10] = 0
        n.synapses['delay'][10:14] = 1
        n.synapses['delay'][14:16] = 2
        w = Word([(1,0), (2,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(1,0), (2,0), (3,0), (4,0), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 0)
        eq_(container, 0)
        w = Word([(1,1), (2,1), (3,1), (4,1), (5,0), (6,0)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 1)
        eq_(container, 0)
        w = Word([(1,0), (2,0), (3,0), (4,1), (5,1), (6,1)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)
        w = Word([(10,1), (11,1), (12,1), (13,1)])
        fired, delay, container = n.expose(w)
        assert_true(fired)
        eq_(delay, 2)
        eq_(container, 0)
        w = Word([(12,0), (13,0), (14,0), (15,0)])
        fired, delay, container = n.expose(w)
        assert_false(fired)
        assert_is_none(delay)
        assert_is_none(container)

    def test_train(self):
        """After training on two patterns, unrelated words do not fire."""
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 1, D2 = 1)
        wA = Word([(1,0), (6,0), (9,0), (14,0)])
        wB = Word([(3,0), (4,0), (9,0), (13,0)])
        n.start_training()
        assert_true(n.train(wA))
        assert_true(n.train(wB))
        n.finish_training()
        wD = Word([(2,0), (6,0), (12,0), (14,0)])
        fired, delay, container = n.expose(wD)
        assert_false(fired)
        wE = Word([(3,0), (7,0), (9,0), (13,0)])
        fired, delay, container = n.expose(wE)
        assert_false(fired)
        wF = Word([(1,0), (4,0), (9,0), (14,0)])
        fired, delay, container = n.expose(wF)
        assert_true(fired)  # expected false alarm: wF overlaps both patterns

    def test_train_not_training(self):
        """train() is a no-op (returns False) unless start_training was called."""
        n = Neuron()
        w = Word()
        assert_false(n.train(w))

    def test_train_with_delays(self):
        """Training with per-synapse delays still recognizes trained patterns."""
        n = Neuron(S0 = 16, H = 4.0, G = 2.0, C = 1, D1 = 2, D2 = 2)
        # Deterministic delay assignment for reproducibility.
        n.synapses['delay'] = 1
        n.synapses['delay'][1] = 0
        n.synapses['delay'][14] = 0
        wA = Word([(1,1), (6,0), (9,0), (14,1)])
        wB = Word([(3,0), (4,0), (9,0), (13,0)])
        n.start_training()
        assert_true(n.train(wA))
        assert_true(n.train(wB))
        n.finish_training()
        wD = Word([(2,0), (6,0), (12,0), (14,0)])
        fired, delay, container = n.expose(wD)
        assert_false(fired)
        wE = Word([(1,1), (3,0), (9,0), (13,0)])
        fired, delay, container = n.expose(wE)
        assert_true(fired)  # expected false alarm: wE overlaps both patterns
        wF = Word([(1,0), (4,1), (7,0), (9,0), (11,0), (14,0)])
        fired, delay, container = n.expose(wF)
        assert_false(fired)
| true | true |
f72ced891d0ab304ac2986df0441a610cd13e4c7 | 896 | py | Python | 2020/day25/day25.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
] | 3 | 2020-12-03T23:20:27.000Z | 2020-12-03T23:20:53.000Z | 2020/day25/day25.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
] | null | null | null | 2020/day25/day25.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
def main():
    """Solve AoC 2020 day 25: read both public keys from in.txt and print
    the loop sizes and the shared encryption key."""
    with open('in.txt') as infile:
        card_pubkey, door_pubkey = (int(line) for line in infile.read().splitlines())
    print('card public key:', card_pubkey)
    print('door public key:', door_pubkey)
    card_loops = get_loop_size(card_pubkey)
    print('card loop size:', card_loops)
    door_loops = get_loop_size(door_pubkey)
    print('door loop size:', door_loops)
    print('encryption key:', get_encryption_key(card_loops, door_pubkey))
def get_loop_size(pubkey):
    """Brute-force the loop size: repeatedly transform subject number 7
    (mod 20201227) until the running value equals pubkey."""
    value, loops = 1, 0
    while value != pubkey:
        value = (value * 7) % 20201227
        loops += 1
    return loops
def get_encryption_key(first_loop_size, second_public_key):
    """Transform second_public_key by first_loop_size rounds mod 20201227.

    Equivalent to the original repeated-multiplication loop, but the
    three-argument pow() computes the modular exponentiation in
    O(log first_loop_size) multiplications instead of O(first_loop_size).
    """
    return pow(second_public_key, first_loop_size, 20201227)
# Run the solver when executed as a script.
if __name__ == '__main__':
    main()
def main():
    """Read the two public keys from in.txt and print loop sizes and the
    shared encryption key (AoC 2020 day 25)."""
    with open('in.txt') as handle:
        keys = [int(line) for line in handle.read().splitlines()]
    card_pubkey, door_pubkey = keys
    print('card public key:', card_pubkey)
    print('door public key:', door_pubkey)
    card_loop_size = get_loop_size(card_pubkey)
    print('card loop size:', card_loop_size)
    door_loop_size = get_loop_size(door_pubkey)
    print('door loop size:', door_loop_size)
    print('encryption key:', get_encryption_key(card_loop_size, door_pubkey))
def get_loop_size(pubkey):
    """Return how many subject-7 transform rounds (mod 20201227) yield *pubkey*."""
    loops = 0
    value = 1
    while True:
        if value == pubkey:
            return loops
        value = (value * 7) % 20201227
        loops += 1
def get_encryption_key(first_loop_size, second_public_key):
    """Return the shared encryption key for the card/door handshake.

    Equivalent to multiplying by *second_public_key* (mod 20201227) for
    *first_loop_size* rounds; implemented with the built-in three-argument
    ``pow`` for O(log n) modular exponentiation instead of a linear loop.
    """
    return pow(second_public_key, first_loop_size, 20201227)
if __name__ == '__main__':
main()
| true | true |
f72cedd7827c3a103f775708f54a774487557133 | 4,338 | py | Python | doom/test.py | luxinglong/ViZDoom-SL | fbc54c401b1ca320e9e804f2c97fdedc5d0c534d | [
"MIT"
] | null | null | null | doom/test.py | luxinglong/ViZDoom-SL | fbc54c401b1ca320e9e804f2c97fdedc5d0c534d | [
"MIT"
] | null | null | null | doom/test.py | luxinglong/ViZDoom-SL | fbc54c401b1ca320e9e804f2c97fdedc5d0c534d | [
"MIT"
] | null | null | null | import sys
import argparse
import numpy as np
from actions import ActionBuilder
from game import Game
# use_continuous speed action_combinations crouch freelook
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
def bool_flag(string):
    """
    Parse boolean arguments from the command line.

    Accepts on/off, true/false and 1/0 (case-insensitive); raises
    ``argparse.ArgumentTypeError`` for anything else.
    """
    lowered = string.lower()
    if lowered in TRUTHY_STRINGS:
        return True
    if lowered in FALSY_STRINGS:
        return False
    raise argparse.ArgumentTypeError("invalid value for a boolean flag. "
                                     "use 0 or 1")
def main():
parser = argparse.ArgumentParser(description='LUBAN runner')
parser.add_argument("--use_continuous", type=bool_flag, default=False,
help="weather use continuous actions")
# Available actions
# combination of actions the agent is allowed to do.
# this is for non-continuous mode only, and is ignored in continuous mode
parser.add_argument("--action_combinations", type=str,
default='move_fb+turn_lr+move_lr+attack',
help="Allowed combinations of actions")
# freelook: allow the agent to look up and down
parser.add_argument("--freelook", type=bool_flag, default=False,
help="Enable freelook (look up / look down)")
parser.add_argument("--human_player", type=bool_flag, default=False,
help="DoomGame mode")
# speed and crouch buttons: in non-continuous mode, the network can not
# have control on these buttons, and they must be set to always 'on' or
# 'off'. In continuous mode, the network can manually control crouch and
# speed.
parser.add_argument("--speed", type=str, default='off',
help="Crouch: on / off / manual")
parser.add_argument("--crouch", type=str, default='off',
help="Crouch: on / off / manual")
# for process_buffers
parser.add_argument("--height", type=int, default=60,
help="Image height")
parser.add_argument("--width", type=int, default=108,
help="Image width")
parser.add_argument("--gray", type=bool_flag, default=False,
help="Use grayscale")
parser.add_argument("--use_screen_buffer", type=bool_flag, default=True,
help="Use the screen buffer")
parser.add_argument("--use_depth_buffer", type=bool_flag, default=False,
help="Use the depth buffer")
parser.add_argument("--labels_mapping", type=str, default='',
help="Map labels to different feature maps")
parser.add_argument("--dump_freq", type=int, default=0,
help="Dump every X iterations (0 to disable)")
# for observe_state
parser.add_argument("--hist_size", type=int, default=4,
help="History size")
params, unparsed = parser.parse_known_args(sys.argv)
print(sys.argv)
params.game_variables = [('health', 101), ('sel_ammo', 301)]
print(params)
action_builder = ActionBuilder(params)
print(action_builder.n_actions)
print(action_builder.available_actions)
game = Game(
scenario='full_deathmatch',
action_builder=action_builder,
score_variable='USER2',
freedoom=True,
screen_resolution='RES_800X450',
use_screen_buffer=True,
use_depth_buffer=True,
labels_mapping="",
game_features="target,enemy",
mode=('SPECTATOR' if params.human_player else 'PLAYER'),
render_hud=True,
render_crosshair=True,
render_weapon=True,
freelook=params.freelook,
visible=0,
n_bots=10,
use_scripted_marines=True
)
game.start(map_id = 2)
game.init_bots_health(100)
episodes = 100000
last_states = []
for _ in range(episodes):
if game.is_player_dead():
game.respawn_player()
game.observe_state(params, last_states)
action = np.random.randint(0, 29)
game.make_action(action, frame_skip=1, sleep=None)
game.close()
if __name__ == '__main__':
main()
| 37.396552 | 78 | 0.605579 | import sys
import argparse
import numpy as np
from actions import ActionBuilder
from game import Game
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}
def bool_flag(string):
    """Convert an on/off style command-line string into a bool.

    Raises ``argparse.ArgumentTypeError`` when the value is neither a
    truthy nor a falsy spelling.
    """
    value = string.lower()
    if value in FALSY_STRINGS:
        return False
    if value in TRUTHY_STRINGS:
        return True
    raise argparse.ArgumentTypeError("invalid value for a boolean flag. "
                                     "use 0 or 1")
def main():
    """Parse command-line options, build the ViZDoom action space and run a
    random-action deathmatch episode loop (smoke-test / demo entry point)."""
    parser = argparse.ArgumentParser(description='LUBAN runner')
    parser.add_argument("--use_continuous", type=bool_flag, default=False,
                        help="weather use continuous actions")
    # Combinations of buttons the agent may press simultaneously.
    # Non-continuous mode only; ignored in continuous mode.
    parser.add_argument("--action_combinations", type=str,
                        default='move_fb+turn_lr+move_lr+attack',
                        help="Allowed combinations of actions")
    # freelook: allow the agent to look up and down.
    parser.add_argument("--freelook", type=bool_flag, default=False,
                        help="Enable freelook (look up / look down)")
    parser.add_argument("--human_player", type=bool_flag, default=False,
                        help="DoomGame mode")
    # speed / crouch buttons: fixed 'on'/'off' in non-continuous mode,
    # manually controllable by the network in continuous mode.
    parser.add_argument("--speed", type=str, default='off',
                        help="Crouch: on / off / manual")
    parser.add_argument("--crouch", type=str, default='off',
                        help="Crouch: on / off / manual")
    # Frame preprocessing options (consumed by process_buffers).
    parser.add_argument("--height", type=int, default=60,
                        help="Image height")
    parser.add_argument("--width", type=int, default=108,
                        help="Image width")
    parser.add_argument("--gray", type=bool_flag, default=False,
                        help="Use grayscale")
    parser.add_argument("--use_screen_buffer", type=bool_flag, default=True,
                        help="Use the screen buffer")
    parser.add_argument("--use_depth_buffer", type=bool_flag, default=False,
                        help="Use the depth buffer")
    parser.add_argument("--labels_mapping", type=str, default='',
                        help="Map labels to different feature maps")
    parser.add_argument("--dump_freq", type=int, default=0,
                        help="Dump every X iterations (0 to disable)")
    # Observation history length (consumed by observe_state).
    parser.add_argument("--hist_size", type=int, default=4,
                        help="History size")
    params, unparsed = parser.parse_known_args(sys.argv)
    print(sys.argv)
    params.game_variables = [('health', 101), ('sel_ammo', 301)]
    print(params)
    action_builder = ActionBuilder(params)
    print(action_builder.n_actions)
    print(action_builder.available_actions)
    game = Game(
        scenario='full_deathmatch',
        action_builder=action_builder,
        score_variable='USER2',
        freedoom=True,
        screen_resolution='RES_800X450',
        use_screen_buffer=True,
        use_depth_buffer=True,
        labels_mapping="",
        game_features="target,enemy",
        mode=('SPECTATOR' if params.human_player else 'PLAYER'),
        render_hud=True,
        render_crosshair=True,
        render_weapon=True,
        freelook=params.freelook,
        visible=0,
        n_bots=10,
        use_scripted_marines=True
    )
    game.start(map_id = 2)
    game.init_bots_health(100)
    episodes = 100000
    last_states = []
    # Random-policy episode loop: respawn when dead, observe, act.
    for _ in range(episodes):
        if game.is_player_dead():
            game.respawn_player()
        game.observe_state(params, last_states)
        # Uniformly random action id — presumably 29 discrete actions;
        # TODO confirm against action_builder.n_actions.
        action = np.random.randint(0, 29)
        game.make_action(action, frame_skip=1, sleep=None)
    game.close()
main()
| true | true |
f72cef269ff93973871d0381495aa221dec684e9 | 576 | py | Python | processdata/migrations/0001_initial.py | shinysong/covid19-dashboard | c4c536e3781caecb7f1cfcfdb27c1324fe493eb2 | [
"MIT"
] | null | null | null | processdata/migrations/0001_initial.py | shinysong/covid19-dashboard | c4c536e3781caecb7f1cfcfdb27c1324fe493eb2 | [
"MIT"
] | null | null | null | processdata/migrations/0001_initial.py | shinysong/covid19-dashboard | c4c536e3781caecb7f1cfcfdb27c1324fe493eb2 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2022-03-29 07:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``test_api`` model/table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='test_api',
            fields=[
                # Auto-created primary key (Django's implicit ``id`` field).
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dmName', models.TextField()),
                ('mainURL', models.URLField()),
                ('dsCount', models.IntegerField()),
            ],
        ),
    ]
| 24 | 114 | 0.548611 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``test_api`` model/table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='test_api',
            fields=[
                # Auto-created primary key (Django's implicit ``id`` field).
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dmName', models.TextField()),
                ('mainURL', models.URLField()),
                ('dsCount', models.IntegerField()),
            ],
        ),
    ]
| true | true |
f72cefc08416d058c79ffed4f4ba0c15d2eb9ff0 | 17,944 | py | Python | homeassistant/components/climate/wink.py | jamescurtin/home-assistant | 6a9968ccb9b0082f5629e50955549d432aba7d90 | [
"Apache-2.0"
] | 2 | 2020-02-20T18:47:55.000Z | 2021-11-09T11:33:28.000Z | homeassistant/components/climate/wink.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | 1 | 2021-02-08T20:56:06.000Z | 2021-02-08T20:56:06.000Z | homeassistant/components/climate/wink.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | 1 | 2019-09-15T04:45:12.000Z | 2019-09-15T04:45:12.000Z | """
Support for Wink thermostats, Air Conditioners, and Water Heaters.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.wink/
"""
import logging
import asyncio
from homeassistant.components.wink import WinkDevice, DOMAIN
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice,
ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE, STATE_FAN_ONLY,
ATTR_CURRENT_HUMIDITY, STATE_ECO, STATE_ELECTRIC,
STATE_PERFORMANCE, STATE_HIGH_DEMAND,
STATE_HEAT_PUMP, STATE_GAS)
from homeassistant.const import (
TEMP_CELSIUS, STATE_ON,
STATE_OFF, STATE_UNKNOWN)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['wink']
SPEED_LOW = 'low'
SPEED_MEDIUM = 'medium'
SPEED_HIGH = 'high'
HA_STATE_TO_WINK = {STATE_AUTO: 'auto',
STATE_ECO: 'eco',
STATE_FAN_ONLY: 'fan_only',
STATE_HEAT: 'heat_only',
STATE_COOL: 'cool_only',
STATE_PERFORMANCE: 'performance',
STATE_HIGH_DEMAND: 'high_demand',
STATE_HEAT_PUMP: 'heat_pump',
STATE_ELECTRIC: 'electric_only',
STATE_GAS: 'gas',
STATE_OFF: 'off'}
WINK_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_WINK.items()}
ATTR_EXTERNAL_TEMPERATURE = "external_temperature"
ATTR_SMART_TEMPERATURE = "smart_temperature"
ATTR_ECO_TARGET = "eco_target"
ATTR_OCCUPIED = "occupied"
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Wink climate devices."""
    import pywink

    # Skip devices that were already registered under another platform run.
    seen_ids = hass.data[DOMAIN]['unique_ids']

    for thermostat in pywink.get_thermostats():
        if thermostat.object_id() + thermostat.name() not in seen_ids:
            add_devices([WinkThermostat(thermostat, hass)])

    for air_conditioner in pywink.get_air_conditioners():
        if air_conditioner.object_id() + air_conditioner.name() not in seen_ids:
            add_devices([WinkAC(air_conditioner, hass)])

    for heater in pywink.get_water_heaters():
        if heater.object_id() + heater.name() not in seen_ids:
            add_devices([WinkWaterHeater(heater, hass)])
# pylint: disable=abstract-method
class WinkThermostat(WinkDevice, ClimateDevice):
"""Representation of a Wink thermostat."""
@asyncio.coroutine
def async_added_to_hass(self):
"""Callback when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['climate'].append(self)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
# The Wink API always returns temp in Celsius
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
data = {}
target_temp_high = self.target_temperature_high
target_temp_low = self.target_temperature_low
if target_temp_high is not None:
data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
self.target_temperature_high)
if target_temp_low is not None:
data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
self.target_temperature_low)
if self.external_temperature:
data[ATTR_EXTERNAL_TEMPERATURE] = self._convert_for_display(
self.external_temperature)
if self.smart_temperature:
data[ATTR_SMART_TEMPERATURE] = self.smart_temperature
if self.occupied:
data[ATTR_OCCUPIED] = self.occupied
if self.eco_target:
data[ATTR_ECO_TARGET] = self.eco_target
current_humidity = self.current_humidity
if current_humidity is not None:
data[ATTR_CURRENT_HUMIDITY] = current_humidity
return data
@property
def current_temperature(self):
"""Return the current temperature."""
return self.wink.current_temperature()
@property
def current_humidity(self):
"""Return the current humidity."""
if self.wink.current_humidity() is not None:
# The API states humidity will be a float 0-1
# the only example API response with humidity listed show an int
# This will address both possibilities
if self.wink.current_humidity() < 1:
return self.wink.current_humidity() * 100
return self.wink.current_humidity()
return None
@property
def external_temperature(self):
"""Return the current external temperature."""
return self.wink.current_external_temperature()
@property
def smart_temperature(self):
"""Return the current average temp of all remote sensor."""
return self.wink.current_smart_temperature()
@property
def eco_target(self):
"""Return status of eco target (Is the termostat in eco mode)."""
return self.wink.eco_target()
@property
def occupied(self):
"""Return status of if the thermostat has detected occupancy."""
return self.wink.occupied()
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if not self.wink.is_on():
current_op = STATE_OFF
else:
current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode())
if current_op == 'aux':
return STATE_HEAT
if current_op is None:
current_op = STATE_UNKNOWN
return current_op
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
target_hum = None
if self.wink.current_humidifier_mode() == 'on':
if self.wink.current_humidifier_set_point() is not None:
target_hum = self.wink.current_humidifier_set_point() * 100
elif self.wink.current_dehumidifier_mode() == 'on':
if self.wink.current_dehumidifier_set_point() is not None:
target_hum = self.wink.current_dehumidifier_set_point() * 100
else:
target_hum = None
return target_hum
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.current_operation != STATE_AUTO and not self.is_away_mode_on:
if self.current_operation == STATE_COOL:
return self.wink.current_max_set_point()
elif self.current_operation == STATE_HEAT:
return self.wink.current_min_set_point()
return None
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.wink.current_min_set_point()
return None
@property
def target_temperature_high(self):
"""Return the higher bound temperature we try to reach."""
if self.current_operation == STATE_AUTO:
return self.wink.current_max_set_point()
return None
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self.wink.away()
@property
def is_aux_heat_on(self):
"""Return true if aux heater."""
if 'aux' not in self.wink.hvac_modes():
return None
if self.wink.current_hvac_mode() == 'aux':
return True
return False
def set_temperature(self, **kwargs):
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.current_operation == STATE_COOL:
target_temp_high = target_temp
if self.current_operation == STATE_HEAT:
target_temp_low = target_temp
if target_temp_low is not None:
target_temp_low = target_temp_low
if target_temp_high is not None:
target_temp_high = target_temp_high
self.wink.set_temperature(target_temp_low, target_temp_high)
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
# The only way to disable aux heat is with the toggle
if self.is_aux_heat_on and op_mode_to_set == STATE_HEAT:
return
self.wink.set_operation_mode(op_mode_to_set)
@property
def operation_list(self):
"""List of available operation modes."""
op_list = ['off']
modes = self.wink.hvac_modes()
for mode in modes:
if mode == 'aux':
continue
ha_mode = WINK_STATE_TO_HA.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
else:
error = "Invaid operation mode mapping. " + mode + \
" doesn't map. Please report this."
_LOGGER.error(error)
return op_list
def turn_away_mode_on(self):
"""Turn away on."""
self.wink.set_away_mode()
def turn_away_mode_off(self):
"""Turn away off."""
self.wink.set_away_mode(False)
@property
def current_fan_mode(self):
"""Return whether the fan is on."""
if self.wink.current_fan_mode() == 'on':
return STATE_ON
elif self.wink.current_fan_mode() == 'auto':
return STATE_AUTO
# No Fan available so disable slider
return None
@property
def fan_list(self):
"""List of available fan modes."""
if self.wink.has_fan():
return self.wink.fan_modes()
return None
def set_fan_mode(self, fan):
"""Turn fan on/off."""
self.wink.set_fan_mode(fan.lower())
def turn_aux_heat_on(self):
"""Turn auxiliary heater on."""
self.wink.set_operation_mode('aux')
def turn_aux_heat_off(self):
"""Turn auxiliary heater off."""
self.set_operation_mode(STATE_HEAT)
@property
def min_temp(self):
"""Return the minimum temperature."""
minimum = 7 # Default minimum
min_min = self.wink.min_min_set_point()
min_max = self.wink.min_max_set_point()
return_value = minimum
if self.current_operation == STATE_HEAT:
if min_min:
return_value = min_min
else:
return_value = minimum
elif self.current_operation == STATE_COOL:
if min_max:
return_value = min_max
else:
return_value = minimum
elif self.current_operation == STATE_AUTO:
if min_min and min_max:
return_value = min(min_min, min_max)
else:
return_value = minimum
else:
return_value = minimum
return return_value
@property
def max_temp(self):
"""Return the maximum temperature."""
maximum = 35 # Default maximum
max_min = self.wink.max_min_set_point()
max_max = self.wink.max_max_set_point()
return_value = maximum
if self.current_operation == STATE_HEAT:
if max_min:
return_value = max_min
else:
return_value = maximum
elif self.current_operation == STATE_COOL:
if max_max:
return_value = max_max
else:
return_value = maximum
elif self.current_operation == STATE_AUTO:
if max_min and max_max:
return_value = min(max_min, max_max)
else:
return_value = maximum
else:
return_value = maximum
return return_value
class WinkAC(WinkDevice, ClimateDevice):
    """Representation of a Wink air conditioner."""

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # The Wink API always returns temp in Celsius
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        data = {}
        target_temp_high = self.target_temperature_high
        target_temp_low = self.target_temperature_low
        if target_temp_high is not None:
            data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
                self.target_temperature_high)
        if target_temp_low is not None:
            data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
                self.target_temperature_low)
        data["total_consumption"] = self.wink.total_consumption()
        data["schedule_enabled"] = self.wink.schedule_enabled()
        return data

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self.wink.current_temperature()

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        if not self.wink.is_on():
            current_op = STATE_OFF
        else:
            current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode())
            if current_op is None:
                current_op = STATE_UNKNOWN
        return current_op

    @property
    def operation_list(self):
        """List of available operation modes."""
        op_list = ['off']
        modes = self.wink.modes()
        for mode in modes:
            ha_mode = WINK_STATE_TO_HA.get(mode)
            if ha_mode is not None:
                op_list.append(ha_mode)
            else:
                error = "Invaid operation mode mapping. " + mode + \
                    " doesn't map. Please report this."
                _LOGGER.error(error)
        return op_list

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        self.wink.set_temperature(target_temp)

    def set_operation_mode(self, operation_mode):
        """Set operation mode."""
        op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
        # Wink ACs use 'auto_eco' where the shared mapping says 'eco'.
        if op_mode_to_set == 'eco':
            op_mode_to_set = 'auto_eco'
        self.wink.set_operation_mode(op_mode_to_set)

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.wink.current_max_set_point()

    @property
    def current_fan_mode(self):
        """Return the current fan mode."""
        # Wink reports fan speed as a fraction in (0, 1]; NOTE(review):
        # speeds in (0.4, 0.5] and at/below 0.3 fall through to
        # STATE_UNKNOWN — confirm whether the device can report them.
        speed = self.wink.current_fan_speed()
        if speed <= 0.4 and speed > 0.3:
            return SPEED_LOW
        elif speed <= 0.8 and speed > 0.5:
            return SPEED_MEDIUM
        elif speed <= 1.0 and speed > 0.8:
            return SPEED_HIGH
        return STATE_UNKNOWN

    @property
    def fan_list(self):
        """Return a list of available fan modes."""
        return [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]

    def set_fan_mode(self, fan):
        """Set fan speed.

        Raises:
            ValueError: if *fan* is not one of the supported fan modes.
                (The original code left ``speed`` unbound for unknown
                modes, surfacing as a confusing NameError.)
        """
        if fan == SPEED_LOW:
            speed = 0.4
        elif fan == SPEED_MEDIUM:
            speed = 0.8
        elif fan == SPEED_HIGH:
            speed = 1.0
        else:
            raise ValueError("Unsupported fan mode: {}".format(fan))
        self.wink.set_ac_fan_speed(speed)
class WinkWaterHeater(WinkDevice, ClimateDevice):
    """Representation of a Wink water heater."""

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # The Wink API always returns temp in Celsius
        return TEMP_CELSIUS

    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        data = {}
        data["vacation_mode"] = self.wink.vacation_mode_enabled()
        data["rheem_type"] = self.wink.rheem_type()
        return data

    @property
    def current_operation(self):
        """
        Return current operation one of the following.

        ["eco", "performance", "heat_pump",
        "high_demand", "electric_only", "gas"]
        """
        if not self.wink.is_on():
            current_op = STATE_OFF
        else:
            current_op = WINK_STATE_TO_HA.get(self.wink.current_mode())
            if current_op is None:
                current_op = STATE_UNKNOWN
        return current_op

    @property
    def operation_list(self):
        """List of available operation modes."""
        op_list = ['off']
        modes = self.wink.modes()
        for mode in modes:
            # 'aux' is an HVAC-only mode; water heaters never expose it
            # to Home Assistant.
            if mode == 'aux':
                continue
            ha_mode = WINK_STATE_TO_HA.get(mode)
            if ha_mode is not None:
                op_list.append(ha_mode)
            else:
                error = "Invaid operation mode mapping. " + mode + \
                    " doesn't map. Please report this."
                _LOGGER.error(error)
        return op_list

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        target_temp = kwargs.get(ATTR_TEMPERATURE)
        self.wink.set_temperature(target_temp)

    def set_operation_mode(self, operation_mode):
        """Set operation mode."""
        op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
        self.wink.set_operation_mode(op_mode_to_set)

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self.wink.current_set_point()

    def turn_away_mode_on(self):
        """Turn away on (maps to the water heater's vacation mode)."""
        self.wink.set_vacation_mode(True)

    def turn_away_mode_off(self):
        """Turn away off (disables vacation mode)."""
        self.wink.set_vacation_mode(False)

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self.wink.min_set_point()

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self.wink.max_set_point()
| 33.729323 | 77 | 0.615805 | import logging
import asyncio
from homeassistant.components.wink import WinkDevice, DOMAIN
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice,
ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE, STATE_FAN_ONLY,
ATTR_CURRENT_HUMIDITY, STATE_ECO, STATE_ELECTRIC,
STATE_PERFORMANCE, STATE_HIGH_DEMAND,
STATE_HEAT_PUMP, STATE_GAS)
from homeassistant.const import (
TEMP_CELSIUS, STATE_ON,
STATE_OFF, STATE_UNKNOWN)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['wink']
SPEED_LOW = 'low'
SPEED_MEDIUM = 'medium'
SPEED_HIGH = 'high'
HA_STATE_TO_WINK = {STATE_AUTO: 'auto',
STATE_ECO: 'eco',
STATE_FAN_ONLY: 'fan_only',
STATE_HEAT: 'heat_only',
STATE_COOL: 'cool_only',
STATE_PERFORMANCE: 'performance',
STATE_HIGH_DEMAND: 'high_demand',
STATE_HEAT_PUMP: 'heat_pump',
STATE_ELECTRIC: 'electric_only',
STATE_GAS: 'gas',
STATE_OFF: 'off'}
WINK_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_WINK.items()}
ATTR_EXTERNAL_TEMPERATURE = "external_temperature"
ATTR_SMART_TEMPERATURE = "smart_temperature"
ATTR_ECO_TARGET = "eco_target"
ATTR_OCCUPIED = "occupied"
def setup_platform(hass, config, add_devices, discovery_info=None):
import pywink
for climate in pywink.get_thermostats():
_id = climate.object_id() + climate.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkThermostat(climate, hass)])
for climate in pywink.get_air_conditioners():
_id = climate.object_id() + climate.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkAC(climate, hass)])
for water_heater in pywink.get_water_heaters():
_id = water_heater.object_id() + water_heater.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkWaterHeater(water_heater, hass)])
class WinkThermostat(WinkDevice, ClimateDevice):
@asyncio.coroutine
def async_added_to_hass(self):
self.hass.data[DOMAIN]['entities']['climate'].append(self)
@property
def temperature_unit(self):
return TEMP_CELSIUS
@property
def device_state_attributes(self):
data = {}
target_temp_high = self.target_temperature_high
target_temp_low = self.target_temperature_low
if target_temp_high is not None:
data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
self.target_temperature_high)
if target_temp_low is not None:
data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
self.target_temperature_low)
if self.external_temperature:
data[ATTR_EXTERNAL_TEMPERATURE] = self._convert_for_display(
self.external_temperature)
if self.smart_temperature:
data[ATTR_SMART_TEMPERATURE] = self.smart_temperature
if self.occupied:
data[ATTR_OCCUPIED] = self.occupied
if self.eco_target:
data[ATTR_ECO_TARGET] = self.eco_target
current_humidity = self.current_humidity
if current_humidity is not None:
data[ATTR_CURRENT_HUMIDITY] = current_humidity
return data
@property
def current_temperature(self):
return self.wink.current_temperature()
@property
def current_humidity(self):
if self.wink.current_humidity() is not None:
if self.wink.current_humidity() < 1:
return self.wink.current_humidity() * 100
return self.wink.current_humidity()
return None
@property
def external_temperature(self):
return self.wink.current_external_temperature()
@property
def smart_temperature(self):
return self.wink.current_smart_temperature()
@property
def eco_target(self):
return self.wink.eco_target()
@property
def occupied(self):
return self.wink.occupied()
@property
def current_operation(self):
if not self.wink.is_on():
current_op = STATE_OFF
else:
current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode())
if current_op == 'aux':
return STATE_HEAT
if current_op is None:
current_op = STATE_UNKNOWN
return current_op
@property
def target_humidity(self):
target_hum = None
if self.wink.current_humidifier_mode() == 'on':
if self.wink.current_humidifier_set_point() is not None:
target_hum = self.wink.current_humidifier_set_point() * 100
elif self.wink.current_dehumidifier_mode() == 'on':
if self.wink.current_dehumidifier_set_point() is not None:
target_hum = self.wink.current_dehumidifier_set_point() * 100
else:
target_hum = None
return target_hum
@property
def target_temperature(self):
if self.current_operation != STATE_AUTO and not self.is_away_mode_on:
if self.current_operation == STATE_COOL:
return self.wink.current_max_set_point()
elif self.current_operation == STATE_HEAT:
return self.wink.current_min_set_point()
return None
@property
def target_temperature_low(self):
if self.current_operation == STATE_AUTO:
return self.wink.current_min_set_point()
return None
@property
def target_temperature_high(self):
if self.current_operation == STATE_AUTO:
return self.wink.current_max_set_point()
return None
@property
def is_away_mode_on(self):
return self.wink.away()
@property
def is_aux_heat_on(self):
if 'aux' not in self.wink.hvac_modes():
return None
if self.wink.current_hvac_mode() == 'aux':
return True
return False
def set_temperature(self, **kwargs):
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.current_operation == STATE_COOL:
target_temp_high = target_temp
if self.current_operation == STATE_HEAT:
target_temp_low = target_temp
if target_temp_low is not None:
target_temp_low = target_temp_low
if target_temp_high is not None:
target_temp_high = target_temp_high
self.wink.set_temperature(target_temp_low, target_temp_high)
def set_operation_mode(self, operation_mode):
op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
if self.is_aux_heat_on and op_mode_to_set == STATE_HEAT:
return
self.wink.set_operation_mode(op_mode_to_set)
@property
def operation_list(self):
op_list = ['off']
modes = self.wink.hvac_modes()
for mode in modes:
if mode == 'aux':
continue
ha_mode = WINK_STATE_TO_HA.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
else:
error = "Invaid operation mode mapping. " + mode + \
" doesn't map. Please report this."
_LOGGER.error(error)
return op_list
def turn_away_mode_on(self):
self.wink.set_away_mode()
def turn_away_mode_off(self):
self.wink.set_away_mode(False)
@property
def current_fan_mode(self):
if self.wink.current_fan_mode() == 'on':
return STATE_ON
elif self.wink.current_fan_mode() == 'auto':
return STATE_AUTO
# No Fan available so disable slider
return None
@property
def fan_list(self):
if self.wink.has_fan():
return self.wink.fan_modes()
return None
def set_fan_mode(self, fan):
self.wink.set_fan_mode(fan.lower())
def turn_aux_heat_on(self):
self.wink.set_operation_mode('aux')
def turn_aux_heat_off(self):
self.set_operation_mode(STATE_HEAT)
@property
def min_temp(self):
minimum = 7 # Default minimum
min_min = self.wink.min_min_set_point()
min_max = self.wink.min_max_set_point()
return_value = minimum
if self.current_operation == STATE_HEAT:
if min_min:
return_value = min_min
else:
return_value = minimum
elif self.current_operation == STATE_COOL:
if min_max:
return_value = min_max
else:
return_value = minimum
elif self.current_operation == STATE_AUTO:
if min_min and min_max:
return_value = min(min_min, min_max)
else:
return_value = minimum
else:
return_value = minimum
return return_value
@property
def max_temp(self):
maximum = 35 # Default maximum
max_min = self.wink.max_min_set_point()
max_max = self.wink.max_max_set_point()
return_value = maximum
if self.current_operation == STATE_HEAT:
if max_min:
return_value = max_min
else:
return_value = maximum
elif self.current_operation == STATE_COOL:
if max_max:
return_value = max_max
else:
return_value = maximum
elif self.current_operation == STATE_AUTO:
if max_min and max_max:
return_value = min(max_min, max_max)
else:
return_value = maximum
else:
return_value = maximum
return return_value
class WinkAC(WinkDevice, ClimateDevice):
@property
def temperature_unit(self):
# The Wink API always returns temp in Celsius
return TEMP_CELSIUS
@property
def device_state_attributes(self):
data = {}
target_temp_high = self.target_temperature_high
target_temp_low = self.target_temperature_low
if target_temp_high is not None:
data[ATTR_TARGET_TEMP_HIGH] = self._convert_for_display(
self.target_temperature_high)
if target_temp_low is not None:
data[ATTR_TARGET_TEMP_LOW] = self._convert_for_display(
self.target_temperature_low)
data["total_consumption"] = self.wink.total_consumption()
data["schedule_enabled"] = self.wink.schedule_enabled()
return data
@property
def current_temperature(self):
return self.wink.current_temperature()
@property
def current_operation(self):
if not self.wink.is_on():
current_op = STATE_OFF
else:
current_op = WINK_STATE_TO_HA.get(self.wink.current_hvac_mode())
if current_op is None:
current_op = STATE_UNKNOWN
return current_op
@property
def operation_list(self):
op_list = ['off']
modes = self.wink.modes()
for mode in modes:
ha_mode = WINK_STATE_TO_HA.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
else:
error = "Invaid operation mode mapping. " + mode + \
" doesn't map. Please report this."
_LOGGER.error(error)
return op_list
def set_temperature(self, **kwargs):
target_temp = kwargs.get(ATTR_TEMPERATURE)
self.wink.set_temperature(target_temp)
def set_operation_mode(self, operation_mode):
op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
if op_mode_to_set == 'eco':
op_mode_to_set = 'auto_eco'
self.wink.set_operation_mode(op_mode_to_set)
@property
def target_temperature(self):
return self.wink.current_max_set_point()
@property
def current_fan_mode(self):
speed = self.wink.current_fan_speed()
if speed <= 0.4 and speed > 0.3:
return SPEED_LOW
elif speed <= 0.8 and speed > 0.5:
return SPEED_MEDIUM
elif speed <= 1.0 and speed > 0.8:
return SPEED_HIGH
return STATE_UNKNOWN
@property
def fan_list(self):
return [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
def set_fan_mode(self, fan):
if fan == SPEED_LOW:
speed = 0.4
elif fan == SPEED_MEDIUM:
speed = 0.8
elif fan == SPEED_HIGH:
speed = 1.0
self.wink.set_ac_fan_speed(speed)
class WinkWaterHeater(WinkDevice, ClimateDevice):
@property
def temperature_unit(self):
return TEMP_CELSIUS
@property
def device_state_attributes(self):
data = {}
data["vacation_mode"] = self.wink.vacation_mode_enabled()
data["rheem_type"] = self.wink.rheem_type()
return data
@property
def current_operation(self):
if not self.wink.is_on():
current_op = STATE_OFF
else:
current_op = WINK_STATE_TO_HA.get(self.wink.current_mode())
if current_op is None:
current_op = STATE_UNKNOWN
return current_op
@property
def operation_list(self):
op_list = ['off']
modes = self.wink.modes()
for mode in modes:
if mode == 'aux':
continue
ha_mode = WINK_STATE_TO_HA.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
else:
error = "Invaid operation mode mapping. " + mode + \
" doesn't map. Please report this."
_LOGGER.error(error)
return op_list
def set_temperature(self, **kwargs):
target_temp = kwargs.get(ATTR_TEMPERATURE)
self.wink.set_temperature(target_temp)
def set_operation_mode(self, operation_mode):
op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
self.wink.set_operation_mode(op_mode_to_set)
@property
def target_temperature(self):
return self.wink.current_set_point()
def turn_away_mode_on(self):
self.wink.set_vacation_mode(True)
def turn_away_mode_off(self):
self.wink.set_vacation_mode(False)
@property
def min_temp(self):
return self.wink.min_set_point()
@property
def max_temp(self):
return self.wink.max_set_point()
| true | true |
f72ceff1b9b1d547913aa773c1b821be3ae401f9 | 55 | py | Python | contest/abc120/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc120/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc120/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | A,B,C = map(int, input().split())
print(min(B//A,C))
| 18.333333 | 34 | 0.545455 | A,B,C = map(int, input().split())
print(min(B//A,C))
| true | true |
f72cf037943e51b4783520ebbf6f67e18bf38ba4 | 3,648 | py | Python | src/Calibration.py | Mohamed-Abdulaty/UDACITY-CarND-P2-Advanced-Lane-Lines | e5d5fdff45c523a4f17635897b9de4b2e50d273d | [
"MIT"
] | null | null | null | src/Calibration.py | Mohamed-Abdulaty/UDACITY-CarND-P2-Advanced-Lane-Lines | e5d5fdff45c523a4f17635897b9de4b2e50d273d | [
"MIT"
] | null | null | null | src/Calibration.py | Mohamed-Abdulaty/UDACITY-CarND-P2-Advanced-Lane-Lines | e5d5fdff45c523a4f17635897b9de4b2e50d273d | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
class Calibration:
def __init__(
self,
source_images_directory,
destination_image_sub_directory,
chessboard_shape,
logger
):
self.source_images_directory = source_images_directory
self.destination_image_sub_directory= destination_image_sub_directory
self.cornered_output_images = str(self.destination_image_sub_directory+'/Cornered')
self.undistorted_output_images = str(self.destination_image_sub_directory+'/Undistorted')
self.chessboard_x, self.chessboard_y= chessboard_shape
self.logger = logger
self.name_list_of_boards = os.listdir(self.source_images_directory)
self.number_of_boards = len(self.name_list_of_boards)
self.image_size = None
self.object_points = []
self.image_points = []
self.camera_matrix, self.distortion_coefficient = \
self.__calculate_calibration_parameters()
def get_calibration_parameters(self):
return self.camera_matrix, self.distortion_coefficient
def __calculate_calibration_parameters(self):
object_points = np.zeros((self.chessboard_x*self.chessboard_y, 3), np.float32)
object_points[:, :2] = np.mgrid[0:self.chessboard_x, 0:self.chessboard_y].T.reshape(-1, 2)
for img_name in self.name_list_of_boards:
# Read the image
image_path = '{}/{}'.format(str(self.source_images_directory), str(img_name))
image_obj = cv2.imread(image_path)
# Gray it
gray_image = cv2.cvtColor(image_obj, cv2.COLOR_BGR2GRAY)
self.image_size = gray_image.shape[::-1]
# Find its corners
ret, corners = cv2.findChessboardCorners(gray_image, (self.chessboard_x, self.chessboard_y), None)
if ret:
self.object_points.append(object_points)
self.image_points.append(corners)
# save image with corners
image = cv2.drawChessboardCorners(\
image_obj, \
(self.chessboard_y, self.chessboard_x), \
corners, \
ret)
# Saved image with corners
self.logger.save_image(str(self.cornered_output_images), img_name, image)
else:
self.logger.log_error('Can not find all needed corners in {}'.format(str(img_name)))
# Calibrate the camera
calibration_parameters = \
cv2.calibrateCamera(self.object_points, \
self.image_points, \
self.image_size, \
None, None)
# save corrected images
self.__save_undistorted_images(calibration_parameters[1], calibration_parameters[2])
# return onlt camera_matrix, and dis_coef
return calibration_parameters[1], calibration_parameters[2]
def __save_undistorted_images(self, camera_matrix, distortion_coef):
cornered_images_list = os.listdir(str('./results/'+self.cornered_output_images))
for cornered_img in cornered_images_list:
image_path = '{}/{}'.format(str('./results/'+self.cornered_output_images), str(cornered_img))
image_obj = cv2.imread(image_path)
self.logger.save_image( \
str(self.undistorted_output_images), \
cornered_img,
cv2.undistort(image_obj, camera_matrix, distortion_coef, None, camera_matrix)) | 41.931034 | 110 | 0.621436 | import os
import cv2
import numpy as np
class Calibration:
def __init__(
self,
source_images_directory,
destination_image_sub_directory,
chessboard_shape,
logger
):
self.source_images_directory = source_images_directory
self.destination_image_sub_directory= destination_image_sub_directory
self.cornered_output_images = str(self.destination_image_sub_directory+'/Cornered')
self.undistorted_output_images = str(self.destination_image_sub_directory+'/Undistorted')
self.chessboard_x, self.chessboard_y= chessboard_shape
self.logger = logger
self.name_list_of_boards = os.listdir(self.source_images_directory)
self.number_of_boards = len(self.name_list_of_boards)
self.image_size = None
self.object_points = []
self.image_points = []
self.camera_matrix, self.distortion_coefficient = \
self.__calculate_calibration_parameters()
def get_calibration_parameters(self):
return self.camera_matrix, self.distortion_coefficient
def __calculate_calibration_parameters(self):
object_points = np.zeros((self.chessboard_x*self.chessboard_y, 3), np.float32)
object_points[:, :2] = np.mgrid[0:self.chessboard_x, 0:self.chessboard_y].T.reshape(-1, 2)
for img_name in self.name_list_of_boards:
image_path = '{}/{}'.format(str(self.source_images_directory), str(img_name))
image_obj = cv2.imread(image_path)
gray_image = cv2.cvtColor(image_obj, cv2.COLOR_BGR2GRAY)
self.image_size = gray_image.shape[::-1]
ret, corners = cv2.findChessboardCorners(gray_image, (self.chessboard_x, self.chessboard_y), None)
if ret:
self.object_points.append(object_points)
self.image_points.append(corners)
image = cv2.drawChessboardCorners(\
image_obj, \
(self.chessboard_y, self.chessboard_x), \
corners, \
ret)
self.logger.save_image(str(self.cornered_output_images), img_name, image)
else:
self.logger.log_error('Can not find all needed corners in {}'.format(str(img_name)))
calibration_parameters = \
cv2.calibrateCamera(self.object_points, \
self.image_points, \
self.image_size, \
None, None)
self.__save_undistorted_images(calibration_parameters[1], calibration_parameters[2])
return calibration_parameters[1], calibration_parameters[2]
def __save_undistorted_images(self, camera_matrix, distortion_coef):
cornered_images_list = os.listdir(str('./results/'+self.cornered_output_images))
for cornered_img in cornered_images_list:
image_path = '{}/{}'.format(str('./results/'+self.cornered_output_images), str(cornered_img))
image_obj = cv2.imread(image_path)
self.logger.save_image( \
str(self.undistorted_output_images), \
cornered_img,
cv2.undistort(image_obj, camera_matrix, distortion_coef, None, camera_matrix)) | true | true |
f72cf0613aadf94eb6e2c98f6e1c046325378d82 | 1,979 | py | Python | dove/utils/bed.py | barslmn/dove | df6344286633422219c0e93e15d4327f9d082041 | [
"MIT"
] | null | null | null | dove/utils/bed.py | barslmn/dove | df6344286633422219c0e93e15d4327f9d082041 | [
"MIT"
] | null | null | null | dove/utils/bed.py | barslmn/dove | df6344286633422219c0e93e15d4327f9d082041 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'bars'
from io import StringIO
import pandas as pd
from collections import defaultdict
class Bed:
"""description"""
def __init__(self, bed_file, mode='file'):
self.bed_file = bed_file
self.mode = mode
def get_header(self):
lines_to_skip = 0
header = defaultdict(list)
if self.mode == 'str':
for line in self.bed_file.split('\n'):
if line.startswith('track'):
header['track'].append(line)
lines_to_skip += 1
elif line.startswith('browser'):
header['browser'].append(line)
lines_to_skip += 1
else:
break
else:
with open(self.bed_file) as f:
lines = f.read().splitlines()
for line in lines:
if line.startswith('track'):
header['track'].append(line)
lines_to_skip += 1
elif line.startswith('browser'):
header['browser'].append(line)
lines_to_skip += 1
else:
break
return lines_to_skip, header
def from_file(self):
lines_to_skip, header = self.get_header()
df_bed = pd.read_csv(
self.bed_file,
sep='\t',
usecols=[0, 1, 2],
names=['CHR', 'START', 'END'],
dtype={'START': int, 'END': int},
skiprows=lines_to_skip
)
return df_bed
def from_string(self):
lines_to_skip, header = self.get_header()
df_bed = pd.read_csv(
StringIO(self.bed_file),
sep='\t',
usecols=[0, 1, 2],
names=['CHR', 'START', 'END'],
dtype={'START': int, 'END': int},
skiprows=lines_to_skip
)
return df_bed
| 29.984848 | 54 | 0.471956 |
__author__ = 'bars'
from io import StringIO
import pandas as pd
from collections import defaultdict
class Bed:
def __init__(self, bed_file, mode='file'):
self.bed_file = bed_file
self.mode = mode
def get_header(self):
lines_to_skip = 0
header = defaultdict(list)
if self.mode == 'str':
for line in self.bed_file.split('\n'):
if line.startswith('track'):
header['track'].append(line)
lines_to_skip += 1
elif line.startswith('browser'):
header['browser'].append(line)
lines_to_skip += 1
else:
break
else:
with open(self.bed_file) as f:
lines = f.read().splitlines()
for line in lines:
if line.startswith('track'):
header['track'].append(line)
lines_to_skip += 1
elif line.startswith('browser'):
header['browser'].append(line)
lines_to_skip += 1
else:
break
return lines_to_skip, header
def from_file(self):
lines_to_skip, header = self.get_header()
df_bed = pd.read_csv(
self.bed_file,
sep='\t',
usecols=[0, 1, 2],
names=['CHR', 'START', 'END'],
dtype={'START': int, 'END': int},
skiprows=lines_to_skip
)
return df_bed
def from_string(self):
lines_to_skip, header = self.get_header()
df_bed = pd.read_csv(
StringIO(self.bed_file),
sep='\t',
usecols=[0, 1, 2],
names=['CHR', 'START', 'END'],
dtype={'START': int, 'END': int},
skiprows=lines_to_skip
)
return df_bed
| true | true |
f72cf0c8eddcbcd99e6cd753b8ca73ce4cc13dcd | 10,400 | py | Python | nnutils/dibr_kaolin.py | junzhezhang/cmr | f0b2ded813535493f124852ce64b26efa761a35c | [
"MIT"
] | null | null | null | nnutils/dibr_kaolin.py | junzhezhang/cmr | f0b2ded813535493f124852ce64b26efa761a35c | [
"MIT"
] | null | null | null | nnutils/dibr_kaolin.py | junzhezhang/cmr | f0b2ded813535493f124852ce64b26efa761a35c | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.misc
import tqdm
import cv2
import torch
from nnutils import geom_utils
# from kaolin.graphics.dib_renderer.rasterizer import linear_rasterizer
# from kaolin.graphics.dib_renderer.utils import datanormalize
# from kaolin.graphics.dib_renderer.renderer.phongrender import PhongRender
from kaolin.graphics.dib_renderer.renderer.texrender import TexRender
from kaolin.graphics.dib_renderer.utils.perspective import lookatnp, perspectiveprojectionnp
from kaolin.graphics.dib_renderer.utils.mesh import loadobj, face2pfmtx, loadobjtex, savemesh
def quaternion_to_matrix(quaternions):
"""
Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
class NeuralRenderer(torch.nn.Module):
"""
replace NeuralRenderer from nmr.py with the kaolin's
"""
# 512 --> 256 TODO
def __init__(self, img_size=256,uv_sampler=None):
self.img_size = img_size
super(NeuralRenderer, self).__init__()
self.renderer = TexRender(height=img_size,width=img_size)
# self.renderer = NeuralMeshRenderer(image_size=img_size, camera_mode='look_at',perspective=False,viewing_angle=30,light_intensity_ambient=0.8)
self.offset_z = 5.
self.proj_fn = geom_utils.orthographic_proj_withz
if uv_sampler is not None:
self.uv_sampler = uv_sampler.clone()
else:
print('no uv sampler')
print('DIB-R...')
def ambient_light_only(self):
# Make light only ambient.
# self.renderer.light_intensity_ambient = 1
# self.renderer.light_intensity_directional = 0
print("TODO: ambient_light_only")
pass
def set_bgcolor(self, color):
# self.renderer.background_color = color
print("TODO: set_bgcolor")
pass
def project_points(self, verts, cams):
proj = self.proj_fn(verts, cams)
return proj[:, :, :2]
def forward(self, vertices, faces, cams, textures=None):
### TODO save mesh
if textures is not None:
v_np = vertices[0].detach().cpu().numpy()
f_np = faces[0].detach().cpu().numpy()
file_name = 'vis/bird.obj'
try:
savemesh(v_np, f_np, file_name)
except:
import pdb; pdb.set_trace()
# ours = False
ours = True
if ours:
translation = cams[:,:3]
quant = cams[:,-4:]
tfcamviewmtx_bx3x3 = quaternion_to_matrix(quant)
tfcamshift_bx3 = - translation
# camfovy = 45 / 180.0 * np.pi
camfovy = 90 / 180.0 * np.pi
camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)
tfcamproj_3x1 = torch.from_numpy(camprojmtx).cuda()
tfcameras = [tfcamviewmtx_bx3x3,
tfcamshift_bx3,
tfcamproj_3x1]
else:
tfcameras = self.get_sample_cams(bs=vertices.shape[0])
# import pdb; pdb.set_trace()
print('1:',tfcameras[0].shape)
print('2:',tfcameras[1].shape)
print('3:',tfcameras[2].shape)
if textures is None:
tex_flag = False
# shape = [vertices.shape[0], 1280, 6,6,6,3]
# textures = torch.ones(vertices.shape[0], 1280, 6,6,6,3).cuda()*256
textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()
else:
tex_flag = True
# # TODO try with convmesh output
imfile = '/mnt/lustre/zhangjunzhe/tm/convmesh/output/pretrained_cub_512x512_class/mesh_0.png'
# textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32) / 255.0
textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32)
dim = (self.img_size, self.img_size)
resized = cv2.resize(textures_np, dim, interpolation = cv2.INTER_AREA)
textures = torch.from_numpy(resized).cuda().unsqueeze(0)
textures = textures.permute([0, 3, 1, 2])
# print('tex shape:', textures.shape)
# # import pdb; pdb.set_trace()
# textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()
# print(texture)
# renderer.set_smooth(pfmtx) # TODO for phong renderer
tfp_bxpx3 = vertices
tff_fx3 = faces[0] # TODO to verify if fixed topology within a batch
# tff_fx3 = tff_fx3.type(int64)
tff_fx3 = tff_fx3.type(torch.long)
points = [tfp_bxpx3, tff_fx3]
uvs = self.uv_sampler
# TODO texture to clone?
# TODOL ft_fx3
# ft_fx3??? TODO
#only keep rgb, no alpha and depth
print('uv shape:',uvs.shape)
imgs = self.renderer(points=points,
cameras=tfcameras,
uv_bxpx2 = uvs,
texture_bx3xthxtw=textures,
ft_fx3=None)[0]
if tex_flag:
for i, img in enumerate(imgs):
img = img.detach().cpu().numpy()
cv2.imwrite('./vis/lam'+str(i)+'.jpg',img*255)
print('saved img')
print('!!!imgs:',imgs.shape)
imgs = imgs.permute([0,3,1,2])
print('new shape:',imgs.shape)
# print(' cam:',cams)
return imgs
def get_sample_cams(self,bs):
##########################################################
# campos = np.array([0, 0, 1.5], dtype=np.float32) # where camera it is
# campos = np.array([0, 0, 4], dtype=np.float32)
# campos = np.array([0, 4, 0], dtype=np.float32)
campos = np.array([4, 0, 0], dtype=np.float32)
camcenter = np.array([0, 0, 0], dtype=np.float32) # where camra is looking at
# camup = np.array([-1, 1, 0], dtype=np.float32) # y axis of camera view
# camup = np.array([-1, 0, 1], dtype=np.float32)
# camup = np.array([0, -1, 1], dtype=np.float32)
# camup = np.array([0, 1, -1], dtype=np.float32)
# camup = np.array([1, -1, 0], dtype=np.float32)
# camup = np.array([1, 0, -1], dtype=np.float32)
# camup = np.array([1, 1, 0], dtype=np.float32)
# camup = np.array([-1, 0, -1], dtype=np.float32)
camup = np.array([1, 0, 1], dtype=np.float32)
camviewmtx, camviewshift = lookatnp(campos.reshape(3, 1), camcenter.reshape(3, 1), camup.reshape(3, 1))
camviewshift = -np.dot(camviewmtx.transpose(), camviewshift)
camfovy = 45 / 180.0 * np.pi
camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)
#####################################################
# tfp_px3 = torch.from_numpy(p)
# tfp_px3.requires_grad = True
# tff_fx3 = torch.from_numpy(f)
# tfuv_tx2 = torch.from_numpy(uv)
# tfuv_tx2.requires_grad = True
# tfft_fx3 = torch.from_numpy(ft)
# tftex_thxtwx3 = torch.from_numpy(np.ascontiguousarray(texturenp))
# tftex_thxtwx3.requires_grad = True
tfcamviewmtx = torch.from_numpy(camviewmtx)
tfcamshift = torch.from_numpy(camviewshift)
tfcamproj = torch.from_numpy(camprojmtx)
##########################################################
# tfp_1xpx3 = torch.unsqueeze(tfp_px3, dim=0)
# tfuv_1xtx2 = torch.unsqueeze(tfuv_tx2, dim=0)
# tftex_1xthxtwx3 = torch.unsqueeze(tftex_thxtwx3, dim=0)
tfcamviewmtx_1x3x3 = torch.unsqueeze(tfcamviewmtx, dim=0)
tfcamshift_1x3 = tfcamshift.view(-1, 3)
tfcamproj_3x1 = tfcamproj
# bs = 4
# tfp_bxpx3 = tfp_1xpx3.repeat([bs, 1, 1])
# tfuv_bxtx2 = tfuv_1xtx2.repeat([bs, 1, 1])
# tftex_bxthxtwx3 = tftex_1xthxtwx3.repeat([bs, 1, 1, 1])
tfcamviewmtx_bx3x3 = tfcamviewmtx_1x3x3.repeat([bs, 1, 1])
tfcamshift_bx3 = tfcamshift_1x3.repeat([bs, 1])
tfcameras = [tfcamviewmtx_bx3x3.cuda(),
tfcamshift_bx3.cuda(),
tfcamproj_3x1.cuda()]
return tfcameras
# def compute_uvsampler(self,verts_t, faces_t, tex_size=2):
# """
# NOTE: copied from utils/mesh.py
# tex_size texture resolution per face default = 6
# TODO : merge with backbone
# For this mesh, pre-computes the UV coordinates for
# F x T x T points.
# Returns F x T x T x 2
# """
# verts = verts_t[0].clone().detach().cpu().numpy()
# faces = faces_t[0].clone().detach().cpu().numpy()
# # import pdb; pdb.set_trace()
# alpha = np.arange(tex_size, dtype=np.float) / (tex_size-1)
# beta = np.arange(tex_size, dtype=np.float) / (tex_size-1)
# import itertools
# # Barycentric coordinate values
# coords = np.stack([p for p in itertools.product(*[alpha, beta])])
# vs = verts[faces]
# # Compute alpha, beta (this is the same order as NMR)
# v2 = vs[:, 2]
# v0v2 = vs[:, 0] - vs[:, 2]
# v1v2 = vs[:, 1] - vs[:, 2]
# # F x 3 x T*2
# samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 3, 1)
# # F x T*2 x 3 points on the sphere
# samples = np.transpose(samples, (0, 2, 1))
# # Now convert these to uv.
# uv = get_spherical_coords(samples.reshape(-1, 3))
# # uv = uv.reshape(-1, len(coords), 2)
# uv = uv.reshape(-1, tex_size, tex_size, 2)
# return uv | 38.80597 | 151 | 0.559038 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.misc
import tqdm
import cv2
import torch
from nnutils import geom_utils
from kaolin.graphics.dib_renderer.renderer.texrender import TexRender
from kaolin.graphics.dib_renderer.utils.perspective import lookatnp, perspectiveprojectionnp
from kaolin.graphics.dib_renderer.utils.mesh import loadobj, face2pfmtx, loadobjtex, savemesh
def quaternion_to_matrix(quaternions):
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
class NeuralRenderer(torch.nn.Module):
def __init__(self, img_size=256,uv_sampler=None):
self.img_size = img_size
super(NeuralRenderer, self).__init__()
self.renderer = TexRender(height=img_size,width=img_size)
self.offset_z = 5.
self.proj_fn = geom_utils.orthographic_proj_withz
if uv_sampler is not None:
self.uv_sampler = uv_sampler.clone()
else:
print('no uv sampler')
print('DIB-R...')
def ambient_light_only(self):
print("TODO: ambient_light_only")
pass
def set_bgcolor(self, color):
print("TODO: set_bgcolor")
pass
def project_points(self, verts, cams):
proj = self.proj_fn(verts, cams)
return proj[:, :, :2]
def forward(self, vertices, faces, cams, textures=None):
v_np = vertices[0].detach().cpu().numpy()
f_np = faces[0].detach().cpu().numpy()
file_name = 'vis/bird.obj'
try:
savemesh(v_np, f_np, file_name)
except:
import pdb; pdb.set_trace()
ours = True
if ours:
translation = cams[:,:3]
quant = cams[:,-4:]
tfcamviewmtx_bx3x3 = quaternion_to_matrix(quant)
tfcamshift_bx3 = - translation
camfovy = 90 / 180.0 * np.pi
camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)
tfcamproj_3x1 = torch.from_numpy(camprojmtx).cuda()
tfcameras = [tfcamviewmtx_bx3x3,
tfcamshift_bx3,
tfcamproj_3x1]
else:
tfcameras = self.get_sample_cams(bs=vertices.shape[0])
print('1:',tfcameras[0].shape)
print('2:',tfcameras[1].shape)
print('3:',tfcameras[2].shape)
if textures is None:
tex_flag = False
textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()
else:
tex_flag = True
tre/zhangjunzhe/tm/convmesh/output/pretrained_cub_512x512_class/mesh_0.png'
textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32)
dim = (self.img_size, self.img_size)
resized = cv2.resize(textures_np, dim, interpolation = cv2.INTER_AREA)
textures = torch.from_numpy(resized).cuda().unsqueeze(0)
textures = textures.permute([0, 3, 1, 2])
ices
tff_fx3 = faces[0]
tff_fx3 = tff_fx3.type(torch.long)
points = [tfp_bxpx3, tff_fx3]
uvs = self.uv_sampler
print('uv shape:',uvs.shape)
imgs = self.renderer(points=points,
cameras=tfcameras,
uv_bxpx2 = uvs,
texture_bx3xthxtw=textures,
ft_fx3=None)[0]
if tex_flag:
for i, img in enumerate(imgs):
img = img.detach().cpu().numpy()
cv2.imwrite('./vis/lam'+str(i)+'.jpg',img*255)
print('saved img')
print('!!!imgs:',imgs.shape)
imgs = imgs.permute([0,3,1,2])
print('new shape:',imgs.shape)
return imgs
def get_sample_cams(self,bs):
| true | true |
f72cf0ce56facdb68d763f793fbd3901d57d4555 | 4,872 | py | Python | sensors/jira_sensor.py | viveksyngh/stackstorm-jira | d08bc9b78bb5a5cce1c6e84c1f947f1ba3088d26 | [
"Apache-2.0"
] | null | null | null | sensors/jira_sensor.py | viveksyngh/stackstorm-jira | d08bc9b78bb5a5cce1c6e84c1f947f1ba3088d26 | [
"Apache-2.0"
] | null | null | null | sensors/jira_sensor.py | viveksyngh/stackstorm-jira | d08bc9b78bb5a5cce1c6e84c1f947f1ba3088d26 | [
"Apache-2.0"
] | 1 | 2020-01-22T16:35:49.000Z | 2020-01-22T16:35:49.000Z | # See ./requirements.txt for requirements.
import os
from jira.client import JIRA
from st2reactor.sensor.base import PollingSensor
class JIRASensor(PollingSensor):
'''
Sensor will monitor for any new projects created in JIRA and
emit trigger instance when one is created.
'''
def __init__(self, sensor_service, config=None, poll_interval=5):
super(JIRASensor, self).__init__(sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self._jira_url = None
# The Consumer Key created while setting up the 'Incoming Authentication' in
# JIRA for the Application Link.
self._consumer_key = u''
self._rsa_key = None
self._jira_client = None
self._access_token = u''
self._access_secret = u''
self._projects_available = None
self._poll_interval = 30
self._project = None
self._issues_in_project = None
self._jql_query = None
self._trigger_name = 'issues_tracker'
self._trigger_pack = 'jira'
self._trigger_ref = '.'.join([self._trigger_pack, self._trigger_name])
def _read_cert(self, file_path):
with open(file_path) as f:
return f.read()
def setup(self):
self._jira_url = self._config['url']
auth_method = self._config['auth_method']
options = {'server': self._config['url'],
'verify': self._config['verify']}
# Getting client cert configuration
cert_file_path = self._config['client_cert_file']
key_file_path = self._config['client_key_file']
if cert_file_path and key_file_path:
options['client_cert'] = (cert_file_path, key_file_path)
if auth_method == 'oauth':
rsa_cert_file = self._config['rsa_cert_file']
if not os.path.exists(rsa_cert_file):
raise Exception(
'Cert file for JIRA OAuth not found at %s.' % rsa_cert_file
)
self._rsa_key = self._read_cert(rsa_cert_file)
self._poll_interval = self._config.get(
'poll_interval', self._poll_interval)
oauth_creds = {
'access_token': self._config['oauth_token'],
'access_token_secret': self._config['oauth_secret'],
'consumer_key': self._config['consumer_key'],
'key_cert': self._rsa_key,
}
self._jira_client = JIRA(options=options, oauth=oauth_creds)
elif auth_method == 'basic':
basic_creds = (self._config['username'], self._config['password'])
self._jira_client = JIRA(options=options, basic_auth=basic_creds)
else:
msg = ('You must set auth_method to either "oauth"',
'or "basic" your jira.yaml config file.',
)
raise Exception(msg)
if self._projects_available is None:
self._projects_available = set()
for proj in self._jira_client.projects():
self._projects_available.add(proj.key)
self._project = self._config.get('project', None)
if not self._project or self._project not in self._projects_available:
raise Exception('Invalid project (%s) to track.' % self._project)
self._jql_query = 'project=%s' % self._project
all_issues = self._jira_client.search_issues(
self._jql_query, maxResults=None)
self._issues_in_project = {issue.key: issue for issue in all_issues}
def poll(self):
self._detect_new_issues()
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _detect_new_issues(self):
new_issues = self._jira_client.search_issues(
self._jql_query, maxResults=50, startAt=0
)
for issue in new_issues:
if issue.key not in self._issues_in_project:
self._dispatch_issues_trigger(issue)
self._issues_in_project[issue.key] = issue
def _dispatch_issues_trigger(self, issue):
trigger = self._trigger_ref
payload = {}
payload['issue_name'] = issue.key
payload['issue_url'] = issue.self
payload['issue_browse_url'] = self._jira_url + '/browse/' + issue.key
payload['project'] = self._project
payload['created'] = issue.raw['fields']['created']
payload['assignee'] = issue.raw['fields']['assignee']
payload['fix_versions'] = issue.raw['fields']['fixVersions']
payload['issue_type'] = issue.raw['fields']['issuetype']['name']
self._sensor_service.dispatch(trigger, payload)
| 37.767442 | 84 | 0.610016 |
import os
from jira.client import JIRA
from st2reactor.sensor.base import PollingSensor
class JIRASensor(PollingSensor):
def __init__(self, sensor_service, config=None, poll_interval=5):
super(JIRASensor, self).__init__(sensor_service=sensor_service,
config=config,
poll_interval=poll_interval)
self._jira_url = None
self._consumer_key = u''
self._rsa_key = None
self._jira_client = None
self._access_token = u''
self._access_secret = u''
self._projects_available = None
self._poll_interval = 30
self._project = None
self._issues_in_project = None
self._jql_query = None
self._trigger_name = 'issues_tracker'
self._trigger_pack = 'jira'
self._trigger_ref = '.'.join([self._trigger_pack, self._trigger_name])
def _read_cert(self, file_path):
with open(file_path) as f:
return f.read()
def setup(self):
self._jira_url = self._config['url']
auth_method = self._config['auth_method']
options = {'server': self._config['url'],
'verify': self._config['verify']}
cert_file_path = self._config['client_cert_file']
key_file_path = self._config['client_key_file']
if cert_file_path and key_file_path:
options['client_cert'] = (cert_file_path, key_file_path)
if auth_method == 'oauth':
rsa_cert_file = self._config['rsa_cert_file']
if not os.path.exists(rsa_cert_file):
raise Exception(
'Cert file for JIRA OAuth not found at %s.' % rsa_cert_file
)
self._rsa_key = self._read_cert(rsa_cert_file)
self._poll_interval = self._config.get(
'poll_interval', self._poll_interval)
oauth_creds = {
'access_token': self._config['oauth_token'],
'access_token_secret': self._config['oauth_secret'],
'consumer_key': self._config['consumer_key'],
'key_cert': self._rsa_key,
}
self._jira_client = JIRA(options=options, oauth=oauth_creds)
elif auth_method == 'basic':
basic_creds = (self._config['username'], self._config['password'])
self._jira_client = JIRA(options=options, basic_auth=basic_creds)
else:
msg = ('You must set auth_method to either "oauth"',
'or "basic" your jira.yaml config file.',
)
raise Exception(msg)
if self._projects_available is None:
self._projects_available = set()
for proj in self._jira_client.projects():
self._projects_available.add(proj.key)
self._project = self._config.get('project', None)
if not self._project or self._project not in self._projects_available:
raise Exception('Invalid project (%s) to track.' % self._project)
self._jql_query = 'project=%s' % self._project
all_issues = self._jira_client.search_issues(
self._jql_query, maxResults=None)
self._issues_in_project = {issue.key: issue for issue in all_issues}
def poll(self):
self._detect_new_issues()
def cleanup(self):
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _detect_new_issues(self):
new_issues = self._jira_client.search_issues(
self._jql_query, maxResults=50, startAt=0
)
for issue in new_issues:
if issue.key not in self._issues_in_project:
self._dispatch_issues_trigger(issue)
self._issues_in_project[issue.key] = issue
def _dispatch_issues_trigger(self, issue):
trigger = self._trigger_ref
payload = {}
payload['issue_name'] = issue.key
payload['issue_url'] = issue.self
payload['issue_browse_url'] = self._jira_url + '/browse/' + issue.key
payload['project'] = self._project
payload['created'] = issue.raw['fields']['created']
payload['assignee'] = issue.raw['fields']['assignee']
payload['fix_versions'] = issue.raw['fields']['fixVersions']
payload['issue_type'] = issue.raw['fields']['issuetype']['name']
self._sensor_service.dispatch(trigger, payload)
| true | true |
f72cf166961a193b694eac3676ad404f15972d73 | 6,202 | py | Python | __init__.py | kriswans/logBlizzard | 56ac597b4a499fa331742d441cb42c8c99360e0e | [
"MIT"
] | null | null | null | __init__.py | kriswans/logBlizzard | 56ac597b4a499fa331742d441cb42c8c99360e0e | [
"MIT"
] | null | null | null | __init__.py | kriswans/logBlizzard | 56ac597b4a499fa331742d441cb42c8c99360e0e | [
"MIT"
] | null | null | null | """
Author: Kris Swanson, kriswans@cisco.com
Tested with Python 3.6.1 on WIN10
"""
import socket
import struct
import time
import sys
import multiprocessing
import datetime
import glob
import json
from crypto import Crypto as cryp
from syslog import syslog
from nodemanager import NodeManager as nm
from localsearch import SearchLocal as sl
def logMonitor_Rx(password,params):
    """
    Receive loop: join the log multicast group, decrypt incoming messages,
    serve quick-search requests from the in-memory buffer, and flush
    buffered log entries to JSON files on disk in batches.

    password -- symmetric key used by crypto.Crypto to decrypt payloads.
    params   -- local node parameters (node_num, role, cluster_id, localnode).
    """
    print("Starting Rx Process...\n")
    with open('network_cfg.json','r') as nwc:
        nw=json.load(nwc)
    LOGMSG_GRP = nw['LOGMSG_GRP']
    LOGMSG_PORT = nw['LOGMSG_PORT']
    SCH_GRP = nw['SCH_GRP']
    SCH_PORT = nw['SCH_PORT']
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('', LOGMSG_PORT)) # use LOGMSG_GRP instead of '' to listen only
                                 # to LOGMSG_GRP, not all groups on LOGMSG_PORT
    mreq = struct.pack("4sl", socket.inet_aton(LOGMSG_GRP), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    # Messages carrying this node's own tag are ignored below.
    filter_tag='%(node_num)s:%(role)s:%(cluster_id)s:%(localnode)s' % params
    print(filter_tag)
    ts = 0
    i=0
    dcdmsg=''
    search_list=[]
    # Control tags that may appear in incoming payloads.
    quick_search_tag='LogQSearch:::'
    write_mem_tag='!WRITECACHE!'
    zero_disk_tag='!DELETEDISKCACHE!'
    zero_mem_tag='!DELETEMEMCACHE!'
    ts_start=time.time()
    log_name='msglog_'+str(ts_start)+'.json'
    schjob=[]
    while True:
        try:
            search=False
            rx_msg=sock.recv(2048)
            dcdmsg=rx_msg.decode("utf-8")
            dcdmsg=bytes(dcdmsg,'ascii')
            dcdmsg=cryp.DecryptMsg(dcdmsg,password)
            # Quick-search requests are answered from the in-memory buffer.
            if quick_search_tag in dcdmsg:
                search=True
                print('quick search!')
                sl.searchMem(search_list,dcdmsg,password,'off')
            # Buffer ordinary log messages that did not originate here.
            if filter_tag not in dcdmsg and search==False:
                jlm=json.loads(dcdmsg)
                search_list.append({"source_time":jlm["source_time"],'sending_node':jlm['sending_node'],'sending_hostname':jlm['sending_hostname'],"cluster":params["cluster_id"],'orig_message':jlm['orig_message'],'orig_addr':jlm['orig_addr']})
                i+=1
                # Every 10 buffered messages, snapshot the buffer to a temp file.
                if i % 10 == 0:
                    with open ('msglog_temp.json','w') as log:
                        json.dump(search_list,log)
                    continue
                # Rotate to a new timestamped log file when i hits a multiple
                # of 105 (skipped when the count is also a multiple of 10,
                # because the branch above already continued).
                if i % 105 == 0:
                    ts_start=time.time()
                    log_name='msglog_'+str(ts_start)+'.json'
                    with open (log_name,'w') as log:
                        json.dump(search_list,log)
                    search_list=[]
                    continue
            else:
                continue
        except:
            # Keep the listener alive on any decrypt/parse/socket error.
            print('Rx Process Exception')
            pass
def logMonitor_Tx(msg, params,password, nw):
    """
    Transmit loop: tail syslog.log and multicast each new line, encrypted.

    Also spawns the helper processes (heartbeat, message-tag generator and,
    for 'RxTx' roles, the Rx listener, deep search and syslog collector).

    msg      -- placeholder; overwritten inside the send loop.
    params   -- local node parameters (localnode, role, cluster_id, ...).
    password -- symmetric key used to encrypt outgoing payloads.
    nw       -- network config dict with multicast group/port settings.
    """
    LOGMSG_GRP = nw['LOGMSG_GRP']
    LOGMSG_PORT = nw['LOGMSG_PORT']
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
    print("Starting Tx process...\n")
    localnode=params['localnode']
    role=params['role']
    node=params['localnode']
    cluster=params['cluster_id']
    hostname=(socket.gethostname())
    jobs=[]
    # Heartbeat process so peers know this node is alive.
    z = multiprocessing.Process(target=nm.infHBeat,args=(params,nw))
    jobs.append(z)
    z.daemon = True
    z.start()
    n = multiprocessing.Process(target=nm.messageTagGen,args=(nw,))
    jobs.append(n)
    n.daemon = True
    n.start()
    # Receive-capable nodes also run the Rx listener, the deep-search
    # worker and the local syslog collector.
    if role == 'RxTx':
        p = multiprocessing.Process(target=logMonitor_Rx,args=(password,params,))
        jobs.append(p)
        p.daemon = True
        p.start()
        ds =multiprocessing.Process(target=sl.deepSearch)
        jobs.append(ds)
        ds.daemon = True
        ds.start()
        q = multiprocessing.Process(target=syslog)
        jobs.append(q)
        q.daemon = True
        q.start()
    lenfr=0
    send_throttle=2
    # lfr holds [lines already sent, lines currently in the file].
    lfr=[0,0]
    while True:
        lfr[0]=lfr[1]
        # Truncate the log once it grows past 100 lines and start over.
        if max(lfr) > 100:
            with open ('syslog.log','w') as f:
                f.close()
            lfr=[0,0]
        time.sleep(send_throttle)
        try:
            with open ('droplist.json','r') as dlj:
                drop_tag=json.load(dlj)
                drop_tag=str(drop_tag)
        except :
            print('possible JSONDecodeError')
            drop_tag='[]'
            pass
        while True:
            with open('syslog.log','r') as f:
                fr=f.readlines()
                lfr[1]=len(fr)
                # Send only the lines added since the previous pass.
                if lfr[1] > lfr[0]:
                    msg=''
                    for i in fr[lfr[0]:lfr[1]]:
                        msg=i.rstrip()
                        parse_msg=json.loads(msg)
                        ts = time.time()
                        msg={'source_time':ts,'sending_node':localnode,'sending_hostname':hostname,'orig_message':parse_msg['log_message'],'orig_addr':parse_msg['orig_addr'],'drop_tag':drop_tag}
                        msg=json.dumps(msg)
                        msg=bytes(msg, "ascii")
                        msg=cryp.EncryptMsg(msg,password)
                        try:
                            sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT))
                        except OSError:
                            # Tx buffer overran; report how many lines we tried to flush.
                            msg = ("Attempting to send %s log messages from overran Tx buffer" % str(len(fr)))
                            msg=localnode+'@'+hostname+"# "+'"'+msg+'"'+drop_tag
                            msg=bytes(msg, "ascii")
                            msg=cryp.EncryptMsg(msg,password)
                            sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT))
                            pass
                if lfr[0] == lfr[1]:
                    pass
                else:
                    pass
            break
    sys.exit()
"""
main fn to pull user info and kick off logMonitor_Tx fn. logMonitor_Tx kicks off heartbeat and Rx functions.
"""
def main():
    """Load node parameters and the shared password, then start the Tx loop.

    logMonitor_Tx itself spawns the heartbeat process and, for RxTx roles,
    the Rx listener and helper processes.
    """
    params, nw = nm.localParams()
    # Shared symmetric password used to encrypt/decrypt multicast messages.
    with open('pwf', 'r') as p:
        password = p.read().rstrip()
    jobs = []
    msg = None
    # Bug fix: the original used target=logMonitor_Tx(msg, params, password, nw),
    # which *called* the function in this process (blocking forever) instead of
    # running it in the child. Pass the callable and its args separately.
    r = multiprocessing.Process(target=logMonitor_Tx,
                                args=(msg, params, password, nw))
    jobs.append(r)
    r.start()
# Script entry point.
if __name__ == '__main__':
    main()
| 27.442478 | 241 | 0.565946 |
import socket
import struct
import time
import sys
import multiprocessing
import datetime
import glob
import json
from crypto import Crypto as cryp
from syslog import syslog
from nodemanager import NodeManager as nm
from localsearch import SearchLocal as sl
def logMonitor_Rx(password,params):
print("Starting Rx Process...\n")
with open('network_cfg.json','r') as nwc:
nw=json.load(nwc)
LOGMSG_GRP = nw['LOGMSG_GRP']
LOGMSG_PORT = nw['LOGMSG_PORT']
SCH_GRP = nw['SCH_GRP']
SCH_PORT = nw['SCH_PORT']
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', LOGMSG_PORT))
mreq = struct.pack("4sl", socket.inet_aton(LOGMSG_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
filter_tag='%(node_num)s:%(role)s:%(cluster_id)s:%(localnode)s' % params
print(filter_tag)
ts = 0
i=0
dcdmsg=''
search_list=[]
quick_search_tag='LogQSearch:::'
write_mem_tag='!WRITECACHE!'
zero_disk_tag='!DELETEDISKCACHE!'
zero_mem_tag='!DELETEMEMCACHE!'
ts_start=time.time()
log_name='msglog_'+str(ts_start)+'.json'
schjob=[]
while True:
try:
search=False
rx_msg=sock.recv(2048)
dcdmsg=rx_msg.decode("utf-8")
dcdmsg=bytes(dcdmsg,'ascii')
dcdmsg=cryp.DecryptMsg(dcdmsg,password)
if quick_search_tag in dcdmsg:
search=True
print('quick search!')
sl.searchMem(search_list,dcdmsg,password,'off')
if filter_tag not in dcdmsg and search==False:
jlm=json.loads(dcdmsg)
search_list.append({"source_time":jlm["source_time"],'sending_node':jlm['sending_node'],'sending_hostname':jlm['sending_hostname'],"cluster":params["cluster_id"],'orig_message':jlm['orig_message'],'orig_addr':jlm['orig_addr']})
i+=1
if i % 10 == 0:
with open ('msglog_temp.json','w') as log:
json.dump(search_list,log)
continue
if i % 105 == 0:
ts_start=time.time()
log_name='msglog_'+str(ts_start)+'.json'
with open (log_name,'w') as log:
json.dump(search_list,log)
search_list=[]
continue
else:
continue
except:
print('Rx Process Exception')
pass
def logMonitor_Tx(msg, params,password, nw):
LOGMSG_GRP = nw['LOGMSG_GRP']
LOGMSG_PORT = nw['LOGMSG_PORT']
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
print("Starting Tx process...\n")
localnode=params['localnode']
role=params['role']
node=params['localnode']
cluster=params['cluster_id']
hostname=(socket.gethostname())
jobs=[]
z = multiprocessing.Process(target=nm.infHBeat,args=(params,nw))
jobs.append(z)
z.daemon = True
z.start()
n = multiprocessing.Process(target=nm.messageTagGen,args=(nw,))
jobs.append(n)
n.daemon = True
n.start()
if role == 'RxTx':
p = multiprocessing.Process(target=logMonitor_Rx,args=(password,params,))
jobs.append(p)
p.daemon = True
p.start()
ds =multiprocessing.Process(target=sl.deepSearch)
jobs.append(ds)
ds.daemon = True
ds.start()
q = multiprocessing.Process(target=syslog)
jobs.append(q)
q.daemon = True
q.start()
lenfr=0
send_throttle=2
lfr=[0,0]
while True:
lfr[0]=lfr[1]
if max(lfr) > 100:
with open ('syslog.log','w') as f:
f.close()
lfr=[0,0]
time.sleep(send_throttle)
try:
with open ('droplist.json','r') as dlj:
drop_tag=json.load(dlj)
drop_tag=str(drop_tag)
except :
print('possible JSONDecodeError')
drop_tag='[]'
pass
while True:
with open('syslog.log','r') as f:
fr=f.readlines()
lfr[1]=len(fr)
if lfr[1] > lfr[0]:
msg=''
for i in fr[lfr[0]:lfr[1]]:
msg=i.rstrip()
parse_msg=json.loads(msg)
ts = time.time()
msg={'source_time':ts,'sending_node':localnode,'sending_hostname':hostname,'orig_message':parse_msg['log_message'],'orig_addr':parse_msg['orig_addr'],'drop_tag':drop_tag}
msg=json.dumps(msg)
msg=bytes(msg, "ascii")
msg=cryp.EncryptMsg(msg,password)
try:
sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT))
except OSError:
msg = ("Attempting to send %s log messages from overran Tx buffer" % str(len(fr)))
msg=localnode+'@'+hostname+"# "+'"'+msg+'"'+drop_tag
msg=bytes(msg, "ascii")
msg=cryp.EncryptMsg(msg,password)
sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT))
pass
if lfr[0] == lfr[1]:
pass
else:
pass
break
sys.exit()
def main():
params, nw =nm.localParams()
with open('pwf','r') as p:
password=p.read()
password=password.rstrip()
jobs = []
msg=None
r = multiprocessing.Process(target=logMonitor_Tx(msg,params,password,nw))
jobs.append(r)
r.start()
if __name__ == '__main__':
main()
| true | true |
f72cf1840ab9da367881e535c5609e8a88e5c71b | 1,349 | py | Python | actor/skills/grass_cutter.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | actor/skills/grass_cutter.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | actor/skills/grass_cutter.py | tamamiyasita/Roguelike-Tutorial-2020 | db4d4e5369010567bc39bdd404c4f3a7998670fd | [
"MIT"
] | null | null | null | from constants import *
from data import IMAGE_ID
from random import randint
from actor.skills.base_skill import BaseSkill
from util import dice
class GrassCutter(BaseSkill):
    """A basic sword-type weapon skill.

    Equippable passive weapon; its damage roll scales with the skill level
    and the owner's STR (see the ``damage`` property).
    """

    def __init__(self, x=0, y=0, name="grass_cutter"):
        super().__init__(
            name=name,
            image=IMAGE_ID[name],
            x=x,
            y=y,
        )
        # Attributes passed to the attack routine.
        self._damage = 5
        self.hit_rate = 95
        self.attr = "physical"
        self.effect = None
        self.owner = None
        self._level = 1
        self.tag = [Tag.item, Tag.equip, Tag.weapon, Tag.skill, Tag.passive]
        self.item_weight = 1.1
        self.explanatory_text = f"damage: {self.level}D{self.damage}\nhit rate: {self.hit_rate}"
        self.icon = IMAGE_ID["grass_cutter_icon"]

    @property
    def damage(self):
        """Dice-rolled damage based on the owner's STR; None while unowned."""
        if self.owner:
            return dice((self.level / 3 + 1), ((self.owner.fighter.STR+self._damage))/2, (self.level/2))

    def update_animation(self, delta_time):
        super().update_animation(delta_time)
        # Swing the sprite while the wielder is attacking. self.master may not
        # be set yet, so guard the attribute access narrowly instead of
        # swallowing every exception with a bare except (the original bare
        # except could hide unrelated bugs).
        try:
            if self.master.state == state.ATTACK and Tag.weapon in self.tag:
                self.item_margin_x = (self.item_position_x + 5) * SPRITE_SCALE
                self.angle += 90
            else:
                self.angle = 0
        except AttributeError:
            pass
| 24.089286 | 104 | 0.575982 | from constants import *
from data import IMAGE_ID
from random import randint
from actor.skills.base_skill import BaseSkill
from util import dice
class GrassCutter(BaseSkill):
def __init__(self, x=0, y=0, name="grass_cutter"):
super().__init__(
name=name,
image=IMAGE_ID[name],
x=x,
y=y,
)
self._damage = 5
self.hit_rate = 95
self.attr = "physical"
self.effect = None
self.owner = None
self._level = 1
self.tag = [Tag.item, Tag.equip, Tag.weapon, Tag.skill, Tag.passive]
self.item_weight = 1.1
self.explanatory_text = f"damage: {self.level}D{self.damage}\nhit rate: {self.hit_rate}"
self.icon = IMAGE_ID["grass_cutter_icon"]
@property
def damage(self):
if self.owner:
return dice((self.level / 3 + 1), ((self.owner.fighter.STR+self._damage))/2, (self.level/2))
def update_animation(self, delta_time):
super().update_animation(delta_time)
try:
if self.master.state == state.ATTACK and Tag.weapon in self.tag:
self.item_margin_x = (self.item_position_x + 5) * SPRITE_SCALE
self.angle += 90
else:
self.angle = 0
except:
pass
| true | true |
f72cf2ba17b0645159849d98585fab8cba690efd | 3,280 | py | Python | enroll_certificates.py | marcelojcn/psd2-tpp-enroller | 86a03287e74a38f1ebb0d46886c2fc0ec0345ff2 | [
"MIT"
] | 2 | 2021-03-23T05:07:53.000Z | 2021-07-04T20:42:20.000Z | enroll_certificates.py | marcelojcn/psd2-tpp-enroller | 86a03287e74a38f1ebb0d46886c2fc0ec0345ff2 | [
"MIT"
] | null | null | null | enroll_certificates.py | marcelojcn/psd2-tpp-enroller | 86a03287e74a38f1ebb0d46886c2fc0ec0345ff2 | [
"MIT"
] | 1 | 2021-03-22T05:45:38.000Z | 2021-03-22T05:45:38.000Z | import argparse
from src.openbanking_tpp_proxy.proxy import Proxy
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process parametes for certificate enrollment")
parser.add_argument('--api_url', type=str, required=True,
help='API url needed for certificate integration')
parser.add_argument('--tpp_id', type=str, required=True,
help="ID of the TPP certificate which can be found under 'subject=*'.")
parser.add_argument('--tpp_name', type=str, required=True,
help="Name of TPP used for integration purposes.")
parser.add_argument('--qwac_cert', type=str, required=True,
help="Path QWAC certificate in DER format.")
parser.add_argument('--qwac_key', type=str, required=True,
help="Path QWAC key in PEM format.")
parser.add_argument('--qseal_cert', type=str, required=True,
help="Path QSEAL certificate in DER format.")
parser.add_argument('--qseal_key', type=str, required=True,
help="Path QSEAL key in PEM format.")
parser.add_argument('--intermediate_cert', type=str, required=True,
help="Path intermediate certificate in DER format.")
parser.add_argument('--root_cert', type=str, required=True,
help="Path root certificate in DER format.")
# Optional arguments
parser.add_argument('--qseal_key_file_password',
default=None,
type=str,
required=False,
help="(OPTIONAL) Password to qseal key file.")
parser.add_argument('--http_proxy',
default=None,
type=str,
required=False,
help="(OPTIONAL) HTTP proxy URI with port.")
parser.add_argument('--https_proxy',
default=None,
type=str,
required=False,
help="(OPTIONAL) HTTPS proxy URI with port.")
args = parser.parse_args()
# Enrollment process
proxy = Proxy(args.qwac_cert,
args.qwac_key,
args.qseal_cert,
args.qseal_key,
args.qseal_key_file_password,
args.http_proxy,
args.https_proxy)
enrollment_path = args.api_url + "/eidas/1.0/v1/enrollment"
enrollment_response = proxy.enroll_certificates(enrollment_path,
args.intermediate_cert,
args.root_cert,
args.tpp_id,
args.tpp_name)
print(enrollment_response.status_code)
print(enrollment_response.content)
# Perform connection checks
response = proxy.proxy_request("GET", args.api_url + "/eidas/1.0/v1/consents/health-check")
print(response.status_code)
print(response.content)
response = proxy.proxy_request("GET", args.api_url + "/eidas/1.0/v1/payments/health-check")
print(response.status_code)
print(response.content)
| 46.857143 | 96 | 0.559451 | import argparse
from src.openbanking_tpp_proxy.proxy import Proxy
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Process parametes for certificate enrollment")
parser.add_argument('--api_url', type=str, required=True,
help='API url needed for certificate integration')
parser.add_argument('--tpp_id', type=str, required=True,
help="ID of the TPP certificate which can be found under 'subject=*'.")
parser.add_argument('--tpp_name', type=str, required=True,
help="Name of TPP used for integration purposes.")
parser.add_argument('--qwac_cert', type=str, required=True,
help="Path QWAC certificate in DER format.")
parser.add_argument('--qwac_key', type=str, required=True,
help="Path QWAC key in PEM format.")
parser.add_argument('--qseal_cert', type=str, required=True,
help="Path QSEAL certificate in DER format.")
parser.add_argument('--qseal_key', type=str, required=True,
help="Path QSEAL key in PEM format.")
parser.add_argument('--intermediate_cert', type=str, required=True,
help="Path intermediate certificate in DER format.")
parser.add_argument('--root_cert', type=str, required=True,
help="Path root certificate in DER format.")
parser.add_argument('--qseal_key_file_password',
default=None,
type=str,
required=False,
help="(OPTIONAL) Password to qseal key file.")
parser.add_argument('--http_proxy',
default=None,
type=str,
required=False,
help="(OPTIONAL) HTTP proxy URI with port.")
parser.add_argument('--https_proxy',
default=None,
type=str,
required=False,
help="(OPTIONAL) HTTPS proxy URI with port.")
args = parser.parse_args()
proxy = Proxy(args.qwac_cert,
args.qwac_key,
args.qseal_cert,
args.qseal_key,
args.qseal_key_file_password,
args.http_proxy,
args.https_proxy)
enrollment_path = args.api_url + "/eidas/1.0/v1/enrollment"
enrollment_response = proxy.enroll_certificates(enrollment_path,
args.intermediate_cert,
args.root_cert,
args.tpp_id,
args.tpp_name)
print(enrollment_response.status_code)
print(enrollment_response.content)
response = proxy.proxy_request("GET", args.api_url + "/eidas/1.0/v1/consents/health-check")
print(response.status_code)
print(response.content)
response = proxy.proxy_request("GET", args.api_url + "/eidas/1.0/v1/payments/health-check")
print(response.status_code)
print(response.content)
| true | true |
f72cf2bd30e710234b5f0ce409d6c011a60b05e1 | 827 | py | Python | tests/test_service.py | GameServerGurus/Nitrado-SDK | d72536be5def0b51a7ac89ccf62e35095f4ea705 | [
"MIT"
] | 1 | 2022-02-01T18:12:00.000Z | 2022-02-01T18:12:00.000Z | tests/test_service.py | GameServerGurus/Nitrado-SDK | d72536be5def0b51a7ac89ccf62e35095f4ea705 | [
"MIT"
] | 1 | 2022-01-31T21:04:53.000Z | 2022-02-01T02:16:52.000Z | tests/test_service.py | GameServerGurus/Nitrado-SDK | d72536be5def0b51a7ac89ccf62e35095f4ea705 | [
"MIT"
] | null | null | null | import os
from nitrado import Service, initialize_client
def set_client():
    """Point the Nitrado client at the public API using the env-var API key."""
    initialize_client(os.getenv('NITRADO_KEY'), "https://api.nitrado.net/")
def test_services():
    """The account should expose at least one service."""
    set_client()
    assert len(Service.all()) > 0
def test_logs():
    """logs() on the first service returns a list."""
    set_client()
    first_service = Service.all()[0]
    assert type(first_service.logs()) == list
def test_tasks():
    """tasks() on the first service returns a list."""
    set_client()
    first_service = Service.all()[0]
    assert type(first_service.tasks()) == list
def test_notifications():
    """notifications() on the first service returns a list."""
    set_client()
    first_service = Service.all()[0]
    assert type(first_service.notifications()) == list
def tests():
    """Run every smoke test, preserving the original execution order."""
    for check in (test_services, test_notifications, test_logs, test_tasks):
        check()
if __name__ == "__main__":
tests()
print("passing")
| 17.229167 | 46 | 0.636034 | import os
from nitrado import Service, initialize_client
def set_client():
url = "https://api.nitrado.net/"
key = os.getenv('NITRADO_KEY')
initialize_client(key, url)
def test_services():
set_client()
services = Service.all()
assert len(services) > 0
def test_logs():
set_client()
service = Service.all()[0]
logs = service.logs()
assert type(logs) == list
def test_tasks():
set_client()
service = Service.all()[0]
tasks = service.tasks()
assert type(tasks) == list
def test_notifications():
set_client()
service = Service.all()[0]
notif = service.notifications()
assert type(notif) == list
def tests():
test_services()
test_notifications()
test_logs()
test_tasks()
if __name__ == "__main__":
tests()
print("passing")
| true | true |
f72cf30f4db51c6f09ee40560b05c6f53cdebd84 | 5,436 | py | Python | sdk/python/pulumi_aws/ec2/vpn_connection_route.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/vpn_connection_route.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/vpn_connection_route.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['VpnConnectionRoute']
class VpnConnectionRoute(pulumi.CustomResource):
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen); keep edits to
    # documentation only so regeneration stays a clean diff.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 destination_cidr_block: Optional[pulumi.Input[str]] = None,
                 vpn_connection_id: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Provides a static route between a VPN connection and a customer gateway.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        vpc = aws.ec2.Vpc("vpc", cidr_block="10.0.0.0/16")
        vpn_gateway = aws.ec2.VpnGateway("vpnGateway", vpc_id=vpc.id)
        customer_gateway = aws.ec2.CustomerGateway("customerGateway",
            bgp_asn="65000",
            ip_address="172.0.0.1",
            type="ipsec.1")
        main = aws.ec2.VpnConnection("main",
            vpn_gateway_id=vpn_gateway.id,
            customer_gateway_id=customer_gateway.id,
            type="ipsec.1",
            static_routes_only=True)
        office = aws.ec2.VpnConnectionRoute("office",
            destination_cidr_block="192.168.10.0/24",
            vpn_connection_id=main.id)
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] destination_cidr_block: The CIDR block associated with the local subnet of the customer network.
        :param pulumi.Input[str] vpn_connection_id: The ID of the VPN connection.
        """
        # Support the deprecated __name__/__opts__ positional aliases.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # Without opts.id this is a create, so the required inputs must be set.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if destination_cidr_block is None:
                raise TypeError("Missing required property 'destination_cidr_block'")
            __props__['destination_cidr_block'] = destination_cidr_block
            if vpn_connection_id is None:
                raise TypeError("Missing required property 'vpn_connection_id'")
            __props__['vpn_connection_id'] = vpn_connection_id
        super(VpnConnectionRoute, __self__).__init__(
            'aws:ec2/vpnConnectionRoute:VpnConnectionRoute',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            destination_cidr_block: Optional[pulumi.Input[str]] = None,
            vpn_connection_id: Optional[pulumi.Input[str]] = None) -> 'VpnConnectionRoute':
        """
        Get an existing VpnConnectionRoute resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] destination_cidr_block: The CIDR block associated with the local subnet of the customer network.
        :param pulumi.Input[str] vpn_connection_id: The ID of the VPN connection.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["destination_cidr_block"] = destination_cidr_block
        __props__["vpn_connection_id"] = vpn_connection_id
        return VpnConnectionRoute(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="destinationCidrBlock")
    def destination_cidr_block(self) -> pulumi.Output[str]:
        """
        The CIDR block associated with the local subnet of the customer network.
        """
        return pulumi.get(self, "destination_cidr_block")

    @property
    @pulumi.getter(name="vpnConnectionId")
    def vpn_connection_id(self) -> pulumi.Output[str]:
        """
        The ID of the VPN connection.
        """
        return pulumi.get(self, "vpn_connection_id")

    def translate_output_property(self, prop):
        # Map provider camelCase output names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case input names to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 42.46875 | 134 | 0.653054 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['VpnConnectionRoute']
class VpnConnectionRoute(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_cidr_block: Optional[pulumi.Input[str]] = None,
vpn_connection_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if destination_cidr_block is None:
raise TypeError("Missing required property 'destination_cidr_block'")
__props__['destination_cidr_block'] = destination_cidr_block
if vpn_connection_id is None:
raise TypeError("Missing required property 'vpn_connection_id'")
__props__['vpn_connection_id'] = vpn_connection_id
super(VpnConnectionRoute, __self__).__init__(
'aws:ec2/vpnConnectionRoute:VpnConnectionRoute',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
destination_cidr_block: Optional[pulumi.Input[str]] = None,
vpn_connection_id: Optional[pulumi.Input[str]] = None) -> 'VpnConnectionRoute':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["destination_cidr_block"] = destination_cidr_block
__props__["vpn_connection_id"] = vpn_connection_id
return VpnConnectionRoute(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationCidrBlock")
def destination_cidr_block(self) -> pulumi.Output[str]:
return pulumi.get(self, "destination_cidr_block")
@property
@pulumi.getter(name="vpnConnectionId")
def vpn_connection_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "vpn_connection_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f72cf317b227f69aee075f4d4a76f6668cb37c7e | 10,749 | py | Python | qa/rpc-tests/maxuploadtarget.py | minblock/lua | ee6ef8f49bfc9569e7c568fc6546fa16dbe14585 | [
"MIT"
] | null | null | null | qa/rpc-tests/maxuploadtarget.py | minblock/lua | ee6ef8f49bfc9569e7c568fc6546fa16dbe14585 | [
"MIT"
] | null | null | null | qa/rpc-tests/maxuploadtarget.py | minblock/lua | ee6ef8f49bfc9569e7c568fc6546fa16dbe14585 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import LUASCOINTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respecteved even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    """Bare-bones peer used as a conduit for sending p2p messages to a node;
    the main test logic generates the messages."""

    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()
        self.block_receive_map = {}

    def add_connection(self, conn):
        self.connection = conn
        self.peer_disconnected = False

    def on_inv(self, conn, message):
        # Inventory announcements are irrelevant for this test.
        pass

    def on_getdata(self, conn, message):
        # Remember the most recent getdata (inspected by the test).
        self.last_getdata = message

    def on_block(self, conn, message):
        blk = message.block
        blk.calc_sha256()
        # Count how many times each block hash has been delivered.
        self.block_receive_map[blk.sha256] = self.block_receive_map.get(blk.sha256, 0) + 1

    def wait_for_verack(self):
        # Spin until the node's verack arrives; signals that the test can
        # begin. Runs on the testing thread.
        return wait_until(lambda: self.verack_received, timeout=10)

    def wait_for_disconnect(self):
        return wait_until(lambda: self.peer_disconnected, timeout=10)

    def send_message(self, message):
        # Thin wrapper around the NodeConn send.
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    def on_close(self, conn):
        self.peer_disconnected = True

    def sync_with_ping(self, timeout=30):
        # Ping/pong round trip to sync with the node after a block delivery.
        expected = self.ping_counter
        self.connection.send_message(msg_ping(nonce=expected))
        ok = wait_until(lambda: self.last_pong.nonce == expected, timeout)
        self.ping_counter += 1
        return ok
class MaxUploadTest(LUASCOINTestFramework):
    def __init__(self):
        # Cached unspent outputs plus pre-generated large tx outputs used to
        # pad transactions close to the block-size limit.
        self.utxo = []
        self.txouts = gen_return_txouts()
    def add_options(self, parser):
        """Register --testbinary (defaults to $LUASCOIND or 'luascoind')."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("LUASCOIND", "luascoind"),
                          help="luascoind binary to test")
    def setup_chain(self):
        """Create a clean 2-node chain directory with no pre-mined blocks."""
        initialize_chain_clean(self.options.tmpdir, 2)
    def setup_network(self):
        # Start a node with maxuploadtarget of 200 MB (/24h)
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
    def mine_full_block(self, node, address):
        """Create 14 padded transactions (~66k each) and mine them into one block.

        node    -- RPC proxy for the node that builds, signs and mines.
        address -- destination for the change output of each transaction.
        """
        # Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
        for j in xrange(14):
            if len(self.utxo) < 14:
                self.utxo = node.listunspent()
            inputs=[]
            outputs = {}
            t = self.utxo.pop()
            inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
            remchange = t["amount"] - Decimal("0.001000")
            outputs[address]=remchange
            # Create a basic transaction that will send change back to ourself after account for a fee
            # And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
            # of txouts is stored and is the only thing we overwrite from the original transaction
            rawtx = node.createrawtransaction(inputs, outputs)
            newtx = rawtx[0:92]
            newtx = newtx + self.txouts
            newtx = newtx + rawtx[94:]
            # Appears to be ever so slightly faster to sign with SIGHASH_NONE
            signresult = node.signrawtransaction(newtx,None,None,"NONE")
            txid = node.sendrawtransaction(signresult["hex"], True)
        # Mine a full sized block which will be these transactions we just created
        node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
# Run the functional test when this file is executed directly.
if __name__ == '__main__':
    MaxUploadTest().main()
| 40.409774 | 140 | 0.655224 |
from test_framework.mininode import *
from test_framework.test_framework import LUASCOINTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
class TestNode(NodeConnCB):
    """Minimal P2P test peer: tracks pings/pongs, disconnects, and counts
    how many times each block hash has been received from the node."""
    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()
        # block sha256 -> number of times that block was delivered to us
        self.block_receive_map = {}
    def add_connection(self, conn):
        self.connection = conn
        self.peer_disconnected = False
    def on_inv(self, conn, message):
        pass
    def on_getdata(self, conn, message):
        self.last_getdata = message
    def on_block(self, conn, message):
        message.block.calc_sha256()
        try:
            self.block_receive_map[message.block.sha256] += 1
        except KeyError as e:
            self.block_receive_map[message.block.sha256] = 1
    def wait_for_verack(self):
        def veracked():
            return self.verack_received
        return wait_until(veracked, timeout=10)
    def wait_for_disconnect(self):
        def disconnected():
            return self.peer_disconnected
        return wait_until(disconnected, timeout=10)
    def send_message(self, message):
        self.connection.send_message(message)
    def on_pong(self, conn, message):
        self.last_pong = message
    def on_close(self, conn):
        # Mark the peer as gone so wait_for_disconnect() can observe it.
        self.peer_disconnected = True
    # Sync up with the node after delivery of a block
    def sync_with_ping(self, timeout=30):
        def received_pong():
            return (self.last_pong.nonce == self.ping_counter)
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        success = wait_until(received_pong, timeout)
        self.ping_counter += 1
        return success
class MaxUploadTest(LUASCOINTestFramework):
    """Functional test for -maxuploadtarget: old blocks are throttled once the
    daily target is spent, new blocks and whitelisted peers are always served,
    and the counters reset after 24 hours."""
    def __init__(self):
        self.utxo = []
        self.txouts = gen_return_txouts()
    def add_options(self, parser):
        # Allow overriding the daemon binary under test via --testbinary.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("LUASCOIND", "luascoind"),
                          help="luascoind binary to test")
    def setup_chain(self):
        initialize_chain_clean(self.options.tmpdir, 2)
    def setup_network(self):
        # Start a node with maxuploadtarget of 200 MB (/24h)
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
    def mine_full_block(self, node, address):
        # Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
        for j in xrange(14):
            if len(self.utxo) < 14:
                self.utxo = node.listunspent()
            inputs=[]
            outputs = {}
            t = self.utxo.pop()
            inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
            remchange = t["amount"] - Decimal("0.001000")
            outputs[address]=remchange
            # Build a change-back transaction, then splice the pregenerated
            # txouts into the raw hex; rawtx[92] holds the txout count and is
            # the only byte range overwritten from the original transaction.
            rawtx = node.createrawtransaction(inputs, outputs)
            newtx = rawtx[0:92]
            newtx = newtx + self.txouts
            newtx = newtx + rawtx[94:]
            # Signing with SIGHASH_NONE is marginally faster here.
            signresult = node.signrawtransaction(newtx,None,None,"NONE")
            txid = node.sendrawtransaction(signresult["hex"], True)
        # Mine one block containing the transactions created above.
        node.generate(1)
    def run_test(self):
        # Push node time a week into the past before connecting: CNode time
        # counters cannot be moved backward after initialization.
        old_time = int(time.time() - 2*60*60*24*7)
        self.nodes[0].setmocktime(old_time)
        # Generate some old blocks
        self.nodes[0].generate(130)
        # test_nodes[0] will only request old blocks
        # test_nodes[1] will only request new blocks
        # test_nodes[2] will test resetting the counters
        test_nodes = []
        connections = []
        for i in xrange(3):
            test_nodes.append(TestNode())
            connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
            test_nodes[i].add_connection(connections[i])
        NetworkThread().start() # Start up network handling in another thread
        [x.wait_for_verack() for x in test_nodes]
        # Test logic begins here
        # Now mine a big block
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)
        # Advance mock time to two days ago, then mine one more full block so
        # the previous one counts as "old" (>1 week before its successor).
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
        self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
        # This most recent block will be requested as the "new" block.
        big_new_block = self.nodes[0].getbestblockhash()
        new_block_size = self.nodes[0].getblock(big_new_block)['size']
        big_new_block = int(big_new_block, 16)
        # test_nodes[0] will test what happens if we just keep requesting the
        # the same big old block too many times (expect: disconnect)
        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))
        max_bytes_per_day = 200*1024*1024
        daily_buffer = 144 * MAX_BLOCK_SIZE
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size
        # 144MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~70 tries.
        for i in xrange(success_count):
            test_nodes[0].send_message(getdata_request)
            test_nodes[0].sync_with_ping()
            assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in xrange(3):
            test_nodes[0].send_message(getdata_request)
        test_nodes[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        print "Peer 0 disconnected after downloading old block too many times"
        # Requesting the current block on test_nodes[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 200 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in xrange(200):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
        print "Peer 1 able to repeatedly download new block"
        # But requesting an old block from test_nodes[1] still disconnects it.
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)
        print "Peer 1 disconnected after trying to download old block"
        print "Advancing system time on node to clear counters..."
        # Moving time forward 24h resets the counters, so test_nodes[2]
        # should now be able to fetch the old block.
        self.nodes[0].setmocktime(int(time.time()))
        test_nodes[2].sync_with_ping()
        test_nodes[2].send_message(getdata_request)
        test_nodes[2].sync_with_ping()
        assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
        print "Peer 2 able to download old block"
        [c.disconnect_node() for c in connections]
        # Restart node 0 with a 1MB maxuploadtarget but 127.0.0.1 whitelisted.
        print "Restarting nodes with -whitelist=127.0.0.1"
        stop_node(self.nodes[0], 0)
        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
        # Recreate and reconnect the 3 test peers.
        test_nodes = []
        connections = []
        for i in xrange(3):
            test_nodes.append(TestNode())
            connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
            test_nodes[i].add_connection(connections[i])
        NetworkThread().start()
        [x.wait_for_verack() for x in test_nodes]
        # Retrieve 20 blocks, more than enough to break the 1MB limit.
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in xrange(20):
            test_nodes[1].send_message(getdata_request)
            test_nodes[1].sync_with_ping()
            assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
        getdata_request.inv = [CInv(2, big_old_block)]
        test_nodes[1].send_message(getdata_request)
        test_nodes[1].wait_for_disconnect()
        # Node stays connected because 127.0.0.1 is whitelisted.
        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        print "Peer 1 still connected after trying to download old block (whitelisted)"
        [c.disconnect_node() for c in connections]
# Run the functional test when this file is executed directly.
if __name__ == '__main__':
    MaxUploadTest().main()
| false | true |
f72cf3e78bbce35be30e75fc278005c291dd24d7 | 3,977 | py | Python | src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | 1 | 2021-12-22T13:19:14.000Z | 2021-12-22T13:19:14.000Z | src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | null | null | null | src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.legacy.inference.proposer.newtonian_monte_carlo_utils import (
is_valid,
hessian_of_log_prob,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
LOGGER = logging.getLogger("beanmachine")
class SingleSiteHalfSpaceNMCProposer(SingleSiteAncestralProposer):
    """
    Single-Site Half Space Newtonian Monte Carlo Proposers.
    Fits a Gamma proposal for a half-space (positive) random variable from the
    first-order gradient and Hessian diagonal of the log probability.
    See sec. 3.2 of [1]
    [1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`
    """
    def __init__(self, node: RVIdentifier):
        super().__init__(node)
        # Cached Gamma proposal; reused only when the world has a single
        # latent node (see get_proposal_distribution).
        self._proposal_distribution = None
    def compute_alpha_beta(
        self, world: World
    ) -> Tuple[bool, torch.Tensor, torch.Tensor]:
        """
        Computes alpha and beta of the Gamma proposal given the node.
        alpha = 1 - hessian_diag * x^2
        beta = -1 * x * hessian_diag - first_grad
        Returns a ``(success, alpha, beta)`` triple; ``success`` is False
        when the gradient or Hessian is not finite.
        """
        node_val = world[self.node]
        first_gradient, hessian_diag = hessian_of_log_prob(
            world, self.node, node_val, tensorops.halfspace_gradients
        )
        if not is_valid(first_gradient) or not is_valid(hessian_diag):
            LOGGER.warning(
                "Gradient or Hessian is invalid at node {n}.\n".format(n=str(self.node))
            )
            return False, torch.tensor(0.0), torch.tensor(0.0)
        node_val_reshaped = node_val.reshape(-1)
        predicted_alpha = (
            1 - hessian_diag * (node_val_reshaped * node_val_reshaped)
        ).t()
        predicted_beta = -1 * node_val_reshaped * hessian_diag - first_gradient
        # A Gamma distribution needs strictly positive parameters; wherever the
        # Newton fit is invalid, fall back to alpha=1 and beta=distribution mean.
        condition = (predicted_alpha > 0) & (predicted_beta > 0)
        predicted_alpha = torch.where(
            condition, predicted_alpha, torch.tensor(1.0).to(dtype=predicted_beta.dtype)
        )
        node_var = world.get_variable(self.node)
        mean = (
            node_var.distribution.mean.reshape(-1)
            if is_valid(node_var.distribution.mean)
            else torch.ones_like(predicted_beta)
        )
        predicted_beta = torch.where(condition, predicted_beta, mean)
        predicted_alpha = predicted_alpha.reshape(node_val.shape)
        predicted_beta = predicted_beta.reshape(node_val.shape)
        return True, predicted_alpha, predicted_beta
    def get_proposal_distribution(self, world: World) -> dist.Distribution:
        """
        Returns the proposal distribution of the node.
        Args:
            world: the world in which we're proposing a new value for node.
        Returns:
            The proposal distribution.
        """
        # if the number of variables in the world is 1 and proposal distribution
        # has already been computed, we can use the old proposal distribution
        # and skip re-computing the gradient, since there are no other variable
        # in the world that may change the gradient and the old one is still
        # correct.
        if self._proposal_distribution is not None and len(world.latent_nodes) == 1:
            return self._proposal_distribution
        # Renamed local from `is_valid` to `fit_ok` so it no longer shadows the
        # imported is_valid() helper.
        fit_ok, alpha, beta = self.compute_alpha_beta(world)
        if not fit_ok:
            LOGGER.warning(
                "Node {n} has invalid proposal solution. ".format(n=self.node)
                + "Proposer falls back to SingleSiteAncestralProposer.\n"
            )
            return super().get_proposal_distribution(world)
        self._proposal_distribution = dist.Gamma(alpha, beta)
        return self._proposal_distribution
| 38.61165 | 104 | 0.670355 |
import logging
from typing import Tuple
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.legacy.inference.proposer.newtonian_monte_carlo_utils import (
is_valid,
hessian_of_log_prob,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
LOGGER = logging.getLogger("beanmachine")
class SingleSiteHalfSpaceNMCProposer(SingleSiteAncestralProposer):
    """
    Single-Site Half Space Newtonian Monte Carlo proposer: fits a Gamma
    proposal for a half-space random variable from the first-order gradient
    and Hessian diagonal of the log probability.
    """
    def __init__(self, node: RVIdentifier):
        super().__init__(node)
        # Cached Gamma proposal; reused only for single-latent-node worlds.
        self._proposal_distribution = None
    def compute_alpha_beta(
        self, world: World
    ) -> Tuple[bool, torch.Tensor, torch.Tensor]:
        """
        Compute the Gamma proposal parameters for the node:
            alpha = 1 - hessian_diag * x^2
            beta  = -x * hessian_diag - first_grad
        Returns (success, alpha, beta); success is False when the gradient
        or Hessian is not finite.
        """
        node_val = world[self.node]
        first_gradient, hessian_diag = hessian_of_log_prob(
            world, self.node, node_val, tensorops.halfspace_gradients
        )
        if not is_valid(first_gradient) or not is_valid(hessian_diag):
            LOGGER.warning(
                "Gradient or Hessian is invalid at node {n}.\n".format(n=str(self.node))
            )
            return False, torch.tensor(0.0), torch.tensor(0.0)
        node_val_reshaped = node_val.reshape(-1)
        predicted_alpha = (
            1 - hessian_diag * (node_val_reshaped * node_val_reshaped)
        ).t()
        predicted_beta = -1 * node_val_reshaped * hessian_diag - first_gradient
        # Wherever the Newton fit is not a valid Gamma parameterization
        # (alpha or beta non-positive), fall back to alpha=1 and
        # beta=the node's distribution mean (or 1.0 if that mean is invalid).
        condition = (predicted_alpha > 0) & (predicted_beta > 0)
        predicted_alpha = torch.where(
            condition, predicted_alpha, torch.tensor(1.0).to(dtype=predicted_beta.dtype)
        )
        node_var = world.get_variable(self.node)
        mean = (
            node_var.distribution.mean.reshape(-1)
            if is_valid(node_var.distribution.mean)
            else torch.ones_like(predicted_beta)
        )
        predicted_beta = torch.where(condition, predicted_beta, mean)
        predicted_alpha = predicted_alpha.reshape(node_val.shape)
        predicted_beta = predicted_beta.reshape(node_val.shape)
        return True, predicted_alpha, predicted_beta
    def get_proposal_distribution(self, world: World) -> dist.Distribution:
        """
        Return the Gamma proposal for the node, falling back to the ancestral
        proposer when the Gamma fit fails.
        """
        # With a single latent node the cached proposal stays valid: no other
        # variable could have changed the gradients.
        if self._proposal_distribution is not None and len(world.latent_nodes) == 1:
            return self._proposal_distribution
        # NOTE(review): this local name shadows the imported is_valid() helper;
        # harmless here, but worth renaming.
        is_valid, alpha, beta = self.compute_alpha_beta(world)
        if not is_valid:
            LOGGER.warning(
                "Node {n} has invalid proposal solution. ".format(n=self.node)
                + "Proposer falls back to SingleSiteAncestralProposer.\n"
            )
            return super().get_proposal_distribution(world)
        self._proposal_distribution = dist.Gamma(alpha, beta)
        return self._proposal_distribution
| true | true |
f72cf4a833b1d28314f79d0d0b59dfce9cb476dc | 627 | py | Python | setup.py | peterpaohuang/depablo-box | edf21fa7f2bcd7009136a2e14b802d004b12b406 | [
"MIT"
] | 1 | 2019-08-06T15:45:34.000Z | 2019-08-06T15:45:34.000Z | setup.py | peterpaohuang/depablo-box | edf21fa7f2bcd7009136a2e14b802d004b12b406 | [
"MIT"
] | null | null | null | setup.py | peterpaohuang/depablo-box | edf21fa7f2bcd7009136a2e14b802d004b12b406 | [
"MIT"
] | null | null | null | import subprocess
# Each install step: status message, extra `conda install` arguments, and the
# completion message to print afterwards.
_INSTALL_STEPS = [
    ("Installing required packages...", ['--file', 'requirements.txt'],
     "Installed requirements"),
    ("Installing openbabel...", ['-c', 'openbabel', 'openbabel'],
     "Installed openbabel"),
    ("Installing beautifulsoup4...", ['-c', 'anaconda', 'beautifulsoup4'],
     "Installed beautifulsoup4"),
]
for _start_msg, _extra_args, _done_msg in _INSTALL_STEPS:
    print(_start_msg)
    subprocess.check_output(['conda', 'install', '--yes'] + _extra_args)
    print(_done_msg)
print("Installing requests...")
subprocess.check_output(['conda', 'install', '--yes', '-c', 'anaconda', 'requests'])
print("Finished install all packages") | 48.230769 | 86 | 0.728868 | import subprocess
# Install the pinned requirements first, then packages that come from
# specific conda channels.
print("Installing required packages...")
subprocess.check_output(['conda','install','--yes','--file','requirements.txt'])
print("Installed requirements")
# openbabel is published on its own conda channel.
print("Installing openbabel...")
subprocess.check_output(['conda','install','--yes','-c','openbabel', 'openbabel'])
print("Installed openbabel")
# beautifulsoup4 and requests come from the anaconda channel.
print("Installing beautifulsoup4...")
subprocess.check_output(['conda','install','--yes','-c','anaconda', 'beautifulsoup4'])
print("Installed beautifulsoup4")
print("Installing requests...")
subprocess.check_output(['conda','install','--yes','-c','anaconda', 'requests'])
print("Finished install all packages") | true | true |
f72cf697921980b575ca1e6593cdff20ac9b8c90 | 2,031 | py | Python | v3/htmlexample_module.py | ambadhan/OnlinePythonTutor | 857bab941fbde20f1f020b05b7723094ddead62a | [
"MIT"
] | 17 | 2021-12-09T11:31:44.000Z | 2021-12-29T03:07:14.000Z | v3/htmlexample_module.py | heysachin/OnlinePythonTutor | 0dcdacc7ff5be504dd6a47236ebc69dc0069f991 | [
"MIT"
] | 22 | 2017-09-17T03:59:16.000Z | 2017-11-14T17:33:57.000Z | v3/htmlexample_module.py | heysachin/OnlinePythonTutor | 0dcdacc7ff5be504dd6a47236ebc69dc0069f991 | [
"MIT"
] | 12 | 2021-12-09T11:31:46.000Z | 2022-01-07T03:14:46.000Z | # Example module for Online Python Tutor
# Philip Guo
# 2013-08-03
# To get the Online Python Tutor backend to import this custom module,
# add its filename ('htmlexample_module') to the CUSTOM_MODULE_IMPORTS
# tuple in pg_logger.py
# To see an example of this module at work, write the following code in
# http://pythontutor.com/visualize.html
'''
from htmlexample_module import ColorTable
t = ColorTable(3, 4)
t.set_color(0, 0, 'red')
t.render_HTML()
t.set_color(1, 1, 'green')
t.render_HTML()
t.set_color(2, 2, 'blue')
t.render_HTML()
for i in range(3):
for j in range(4):
t.set_color(i, j, 'gray')
t.render_HTML()
'''
# defines a simple table where you can set colors for individual rows and columns
class ColorTable:
    """A num_rows x num_columns grid of colorable cells that renders itself
    as an HTML table via Online Python Tutor's setHTML() hook."""
    def __init__(self, num_rows, num_columns):
        self.num_rows = num_rows
        self.num_columns = num_columns
        # create a 2D matrix of empty strings ('' means "not yet colored")
        self.table = []
        for i in range(self.num_rows):
            new_lst = ['' for e in range(self.num_columns)]
            self.table.append(new_lst)
    # color must be a legal HTML color string
    def set_color(self, row, column, color):
        """Assign *color* to the cell at (row, column)."""
        assert 0 <= row < self.num_rows
        assert 0 <= column < self.num_columns
        self.table[row][column] = color
    # call this function whenever you want to render this table in HTML
    def render_HTML(self):
        """Build the HTML for the current table state and display it."""
        # incrementally build up an HTML table string
        html_string = '<table>'
        for i in range(self.num_rows):
            html_string += '<tr>'
            for j in range(self.num_columns):
                color = self.table[i][j]
                if not color:
                    color = "white"
                html_string += '''<td style="width: 30px; height: 30px; border: 1px solid black;
                         background-color: %s;"></td>''' % color
            html_string += '</tr>'
        html_string += '</table>'
        # then call the magic setHTML function
        setHTML(html_string)
| 28.208333 | 96 | 0.613983 |
class ColorTable:
    """A grid of colorable cells rendered as an HTML table through Online
    Python Tutor's setHTML() hook."""
    def __init__(self, num_rows, num_columns):
        self.num_rows = num_rows
        self.num_columns = num_columns
        # 2D matrix of color strings; '' means "not yet colored".
        self.table = [['' for _col in range(num_columns)]
                      for _row in range(num_rows)]
    def set_color(self, row, column, color):
        """Assign *color* (an HTML color string) to the cell at (row, column)."""
        assert 0 <= row < self.num_rows
        assert 0 <= column < self.num_columns
        self.table[row][column] = color
    def render_HTML(self):
        """Build the HTML for the current table state and display it."""
        pieces = ['<table>']
        for i in range(self.num_rows):
            pieces.append('<tr>')
            for j in range(self.num_columns):
                cell = self.table[i][j]
                color = cell if cell else "white"
                pieces.append('''<td style="width: 30px; height: 30px; border: 1px solid black;
                         background-color: %s;"></td>''' % color)
            pieces.append('</tr>')
        pieces.append('</table>')
        setHTML(''.join(pieces))
| true | true |
f72cf6de2f57109ea400d2d7cfa8931507537fd0 | 3,018 | py | Python | examples/epd_2in13_test.py | philipempl/ResearchyPi | 74cf9cc78ace39d1d843b2b64fe39704aaafe778 | [
"MIT"
] | null | null | null | examples/epd_2in13_test.py | philipempl/ResearchyPi | 74cf9cc78ace39d1d843b2b64fe39704aaafe778 | [
"MIT"
] | null | null | null | examples/epd_2in13_test.py | philipempl/ResearchyPi | 74cf9cc78ace39d1d843b2b64fe39704aaafe778 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Demo script for the Waveshare 2.13" e-paper display (epd2in13 driver):
# draws shapes/text, shows BMP images, does a partial-update clock, then
# clears the panel and puts it to sleep.
import sys
import os
# Resource directories relative to this script: 'pic' holds fonts/images,
# 'lib' holds the waveshare_epd driver package.
picdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'pic')
libdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'lib')
if os.path.exists(libdir):
    sys.path.append(libdir)
import logging
from waveshare_epd import epd2in13
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
logging.basicConfig(level=logging.DEBUG)
try:
    logging.info("epd2in13 Demo")
    epd = epd2in13.EPD()
    logging.info("init and Clear")
    epd.init(epd.lut_full_update)
    epd.Clear(0xFF)
    # Drawing on the image
    font15 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 15)
    font24 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 24)
    logging.info("1.Drawing on the image...")
    image = Image.new('1', (epd.height, epd.width), 255)  # 255: clear the frame
    draw = ImageDraw.Draw(image)
    draw.rectangle([(0,0),(50,50)],outline = 0)
    draw.rectangle([(55,0),(100,50)],fill = 0)
    draw.line([(0,0),(50,50)], fill = 0,width = 1)
    draw.line([(0,50),(50,0)], fill = 0,width = 1)
    draw.chord((10, 60, 50, 100), 0, 360, fill = 0)
    draw.ellipse((55, 60, 95, 100), outline = 0)
    draw.pieslice((55, 60, 95, 100), 90, 180, outline = 0)
    draw.pieslice((55, 60, 95, 100), 270, 360, fill = 0)
    draw.polygon([(110,0),(110,50),(150,25)],outline = 0)
    draw.polygon([(190,0),(190,50),(150,25)],fill = 0)
    draw.text((120, 60), 'e-Paper demo', font = font15, fill = 0)
    draw.text((110, 90), u'微雪电子', font = font24, fill = 0)
    epd.display(epd.getbuffer(image))
    time.sleep(2)
    # read bmp file
    logging.info("2.read bmp file...")
    image = Image.open(os.path.join(picdir, '2in13.bmp'))
    epd.display(epd.getbuffer(image))
    time.sleep(2)
    # read bmp file on window
    logging.info("3.read bmp file on window...")
    # epd.Clear(0xFF)
    image1 = Image.new('1', (epd.height, epd.width), 255)  # 255: clear the frame
    bmp = Image.open(os.path.join(picdir, '100x100.bmp'))
    image1.paste(bmp, (2,2))
    epd.display(epd.getbuffer(image1))
    time.sleep(2)
    # # partial update
    logging.info("4.show time...")
    epd.init(epd.lut_partial_update)
    epd.Clear(0xFF)
    time_image = Image.new('1', (epd.height, epd.width), 255)
    time_draw = ImageDraw.Draw(time_image)
    num = 0
    while (True):
        # Redraw only the clock area, then push a partial refresh.
        time_draw.rectangle((120, 80, 220, 105), fill = 255)
        time_draw.text((120, 80), time.strftime('%H:%M:%S'), font = font24, fill = 0)
        epd.display(epd.getbuffer(time_image))
        num = num + 1
        if(num == 10):
            break
    logging.info("Clear...")
    epd.init(epd.lut_full_update)
    epd.Clear(0xFF)
    logging.info("Goto Sleep...")
    epd.sleep()
except IOError as e:
    logging.info(e)
except KeyboardInterrupt:
    # Release the hardware cleanly on Ctrl+C.
    logging.info("ctrl + c:")
    epd2in13.epdconfig.module_exit()
    exit()
| 32.106383 | 90 | 0.611995 |
# Demo for the Waveshare 2.13" e-paper display (epd2in13 driver): draws
# shapes/text, shows BMP images, runs a partial-update clock, then clears
# the panel and puts it to sleep.
import sys
import os
# 'pic' holds fonts/images, 'lib' holds the waveshare_epd driver package.
picdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'pic')
libdir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'lib')
if os.path.exists(libdir):
    sys.path.append(libdir)
import logging
from waveshare_epd import epd2in13
import time
from PIL import Image,ImageDraw,ImageFont
import traceback
logging.basicConfig(level=logging.DEBUG)
try:
    logging.info("epd2in13 Demo")
    epd = epd2in13.EPD()
    logging.info("init and Clear")
    epd.init(epd.lut_full_update)
    epd.Clear(0xFF)
    font15 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 15)
    font24 = ImageFont.truetype(os.path.join(picdir, 'Font.ttc'), 24)
    logging.info("1.Drawing on the image...")
    image = Image.new('1', (epd.height, epd.width), 255)  # 255: white frame
    draw = ImageDraw.Draw(image)
    draw.rectangle([(0,0),(50,50)],outline = 0)
    draw.rectangle([(55,0),(100,50)],fill = 0)
    draw.line([(0,0),(50,50)], fill = 0,width = 1)
    draw.line([(0,50),(50,0)], fill = 0,width = 1)
    draw.chord((10, 60, 50, 100), 0, 360, fill = 0)
    draw.ellipse((55, 60, 95, 100), outline = 0)
    draw.pieslice((55, 60, 95, 100), 90, 180, outline = 0)
    draw.pieslice((55, 60, 95, 100), 270, 360, fill = 0)
    draw.polygon([(110,0),(110,50),(150,25)],outline = 0)
    draw.polygon([(190,0),(190,50),(150,25)],fill = 0)
    draw.text((120, 60), 'e-Paper demo', font = font15, fill = 0)
    draw.text((110, 90), u'微雪电子', font = font24, fill = 0)
    epd.display(epd.getbuffer(image))
    time.sleep(2)
    logging.info("2.read bmp file...")
    image = Image.open(os.path.join(picdir, '2in13.bmp'))
    epd.display(epd.getbuffer(image))
    time.sleep(2)
    logging.info("3.read bmp file on window...")
    image1 = Image.new('1', (epd.height, epd.width), 255)
    bmp = Image.open(os.path.join(picdir, '100x100.bmp'))
    image1.paste(bmp, (2,2))
    epd.display(epd.getbuffer(image1))
    time.sleep(2)
    # Bug fix: this line was corrupted to `o("4.show time...")`; restore the
    # logging call so the script does not crash with NameError.
    logging.info("4.show time...")
    epd.init(epd.lut_partial_update)
    epd.Clear(0xFF)
    time_image = Image.new('1', (epd.height, epd.width), 255)
    time_draw = ImageDraw.Draw(time_image)
    num = 0
    while (True):
        # Redraw only the clock area, then push a partial refresh.
        time_draw.rectangle((120, 80, 220, 105), fill = 255)
        time_draw.text((120, 80), time.strftime('%H:%M:%S'), font = font24, fill = 0)
        epd.display(epd.getbuffer(time_image))
        num = num + 1
        if(num == 10):
            break
    logging.info("Clear...")
    epd.init(epd.lut_full_update)
    epd.Clear(0xFF)
    logging.info("Goto Sleep...")
    epd.sleep()
except IOError as e:
    logging.info(e)
except KeyboardInterrupt:
    # Release the hardware cleanly on Ctrl+C.
    logging.info("ctrl + c:")
    epd2in13.epdconfig.module_exit()
    exit()
| true | true |
f72cf7000c24d4d32e896071d41604079d19da89 | 3,666 | py | Python | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | 7 | 2019-03-27T17:25:41.000Z | 2022-03-31T03:55:29.000Z | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | 2 | 2019-05-09T16:09:45.000Z | 2021-01-04T03:55:09.000Z | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | null | null | null | """
Binomial distribution
---------------------
"""
import mpmath
from ..fun import logbinomial
__all__ = ['pmf', 'logpmf', 'cdf', 'sf', 'mean', 'var']
def _validate_np(n, p):
if p < 0 or p > 1:
raise ValueError('p must be in the range [0, 1]')
if n < 0:
raise ValueError('n must be a nonnegative integer.')
return
def pmf(k, n, p):
    """
    Probability mass function of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        # C(n, k) * p**k * (1-p)**(n-k), computed with extra precision.
        prob = mpmath.mpf(p)
        comb = mpmath.binomial(n, k)
        return comb * mpmath.power(prob, k) * mpmath.power(1 - prob, n - k)
def logpmf(k, n, p):
    """
    Natural log of the probability mass function of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        # log C(n, k) + k*log(p) + (n-k)*log1p(-p)
        log_comb = logbinomial(n, k)
        log_success = k*mpmath.log(p)
        log_failure = mpmath.fsum([n, -k])*mpmath.log1p(-p)
        return log_comb + log_success + log_failure
def cdf(k, n, p, method='incbeta'):
    """
    Cumulative distribution function of the binomial distribution.
    `method` must be either "sumpmf" or "incbeta". When `method` is "sumpmf",
    the CDF is computed with a simple sum of the PMF values. When `method`
    is "incbeta", the incomplete beta function is used. This method is
    generally faster than the "sumpmf" method, but for large values of k
    or n, the incomplete beta function of mpmath might fail.
    """
    _validate_np(n, p)
    if method not in ['sumpmf', 'incbeta']:
        # Bug fix: the message used to say "sum", which is not an accepted
        # value; the valid choices are "sumpmf" and "incbeta".
        raise ValueError('method must be "sumpmf" or "incbeta"')
    if method == 'incbeta':
        with mpmath.extradps(5):
            p = mpmath.mpf(p)
            # XXX For large values of k and/or n, betainc fails. The failure
            # occurs in one of the hypergeometric functions.
            return mpmath.betainc(n - k, k + 1, x1=0, x2=1 - p,
                                  regularized=True)
    else:
        # method is "sumpmf"
        with mpmath.extradps(5):
            c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
                             for t in range(k + 1)])
            return c
def sf(k, n, p, method='incbeta'):
    """
    Survival function of the binomial distribution.

    `method` must be either "sumpmf" or "incbeta". When `method` is "sumpmf",
    the survival function is computed with a simple sum of the PMF values.
    When `method` is "incbeta", the incomplete beta function is used. This
    method is generally faster than the "sumpmf" method, but for large values
    of k or n, the incomplete beta function of mpmath might fail.

    Raises
    ------
    ValueError
        If p is outside [0, 1], n is negative, or `method` is not one of
        "sumpmf" / "incbeta".
    """
    _validate_np(n, p)
    if method not in ['sumpmf', 'incbeta']:
        # BUG FIX: the message previously said "sum", which is not an
        # accepted value; the valid choices are "sumpmf" and "incbeta".
        raise ValueError('method must be "sumpmf" or "incbeta"')
    if method == 'incbeta':
        with mpmath.extradps(5):
            p = mpmath.mpf(p)
            # P(X > k) as a regularized incomplete beta integral over
            # [1 - p, 1].
            # XXX For large values of k and/or n, betainc fails. The failure
            # occurs in one of the hypergeometric functions.
            return mpmath.betainc(n - k, k + 1, x1=1-p, x2=1,
                                  regularized=True)
    else:
        # method is "sumpmf": accumulate exp(logpmf) for t = k+1..n.
        with mpmath.extradps(5):
            c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
                             for t in range(k + 1, n + 1)])
            return c
def mean(n, p):
    """
    Mean of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        # E[X] = n * p, computed with extra precision.
        return mpmath.mpf(n) * mpmath.mpf(p)
def var(n, p):
    """
    Variance of the binomial distribution.
    """
    _validate_np(n, p)
    with mpmath.extradps(5):
        # Var[X] = n * p * (1 - p), computed with extra precision.
        n = mpmath.mpf(n)
        p = mpmath.mpf(p)
        q = 1 - p
        return n * p * q
| 30.297521 | 78 | 0.563011 |
import mpmath
from ..fun import logbinomial
__all__ = ['pmf', 'logpmf', 'cdf', 'sf', 'mean', 'var']
def _validate_np(n, p):
    """Validate binomial parameters; raise ValueError on bad n or p."""
    if p < 0 or p > 1:
        raise ValueError('p must be in the range [0, 1]')
    if n < 0:
        raise ValueError('n must be a nonnegative integer.')
    return
def pmf(k, n, p):
    """Probability mass function of the binomial distribution."""
    _validate_np(n, p)
    with mpmath.extradps(5):
        p = mpmath.mpf(p)
        # C(n, k) * p**k * (1 - p)**(n - k)
        return (mpmath.binomial(n, k) *
                mpmath.power(p, k) *
                mpmath.power(1 - p, n - k))
def logpmf(k, n, p):
    """Natural log of the PMF of the binomial distribution."""
    _validate_np(n, p)
    with mpmath.extradps(5):
        # log C(n, k) + k*log(p) + (n - k)*log(1 - p)
        return (logbinomial(n, k)
                + k*mpmath.log(p)
                + mpmath.fsum([n, -k])*mpmath.log1p(-p))
def cdf(k, n, p, method='incbeta'):
    """Cumulative distribution function of the binomial distribution.

    `method` is "sumpmf" (direct sum of PMF values) or "incbeta"
    (regularized incomplete beta function; faster, but mpmath's betainc
    may fail for large k or n).
    """
    _validate_np(n, p)
    if method not in ['sumpmf', 'incbeta']:
        # NOTE(review): the message says "sum" but the accepted value is
        # "sumpmf" -- the message text should be fixed.
        raise ValueError('method must be "sum" or "incbeta"')
    if method == 'incbeta':
        with mpmath.extradps(5):
            p = mpmath.mpf(p)
            # P(X <= k) as a regularized incomplete beta integral.
            return mpmath.betainc(n - k, k + 1, x1=0, x2=1 - p,
                                  regularized=True)
    else:
        # "sumpmf": accumulate exp(logpmf) for t = 0..k.
        with mpmath.extradps(5):
            c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
                             for t in range(k + 1)])
            return c
def sf(k, n, p, method='incbeta'):
    """Survival function of the binomial distribution.

    `method` is "sumpmf" (direct sum of PMF values) or "incbeta"
    (regularized incomplete beta function; faster, but mpmath's betainc
    may fail for large k or n).
    """
    _validate_np(n, p)
    if method not in ['sumpmf', 'incbeta']:
        # NOTE(review): the message says "sum" but the accepted value is
        # "sumpmf" -- the message text should be fixed.
        raise ValueError('method must be "sum" or "incbeta"')
    if method == 'incbeta':
        with mpmath.extradps(5):
            p = mpmath.mpf(p)
            # P(X > k) as a regularized incomplete beta integral.
            return mpmath.betainc(n - k, k + 1, x1=1-p, x2=1,
                                  regularized=True)
    else:
        # "sumpmf": accumulate exp(logpmf) for t = k+1..n.
        with mpmath.extradps(5):
            c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
                             for t in range(k + 1, n + 1)])
            return c
def mean(n, p):
    """Mean of the binomial distribution: n * p."""
    _validate_np(n, p)
    with mpmath.extradps(5):
        n = mpmath.mpf(n)
        p = mpmath.mpf(p)
        return n*p
def var(n, p):
    """Variance of the binomial distribution: n * p * (1 - p)."""
    _validate_np(n, p)
    with mpmath.extradps(5):
        n = mpmath.mpf(n)
        p = mpmath.mpf(p)
        return n * p * (1 - p)
| true | true |
f72cf726678d2cd74a55d44450a33b0c8d9c834f | 792 | py | Python | pytezos/rpc/errors.py | arvidj/pytezos | a8545d9408f086eba91b4af7e12c488672144ff6 | [
"MIT"
] | 1 | 2020-08-11T02:31:24.000Z | 2020-08-11T02:31:24.000Z | pytezos/rpc/errors.py | arvidj/pytezos | a8545d9408f086eba91b4af7e12c488672144ff6 | [
"MIT"
] | null | null | null | pytezos/rpc/errors.py | arvidj/pytezos | a8545d9408f086eba91b4af7e12c488672144ff6 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from pytezos.rpc.node import RpcError
class MichelsonBadContractParameter(RpcError, error_id='michelson_v1.bad_contract_parameter'):
    """ Either no parameter was supplied to a contract with a non-unit parameter type, a non-unit parameter was passed
    to an account, or a parameter was supplied of the wrong type.
    """
class MichelsonBadReturn(RpcError, error_id='michelson_v1.bad_return'):
    """ Unexpected stack at the end of a lambda or script.
    """
class MichelsonRuntimeError(RpcError, error_id='michelson_v1'):
    """ Catch-all for ``michelson_v1`` errors.
    """
class TezArithmeticError(RpcError, error_id='tez'):
    """ Catch-all for ``tez`` errors.
    """
class MichelsonScriptRejected(RpcError, error_id='script_rejected'):
    """ A FAILWITH instruction was reached.
    """
| 28.285714 | 118 | 0.736111 | from pytezos.rpc.node import RpcError
class MichelsonBadContractParameter(RpcError, error_id='michelson_v1.bad_contract_parameter'):
    """Bad or missing parameter supplied to a contract or account."""
class MichelsonBadReturn(RpcError, error_id='michelson_v1.bad_return'):
    """Unexpected stack at the end of a lambda or script."""
class MichelsonRuntimeError(RpcError, error_id='michelson_v1'):
    """Catch-all for ``michelson_v1`` errors."""
class TezArithmeticError(RpcError, error_id='tez'):
    """Catch-all for ``tez`` errors."""
class MichelsonScriptRejected(RpcError, error_id='script_rejected'):
    """A FAILWITH instruction was reached."""
| true | true |
f72cf8174f5a2872015dadbef99938a1a8e72272 | 1,116 | py | Python | thingsboard_gateway/storage/event_storage_reader_pointer.py | xinge-ok/thingsboard-gateway | 6dab6030a6becf0ce9d34bc95a3a1f1e0838cb14 | [
"Apache-2.0"
] | 1 | 2020-02-24T09:08:35.000Z | 2020-02-24T09:08:35.000Z | thingsboard_gateway/storage/event_storage_reader_pointer.py | xinge-ok/thingsboard-gateway | 6dab6030a6becf0ce9d34bc95a3a1f1e0838cb14 | [
"Apache-2.0"
] | null | null | null | thingsboard_gateway/storage/event_storage_reader_pointer.py | xinge-ok/thingsboard-gateway | 6dab6030a6becf0ce9d34bc95a3a1f1e0838cb14 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2019. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class EventStorageReaderPointer:
    """Position marker inside the file-based event storage.

    A pointer is the pair (file, line): the name of a storage data file
    and a line offset within it.  Pointers are mutable, comparable and
    hashable; note that mutating a pointer that is used as a dict/set
    key changes its hash.
    """

    def __init__(self, file, line):
        self.file = file
        self.line = line

    def __eq__(self, other):
        # BUG FIX: previously comparing against a non-pointer object
        # raised AttributeError; returning NotImplemented lets Python
        # fall back to the reflected comparison (and ultimately False).
        if not isinstance(other, EventStorageReaderPointer):
            return NotImplemented
        return self.file == other.file and self.line == other.line

    def __hash__(self):
        return hash((self.file, self.line))

    def get_file(self):
        return self.file

    def get_line(self):
        return self.line

    def set_file(self, file):
        self.file = file

    def set_line(self, line):
        self.line = line
| 28.615385 | 78 | 0.662186 |
class EventStorageReaderPointer:
    """Pointer into the event storage: a (file, line) position."""
    def __init__(self, file, line):
        self.file = file
        self.line = line
    def __eq__(self, other):
        # Equal when both the file name and the line offset match.
        # NOTE(review): comparing against a non-pointer object raises
        # AttributeError rather than returning False.
        return self.file == other.file and self.line == other.line
    def __hash__(self):
        # Hash is derived from the mutable (file, line) pair.
        return hash((self.file, self.line))
    def get_file(self):
        return self.file
    def get_line(self):
        return self.line
    def set_file(self, file):
        self.file = file
    def set_line(self, line):
        self.line = line
| true | true |
f72cf8cdaafe9d7009fc1fbe038716d9fe13a281 | 81,150 | py | Python | pandas/tests/frame/test_analytics.py | dhimmel/pandas | 776fed3ab63d74ddef6e5af1a702b10c2a30bbb6 | [
"BSD-3-Clause"
] | 1 | 2018-08-01T05:39:55.000Z | 2018-08-01T05:39:55.000Z | pandas/tests/frame/test_analytics.py | dhimmel/pandas | 776fed3ab63d74ddef6e5af1a702b10c2a30bbb6 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/frame/test_analytics.py | dhimmel/pandas | 776fed3ab63d74ddef6e5af1a702b10c2a30bbb6 | [
"BSD-3-Clause"
] | 1 | 2019-05-26T12:57:52.000Z | 2019-05-26T12:57:52.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import warnings
from datetime import timedelta
import operator
import pytest
from string import ascii_lowercase
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange, PY35
from pandas import (compat, isna, notna, DataFrame, Series,
MultiIndex, date_range, Timestamp, Categorical,
_np_version_under1p12,
to_datetime, to_timedelta)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(TestData):
# ---------------------------------------------------------------------=
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('spearman')
    def _check_method(self, method='pearson', check_minp=False):
        # Shared helper for the corr() tests.
        #
        # check_minp=False: compare DataFrame.corr(method=...) against the
        # pairwise Series.corr of columns A and C.
        # check_minp=True: exercise the min_periods argument instead.
        # NOTE(review): on the min_periods branch the `method` argument is
        # ignored -- corr() is called with its default method.
        if not check_minp:
            correls = self.frame.corr(method=method)
            exp = self.frame['A'].corr(self.frame['C'], method=method)
            tm.assert_almost_equal(correls['A']['C'], exp)
        else:
            result = self.frame.corr(min_periods=len(self.frame) - 8)
            expected = self.frame.corr()
            # The caller injected NaNs into A and B, so with this high
            # min_periods threshold their pairwise correlation becomes NaN.
            expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan
            tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
# exclude non-numeric types
result = self.mixed_frame.corr()
expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
# RuntimeWarning
with warnings.catch_warnings(record=True):
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_cov(self):
# min_periods no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
tm.assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
assert isna(result.values).all()
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
tm.assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self):
a = self.tsframe
noise = Series(randn(len(a)), index=a.index)
b = self.tsframe.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self):
result = self.tsframe.corrwith(self.tsframe['A'])
expected = self.tsframe.apply(self.tsframe['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
tm.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
res = df.describe()
tm.assert_frame_equal(res, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(res) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
res = df.describe(include='all')
tm.assert_frame_equal(res, expected)
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
f = lambda s: notna(s).sum()
self._check_stat_op('count', f,
has_skipna=False,
has_numeric_only=True,
check_dtype=False,
check_dates=True)
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH #423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_nunique(self):
f = lambda s: len(algorithms.unique1d(s.dropna()))
self._check_stat_op('nunique', f, has_skipna=False,
check_dtype=False, check_dates=True)
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True,
skipna_alternative=np.nansum)
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum,
frame=self.mixed_float.astype('float32'),
has_numeric_only=True, check_dtype=False,
check_less_precise=True)
@pytest.mark.parametrize(
"method", ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH #676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
self._check_stat_op('product', np.prod)
def test_median(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
with warnings.catch_warnings(record=True):
self._check_stat_op('min', np.min, check_dates=True)
self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cummin = self.tsframe.cummin()
expected = self.tsframe.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = self.tsframe.cummin(axis=1)
expected = self.tsframe.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = self.tsframe.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(self.tsframe)
def test_cummax(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cummax = self.tsframe.cummax()
expected = self.tsframe.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = self.tsframe.cummax(axis=1)
expected = self.tsframe.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = self.tsframe.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(self.tsframe)
def test_max(self):
with warnings.catch_warnings(record=True):
self._check_stat_op('max', np.max, check_dates=True)
self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
def test_var_std(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
result = self.tsframe.std(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = self.tsframe.var(ddof=4)
expected = self.tsframe.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH #9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
pytest.raises(TypeError, lambda: getattr(df1, meth)(
axis=1, numeric_only=False))
pytest.raises(TypeError, lambda: getattr(df2, meth)(
axis=1, numeric_only=False))
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH 16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_cumsum(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cumsum = self.tsframe.cumsum()
expected = self.tsframe.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = self.tsframe.cumsum(axis=1)
expected = self.tsframe.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = self.tsframe.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(self.tsframe)
def test_cumprod(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cumprod = self.tsframe.cumprod()
expected = self.tsframe.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = self.tsframe.cumprod(axis=1)
expected = self.tsframe.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = self.tsframe.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(self.tsframe)
# ints
df = self.tsframe.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = self.tsframe.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.tsframe.sem(ddof=4)
expected = self.tsframe.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_skew(self):
from scipy.stats import skew
def alt(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', alt)
@td.skip_if_no_scipy
def test_kurt(self):
from scipy.stats import kurtosis
def alt(x):
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
    def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
                       has_numeric_only=False, check_dtype=True,
                       check_dates=False, check_less_precise=False,
                       skipna_alternative=None):
        """Exercise the reduction ``name`` and compare with ``alternative``.

        Parameters
        ----------
        name : str
            Name of the DataFrame reduction method under test ('sum', ...).
        alternative : callable
            Reference implementation applied to raw values.
        frame : DataFrame, optional
            Frame to test on; defaults to ``self.frame``.  NaNs are
            injected into the frame used, mutating it in place.
        has_skipna : bool
            Whether the method accepts a ``skipna`` argument.
        has_numeric_only : bool
            Whether the method accepts ``numeric_only``.
        check_dtype, check_dates, check_less_precise : bool
            Extra checks to run / comparison relaxations.
        skipna_alternative : callable, optional
            NaN-aware reference implementation (e.g. ``np.nansum``).
        """
        if frame is None:
            frame = self.frame
        # set some NAs
        frame.loc[5:10] = np.nan
        frame.loc[15:20, -2:] = np.nan
        f = getattr(frame, name)
        if check_dates:
            # Reductions over datetime columns must still return a Series.
            df = DataFrame({'b': date_range('1/1/2001', periods=2)})
            _f = getattr(df, name)
            result = _f()
            assert isinstance(result, Series)
            df['a'] = lrange(len(df))
            result = getattr(df, name)()
            assert isinstance(result, Series)
            assert len(result)
        if has_skipna:
            def wrapper(x):
                return alternative(x.values)
            skipna_wrapper = tm._make_skipna_wrapper(alternative,
                                                     skipna_alternative)
            # skipna=False must match the raw (non-NaN-aware) alternative.
            result0 = f(axis=0, skipna=False)
            result1 = f(axis=1, skipna=False)
            tm.assert_series_equal(result0, frame.apply(wrapper),
                                   check_dtype=check_dtype,
                                   check_less_precise=check_less_precise)
            # HACK: win32
            tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                                   check_dtype=False,
                                   check_less_precise=check_less_precise)
        else:
            skipna_wrapper = alternative
            wrapper = alternative
        # Default call (skipna on) must match the NaN-aware alternative.
        result0 = f(axis=0)
        result1 = f(axis=1)
        tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
                               check_dtype=check_dtype,
                               check_less_precise=check_less_precise)
        if name in ['sum', 'prod']:
            exp = frame.apply(skipna_wrapper, axis=1)
            tm.assert_series_equal(result1, exp, check_dtype=False,
                                   check_less_precise=check_less_precise)
        # check dtypes
        if check_dtype:
            lcd_dtype = frame.values.dtype
            assert lcd_dtype == result0.dtype
            assert lcd_dtype == result1.dtype
        # result = f(axis=1)
        # comp = frame.apply(alternative, axis=1).reindex(result.index)
        # assert_series_equal(result, comp)
        # bad axis
        tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
        # make sure works on mixed-type frame
        getattr(self.mixed_frame, name)(axis=0)
        getattr(self.mixed_frame, name)(axis=1)
        if has_numeric_only:
            getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
            getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
            getattr(self.frame, name)(axis=0, numeric_only=False)
            getattr(self.frame, name)(axis=1, numeric_only=False)
        # all NA case
        if has_skipna:
            # With every value NaN, sum/prod must return their identity
            # element (0 for sum, 1 for prod).
            all_na = self.frame * np.NaN
            r0 = getattr(all_na, name)(axis=0)
            r1 = getattr(all_na, name)(axis=1)
            if name in ['sum', 'prod']:
                unit = int(name == 'prod')
                expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
                tm.assert_series_equal(r0, expected)
                expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
                tm.assert_series_equal(r1, expected)
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not compat.PY3, reason="only PY3")
def test_mode_sortwarning(self):
    """mode(dropna=False) on unsortable values (NaN vs str) warns
    instead of raising."""
    frame = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
    expected = DataFrame({'A': ['a', np.nan]})

    # the mixed NaN/str mode result cannot be sorted internally, which
    # should surface as a UserWarning
    with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
        out = frame.mode(dropna=False)
        out = out.sort_values(by='A').reset_index(drop=True)

    tm.assert_frame_equal(out, expected)
def test_operators_timedelta64(self):
    """min/max/abs reductions on timedelta64 and mixed-dtype frames,
    plus timedelta column dtype preservation (GH 3106)."""
    from datetime import timedelta
    df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
                        B=date_range('2012-1-2', periods=3, freq='D'),
                        C=Timestamp('20120101') -
                        timedelta(minutes=5, seconds=5)))

    # both columns are increasing timedeltas (A is positive, B negative)
    diffs = DataFrame(dict(A=df['A'] - df['C'],
                           B=df['A'] - df['B']))

    # min
    result = diffs.min()
    assert result[0] == diffs.loc[0, 'A']
    assert result[1] == diffs.loc[0, 'B']

    result = diffs.min(axis=1)
    assert (result == diffs.loc[0, 'B']).all()

    # max
    result = diffs.max()
    assert result[0] == diffs.loc[2, 'A']
    assert result[1] == diffs.loc[2, 'B']

    result = diffs.max(axis=1)
    assert (result == diffs['A']).all()

    # abs
    result = diffs.abs()
    result2 = abs(diffs)
    expected = DataFrame(dict(A=df['A'] - df['C'],
                              B=df['B'] - df['A']))
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(result2, expected)

    # mixed frame
    mixed = diffs.copy()
    mixed['C'] = 'foo'
    mixed['D'] = 1
    mixed['E'] = 1.
    mixed['F'] = Timestamp('20130101')

    # results in an object array
    # NOTE(review): private pandas API — may move between versions
    from pandas.core.tools.timedeltas import (
        _coerce_scalar_to_timedelta_type as _coerce)

    result = mixed.min()
    expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
                       _coerce(timedelta(days=-1)),
                       'foo', 1, 1.0,
                       Timestamp('20130101')],
                      index=mixed.columns)
    tm.assert_series_equal(result, expected)

    # excludes numeric
    result = mixed.min(axis=1)
    expected = Series([1, 1, 1.], index=[0, 1, 2])
    tm.assert_series_equal(result, expected)

    # works when only those columns are selected
    result = mixed[['A', 'B']].min(1)
    expected = Series([timedelta(days=-1)] * 3)
    tm.assert_series_equal(result, expected)

    result = mixed[['A', 'B']].min()
    expected = Series([timedelta(seconds=5 * 60 + 5),
                       timedelta(days=-1)], index=['A', 'B'])
    tm.assert_series_equal(result, expected)

    # GH 3106: datetime subtraction yields timedelta64[ns] and the
    # dtype must survive block consolidation
    df = DataFrame({'time': date_range('20130102', periods=5),
                    'time2': date_range('20130105', periods=5)})
    df['off1'] = df['time2'] - df['time']
    assert df['off1'].dtype == 'timedelta64[ns]'

    df['off2'] = df['time'] - df['time2']
    df._consolidate_inplace()
    assert df['off1'].dtype == 'timedelta64[ns]'
    assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
    """Summing an empty frame yields empty Series along both axes."""
    col_sum = self.empty.sum(0)
    row_sum = self.empty.sum(1)
    for summed in (col_sum, row_sum):
        assert isinstance(summed, Series)
        assert len(summed) == 0
@pytest.mark.parametrize('method, unit', [
    ('sum', 0),
    ('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
    """sum/prod NaN handling via ``min_count``.

    ``unit`` is the reduction identity (0 for sum, 1 for prod).  With
    min_count=0 (the default) an all-NaN column reduces to the identity;
    with min_count >= the number of valid values missing, it becomes NaN.
    """
    idx = ['a', 'b', 'c']
    df = pd.DataFrame({"a": [unit, unit],
                       "b": [unit, np.nan],
                       "c": [np.nan, np.nan]})
    # The default: all-NaN column 'c' reduces to the identity.
    # BUG FIX: the original assigned the bound method without calling it
    # and never asserted, so the default path was silently untested.
    result = getattr(df, method)()
    expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
    tm.assert_series_equal(result, expected)

    # min_count=1: 'c' has no valid values -> NaN
    result = getattr(df, method)(min_count=1)
    expected = pd.Series([unit, unit, np.nan], index=idx)
    tm.assert_series_equal(result, expected)

    # min_count=0: same as the default
    result = getattr(df, method)(min_count=0)
    expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
    tm.assert_series_equal(result, expected)

    result = getattr(df.iloc[1:], method)(min_count=1)
    expected = pd.Series([unit, np.nan, np.nan], index=idx)
    tm.assert_series_equal(result, expected)

    # min_count > 1: columns with fewer valid values than min_count -> NaN.
    # BUG FIX: the original built `expected` from `result` itself, making
    # both assertions tautological; use explicit expected values instead.
    df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
    result = getattr(df, method)(min_count=5)
    expected = pd.Series([unit, unit], index=['A', 'B'], dtype='float64')
    tm.assert_series_equal(result, expected)

    result = getattr(df, method)(min_count=6)
    expected = pd.Series([unit, np.nan], index=['A', 'B'])
    tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self):
    """Summing a frame of timedelta objects must not blow up."""
    ints = self.frame.values.astype(int)
    as_int = DataFrame(ints, index=self.frame.index,
                       columns=self.frame.columns)
    # multiplying by a timedelta produces a timedelta frame; sum() just
    # has to run without raising (smoke test, result unused)
    (as_int * timedelta(1)).sum()
def test_sum_bool(self):
    """Summing a boolean frame works along both axes (bug report)."""
    nan_mask = np.isnan(self.frame)
    nan_mask.sum(1)
    nan_mask.sum(0)
def test_mean_corner(self):
    """mean/sum corner cases on mixed-dtype and boolean columns."""
    # unit test when have object data
    the_mean = self.mixed_frame.mean(axis=0)
    the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
    tm.assert_index_equal(the_sum.index, the_mean.index)
    # object columns are dropped, so fewer entries than columns
    assert len(the_mean.index) < len(self.mixed_frame.columns)

    # xs sum mixed type, just want to know it works...
    the_mean = self.mixed_frame.mean(axis=1)
    the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
    tm.assert_index_equal(the_sum.index, the_mean.index)

    # take mean of boolean column: bools are treated as 0/1 numerics
    self.frame['bool'] = self.frame['A'] > 0
    means = self.frame.mean(0)
    assert means['bool'] == self.frame['bool'].values.mean()
def test_stats_mixed_type(self):
    """Row-wise std/var/mean/skew on a mixed-dtype frame don't blow up."""
    for stat in ('std', 'var', 'mean', 'skew'):
        getattr(self.mixed_frame, stat)(1)
def test_median_corner(self):
    """Exercise median through _check_stat_op on the int frame."""
    def nan_median(values):
        # reference implementation: propagate NaN like skipna=False
        return np.nan if isna(values).any() else np.median(values)

    self._check_stat_op('median', nan_median, frame=self.intframe,
                        check_dtype=False, check_dates=True)
# Miscellanea
def test_count_objects(self):
    """count() agrees between two frames built from the same series."""
    left = DataFrame(self.mixed_frame._series)
    right = DataFrame(self.mixed_frame._series)
    tm.assert_series_equal(left.count(), right.count())
    tm.assert_series_equal(left.count(1), right.count(1))
def test_cumsum_corner(self):
    """cumsum on a plain integer frame runs without error."""
    frame = DataFrame(np.arange(20).reshape(4, 5),
                      index=lrange(4), columns=lrange(5))
    # smoke test only; result intentionally unused (see ?(wesm))
    frame.cumsum()
def test_sum_bools(self):
    """Row sum of an all-NaN mask counts every column."""
    frame = DataFrame(index=lrange(1), columns=lrange(10))
    mask = isna(frame)
    assert mask.sum(axis=1)[0] == 10
# Index of max / min
def test_idxmin(self):
    """idxmin matches a per-column/row Series.idxmin apply."""
    frame = self.frame
    frame.loc[5:10] = np.nan
    frame.loc[15:20, -2:] = np.nan
    for df in [frame, self.intframe]:
        for axis in [0, 1]:
            for skipna in [True, False]:
                actual = df.idxmin(axis=axis, skipna=skipna)
                via_apply = df.apply(Series.idxmin, axis=axis,
                                     skipna=skipna)
                tm.assert_series_equal(actual, via_apply)

    # invalid axis
    pytest.raises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
    """idxmax matches a per-column/row Series.idxmax apply."""
    frame = self.frame
    frame.loc[5:10] = np.nan
    frame.loc[15:20, -2:] = np.nan
    for df in [frame, self.intframe]:
        for axis in [0, 1]:
            for skipna in [True, False]:
                actual = df.idxmax(axis=axis, skipna=skipna)
                via_apply = df.apply(Series.idxmax, axis=axis,
                                     skipna=skipna)
                tm.assert_series_equal(actual, via_apply)

    # invalid axis
    pytest.raises(ValueError, frame.idxmax, axis=2)
# ----------------------------------------------------------------------
# Logical reductions
def test_any_all(self):
    """any/all reductions agree with their numpy counterparts."""
    for name, func in (('any', np.any), ('all', np.all)):
        self._check_bool_op(name, func, has_skipna=True,
                            has_bool_only=True)
def test_any_all_extra(self):
    """any/all along axis=1, with bool_only, and with axis=None
    (which reduces the whole frame to a scalar)."""
    df = DataFrame({
        'A': [True, False, False],
        'B': [True, True, False],
        'C': [True, True, True],
    }, index=['a', 'b', 'c'])
    result = df[['A', 'B']].any(1)
    expected = Series([True, True, False], index=['a', 'b', 'c'])
    tm.assert_series_equal(result, expected)

    # bool_only=True is a no-op here since every column is boolean
    result = df[['A', 'B']].any(1, bool_only=True)
    tm.assert_series_equal(result, expected)

    result = df.all(1)
    expected = Series([True, False, False], index=['a', 'b', 'c'])
    tm.assert_series_equal(result, expected)

    result = df.all(1, bool_only=True)
    tm.assert_series_equal(result, expected)

    # Axis is None: reduce across both axes to a single scalar
    result = df.all(axis=None).item()
    assert result is False

    result = df.any(axis=None).item()
    assert result is True

    result = df[['C']].all(axis=None).item()
    assert result is True

    # skip pathological failure cases
    # class CantNonzero(object):

    #     def __nonzero__(self):
    #         raise ValueError

    # df[4] = CantNonzero()

    # it works!
    # df.any(1)
    # df.all(1)
    # df.any(1, bool_only=True)
    # df.all(1, bool_only=True)

    # df[4][4] = np.nan

    # df.any(1)
    # df.all(1)
    # df.any(1, bool_only=True)
    # df.all(1, bool_only=True)
@pytest.mark.parametrize('func, data, expected', [
    # empty frames: all() is vacuously True, any() is False
    (np.any, {}, False),
    (np.all, {}, True),
    (np.any, {'A': []}, False),
    (np.all, {'A': []}, True),
    (np.any, {'A': [False, False]}, False),
    (np.all, {'A': [False, False]}, False),
    (np.any, {'A': [True, False]}, True),
    (np.all, {'A': [True, False]}, False),
    (np.any, {'A': [True, True]}, True),
    (np.all, {'A': [True, True]}, True),

    (np.any, {'A': [False], 'B': [False]}, False),
    (np.all, {'A': [False], 'B': [False]}, False),

    (np.any, {'A': [False, False], 'B': [False, True]}, True),
    (np.all, {'A': [False, False], 'B': [False, True]}, False),

    # other types
    (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
    (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
    (np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
    (np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
    pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    (np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
    (np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
    (np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
    (np.any, {'A': pd.Series([1, 2], dtype='category')}, True),

    # # Mix
    # GH-21484
    # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
    #           'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
    """np.any/np.all over a whole DataFrame return a numpy scalar.

    https://github.com/pandas-dev/pandas/issues/19976
    """
    data = DataFrame(data)
    result = func(data)
    assert isinstance(result, np.bool_)
    assert result.item() is expected

    # method version: axis=None reduces across both axes
    result = getattr(DataFrame(data), func.__name__)(axis=None)
    assert isinstance(result, np.bool_)
    assert result.item() is expected
def test_any_all_object(self):
    """np.all/np.any on an empty object frame (GH#19976):
    all() of nothing is vacuously True, any() is False."""
    empty = DataFrame(columns=['a', 'b'])
    assert np.all(empty).item() is True
    assert np.any(empty).item() is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
    """axis=None combined with level= must raise — pandas cannot
    aggregate over both at once."""
    idx = MultiIndex.from_product([['A', 'B'], ['a', 'b']],
                                  names=['out', 'in'])
    df = DataFrame({"A": 1}, index=idx)
    msg = "Must specify 'axis' when aggregating by level."
    with tm.assert_raises_regex(ValueError, msg):
        getattr(df, method)(axis=None, level='out')
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
                   has_bool_only=False):
    """Shared driver for boolean reductions (any/all).

    Parameters
    ----------
    name : str
        Name of the reduction method to fetch off the frame.
    alternative : callable
        Reference implementation (e.g. ``np.any``) compared against.
    frame : DataFrame, optional
        Frame to exercise; defaults to ``self.frame > 0`` with NA
        blocks injected.
    has_skipna : bool, default True
        Whether the method accepts ``skipna``.
    has_bool_only : bool, default False
        Whether the method accepts ``bool_only``.
    """
    if frame is None:
        frame = self.frame > 0
        # set some NAs (cast to object first so NaN can live in an
        # otherwise-boolean frame)
        frame = DataFrame(frame.values.astype(object), frame.index,
                          frame.columns)
        frame.loc[5:10] = np.nan
        frame.loc[15:20, -2:] = np.nan

    f = getattr(frame, name)

    if has_skipna:
        def skipna_wrapper(x):
            nona = x.dropna().values
            return alternative(nona)

        def wrapper(x):
            return alternative(x.values)

        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(result0, frame.apply(wrapper))
        tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                               check_dtype=False)  # HACK: win32
    else:
        skipna_wrapper = alternative
        wrapper = alternative

    result0 = f(axis=0)
    result1 = f(axis=1)
    tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
    tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                           check_dtype=False)

    # result = f(axis=1)
    # comp = frame.apply(alternative, axis=1).reindex(result.index)
    # assert_series_equal(result, comp)

    # bad axis
    pytest.raises(ValueError, f, axis=2)

    # make sure works on mixed-type frame
    mixed = self.mixed_frame
    mixed['_bool_'] = np.random.randn(len(mixed)) > 0
    getattr(mixed, name)(axis=0)
    getattr(mixed, name)(axis=1)

    # a column whose truthiness raises must be excluded by bool_only
    class NonzeroFail(object):

        def __nonzero__(self):
            raise ValueError

    mixed['_nonzero_fail_'] = NonzeroFail()

    if has_bool_only:
        getattr(mixed, name)(axis=0, bool_only=True)
        getattr(mixed, name)(axis=1, bool_only=True)
        getattr(frame, name)(axis=0, bool_only=False)
        getattr(frame, name)(axis=1, bool_only=False)

    # all NA case: any() of nothing is False, all() of nothing is True
    if has_skipna:
        all_na = frame * np.NaN
        r0 = getattr(all_na, name)(axis=0)
        r1 = getattr(all_na, name)(axis=1)
        if name == 'any':
            assert not r0.any()
            assert not r1.any()
        else:
            assert r0.all()
            assert r1.all()
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
    """isin with a dict restricts matching to the listed columns."""
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    values = {'A': ['a']}

    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    tm.assert_frame_equal(df.isin(values), expected)

    # non unique columns: both 'A' columns are matched against values['A']
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    df.columns = ['A', 'A']
    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    tm.assert_frame_equal(df.isin(values), expected)
def test_isin_with_string_scalar(self):
    """A bare string is not a valid isin argument (GH4763) — strings
    are iterable but matching characters would be surprising."""
    df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                    'ids2': ['a', 'n', 'c', 'n']},
                   index=['foo', 'bar', 'baz', 'qux'])
    for bad in ('a', 'aaa'):
        with pytest.raises(TypeError):
            df.isin(bad)
def test_isin_df(self):
    """Frame-vs-frame isin matches values only where both row and
    column labels align."""
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
    expected = DataFrame(False, df1.index, df1.columns)
    result = df1.isin(df2)
    # NOTE(review): chained assignment — relies on the column accessor
    # returning a view of `expected`; verify under copy-on-write.
    expected['A'].loc[[1, 3]] = True
    expected['B'].loc[[0, 2]] = True
    tm.assert_frame_equal(result, expected)

    # partial overlapping columns: non-shared column 'B' is all-False
    df2.columns = ['A', 'C']
    result = df1.isin(df2)
    expected['B'] = False
    tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
    """isin against a frame with duplicate labels must raise."""
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})

    # just cols duped
    dup_cols = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                         columns=['B', 'B'])
    with pytest.raises(ValueError):
        df1.isin(dup_cols)

    # just index duped
    dup_idx = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                        columns=['A', 'B'], index=[0, 0, 1, 1])
    with pytest.raises(ValueError):
        df1.isin(dup_idx)

    # duplicated on both axes
    dup_idx.columns = ['B', 'B']
    with pytest.raises(ValueError):
        df1.isin(dup_idx)
def test_isin_dupe_self(self):
    """isin works when *self* carries duplicate column labels."""
    other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
    df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])

    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected.loc[0] = True
    expected.iloc[1, 1] = True
    tm.assert_frame_equal(df.isin(other), expected)
def test_isin_against_series(self):
    """isin with a Series matches values at the same index label only."""
    df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
                      index=['a', 'b', 'c', 'd'])
    s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])

    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected['A'].loc['a'] = True
    expected.loc['d'] = True
    tm.assert_frame_equal(df.isin(s), expected)
def test_isin_multiIndex(self):
    """isin on a MultiIndex-ed frame: no matches against a plain-index
    frame, elementwise matches once the indexes align."""
    idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
                                  (0, 'b', 'bar'), (0, 'b', 'baz'),
                                  (2, 'a', 'foo'), (2, 'a', 'bar'),
                                  (2, 'c', 'bar'), (2, 'c', 'baz'),
                                  (1, 'b', 'foo'), (1, 'b', 'bar'),
                                  (1, 'c', 'bar'), (1, 'c', 'baz')])
    df1 = DataFrame({'A': np.ones(12),
                     'B': np.zeros(12)}, index=idx)
    df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                     'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
    # against regular index: nothing aligns, so everything is False
    expected = DataFrame(False, index=df1.index, columns=df1.columns)
    result = df1.isin(df2)
    tm.assert_frame_equal(result, expected)

    # once df2 shares the MultiIndex, A matches where df2.A is 1
    # (df1.A is all ones) and B where df2.B is 0 (df1.B is all zeros)
    df2.index = idx
    expected = df2.values.astype(np.bool)
    expected[:, 1] = ~expected[:, 1]
    expected = DataFrame(expected, columns=['A', 'B'], index=idx)
    result = df1.isin(df2)
    tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# Rounding
def test_round(self):
    """DataFrame.round with int/dict/Series decimals and bad inputs.

    GH 2665 (empty frame), GH 11986 (named columns).  Dict/Series
    ``decimals`` may cover a subset of columns; non-integer entries
    raise TypeError.
    """
    # Test that rounding an empty DataFrame does nothing
    df = DataFrame()
    tm.assert_frame_equal(df, df.round())

    # Here's the test frame we'll be working with
    df = DataFrame({'col1': [1.123, 2.123, 3.123],
                    'col2': [1.234, 2.234, 3.234]})

    # Default round to integer (i.e. decimals=0)
    expected_rounded = DataFrame(
        {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
    tm.assert_frame_equal(df.round(), expected_rounded)

    # Round with an integer
    decimals = 2
    expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
                                  'col2': [1.23, 2.23, 3.23]})
    tm.assert_frame_equal(df.round(decimals), expected_rounded)

    # This should also work with np.round (since np.round dispatches to
    # df.round)
    tm.assert_frame_equal(np.round(df, decimals), expected_rounded)

    # Round with a list is not allowed
    round_list = [1, 2]
    with pytest.raises(TypeError):
        df.round(round_list)

    # Round with a dictionary
    expected_rounded = DataFrame(
        {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
    round_dict = {'col1': 1, 'col2': 2}
    tm.assert_frame_equal(df.round(round_dict), expected_rounded)

    # Incomplete dict: unlisted columns are left untouched
    expected_partially_rounded = DataFrame(
        {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
    partial_round_dict = {'col2': 1}
    tm.assert_frame_equal(df.round(partial_round_dict),
                          expected_partially_rounded)

    # Dict with unknown elements: extra keys are simply ignored
    wrong_round_dict = {'col3': 2, 'col2': 1}
    tm.assert_frame_equal(df.round(wrong_round_dict),
                          expected_partially_rounded)

    # float input to `decimals`
    non_int_round_dict = {'col1': 1, 'col2': 0.5}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)

    # String input
    non_int_round_dict = {'col1': 1, 'col2': 'foo'}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)

    non_int_round_Series = Series(non_int_round_dict)
    with pytest.raises(TypeError):
        df.round(non_int_round_Series)

    # List input
    non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)

    non_int_round_Series = Series(non_int_round_dict)
    with pytest.raises(TypeError):
        df.round(non_int_round_Series)
    # NOTE: the original repeated the Series check above twice more
    # verbatim ("Non integer Series inputs"); the exact duplicates were
    # redundant and have been removed.

    # Negative numbers round to the left of the decimal point
    negative_round_dict = {'col1': -1, 'col2': -2}
    big_df = df * 100
    expected_neg_rounded = DataFrame(
        {'col1': [110., 210, 310], 'col2': [100., 200, 300]})
    tm.assert_frame_equal(big_df.round(negative_round_dict),
                          expected_neg_rounded)

    # nan in Series round
    nan_round_Series = Series({'col1': nan, 'col2': 1})

    # TODO(wesm): unused?
    expected_nan_round = DataFrame({  # noqa
        'col1': [1.123, 2.123, 3.123],
        'col2': [1.2, 2.2, 3.2]})

    with pytest.raises(TypeError):
        df.round(nan_round_Series)

    # Make sure this doesn't break existing Series.round
    tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])

    # named columns
    # GH 11986
    decimals = 2
    expected_rounded = DataFrame(
        {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
    df.columns.name = "cols"
    expected_rounded.columns.name = "cols"
    tm.assert_frame_equal(df.round(decimals), expected_rounded)

    # interaction of named columns & series
    tm.assert_series_equal(df['col1'].round(decimals),
                           expected_rounded['col1'])
    tm.assert_series_equal(df.round(decimals)['col1'],
                           expected_rounded['col1'])
def test_numpy_round(self):
    """np.round dispatches to DataFrame.round; out= is rejected.

    See gh-12600.
    """
    frame = DataFrame([[1.53, 1.36], [0.06, 7.01]])
    rounded = np.round(frame, decimals=0)
    tm.assert_frame_equal(rounded, DataFrame([[2., 1.], [0., 7.]]))

    # numpy's out= parameter cannot be honored by pandas
    with tm.assert_raises_regex(ValueError,
                                "the 'out' parameter is not supported"):
        np.round(frame, decimals=0, out=frame)
def test_round_mixed_type(self):
# GH11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
round_0 = DataFrame({'col1': [1., 2., 3., 4.],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
tm.assert_frame_equal(df.round(), round_0)
tm.assert_frame_equal(df.round(1), df)
tm.assert_frame_equal(df.round({'col1': 1}), df)
tm.assert_frame_equal(df.round({'col1': 0}), round_0)
tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
tm.assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
    """round on a frame with duplicated column labels (GH11611)."""
    base = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
                        index=['first', 'second', 'third'])
    doubled = pd.concat((base, base), axis=1)
    tm.assert_index_equal(doubled.round().index, doubled.index)

    # a decimals Series with duplicate labels is rejected
    dup_decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
    pytest.raises(ValueError, base.round, dup_decimals)
def test_built_in_round(self):
    """builtin round() works on DataFrames under PY3 (GH11763)."""
    if not compat.PY3:
        pytest.skip("build in round cannot be overridden "
                    "prior to Python 3")

    df = DataFrame(
        {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
    # default rounds to integer precision (decimals=0)
    expected = DataFrame(
        {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
    tm.assert_frame_equal(round(df), expected)
def test_pct_change(self):
    """pct_change with fill_method='pad' matches a manual ffill/shift
    reference along both axes (GH 11150)."""
    pnl = DataFrame([np.arange(0, 40, 10)] * 3).astype(np.float64)
    pnl.iat[1, 0] = np.nan
    pnl.iat[1, 1] = np.nan
    pnl.iat[2, 3] = 60

    for axis in range(2):
        filled = pnl.ffill(axis=axis)
        expected = filled / filled.shift(axis=axis) - 1
        result = pnl.pct_change(axis=axis, fill_method='pad')
        tm.assert_frame_equal(result, expected)
# Clip
def test_clip(self):
    """clip_upper/clip_lower/clip bound values and never mutate the
    source frame."""
    median = self.frame.median().median()
    original = self.frame.copy()

    assert not (self.frame.clip_upper(median).values > median).any()
    assert not (self.frame.clip_lower(median).values < median).any()
    assert not (self.frame.clip(upper=median,
                                lower=median).values != median).any()

    # Verify that self.frame was not changed inplace
    assert (self.frame.values == original.values).all()
def test_inplace_clip(self):
    """inplace=True variants of clip_upper/clip_lower/clip (GH #15388)."""
    median = self.frame.median().median()

    capped = self.frame.copy()
    capped.clip_upper(median, inplace=True)
    assert not (capped.values > median).any()

    floored = self.frame.copy()
    floored.clip_lower(median, inplace=True)
    assert not (floored.values < median).any()

    pinned = self.frame.copy()
    pinned.clip(upper=median, lower=median, inplace=True)
    assert not (pinned.values != median).any()
def test_dataframe_clip(self):
    """Scalar clip bounds values regardless of argument order
    (GH #2747 — the (1, -1) case exercises swapped bounds)."""
    df = DataFrame(np.random.randn(1000, 2))

    for lb, ub in [(-1, 1), (1, -1)]:
        clipped = df.clip(lb, ub)
        lb, ub = min(lb, ub), max(ub, lb)
        below = df.values <= lb
        above = df.values >= ub
        inside = ~below & ~above
        assert (clipped.values[below] == lb).all()
        assert (clipped.values[above] == ub).all()
        assert (clipped.values[inside] == df.values[inside]).all()
def test_clip_mixed_numeric(self):
# TODO(jreback)
# clip on mixed integer or floats
# with integer clippers coerces to float
df = DataFrame({'A': [1, 2, 3],
'B': [1., np.nan, 3.]})
result = df.clip(1, 2)
expected = DataFrame({'A': [1, 2, 2.],
'B': [1., np.nan, 2.]})
tm.assert_frame_equal(result, expected, check_like=True)
@pytest.mark.parametrize("inplace", [True, False])
def test_clip_against_series(self, inplace):
# GH #6966
df = DataFrame(np.random.randn(1000, 2))
lb = Series(np.random.randn(1000))
ub = lb + 1
original = df.copy()
clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
if inplace:
clipped_df = df
for i in range(2):
lb_mask = original.iloc[:, i] <= lb
ub_mask = original.iloc[:, i] >= ub
mask = ~lb_mask & ~ub_mask
result = clipped_df.loc[lb_mask, i]
tm.assert_series_equal(result, lb[lb_mask], check_names=False)
assert result.name == i
result = clipped_df.loc[ub_mask, i]
tm.assert_series_equal(result, ub[ub_mask], check_names=False)
assert result.name == i
tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
@pytest.mark.parametrize("axis,res", [
(0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
(1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
])
def test_clip_against_list_like(self, inplace, lower, axis, res):
# GH #15390
original = self.simple.copy(deep=True)
result = original.clip(lower=lower, upper=[5, 6, 7],
axis=axis, inplace=inplace)
expected = pd.DataFrame(res,
columns=original.columns,
index=original.index)
if inplace:
result = original
tm.assert_frame_equal(result, expected, check_exact=True)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_clip_against_frame(self, axis):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub, axis=axis)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
tm.assert_frame_equal(clipped_df[mask], df[mask])
def test_clip_with_na_args(self):
    """Should process np.nan argument as None """
    # GH # 17276: scalar NaN thresholds mean "no clipping"
    tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)
    tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),
                          self.frame)

    # GH #19992: NaN entries inside list-like thresholds propagate NaN
    # into the corresponding positions of the result
    df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
                    'col_2': [7, 8, 9]})

    per_row = df.clip(lower=[4, 5, np.nan], axis=0)
    tm.assert_frame_equal(
        per_row,
        DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],
                   'col_2': [7, 8, np.nan]}))

    per_col = df.clip(lower=[4, 5, np.nan], axis=1)
    tm.assert_frame_equal(
        per_col,
        DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],
                   'col_2': [np.nan, np.nan, np.nan]}))
# Matrix-like
def test_dot(self):
    """DataFrame.dot with frame, series and array operands, including
    index alignment and shape-mismatch errors."""
    a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
                  columns=['p', 'q', 'r', 's'])
    b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
                  columns=['one', 'two'])

    result = a.dot(b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_frame_equal(result, expected)

    # Check alignment: a reversed-index operand must be realigned before
    # the product.  BUG FIX: the original computed ``a.dot(b)`` here,
    # leaving ``b1`` unused, so the alignment path was never exercised.
    b1 = b.reindex(index=reversed(b.index))
    result = a.dot(b1)
    tm.assert_frame_equal(result, expected)

    # Check series argument (result name is dropped)
    result = a.dot(b['one'])
    tm.assert_series_equal(result, expected['one'], check_names=False)
    assert result.name is None

    result = a.dot(b1['one'])
    tm.assert_series_equal(result, expected['one'], check_names=False)
    assert result.name is None

    # can pass correct-length arrays
    row = a.iloc[0].values

    result = a.dot(row)
    exp = a.dot(a.iloc[0])
    tm.assert_series_equal(result, exp)

    with tm.assert_raises_regex(ValueError,
                                'Dot product shape mismatch'):
        a.dot(row[:-1])

    a = np.random.rand(1, 5)
    b = np.random.rand(5, 1)
    A = DataFrame(a)

    # TODO(wesm): unused
    B = DataFrame(b)  # noqa

    # it works
    result = A.dot(b)

    # unaligned operands raise
    df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
    df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])

    with tm.assert_raises_regex(ValueError, 'aligned'):
        df.dot(df2)
@pytest.mark.skipif(not PY35,
                    reason='matmul supported for Python>=3.5')
@pytest.mark.xfail(
    _np_version_under1p12,
    reason="unpredictable return types under numpy < 1.12")
def test_matmul(self):
    """The @ operator on DataFrame against frames, series, arrays and
    nested lists (matmul test is for GH #10259)."""
    a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
                  columns=['p', 'q', 'r', 's'])
    b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
                  columns=['one', 'two'])

    # DataFrame @ DataFrame
    result = operator.matmul(a, b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_frame_equal(result, expected)

    # DataFrame @ Series
    result = operator.matmul(a, b.one)
    expected = Series(np.dot(a.values, b.one.values),
                      index=['a', 'b', 'c'])
    tm.assert_series_equal(result, expected)

    # np.array @ DataFrame
    result = operator.matmul(a.values, b)
    expected = np.dot(a.values, b.values)
    tm.assert_almost_equal(result, expected)

    # nested list @ DataFrame (__rmatmul__)
    result = operator.matmul(a.values.tolist(), b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_almost_equal(result.values, expected.values)

    # mixed dtype DataFrame @ DataFrame
    a['q'] = a.q.round().astype(int)
    result = operator.matmul(a, b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_frame_equal(result, expected)

    # different dtypes DataFrame @ DataFrame
    a = a.astype(int)
    result = operator.matmul(a, b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_frame_equal(result, expected)

    # unaligned operands raise
    df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
    df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])

    with tm.assert_raises_regex(ValueError, 'aligned'):
        operator.matmul(df, df2)
@pytest.fixture
def df_duplicates():
    """Frame with duplicate index labels and an unsorted tie-break
    column, for nlargest/nsmallest duplicate handling."""
    data = {'a': [1, 2, 3, 4, 4],
            'b': [1, 1, 1, 1, 1],
            'c': [0, 1, 2, 5, 4]}
    return pd.DataFrame(data, index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
    """Frame mixing int, string and float columns for nselect tests."""
    return pd.DataFrame({
        'a': np.random.permutation(10),
        'b': list(ascii_lowercase[:10]),
        'c': np.random.permutation(10).astype('float64'),
    })
@pytest.fixture
def df_main_dtypes():
    """One column per major dtype, for nselect dtype-support checks."""
    columns = ['group', 'int', 'float', 'string',
               'category_string', 'category_int',
               'datetime', 'datetimetz', 'timedelta']
    data = {
        'group': [1, 1, 2],
        'int': [1, 2, 3],
        'float': [4., 5., 6.],
        'string': list('abc'),
        'category_string': pd.Series(list('abc')).astype('category'),
        'category_int': [7, 8, 9],
        'datetime': pd.date_range('20130101', periods=3),
        'datetimetz': pd.date_range('20130101', periods=3,
                                    tz='US/Eastern'),
        'timedelta': pd.timedelta_range('1 s', periods=3, freq='s'),
    }
    return pd.DataFrame(data, columns=columns)
class TestNLargestNSmallest(object):
    """Tests for DataFrame.nlargest / DataFrame.nsmallest."""

    # error raised when an ordering column has a dtype nselect can't handle
    dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
                                "use method {method!r} with this dtype")

    # ----------------------------------------------------------------------
    # Top / bottom
    @pytest.mark.parametrize('order', [
        ['a'],
        ['c'],
        ['a', 'b'],
        ['a', 'c'],
        ['b', 'a'],
        ['b', 'c'],
        ['a', 'b', 'c'],
        ['c', 'a', 'b'],
        ['c', 'b', 'a'],
        ['b', 'c', 'a'],
        ['b', 'a', 'c'],
        # dups!
        ['b', 'c', 'c']])
    @pytest.mark.parametrize('n', range(1, 11))
    def test_n(self, df_strings, nselect_method, n, order):
        """nselect on an object column raises; otherwise matches sort+head."""
        # GH10393
        df = df_strings
        if 'b' in order:
            # 'b' is an object column -> TypeError expected
            error_msg = self.dtype_error_msg_template.format(
                column='b', method=nselect_method, dtype='object')
            with tm.assert_raises_regex(TypeError, error_msg):
                getattr(df, nselect_method)(n, order)
        else:
            ascending = nselect_method == 'nsmallest'
            result = getattr(df, nselect_method)(n, order)
            expected = df.sort_values(order, ascending=ascending).head(n)
            tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize('columns', [
        ('group', 'category_string'), ('group', 'string')])
    def test_n_error(self, df_main_dtypes, nselect_method, columns):
        """Categorical/string ordering columns raise a descriptive TypeError."""
        df = df_main_dtypes
        col = columns[1]
        error_msg = self.dtype_error_msg_template.format(
            column=col, method=nselect_method, dtype=df[col].dtype)
        # escape some characters that may be in the repr
        error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)")
                     .replace("[", "\\[").replace("]", "\\]"))
        with tm.assert_raises_regex(TypeError, error_msg):
            getattr(df, nselect_method)(2, columns)

    def test_n_all_dtypes(self, df_main_dtypes):
        """Smoke test: every supported dtype works as an ordering column."""
        df = df_main_dtypes
        df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
        df.nlargest(2, list(set(df) - {'category_string', 'string'}))

    def test_n_identical_values(self):
        """All-equal ordering column: first n rows win."""
        # GH15297
        df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})

        result = df.nlargest(3, 'a')
        expected = pd.DataFrame(
            {'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
        )
        tm.assert_frame_equal(result, expected)

        result = df.nsmallest(3, 'a')
        expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize('order', [
        ['a', 'b', 'c'],
        ['c', 'b', 'a'],
        ['a'],
        ['b'],
        ['a', 'b'],
        ['c', 'b']])
    @pytest.mark.parametrize('n', range(1, 6))
    def test_n_duplicate_index(self, df_duplicates, n, order):
        """Duplicated index labels do not break nselect."""
        # GH 13412
        df = df_duplicates
        result = df.nsmallest(n, order)
        expected = df.sort_values(order).head(n)
        tm.assert_frame_equal(result, expected)
        result = df.nlargest(n, order)
        expected = df.sort_values(order, ascending=False).head(n)
        tm.assert_frame_equal(result, expected)

    def test_duplicate_keep_all_ties(self):
        """keep='all' retains every row tied with the nth value."""
        # see gh-16818
        df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],
                           'b': [10, 9, 8, 7, 5, 50, 10, 20]})
        result = df.nlargest(4, 'a', keep='all')
        expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,
                                       5: 3, 6: 3, 7: 3},
                                 'b': {0: 10, 1: 9, 2: 8, 4: 5,
                                       5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)

        result = df.nsmallest(2, 'a', keep='all')
        expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
                                 'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)

    def test_series_broadcasting(self):
        """No numpy warnings from Series-vs-frame ops with NaNs present."""
        # smoke test for numpy warnings
        # GH 16378, GH 16306
        df = DataFrame([1.0, 1.0, 1.0])
        df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
        s = Series([1, 1, 1])
        s_nan = Series([np.nan, np.nan, 1])

        with tm.assert_produces_warning(None):
            df_nan.clip_lower(s, axis=0)
            for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
                getattr(df, op)(s_nan, axis=0)

    def test_series_nat_conversion(self):
        """rank() must not mutate the frame it is called on."""
        # GH 18521
        # Check rank does not mutate DataFrame
        df = DataFrame(np.random.randn(10, 3), dtype='float64')
        expected = df.copy()
        df.rank()
        result = df
        tm.assert_frame_equal(result, expected)
| 37.656613 | 79 | 0.509698 |
from __future__ import print_function
import warnings
from datetime import timedelta
import operator
import pytest
from string import ascii_lowercase
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange, PY35
from pandas import (compat, isna, notna, DataFrame, Series,
MultiIndex, date_range, Timestamp, Categorical,
_np_version_under1p12,
to_datetime, to_timedelta)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(TestData):
@td.skip_if_no_scipy
def test_corr_pearson(self):
    """Pearson correlation with NaNs sprinkled into two columns."""
    self.frame['A'][:5] = nan
    self.frame['B'][5:10] = nan

    self._check_method('pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self):
    """Kendall correlation with NaNs sprinkled into two columns."""
    self.frame['A'][:5] = nan
    self.frame['B'][5:10] = nan

    self._check_method('kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self):
    """Spearman correlation with NaNs sprinkled into two columns."""
    self.frame['A'][:5] = nan
    self.frame['B'][5:10] = nan

    self._check_method('spearman')
def _check_method(self, method='pearson', check_minp=False):
    """Compare DataFrame.corr(method) against Series.corr, or check
    min_periods behavior when ``check_minp`` is True."""
    if not check_minp:
        correls = self.frame.corr(method=method)
        exp = self.frame['A'].corr(self.frame['C'], method=method)
        tm.assert_almost_equal(correls['A']['C'], exp)
    else:
        # pairs with fewer than min_periods overlapping obs become NaN
        result = self.frame.corr(min_periods=len(self.frame) - 8)
        expected = self.frame.corr()
        expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan
        tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self):
    """corr() silently drops non-numeric columns."""
    self.frame['A'][:5] = nan
    self.frame['B'][5:10] = nan

    # exclude non-numeric types
    result = self.mixed_frame.corr()
    expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
    tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
    """Columns with no overlapping valid observations correlate to NaN."""
    # nothing in common
    df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
                    'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
                    'C': [np.nan, np.nan, np.nan, np.nan,
                          np.nan, np.nan]})
    rs = df.corr(meth)
    assert isna(rs.loc['A', 'B'])
    assert isna(rs.loc['B', 'A'])
    assert rs.loc['A', 'A'] == 1
    assert rs.loc['B', 'B'] == 1
    # all-NaN column: even the diagonal entry is NaN
    assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
    """Constant (zero-variance) columns yield an all-NaN correlation."""
    # constant --> all NA
    df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
                    'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
    rs = df.corr(meth)
    assert isna(rs.values).all()
def test_corr_int(self):
    """Smoke test: cov/corr work on all-integer frames (no crash)."""
    # dtypes other than float64 #1761
    df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})

    df3.cov()
    df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
    """Mixed bool/int columns: perfectly correlated -> all-ones matrix."""
    # when dtypes of pandas series are different
    # then ndarray will have dtype=object,
    # so it need to be properly handled
    df = DataFrame({"a": [True, False], "b": [1, 0]})

    expected = DataFrame(np.ones((2, 2)), index=[
                         'a', 'b'], columns=['a', 'b'])
    for meth in ['pearson', 'kendall', 'spearman']:
        # scipy may warn on the degenerate 2-point input; ignore it
        with warnings.catch_warnings(record=True):
            result = df.corr(meth)
        tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
    """Result index and columns are equal but independent objects (GH 14617)."""
    df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
                      columns=list("abcd"))
    for method in ['cov', 'corr']:
        result = getattr(df, method)()
        # same labels, but not the very same Index object
        assert result.index is not result.columns
        assert result.index.equals(result.columns)
def test_cov(self):
    """DataFrame.cov: min_periods handling, NaN masking, non-numeric
    exclusion, and agreement with np.cov on a single column."""
    # min_periods no NAs (corner case)
    expected = self.frame.cov()
    result = self.frame.cov(min_periods=len(self.frame))
    tm.assert_frame_equal(expected, result)

    # min_periods larger than the number of rows -> everything NaN
    result = self.frame.cov(min_periods=len(self.frame) + 1)
    assert isna(result.values).all()

    # with NAs: the A/B pair has too few overlapping observations,
    # so its covariance must be masked to NaN.
    # BUG FIX: the original computed result/expected from ``self.frame``
    # (which has no NAs at this point) instead of the NaN-laden copy,
    # and never asserted anything.
    frame = self.frame.copy()
    frame['A'][:5] = nan
    frame['B'][5:10] = nan
    result = frame.cov(min_periods=len(self.frame) - 8)
    expected = frame.cov()
    expected.loc['A', 'B'] = np.nan
    expected.loc['B', 'A'] = np.nan
    tm.assert_frame_equal(result, expected)

    # regular: pairwise cov matches Series.cov
    self.frame['A'][:5] = nan
    self.frame['B'][:10] = nan
    cov = self.frame.cov()

    tm.assert_almost_equal(cov['A']['C'],
                           self.frame['A'].cov(self.frame['C']))

    # exclude non-numeric types
    result = self.mixed_frame.cov()
    expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
    tm.assert_frame_equal(result, expected)

    # single column: agrees with np.cov
    df = DataFrame(np.linspace(0.0, 1.0, 10))
    result = df.cov()
    expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
                         index=df.columns, columns=df.columns)
    tm.assert_frame_equal(result, expected)

    # ... and with a NaN row dropped
    df.loc[0] = np.nan
    result = df.cov()
    expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
                         index=df.columns, columns=df.columns)
    tm.assert_frame_equal(result, expected)
def test_corrwith(self):
    """corrwith along both axes, with and without dropping non-overlap."""
    a = self.tsframe
    noise = Series(randn(len(a)), index=a.index)

    b = self.tsframe.add(noise, axis=0)

    # make sure order does not matter
    b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
    del b['B']

    colcorr = a.corrwith(b, axis=0)
    tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))

    rowcorr = a.corrwith(b, axis=1)
    tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))

    dropped = a.corrwith(b, axis=0, drop=True)
    tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
    assert 'B' not in dropped

    dropped = a.corrwith(b, axis=1, drop=True)
    assert a.index[-1] not in dropped.index

    # non time-series data
    index = ['a', 'b', 'c', 'd', 'e']
    columns = ['one', 'two', 'three', 'four']
    df1 = DataFrame(randn(5, 4), index=index, columns=columns)
    df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
    correls = df1.corrwith(df2, axis=1)
    for row in index[:4]:
        tm.assert_almost_equal(correls[row],
                               df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
    """Object columns are ignored by corrwith on both axes."""
    df1 = tm.makeTimeDataFrame()
    df2 = tm.makeTimeDataFrame()
    cols = ['A', 'B', 'C', 'D']

    df1['obj'] = 'foo'
    df2['obj'] = 'bar'

    result = df1.corrwith(df2)
    expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
    tm.assert_series_equal(result, expected)

    result = df1.corrwith(df2, axis=1)
    expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
    tm.assert_series_equal(result, expected)
def test_corrwith_series(self):
    """corrwith(Series) equals applying Series.corr column-wise."""
    result = self.tsframe.corrwith(self.tsframe['A'])
    expected = self.tsframe.apply(self.tsframe['A'].corr)

    tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
    """corrwith agrees with numpy.corrcoef on a nonlinear pair."""
    base = np.arange(10000)
    frame_lin = DataFrame(base, columns=['a'])
    frame_sq = DataFrame(base ** 2, columns=['a'])
    corr_pd = frame_lin.corrwith(frame_sq)['a']
    corr_np = np.corrcoef(frame_lin['a'], frame_sq['a'])[0][1]

    tm.assert_almost_equal(corr_pd, corr_np)
    # the relationship is monotone but not linear, so correlation < 1
    assert corr_pd < 1
def test_corrwith_mixed_dtypes(self):
    """corrwith(Series) on a mixed-dtype frame uses numeric columns only."""
    frame = pd.DataFrame({'a': [1, 4, 3, 2],
                          'b': [4, 6, 7, 3],
                          'c': ['a', 'b', 'c', 'd']})
    other = pd.Series([0, 6, 7, 3])

    result = frame.corrwith(other)
    expected = pd.Series([frame['a'].corr(other), frame['b'].corr(other)],
                         index=['a', 'b'])
    tm.assert_series_equal(result, expected)
def test_bool_describe_in_mixed_frame(self):
    """describe() on a mixed frame: numeric stats by default, bool stats
    when explicitly included."""
    df = DataFrame({
        'string_data': ['a', 'b', 'c', 'd', 'e'],
        'bool_data': [True, True, False, False, False],
        'int_data': [10, 20, 30, 40, 50],
    })

    # Integer data are included in .describe() output,
    # Boolean and string data are not.
    result = df.describe()
    expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
                                       10, 20, 30, 40, 50]},
                         index=['count', 'mean', 'std', 'min', '25%',
                                '50%', '75%', 'max'])
    tm.assert_frame_equal(result, expected)

    # Top value is a boolean value that is False
    result = df.describe(include=['bool'])

    expected = DataFrame({'bool_data': [5, 2, False, 3]},
                         index=['count', 'unique', 'top', 'freq'])
    tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
    """describe() on all-bool, bool+int and bool+str frames (GH 13891)."""
    df = pd.DataFrame({
        'bool_data_1': [False, False, True, True],
        'bool_data_2': [False, True, True, True]
    })
    result = df.describe()
    expected = DataFrame({'bool_data_1': [4, 2, True, 2],
                          'bool_data_2': [4, 2, True, 3]},
                         index=['count', 'unique', 'top', 'freq'])
    tm.assert_frame_equal(result, expected)

    # with an int column, numeric stats win and bool is dropped
    df = pd.DataFrame({
        'bool_data': [False, False, True, True, False],
        'int_data': [0, 1, 2, 3, 4]
    })
    result = df.describe()
    expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
                                       2, 3, 4]},
                         index=['count', 'mean', 'std', 'min', '25%',
                                '50%', '75%', 'max'])
    tm.assert_frame_equal(result, expected)

    # bool + string: both treated as categorical-like
    df = pd.DataFrame({
        'bool_data': [False, False, True, True],
        'str_data': ['a', 'b', 'c', 'a']
    })
    result = df.describe()
    expected = DataFrame({'bool_data': [4, 2, True, 2],
                          'str_data': [4, 3, 'a', 2]},
                         index=['count', 'unique', 'top', 'freq'])
    tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
    """describe() on categorical data: frames, Series, and agreement
    between a categorical column and its string equivalent."""
    df = DataFrame({'value': np.random.randint(0, 10000, 100)})
    labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
    cat_labels = Categorical(labels, labels)

    df = df.sort_values(by=['value'], ascending=True)
    df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
                               right=False, labels=cat_labels)
    cat = df

    # categorical column is excluded from the default numeric describe
    result = cat.describe()
    assert len(result.columns) == 1

    # In a series describe
    cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
                      ordered=True)
    s = Series(cat)
    result = s.describe()
    expected = Series([4, 2, "b", 3],
                      index=['count', 'unique', 'top', 'freq'])
    tm.assert_series_equal(result, expected)

    cat = Series(Categorical(["a", "b", "c", "c"]))
    df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
    res = df3.describe()
    # categorical and plain-object columns describe identically
    tm.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_describe_categorical_columns(self):
    """describe() preserves a CategoricalIndex on the columns (GH 11558)."""
    columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
                                  ordered=True, name='XXX')
    df = DataFrame({'int1': [10, 20, 30, 40, 50],
                    'int2': [10, 20, 30, 40, 50],
                    'obj': ['A', 0, None, 'X', 1]},
                   columns=columns)
    result = df.describe()

    # the object column is dropped; the remaining columns keep their
    # categorical dtype (with the full original set of categories)
    exp_columns = pd.CategoricalIndex(['int1', 'int2'],
                                      categories=['int1', 'int2', 'obj'],
                                      ordered=True, name='XXX')
    expected = DataFrame({'int1': [5, 30, df.int1.std(),
                                   10, 20, 30, 40, 50],
                          'int2': [5, 30, df.int2.std(),
                                   10, 20, 30, 40, 50]},
                         index=['count', 'mean', 'std', 'min', '25%',
                                '50%', '75%', 'max'],
                         columns=exp_columns)

    tm.assert_frame_equal(result, expected)
    tm.assert_categorical_equal(result.columns.values,
                                expected.columns.values)
def test_describe_datetime_columns(self):
    """describe() preserves a DatetimeIndex (freq, tz) on the columns."""
    columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
                               freq='MS', tz='US/Eastern', name='XXX')
    df = DataFrame({0: [10, 20, 30, 40, 50],
                    1: [10, 20, 30, 40, 50],
                    2: ['A', 0, None, 'X', 1]})
    df.columns = columns
    result = df.describe()

    exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
                                   freq='MS', tz='US/Eastern', name='XXX')
    expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
                              10, 20, 30, 40, 50],
                          1: [5, 30, df.iloc[:, 1].std(),
                              10, 20, 30, 40, 50]},
                         index=['count', 'mean', 'std', 'min', '25%',
                                '50%', '75%', 'max'])
    expected.columns = exp_columns
    tm.assert_frame_equal(result, expected)
    # frequency and timezone survive the describe round-trip
    assert result.columns.freq == 'MS'
    assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
    """describe() on timedelta columns returns Timedelta stats and has a
    stable repr (GH 6145)."""
    # GH 6145
    t1 = pd.timedelta_range('1 days', freq='D', periods=5)
    t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
    df = pd.DataFrame({'t1': t1, 't2': t2})

    expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
                                 df.iloc[:, 0].std(),
                                 pd.Timedelta('1 days'),
                                 pd.Timedelta('2 days'),
                                 pd.Timedelta('3 days'),
                                 pd.Timedelta('4 days'),
                                 pd.Timedelta('5 days')],
                          't2': [5, pd.Timedelta('3 hours'),
                                 df.iloc[:, 1].std(),
                                 pd.Timedelta('1 hours'),
                                 pd.Timedelta('2 hours'),
                                 pd.Timedelta('3 hours'),
                                 pd.Timedelta('4 hours'),
                                 pd.Timedelta('5 hours')]},
                         index=['count', 'mean', 'std', 'min', '25%',
                                '50%', '75%', 'max'])

    res = df.describe()
    tm.assert_frame_equal(res, expected)

    # pin the human-readable repr as well
    exp_repr = ("                           t1                      t2\n"
                "count                       5                       5\n"
                "mean          3 days 00:00:00         0 days 03:00:00\n"
                "std    1 days 13:56:50.394919  0 days 01:34:52.099788\n"
                "min           1 days 00:00:00         0 days 01:00:00\n"
                "25%           2 days 00:00:00         0 days 02:00:00\n"
                "50%           3 days 00:00:00         0 days 03:00:00\n"
                "75%           4 days 00:00:00         0 days 04:00:00\n"
                "max           5 days 00:00:00         0 days 05:00:00")
    assert repr(res) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
    """describe(include='all') on mixed numeric / tz-aware datetime frame
    (GH 21332)."""
    tz = tz_naive_fixture
    s1 = Series(range(5))
    start = Timestamp(2018, 1, 1)
    end = Timestamp(2018, 1, 5)
    s2 = Series(date_range(start, end, tz=tz))
    df = pd.DataFrame({'s1': s1, 's2': s2})

    # numeric stats apply only to s1; categorical/datetime stats to s2
    expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
                                 2, 1.581139, 0, 1, 2, 3, 4],
                          's2': [5, 5, s2.value_counts().index[0], 1,
                                 start.tz_localize(tz),
                                 end.tz_localize(tz), np.nan, np.nan,
                                 np.nan, np.nan, np.nan, np.nan, np.nan]},
                         index=['count', 'unique', 'top', 'freq', 'first',
                                'last', 'mean', 'std', 'min', '25%', '50%',
                                '75%', 'max']
                         )
    res = df.describe(include='all')
    tm.assert_frame_equal(res, expected)
def test_reduce_mixed_frame(self):
    """sum over axis 0 on mixed bool/int/str frame keeps object results
    (GH 6806)."""
    df = DataFrame({
        'bool_data': [True, True, False, False, False],
        'int_data': [10, 20, 30, 40, 50],
        'string_data': ['a', 'b', 'c', 'd', 'e'],
    })
    df.reindex(columns=['bool_data', 'int_data', 'string_data'])
    test = df.sum(axis=0)
    # bools sum to 2, ints to 150, strings concatenate
    tm.assert_numpy_array_equal(test.values,
                                np.array([2, 150, 'abcde'], dtype=object))
    tm.assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
    """count(): generic stat-op check plus empty/corner frames."""
    f = lambda s: notna(s).sum()
    self._check_stat_op('count', f,
                        has_skipna=False,
                        has_numeric_only=True,
                        check_dtype=False,
                        check_dates=True)

    # corner case: fully empty frame
    frame = DataFrame()
    ct1 = frame.count(1)
    assert isinstance(ct1, Series)

    ct2 = frame.count(0)
    assert isinstance(ct2, Series)

    # GH 423: frames with rows but no columns (and vice versa)
    df = DataFrame(index=lrange(10))
    result = df.count(1)
    expected = Series(0, index=df.index)
    tm.assert_series_equal(result, expected)

    df = DataFrame(columns=lrange(10))
    result = df.count(0)
    expected = Series(0, index=df.columns)
    tm.assert_series_equal(result, expected)

    df = DataFrame()
    result = df.count()
    expected = Series(0, index=[])
    tm.assert_series_equal(result, expected)
def test_nunique(self):
    """nunique() along both axes, with and without counting NaN."""
    f = lambda s: len(algorithms.unique1d(s.dropna()))
    self._check_stat_op('nunique', f, has_skipna=False,
                        check_dtype=False, check_dates=True)

    df = DataFrame({'A': [1, 1, 1],
                    'B': [1, 2, 3],
                    'C': [1, np.nan, 3]})
    tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
    # dropna=False counts NaN as its own value
    tm.assert_series_equal(df.nunique(dropna=False),
                           Series({'A': 1, 'B': 3, 'C': 3}))
    tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
    tm.assert_series_equal(df.nunique(axis=1, dropna=False),
                           Series({0: 1, 1: 3, 2: 2}))
def test_sum(self):
    """sum(): generic stat-op check on float64 and float32 frames."""
    self._check_stat_op('sum', np.sum, has_numeric_only=True,
                        skipna_alternative=np.nansum)

    # mixed types (with upcasting happening)
    self._check_stat_op('sum', np.sum,
                        frame=self.mixed_float.astype('float32'),
                        has_numeric_only=True, check_dtype=False,
                        check_less_precise=True)
@pytest.mark.parametrize(
    "method", ['sum', 'mean', 'prod', 'var',
               'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
    """Stat ops on object-dtype frames coerce to float (GH 676)."""
    data = {
        'a': [-0.00049987540199591344, -0.0016467257772919831,
              0.00067695870775883013],
        'b': [-0, -0, 0.0],
        'c': [0.00031111847529610595, 0.0014902627951905339,
              -0.00094099200035979691]
    }
    df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
                    dtype='O')

    df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
                     2: [np.nan, 4]}, dtype=object)

    for df in [df1, df2]:
        assert df.values.dtype == np.object_
        result = getattr(df, method)(1)
        expected = getattr(df.astype('f8'), method)(1)

        # only sum/prod are exactly comparable across the two paths
        if method in ['sum', 'prod']:
            tm.assert_series_equal(result, expected)
def test_mean(self):
    """mean(): generic stat-op check (including datetime columns)."""
    self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
    """product(): generic stat-op check against np.prod."""
    self._check_stat_op('product', np.prod)
def test_median(self):
    """median(): generic stat-op check; NaN anywhere makes the result NaN
    in the non-skipna reference implementation."""
    def wrapper(x):
        if isna(x).any():
            return np.nan
        return np.median(x)

    self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
    """min(): generic stat-op check on float and int frames."""
    # np.min on all-NaN slices warns; not the point of this test
    with warnings.catch_warnings(record=True):
        self._check_stat_op('min', np.min, check_dates=True)
    self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
    """cummin() along both axes matches column/row-wise Series.cummin."""
    self.tsframe.loc[5:10, 0] = nan
    self.tsframe.loc[10:15, 1] = nan
    self.tsframe.loc[15:, 2] = nan

    # axis = 0
    cummin = self.tsframe.cummin()
    expected = self.tsframe.apply(Series.cummin)
    tm.assert_frame_equal(cummin, expected)

    # axis = 1
    cummin = self.tsframe.cummin(axis=1)
    expected = self.tsframe.apply(Series.cummin, axis=1)
    tm.assert_frame_equal(cummin, expected)

    # smoke test on an int frame (result intentionally unchecked)
    df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
    result = df.cummin()

    # fix issue: shape is preserved
    cummin_xs = self.tsframe.cummin(axis=1)
    assert np.shape(cummin_xs) == np.shape(self.tsframe)
def test_cummax(self):
    """cummax() along both axes matches column/row-wise Series.cummax."""
    self.tsframe.loc[5:10, 0] = nan
    self.tsframe.loc[10:15, 1] = nan
    self.tsframe.loc[15:, 2] = nan

    # axis = 0
    cummax = self.tsframe.cummax()
    expected = self.tsframe.apply(Series.cummax)
    tm.assert_frame_equal(cummax, expected)

    # axis = 1
    cummax = self.tsframe.cummax(axis=1)
    expected = self.tsframe.apply(Series.cummax, axis=1)
    tm.assert_frame_equal(cummax, expected)

    # smoke test on an int frame (result intentionally unchecked)
    df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
    result = df.cummax()

    # fix issue: shape is preserved
    cummax_xs = self.tsframe.cummax(axis=1)
    assert np.shape(cummax_xs) == np.shape(self.tsframe)
def test_max(self):
    """max(): generic stat-op check on float and int frames."""
    # np.max on all-NaN slices warns; not the point of this test
    with warnings.catch_warnings(record=True):
        self._check_stat_op('max', np.max, check_dates=True)
    self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
    """mad(): generic stat-op check against mean absolute deviation."""
    f = lambda x: np.abs(x - x.mean()).mean()
    self._check_stat_op('mad', f)
def test_var_std(self):
    """var()/std(): ddof handling and non-negativity of nanvar."""
    alt = lambda x: np.var(x, ddof=1)
    self._check_stat_op('var', alt)

    alt = lambda x: np.std(x, ddof=1)
    self._check_stat_op('std', alt)

    result = self.tsframe.std(ddof=4)
    expected = self.tsframe.apply(lambda x: x.std(ddof=4))
    tm.assert_almost_equal(result, expected)

    result = self.tsframe.var(ddof=4)
    expected = self.tsframe.apply(lambda x: x.var(ddof=4))
    tm.assert_almost_equal(result, expected)

    # variance must never be negative, even for near-constant rows
    arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
    result = nanops.nanvar(arr, axis=0)
    assert not (result < 0).any()

    # same check with bottleneck disabled
    with pd.option_context('use_bottleneck', False):
        result = nanops.nanvar(arr, axis=0)
        assert not (result < 0).any()
@pytest.mark.parametrize(
    "meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
    """numeric_only=True drops string-contaminated columns; =False raises
    (GH 11256, 21090)."""
    df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
    # set one entry to a number in str format
    df1.loc[0, 'foo'] = '100'

    df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
    # set one entry to a non-number str
    df2.loc[0, 'foo'] = 'a'

    result = getattr(df1, meth)(axis=1, numeric_only=True)
    expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
    tm.assert_series_equal(expected, result)

    result = getattr(df2, meth)(axis=1, numeric_only=True)
    expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
    tm.assert_series_equal(expected, result)

    # df1 has all numbers, df2 has a letter inside
    pytest.raises(TypeError, lambda: getattr(df1, meth)(
        axis=1, numeric_only=False))
    pytest.raises(TypeError, lambda: getattr(df2, meth)(
        axis=1, numeric_only=False))
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
                                'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
    """GH 16116: stat ops silently skip the object column, leaving the
    two numeric results, with and without bottleneck."""
    df = DataFrame({'int': [1, 2, 3, 4],
                    'float': [1., 2., 3., 4.],
                    'str': ['a', 'b', 'c', 'd']})

    result = getattr(df, op)()
    assert len(result) == 2

    with pd.option_context('use_bottleneck', False):
        result = getattr(df, op)()
        assert len(result) == 2
def test_cumsum(self):
    """cumsum() along both axes matches column/row-wise Series.cumsum."""
    self.tsframe.loc[5:10, 0] = nan
    self.tsframe.loc[10:15, 1] = nan
    self.tsframe.loc[15:, 2] = nan

    # axis = 0
    cumsum = self.tsframe.cumsum()
    expected = self.tsframe.apply(Series.cumsum)
    tm.assert_frame_equal(cumsum, expected)

    # axis = 1
    cumsum = self.tsframe.cumsum(axis=1)
    expected = self.tsframe.apply(Series.cumsum, axis=1)
    tm.assert_frame_equal(cumsum, expected)

    # works on an int frame (smoke test, result intentionally unchecked)
    df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
    result = df.cumsum()

    # fix issue: shape is preserved
    cumsum_xs = self.tsframe.cumsum(axis=1)
    assert np.shape(cumsum_xs) == np.shape(self.tsframe)
def test_cumprod(self):
    """cumprod() along both axes; also smoke-tests int32/int64 frames."""
    self.tsframe.loc[5:10, 0] = nan
    self.tsframe.loc[10:15, 1] = nan
    self.tsframe.loc[15:, 2] = nan

    # axis = 0
    cumprod = self.tsframe.cumprod()
    expected = self.tsframe.apply(Series.cumprod)
    tm.assert_frame_equal(cumprod, expected)

    # axis = 1
    cumprod = self.tsframe.cumprod(axis=1)
    expected = self.tsframe.apply(Series.cumprod, axis=1)
    tm.assert_frame_equal(cumprod, expected)

    # fix issue: shape is preserved
    cumprod_xs = self.tsframe.cumprod(axis=1)
    assert np.shape(cumprod_xs) == np.shape(self.tsframe)

    # ints (no overflow checking performed here; just must not crash)
    df = self.tsframe.fillna(0).astype(int)
    df.cumprod(0)
    df.cumprod(1)

    # ints32
    df = self.tsframe.fillna(0).astype(np.int32)
    df.cumprod(0)
    df.cumprod(1)
def test_sem(self):
    """sem(): standard error of the mean, ddof handling, non-negativity."""
    alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
    self._check_stat_op('sem', alt)

    result = self.tsframe.sem(ddof=4)
    expected = self.tsframe.apply(
        lambda x: x.std(ddof=4) / np.sqrt(len(x)))
    tm.assert_almost_equal(result, expected)

    # nansem must never go negative, even for near-constant data
    arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
    result = nanops.nansem(arr, axis=0)
    assert not (result < 0).any()

    with pd.option_context('use_bottleneck', False):
        result = nanops.nansem(arr, axis=0)
        assert not (result < 0).any()
@td.skip_if_no_scipy
def test_skew(self):
    """skew(): generic stat-op check against scipy.stats.skew."""
    from scipy.stats import skew

    def alt(x):
        # skew is undefined for fewer than 3 observations
        if len(x) < 3:
            return np.nan
        return skew(x, bias=False)

    self._check_stat_op('skew', alt)
@td.skip_if_no_scipy
def test_kurt(self):
    """kurt(): stat-op check against scipy, plus level-based reduction
    on a MultiIndex frame."""
    from scipy.stats import kurtosis

    def alt(x):
        # kurtosis is undefined for fewer than 4 observations
        if len(x) < 4:
            return np.nan
        return kurtosis(x, bias=False)

    self._check_stat_op('kurt', alt)

    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1]])
    df = DataFrame(np.random.randn(6, 3), index=index)

    kurt = df.kurt()
    kurt2 = df.kurt(level=0).xs('bar')
    # level reduction over a single outer label matches the flat result
    tm.assert_series_equal(kurt, kurt2, check_names=False)
    assert kurt.name is None
    assert kurt2.name == 'bar'
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
                   has_numeric_only=False, check_dtype=True,
                   check_dates=False, check_less_precise=False,
                   skipna_alternative=None):
    """Generic harness: run DataFrame.<name> along both axes and compare
    to ``alternative`` applied column/row-wise.

    Parameters
    ----------
    name : str
        Name of the reduction method on DataFrame.
    alternative : callable
        Reference implementation operating on a 1-d array.
    frame : DataFrame, optional
        Frame to test; defaults to ``self.frame`` (with NaNs injected).
    has_skipna : bool
        Whether the op accepts a ``skipna`` keyword.
    has_numeric_only : bool
        Whether the op accepts a ``numeric_only`` keyword.
    check_dtype, check_dates, check_less_precise : bool
        Assertion strictness / extra datetime-frame smoke checks.
    skipna_alternative : callable, optional
        NaN-aware reference (e.g. np.nansum) used for the skipna path.
    """
    if frame is None:
        frame = self.frame
        # set some NAs
        frame.loc[5:10] = np.nan
        frame.loc[15:20, -2:] = np.nan

    f = getattr(frame, name)

    if check_dates:
        # datetime-only frame must still return a Series
        df = DataFrame({'b': date_range('1/1/2001', periods=2)})
        _f = getattr(df, name)
        result = _f()
        assert isinstance(result, Series)

        df['a'] = lrange(len(df))
        result = getattr(df, name)()
        assert isinstance(result, Series)
        assert len(result)

    if has_skipna:
        def wrapper(x):
            return alternative(x.values)

        skipna_wrapper = tm._make_skipna_wrapper(alternative,
                                                 skipna_alternative)
        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(result0, frame.apply(wrapper),
                               check_dtype=check_dtype,
                               check_less_precise=check_less_precise)
        # HACK: win32 (result dtype can differ across platforms)
        tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                               check_dtype=False,
                               check_less_precise=check_less_precise)
    else:
        skipna_wrapper = alternative
        wrapper = alternative

    result0 = f(axis=0)
    result1 = f(axis=1)
    tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
                           check_dtype=check_dtype,
                           check_less_precise=check_less_precise)
    if name in ['sum', 'prod']:
        exp = frame.apply(skipna_wrapper, axis=1)
        tm.assert_series_equal(result1, exp, check_dtype=False,
                               check_less_precise=check_less_precise)

    # check dtypes
    if check_dtype:
        lcd_dtype = frame.values.dtype
        assert lcd_dtype == result0.dtype
        assert lcd_dtype == result1.dtype

    # bad axis
    tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
    # make sure works on mixed-type frame
    getattr(self.mixed_frame, name)(axis=0)
    getattr(self.mixed_frame, name)(axis=1)

    if has_numeric_only:
        getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
        getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
        getattr(self.frame, name)(axis=0, numeric_only=False)
        getattr(self.frame, name)(axis=1, numeric_only=False)

    # all NA case
    if has_skipna:
        all_na = self.frame * np.NaN
        r0 = getattr(all_na, name)(axis=0)
        r1 = getattr(all_na, name)(axis=1)
        if name in ['sum', 'prod']:
            # empty/all-NaN sum is 0, product is 1
            unit = int(name == 'prod')
            expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
            tm.assert_series_equal(r0, expected)
            expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
            tm.assert_series_equal(r1, expected)
@pytest.mark.parametrize("dropna, expected", [
    (True, {'A': [12],
            'B': [10.0],
            'C': [1.0],
            'D': ['a'],
            'E': Categorical(['a'], categories=['a']),
            'F': to_datetime(['2000-1-2']),
            'G': to_timedelta(['1 days'])}),
    (False, {'A': [12],
             'B': [10.0],
             'C': [np.nan],
             'D': np.array([np.nan], dtype=object),
             'E': Categorical([np.nan], categories=['a']),
             'F': [pd.NaT],
             'G': to_timedelta([pd.NaT])}),
    (True, {'H': [8, 9, np.nan, np.nan],
            'I': [8, 9, np.nan, np.nan],
            'J': [1, np.nan, np.nan, np.nan],
            'K': Categorical(['a', np.nan, np.nan, np.nan],
                             categories=['a']),
            'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
            'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
            'N': [0, 1, 2, 3]}),
    (False, {'H': [8, 9, np.nan, np.nan],
             'I': [8, 9, np.nan, np.nan],
             'J': [1, np.nan, np.nan, np.nan],
             'K': Categorical([np.nan, 'a', np.nan, np.nan],
                              categories=['a']),
             'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
             'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
             'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
    """mode() with dropna True/False across every major dtype (GH 17534)."""
    df = DataFrame({"A": [12, 12, 19, 11],
                    "B": [10, 10, np.nan, 3],
                    "C": [1, np.nan, np.nan, np.nan],
                    "D": [np.nan, np.nan, 'a', np.nan],
                    "E": Categorical([np.nan, np.nan, 'a', np.nan]),
                    "F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
                    "G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
                    "H": [8, 8, 9, 9],
                    "I": [9, 9, 8, 8],
                    "J": [1, 1, np.nan, np.nan],
                    "K": Categorical(['a', np.nan, 'a', np.nan]),
                    "L": to_datetime(['2000-1-2', '2000-1-2',
                                      'NaT', 'NaT']),
                    "M": to_timedelta(['1 days', 'nan',
                                       '1 days', 'nan']),
                    "N": np.arange(4, dtype='int64')})

    # only compare the columns the parametrized case covers
    result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
    expected = DataFrame(expected)
    tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not compat.PY3, reason="only PY3")
def test_mode_sortwarning(self):
    """mode(dropna=False) warns when NaN makes the result unsortable."""
    # Check for the warning that is raised when the mode
    # results cannot be sorted
    df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
    expected = DataFrame({'A': ['a', np.nan]})

    with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
        result = df.mode(dropna=False)
        result = result.sort_values(by='A').reset_index(drop=True)

    tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
    """min/max/abs on timedelta64 columns, including mixed-dtype frames."""
    from datetime import timedelta
    df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
                        B=date_range('2012-1-2', periods=3, freq='D'),
                        C=Timestamp('20120101') -
                        timedelta(minutes=5, seconds=5)))

    diffs = DataFrame(dict(A=df['A'] - df['C'],
                           B=df['A'] - df['B']))

    # min
    result = diffs.min()
    assert result[0] == diffs.loc[0, 'A']
    assert result[1] == diffs.loc[0, 'B']

    result = diffs.min(axis=1)
    assert (result == diffs.loc[0, 'B']).all()

    # max
    result = diffs.max()
    assert result[0] == diffs.loc[2, 'A']
    assert result[1] == diffs.loc[2, 'B']

    result = diffs.max(axis=1)
    assert (result == diffs['A']).all()

    # abs
    result = diffs.abs()
    result2 = abs(diffs)
    expected = DataFrame(dict(A=df['A'] - df['C'],
                              B=df['B'] - df['A']))
    tm.assert_frame_equal(result, expected)
    tm.assert_frame_equal(result2, expected)

    # mixed frame
    mixed = diffs.copy()
    mixed['C'] = 'foo'
    mixed['D'] = 1
    mixed['E'] = 1.
    mixed['F'] = Timestamp('20130101')

    # results in an object array
    from pandas.core.tools.timedeltas import (
        _coerce_scalar_to_timedelta_type as _coerce)

    result = mixed.min()
    expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
                       _coerce(timedelta(days=-1)),
                       'foo', 1, 1.0,
                       Timestamp('20130101')],
                      index=mixed.columns)
    tm.assert_series_equal(result, expected)

    # excludes numeric-incompatible columns row-wise
    result = mixed.min(axis=1)
    expected = Series([1, 1, 1.], index=[0, 1, 2])
    tm.assert_series_equal(result, expected)

    # works when only those columns are selected
    result = mixed[['A', 'B']].min(1)
    expected = Series([timedelta(days=-1)] * 3)
    tm.assert_series_equal(result, expected)

    result = mixed[['A', 'B']].min()
    expected = Series([timedelta(seconds=5 * 60 + 5),
                       timedelta(days=-1)], index=['A', 'B'])
    tm.assert_series_equal(result, expected)

    # GH 3106: timedelta columns survive consolidation
    df = DataFrame({'time': date_range('20130102', periods=5),
                    'time2': date_range('20130105', periods=5)})
    df['off1'] = df['time2'] - df['time']
    assert df['off1'].dtype == 'timedelta64[ns]'

    df['off2'] = df['time'] - df['time2']
    df._consolidate_inplace()
    assert df['off1'].dtype == 'timedelta64[ns]'
    assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
    """sum() on an empty frame returns empty Series along both axes."""
    axis0 = self.empty.sum(0)
    axis1 = self.empty.sum(1)
    assert isinstance(axis0, Series)
    assert isinstance(axis1, Series)
    assert len(axis0) == 0
    assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
    ('sum', 0),
    ('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
    """sum/prod min_count semantics: columns with fewer than min_count
    valid values reduce to NaN; otherwise to the op's identity."""
    idx = ['a', 'b', 'c']
    df = pd.DataFrame({"a": [unit, unit],
                       "b": [unit, np.nan],
                       "c": [np.nan, np.nan]})

    # The default (min_count=0): the all-NaN column reduces to the
    # identity element (0 for sum, 1 for prod).
    # BUG FIX: the original did ``getattr(df, method)`` without calling
    # it and never asserted against ``expected``.
    result = getattr(df, method)()
    expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
    tm.assert_series_equal(result, expected)

    # min_count=1: the all-NaN column becomes NaN
    result = getattr(df, method)(min_count=1)
    expected = pd.Series([unit, unit, np.nan], index=idx)
    tm.assert_series_equal(result, expected)

    # min_count=0: explicit form of the default
    result = getattr(df, method)(min_count=0)
    expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
    tm.assert_series_equal(result, expected)

    # after dropping the first row, 'b' has no valid values either
    result = getattr(df.iloc[1:], method)(min_count=1)
    expected = pd.Series([unit, np.nan, np.nan], index=idx)
    tm.assert_series_equal(result, expected)

    # min_count > 1
    df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
    # B has exactly 5 valid values -> satisfied at min_count=5
    # BUG FIX: the original built ``expected`` from ``result`` itself,
    # so the assertion was a tautology.
    result = getattr(df, method)(min_count=5)
    expected = pd.Series([unit, unit], index=['A', 'B'], dtype='float64')
    tm.assert_series_equal(result, expected)

    # ... but not at min_count=6
    result = getattr(df, method)(min_count=6)
    expected = pd.Series([unit, np.nan], index=['A', 'B'])
    tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
    """min_count semantics for sum() on timedelta64 columns."""
    labels = ['a', 'b', 'c']
    numeric = pd.DataFrame({"a": [0, 0],
                            "b": [0, np.nan],
                            "c": [np.nan, np.nan]})
    tds = numeric.apply(pd.to_timedelta)

    # default: all-NaT column still sums to 0
    zero_expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=labels)
    tm.assert_series_equal(tds.sum(), zero_expected)

    # min_count=0 behaves like the default
    tm.assert_series_equal(tds.sum(min_count=0), zero_expected)

    # min_count=1: the all-NaT column becomes NaT
    masked_expected = pd.Series([0, 0, np.nan], dtype='m8[ns]',
                                index=labels)
    tm.assert_series_equal(tds.sum(min_count=1), masked_expected)
def test_sum_object(self):
values = self.frame.values.astype(int)
frame = DataFrame(values, index=self.frame.index,
columns=self.frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self):
# ensure this works, bug report
bools = np.isnan(self.frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self):
    # unit test when have object data
    the_mean = self.mixed_frame.mean(axis=0)
    the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
    tm.assert_index_equal(the_sum.index, the_mean.index)
    assert len(the_mean.index) < len(self.mixed_frame.columns)
    # xs sum mixed type, just want to know it works...
    the_mean = self.mixed_frame.mean(axis=1)
    the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
    tm.assert_index_equal(the_sum.index, the_mean.index)
    # take mean of boolean column
    self.frame['bool'] = self.frame['A'] > 0
    means = self.frame.mean(0)
    assert means['bool'] == self.frame['bool'].values.mean()

def test_stats_mixed_type(self):
    # don't blow up
    self.mixed_frame.std(1)
    self.mixed_frame.var(1)
    self.mixed_frame.mean(1)
    self.mixed_frame.skew(1)

def test_median_corner(self):
    # Reference implementation for cross-checking DataFrame.median:
    # propagate NaN whenever any value in the slice is missing.
    def wrapper(x):
        if isna(x).any():
            return np.nan
        return np.median(x)

    self._check_stat_op('median', wrapper, frame=self.intframe,
                        check_dtype=False, check_dates=True)

def test_count_objects(self):
    # count() must agree between two frames built from the same series dict
    dm = DataFrame(self.mixed_frame._series)
    df = DataFrame(self.mixed_frame._series)
    tm.assert_series_equal(dm.count(), df.count())
    tm.assert_series_equal(dm.count(1), df.count(1))

def test_cumsum_corner(self):
    # Smoke test: cumsum on an integer frame must not raise.
    dm = DataFrame(np.arange(20).reshape(4, 5),
                   index=lrange(4), columns=lrange(5))
    result = dm.cumsum()  # result intentionally unused (smoke test only)

def test_sum_bools(self):
    # all-NaN frame: isna() is all-True, so each row sums to the column count
    df = DataFrame(index=lrange(1), columns=lrange(10))
    bools = isna(df)
    assert bools.sum(axis=1)[0] == 10
def test_idxmin(self):
    # Compare DataFrame.idxmin against a per-Series apply for both axes,
    # with and without NaN skipping; an invalid axis must raise.
    frame = self.frame
    frame.loc[5:10] = np.nan
    frame.loc[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, self.intframe]:
                result = df.idxmin(axis=axis, skipna=skipna)
                expected = df.apply(Series.idxmin, axis=axis,
                                    skipna=skipna)
                tm.assert_series_equal(result, expected)

    pytest.raises(ValueError, frame.idxmin, axis=2)

def test_idxmax(self):
    # Mirror of test_idxmin for the argmax direction.
    frame = self.frame
    frame.loc[5:10] = np.nan
    frame.loc[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, self.intframe]:
                result = df.idxmax(axis=axis, skipna=skipna)
                expected = df.apply(Series.idxmax, axis=axis,
                                    skipna=skipna)
                tm.assert_series_equal(result, expected)

    pytest.raises(ValueError, frame.idxmax, axis=2)
def test_any_all(self):
    # Delegate to the generic bool-op driver for both reductions.
    self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
    self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)

def test_any_all_extra(self):
    df = DataFrame({
        'A': [True, False, False],
        'B': [True, True, False],
        'C': [True, True, True],
    }, index=['a', 'b', 'c'])
    result = df[['A', 'B']].any(1)
    expected = Series([True, True, False], index=['a', 'b', 'c'])
    tm.assert_series_equal(result, expected)

    result = df[['A', 'B']].any(1, bool_only=True)
    tm.assert_series_equal(result, expected)

    result = df.all(1)
    expected = Series([True, False, False], index=['a', 'b', 'c'])
    tm.assert_series_equal(result, expected)

    result = df.all(1, bool_only=True)
    tm.assert_series_equal(result, expected)

    # axis=None reduces over both axes down to one scalar
    result = df.all(axis=None).item()
    assert result is False

    result = df.any(axis=None).item()
    assert result is True

    result = df[['C']].all(axis=None).item()
    assert result is True

# np.any/np.all must dispatch to the DataFrame reductions and agree with
# the axis=None method form, across empty, bool, float, int, datetime,
# timedelta and categorical inputs.
@pytest.mark.parametrize('func, data, expected', [
    (np.any, {}, False),
    (np.all, {}, True),
    (np.any, {'A': []}, False),
    (np.all, {'A': []}, True),
    (np.any, {'A': [False, False]}, False),
    (np.all, {'A': [False, False]}, False),
    (np.any, {'A': [True, False]}, True),
    (np.all, {'A': [True, False]}, False),
    (np.any, {'A': [True, True]}, True),
    (np.all, {'A': [True, True]}, True),
    (np.any, {'A': [False], 'B': [False]}, False),
    (np.all, {'A': [False], 'B': [False]}, False),
    (np.any, {'A': [False, False], 'B': [False, True]}, True),
    (np.all, {'A': [False, False], 'B': [False, True]}, False),
    (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
    (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
    (np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
    (np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
    pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
                 marks=[td.skip_if_np_lt_115]),
    (np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
    (np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
    (np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
    (np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
])
def test_any_all_np_func(self, func, data, expected):
    data = DataFrame(data)
    result = func(data)
    assert isinstance(result, np.bool_)
    assert result.item() is expected

    result = getattr(DataFrame(data), func.__name__)(axis=None)
    assert isinstance(result, np.bool_)
    assert result.item() is expected

def test_any_all_object(self):
    # empty object-dtype frame: all() is vacuously True, any() False
    result = np.all(DataFrame(columns=['a', 'b'])).item()
    assert result is True

    result = np.any(DataFrame(columns=['a', 'b'])).item()
    assert result is False

@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
    # combining level= with axis=None is ambiguous and must raise
    df = DataFrame(
        {"A": 1},
        index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
                                      names=['out', 'in'])
    )
    xpr = "Must specify 'axis' when aggregating by level."
    with tm.assert_raises_regex(ValueError, xpr):
        getattr(df, method)(axis=None, level='out')
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
                   has_bool_only=False):
    # Generic driver: compare the DataFrame reduction `name` ('any'/'all')
    # against the NumPy `alternative` on both axes, with and without NaN
    # skipping, and exercise the bool_only and all-NA corner cases.
    if frame is None:
        frame = self.frame > 0
        # cast to object dtype so the NaN insertions below are representable
        frame = DataFrame(frame.values.astype(object), frame.index,
                          frame.columns)
        frame.loc[5:10] = np.nan
        frame.loc[15:20, -2:] = np.nan
    f = getattr(frame, name)
    if has_skipna:
        def skipna_wrapper(x):
            # drop NaNs before applying the reference reduction
            nona = x.dropna().values
            return alternative(nona)

        def wrapper(x):
            # keep NaNs: reference for skipna=False
            return alternative(x.values)

        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(result0, frame.apply(wrapper))
        tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
                               check_dtype=False)
    else:
        skipna_wrapper = alternative
        wrapper = alternative
    # default call (skipna=True where supported)
    result0 = f(axis=0)
    result1 = f(axis=1)
    tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
    tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
                           check_dtype=False)
    # invalid axis must raise
    pytest.raises(ValueError, f, axis=2)
    # mixed-dtype frames: just make sure the reduction runs
    mixed = self.mixed_frame
    mixed['_bool_'] = np.random.randn(len(mixed)) > 0
    getattr(mixed, name)(axis=0)
    getattr(mixed, name)(axis=1)

    class NonzeroFail(object):
        # truthiness always raises (py2 __nonzero__); any/all must survive it
        def __nonzero__(self):
            raise ValueError

    mixed['_nonzero_fail_'] = NonzeroFail()
    if has_bool_only:
        getattr(mixed, name)(axis=0, bool_only=True)
        getattr(mixed, name)(axis=1, bool_only=True)
        getattr(frame, name)(axis=0, bool_only=False)
        getattr(frame, name)(axis=1, bool_only=False)
    if has_skipna:
        # all-NA frame: 'any' is False everywhere, 'all' vacuously True
        all_na = frame * np.NaN
        r0 = getattr(all_na, name)(axis=0)
        r1 = getattr(all_na, name)(axis=1)
        if name == 'any':
            assert not r0.any()
            assert not r1.any()
        else:
            assert r0.all()
            assert r1.all()
def test_isin(self):
    # isin with a list matches elementwise against every column
    df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                    'ids2': ['a', 'n', 'c', 'n']},
                   index=['foo', 'bar', 'baz', 'qux'])
    other = ['a', 'b', 'c']
    result = df.isin(other)
    expected = DataFrame([df.loc[s].isin(other) for s in df.index])
    tm.assert_frame_equal(result, expected)

@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
    # GH 16991: an empty membership set yields an all-False frame
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    expected = DataFrame(False, df.index, df.columns)
    result = df.isin(empty)
    tm.assert_frame_equal(result, expected)

def test_isin_dict(self):
    # a dict restricts matching per column; missing keys match nothing
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    d = {'A': ['a']}
    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    result = df.isin(d)
    tm.assert_frame_equal(result, expected)
    # non-unique columns must still work with a dict argument
    df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
    df.columns = ['A', 'A']
    expected = DataFrame(False, df.index, df.columns)
    expected.loc[0, 'A'] = True
    result = df.isin(d)
    tm.assert_frame_equal(result, expected)

def test_isin_with_string_scalar(self):
    # GH 4763: a bare string is not an iterable of values -> TypeError
    df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
                    'ids2': ['a', 'n', 'c', 'n']},
                   index=['foo', 'bar', 'baz', 'qux'])
    with pytest.raises(TypeError):
        df.isin('a')

    with pytest.raises(TypeError):
        df.isin('aaa')

def test_isin_df(self):
    # frame-vs-frame isin aligns on both index and column labels
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
    expected = DataFrame(False, df1.index, df1.columns)
    result = df1.isin(df2)
    expected['A'].loc[[1, 3]] = True
    expected['B'].loc[[0, 2]] = True
    tm.assert_frame_equal(result, expected)
    # renaming a column breaks alignment -> no matches for that column
    df2.columns = ['A', 'C']
    result = df1.isin(df2)
    expected['B'] = False
    tm.assert_frame_equal(result, expected)

def test_isin_tuples(self):
    # GH 16394: tuples stored in an object column are matched whole
    df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
    df['C'] = list(zip(df['A'], df['B']))
    result = df['C'].isin([(1, 'a')])
    tm.assert_series_equal(result,
                           Series([True, False, False], name="C"))

def test_isin_df_dupe_values(self):
    df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
    # duplicate column labels in the argument frame are ambiguous -> raise
    df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                    columns=['B', 'B'])
    with pytest.raises(ValueError):
        df1.isin(df2)

    # same for a duplicated index
    df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
                    columns=['A', 'B'], index=[0, 0, 1, 1])
    with pytest.raises(ValueError):
        df1.isin(df2)

    # and for both at once
    df2.columns = ['B', 'B']
    with pytest.raises(ValueError):
        df1.isin(df2)

def test_isin_dupe_self(self):
    # duplicate columns on the *calling* frame are allowed
    other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
    df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
    result = df.isin(other)
    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected.loc[0] = True
    expected.iloc[1, 1] = True
    tm.assert_frame_equal(result, expected)

def test_isin_against_series(self):
    # a Series argument aligns on the index only
    df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
                      index=['a', 'b', 'c', 'd'])
    s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
    expected = DataFrame(False, index=df.index, columns=df.columns)
    expected['A'].loc['a'] = True
    expected.loc['d'] = True
    result = df.isin(s)
    tm.assert_frame_equal(result, expected)

def test_isin_multiIndex(self):
    # GH 7898: frame-vs-frame isin with a MultiIndex
    idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
                                  (0, 'b', 'bar'), (0, 'b', 'baz'),
                                  (2, 'a', 'foo'), (2, 'a', 'bar'),
                                  (2, 'c', 'bar'), (2, 'c', 'baz'),
                                  (1, 'b', 'foo'), (1, 'b', 'bar'),
                                  (1, 'c', 'bar'), (1, 'c', 'baz')])
    df1 = DataFrame({'A': np.ones(12),
                     'B': np.zeros(12)}, index=idx)
    df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                     'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
    # with unaligned (integer) index on df2 nothing matches
    expected = DataFrame(False, index=df1.index, columns=df1.columns)
    result = df1.isin(df2)
    tm.assert_frame_equal(result, expected)
    # once indexes align, matches follow df2's 0/1 pattern
    df2.index = idx
    expected = df2.values.astype(np.bool)
    expected[:, 1] = ~expected[:, 1]
    expected = DataFrame(expected, columns=['A', 'B'], index=idx)
    result = df1.isin(df2)
    tm.assert_frame_equal(result, expected)

def test_isin_empty_datetimelike(self):
    # GH 15473: isin against empty datetime/timedelta frames is all-False
    df1_ts = DataFrame({'date':
                        pd.to_datetime(['2014-01-01', '2014-01-02'])})
    df1_td = DataFrame({'date':
                        [pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
    df2 = DataFrame({'date': []})
    df3 = DataFrame()

    expected = DataFrame({'date': [False, False]})

    result = df1_ts.isin(df2)
    tm.assert_frame_equal(result, expected)
    result = df1_ts.isin(df3)
    tm.assert_frame_equal(result, expected)
    result = df1_td.isin(df2)
    tm.assert_frame_equal(result, expected)
    result = df1_td.isin(df3)
    tm.assert_frame_equal(result, expected)
def test_round(self):
    # DataFrame.round with scalar, list, dict and Series `decimals`.
    # Rounding an empty frame is a no-op.
    df = DataFrame()
    tm.assert_frame_equal(df, df.round())

    df = DataFrame({'col1': [1.123, 2.123, 3.123],
                    'col2': [1.234, 2.234, 3.234]})
    # default rounds to integers (decimals=0)
    expected_rounded = DataFrame(
        {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
    tm.assert_frame_equal(df.round(), expected_rounded)

    # scalar decimals; np.round must dispatch to DataFrame.round
    decimals = 2
    expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
                                  'col2': [1.23, 2.23, 3.23]})
    tm.assert_frame_equal(df.round(decimals), expected_rounded)
    tm.assert_frame_equal(np.round(df, decimals), expected_rounded)

    # a plain list is not an accepted decimals spec
    round_list = [1, 2]
    with pytest.raises(TypeError):
        df.round(round_list)

    # per-column decimals via dict
    expected_rounded = DataFrame(
        {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
    round_dict = {'col1': 1, 'col2': 2}
    tm.assert_frame_equal(df.round(round_dict), expected_rounded)

    # columns missing from the dict are left untouched
    expected_partially_rounded = DataFrame(
        {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
    partial_round_dict = {'col2': 1}
    tm.assert_frame_equal(df.round(partial_round_dict),
                          expected_partially_rounded)

    # keys naming non-existent columns are silently ignored
    wrong_round_dict = {'col3': 2, 'col2': 1}
    tm.assert_frame_equal(df.round(wrong_round_dict),
                          expected_partially_rounded)

    # non-integer decimals values must raise, whether dict or Series
    non_int_round_dict = {'col1': 1, 'col2': 0.5}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)

    non_int_round_dict = {'col1': 1, 'col2': 'foo'}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)

    non_int_round_Series = Series(non_int_round_dict)
    with pytest.raises(TypeError):
        df.round(non_int_round_Series)

    non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
    with pytest.raises(TypeError):
        df.round(non_int_round_dict)

    non_int_round_Series = Series(non_int_round_dict)
    with pytest.raises(TypeError):
        df.round(non_int_round_Series)

    # NOTE: the two checks below duplicate the one just above;
    # retained verbatim from the original.
    non_int_round_Series = Series(non_int_round_dict)
    with pytest.raises(TypeError):
        df.round(non_int_round_Series)

    non_int_round_Series = Series(non_int_round_dict)
    with pytest.raises(TypeError):
        df.round(non_int_round_Series)

    # negative decimals round to tens/hundreds
    negative_round_dict = {'col1': -1, 'col2': -2}
    big_df = df * 100
    expected_neg_rounded = DataFrame(
        {'col1': [110., 210, 310], 'col2': [100., 200, 300]})
    tm.assert_frame_equal(big_df.round(negative_round_dict),
                          expected_neg_rounded)

    # NaN in a decimals Series must raise
    # (expected_nan_round is unused; kept verbatim from the original)
    nan_round_Series = Series({'col1': nan, 'col2': 1})
    expected_nan_round = DataFrame({
        'col1': [1.123, 2.123, 3.123],
        'col2': [1.2, 2.2, 3.2]})
    with pytest.raises(TypeError):
        df.round(nan_round_Series)

    # Series.round must stay consistent with the frame-level result
    tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])

    # named columns
    # GH 11986
    decimals = 2
    expected_rounded = DataFrame(
        {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
    df.columns.name = "cols"
    expected_rounded.columns.name = "cols"
    tm.assert_frame_equal(df.round(decimals), expected_rounded)

    # interaction of named columns & series
    tm.assert_series_equal(df['col1'].round(decimals),
                           expected_rounded['col1'])
    tm.assert_series_equal(df.round(decimals)['col1'],
                           expected_rounded['col1'])
def test_numpy_round(self):
    # See gh-12600
    df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
    out = np.round(df, decimals=0)
    expected = DataFrame([[2., 1.], [0., 7.]])
    tm.assert_frame_equal(out, expected)

    # the ufunc `out` kwarg is not supported for DataFrame
    msg = "the 'out' parameter is not supported"
    with tm.assert_raises_regex(ValueError, msg):
        np.round(df, decimals=0, out=df)

def test_round_mixed_type(self):
    # GH11885: non-numeric columns are passed through unchanged
    df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
                    'col2': ['1', 'a', 'c', 'f'],
                    'col3': date_range('20111111', periods=4)})
    round_0 = DataFrame({'col1': [1., 2., 3., 4.],
                         'col2': ['1', 'a', 'c', 'f'],
                         'col3': date_range('20111111', periods=4)})
    tm.assert_frame_equal(df.round(), round_0)
    tm.assert_frame_equal(df.round(1), df)
    tm.assert_frame_equal(df.round({'col1': 1}), df)
    tm.assert_frame_equal(df.round({'col1': 0}), round_0)
    tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
    tm.assert_frame_equal(df.round({'col3': 1}), df)

def test_round_issue(self):
    # GH11611: rounding must preserve the index of a frame with
    # duplicated columns; duplicated labels in `decimals` must raise
    df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
                      index=['first', 'second', 'third'])
    dfs = pd.concat((df, df), axis=1)
    rounded = dfs.round()
    tm.assert_index_equal(rounded.index, dfs.index)

    decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
    pytest.raises(ValueError, df.round, decimals)

def test_built_in_round(self):
    # the builtin round() only dispatches to __round__ on Python 3
    if not compat.PY3:
        pytest.skip("build in round cannot be overridden "
                    "prior to Python 3")

    # GH11763
    # Here's the test frame we'll be working with
    df = DataFrame(
        {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})

    # Default round to integer (i.e. decimals=0)
    expected_rounded = DataFrame(
        {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
    tm.assert_frame_equal(round(df), expected_rounded)

def test_pct_change(self):
    # GH 11150: pct_change with fill_method must match the explicit
    # ffill/shift construction along both axes
    pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
        0, 40, 10)]).astype(np.float64)
    pnl.iat[1, 0] = np.nan
    pnl.iat[1, 1] = np.nan
    pnl.iat[2, 3] = 60

    for axis in range(2):
        expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
            axis=axis) - 1
        result = pnl.pct_change(axis=axis, fill_method='pad')
        tm.assert_frame_equal(result, expected)
# Clip

def test_clip(self):
    # clip_upper/clip_lower/clip bound the frame and leave it unmodified
    median = self.frame.median().median()
    original = self.frame.copy()

    capped = self.frame.clip_upper(median)
    assert not (capped.values > median).any()

    floored = self.frame.clip_lower(median)
    assert not (floored.values < median).any()

    double = self.frame.clip(upper=median, lower=median)
    assert not (double.values != median).any()

    # Verify that self.frame was not changed inplace
    assert (self.frame.values == original.values).all()

def test_inplace_clip(self):
    # GH #15388: inplace=True variants of the clip family
    median = self.frame.median().median()
    frame_copy = self.frame.copy()

    frame_copy.clip_upper(median, inplace=True)
    assert not (frame_copy.values > median).any()
    frame_copy = self.frame.copy()

    frame_copy.clip_lower(median, inplace=True)
    assert not (frame_copy.values < median).any()
    frame_copy = self.frame.copy()

    frame_copy.clip(upper=median, lower=median, inplace=True)
    assert not (frame_copy.values != median).any()

def test_dataframe_clip(self):
    # GH #2747: bounds may be given in either order
    df = DataFrame(np.random.randn(1000, 2))

    for lb, ub in [(-1, 1), (1, -1)]:
        clipped_df = df.clip(lb, ub)

        lb, ub = min(lb, ub), max(ub, lb)
        lb_mask = df.values <= lb
        ub_mask = df.values >= ub
        mask = ~lb_mask & ~ub_mask
        assert (clipped_df.values[lb_mask] == lb).all()
        assert (clipped_df.values[ub_mask] == ub).all()
        assert (clipped_df.values[mask] == df.values[mask]).all()

def test_clip_mixed_numeric(self):
    # TODO(jreback)
    # clip on mixed integer or floats
    # with integer clippers coerces to float
    df = DataFrame({'A': [1, 2, 3],
                    'B': [1., np.nan, 3.]})
    result = df.clip(1, 2)
    expected = DataFrame({'A': [1, 2, 2.],
                          'B': [1., np.nan, 2.]})
    tm.assert_frame_equal(result, expected, check_like=True)

@pytest.mark.parametrize("inplace", [True, False])
def test_clip_against_series(self, inplace):
    # GH #6966: Series bounds clip row-wise along axis=0
    df = DataFrame(np.random.randn(1000, 2))
    lb = Series(np.random.randn(1000))
    ub = lb + 1

    original = df.copy()
    clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)

    if inplace:
        clipped_df = df

    for i in range(2):
        lb_mask = original.iloc[:, i] <= lb
        ub_mask = original.iloc[:, i] >= ub
        mask = ~lb_mask & ~ub_mask

        result = clipped_df.loc[lb_mask, i]
        tm.assert_series_equal(result, lb[lb_mask], check_names=False)
        assert result.name == i

        result = clipped_df.loc[ub_mask, i]
        tm.assert_series_equal(result, ub[ub_mask], check_names=False)
        assert result.name == i

        tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])

@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
@pytest.mark.parametrize("axis,res", [
    (0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
    (1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
])
def test_clip_against_list_like(self, inplace, lower, axis, res):
    # GH #15390: list-like bounds broadcast along the chosen axis
    original = self.simple.copy(deep=True)

    result = original.clip(lower=lower, upper=[5, 6, 7],
                           axis=axis, inplace=inplace)

    expected = pd.DataFrame(res,
                            columns=original.columns,
                            index=original.index)
    if inplace:
        result = original
    tm.assert_frame_equal(result, expected, check_exact=True)

@pytest.mark.parametrize("axis", [0, 1, None])
def test_clip_against_frame(self, axis):
    # elementwise clipping against a frame of the same shape
    df = DataFrame(np.random.randn(1000, 2))
    lb = DataFrame(np.random.randn(1000, 2))
    ub = lb + 1

    clipped_df = df.clip(lb, ub, axis=axis)

    lb_mask = df <= lb
    ub_mask = df >= ub
    mask = ~lb_mask & ~ub_mask

    tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
    tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
    tm.assert_frame_equal(clipped_df[mask], df[mask])

def test_clip_with_na_args(self):
    """Should process np.nan argument as None """
    # GH # 17276: NaN bounds behave like no bound
    tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)
    tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),
                          self.frame)

    # GH #19992: NaN entries in list-like bounds propagate
    df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],
                    'col_2': [7, 8, 9]})

    result = df.clip(lower=[4, 5, np.nan], axis=0)
    expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],
                          'col_2': [7, 8, np.nan]})
    tm.assert_frame_equal(result, expected)

    result = df.clip(lower=[4, 5, np.nan], axis=1)
    expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],
                          'col_2': [np.nan, np.nan, np.nan]})
    tm.assert_frame_equal(result, expected)
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
result = a.dot(b)
tm.assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
result = a.dot(row)
exp = a.dot(a.iloc[0])
tm.assert_series_equal(result, exp)
with tm.assert_raises_regex(ValueError,
'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
with tm.assert_raises_regex(ValueError, 'aligned'):
df.dot(df2)
@pytest.mark.skipif(not PY35,
                    reason='matmul supported for Python>=3.5')
@pytest.mark.xfail(
    _np_version_under1p12,
    reason="unpredictable return types under numpy < 1.12")
def test_matmul(self):
    # matmul test is for GH #10259
    a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
                  columns=['p', 'q', 'r', 's'])
    b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
                  columns=['one', 'two'])

    # DataFrame @ DataFrame
    result = operator.matmul(a, b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_frame_equal(result, expected)

    # DataFrame @ Series
    result = operator.matmul(a, b.one)
    expected = Series(np.dot(a.values, b.one.values),
                      index=['a', 'b', 'c'])
    tm.assert_series_equal(result, expected)

    # np.array @ DataFrame
    result = operator.matmul(a.values, b)
    expected = np.dot(a.values, b.values)
    tm.assert_almost_equal(result, expected)

    # nested list @ DataFrame (__rmatmul__)
    result = operator.matmul(a.values.tolist(), b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_almost_equal(result.values, expected.values)

    # mixed dtype DataFrame @ DataFrame
    a['q'] = a.q.round().astype(int)
    result = operator.matmul(a, b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_frame_equal(result, expected)

    # different dtypes DataFrame @ DataFrame
    a = a.astype(int)
    result = operator.matmul(a, b)
    expected = DataFrame(np.dot(a.values, b.values),
                         index=['a', 'b', 'c'],
                         columns=['one', 'two'])
    tm.assert_frame_equal(result, expected)

    # unaligned frames must raise, same as DataFrame.dot
    df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
    df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])

    with tm.assert_raises_regex(ValueError, 'aligned'):
        operator.matmul(df, df2)
@pytest.fixture
def df_duplicates():
    # Frame with duplicated index labels and ties in 'a'/'b', used by the
    # nlargest/nsmallest duplicate-index tests.
    return pd.DataFrame({'a': [1, 2, 3, 4, 4],
                         'b': [1, 1, 1, 1, 1],
                         'c': [0, 1, 2, 5, 4]},
                        index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
    # Mixed frame: numeric 'a'/'c' plus an object column 'b', which
    # nlargest/nsmallest must reject.
    return pd.DataFrame({'a': np.random.permutation(10),
                         'b': list(ascii_lowercase[:10]),
                         'c': np.random.permutation(10).astype('float64')})
@pytest.fixture
def df_main_dtypes():
    # One column per major dtype family, for exercising which dtypes the
    # nselect methods accept.
    return pd.DataFrame(
        {'group': [1, 1, 2],
         'int': [1, 2, 3],
         'float': [4., 5., 6.],
         'string': list('abc'),
         'category_string': pd.Series(list('abc')).astype('category'),
         'category_int': [7, 8, 9],
         'datetime': pd.date_range('20130101', periods=3),
         'datetimetz': pd.date_range('20130101',
                                     periods=3,
                                     tz='US/Eastern'),
         'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
        columns=['group', 'int', 'float', 'string',
                 'category_string', 'category_int',
                 'datetime', 'datetimetz',
                 'timedelta'])
class TestNLargestNSmallest(object):
    """Tests for DataFrame.nlargest / DataFrame.nsmallest (plus two
    broadcasting/mutation regression tests that live in this class)."""

    # message raised when an nselect method is used on an unsupported dtype
    dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
                                "use method {method!r} with this dtype")

    # ----------------------------------------------------------------------
    # Top / bottom
    @pytest.mark.parametrize('order', [
        ['a'],
        ['c'],
        ['a', 'b'],
        ['a', 'c'],
        ['b', 'a'],
        ['b', 'c'],
        ['a', 'b', 'c'],
        ['c', 'a', 'b'],
        ['c', 'b', 'a'],
        ['b', 'c', 'a'],
        ['b', 'a', 'c'],
        # dups!
        ['b', 'c', 'c']])
    @pytest.mark.parametrize('n', range(1, 11))
    def test_n(self, df_strings, nselect_method, n, order):
        # GH10393: nselect must equal a sort_values + head, and must
        # reject the object column 'b'
        df = df_strings
        if 'b' in order:
            error_msg = self.dtype_error_msg_template.format(
                column='b', method=nselect_method, dtype='object')
            with tm.assert_raises_regex(TypeError, error_msg):
                getattr(df, nselect_method)(n, order)
        else:
            ascending = nselect_method == 'nsmallest'
            result = getattr(df, nselect_method)(n, order)
            expected = df.sort_values(order, ascending=ascending).head(n)
            tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize('columns', [
        ('group', 'category_string'), ('group', 'string')])
    def test_n_error(self, df_main_dtypes, nselect_method, columns):
        # categorical-string and string columns raise a descriptive TypeError
        df = df_main_dtypes
        col = columns[1]
        error_msg = self.dtype_error_msg_template.format(
            column=col, method=nselect_method, dtype=df[col].dtype)
        # escape some characters that may be in the repr
        error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)")
                              .replace("[", "\\[").replace("]", "\\]"))
        with tm.assert_raises_regex(TypeError, error_msg):
            getattr(df, nselect_method)(2, columns)

    def test_n_all_dtypes(self, df_main_dtypes):
        # every non-string dtype is accepted (smoke test)
        df = df_main_dtypes
        df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
        df.nlargest(2, list(set(df) - {'category_string', 'string'}))

    def test_n_identical_values(self):
        # GH15297: ties keep original row order
        df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})

        result = df.nlargest(3, 'a')
        expected = pd.DataFrame(
            {'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
        )
        tm.assert_frame_equal(result, expected)

        result = df.nsmallest(3, 'a')
        expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize('order', [
        ['a', 'b', 'c'],
        ['c', 'b', 'a'],
        ['a'],
        ['b'],
        ['a', 'b'],
        ['c', 'b']])
    @pytest.mark.parametrize('n', range(1, 6))
    def test_n_duplicate_index(self, df_duplicates, n, order):
        # GH 13412: a duplicated index must not break nselect
        df = df_duplicates
        result = df.nsmallest(n, order)
        expected = df.sort_values(order).head(n)
        tm.assert_frame_equal(result, expected)

        result = df.nlargest(n, order)
        expected = df.sort_values(order, ascending=False).head(n)
        tm.assert_frame_equal(result, expected)

    def test_duplicate_keep_all_ties(self):
        # see gh-16818: keep='all' returns every row tied at the cutoff
        df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],
                           'b': [10, 9, 8, 7, 5, 50, 10, 20]})
        result = df.nlargest(4, 'a', keep='all')
        expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,
                                       5: 3, 6: 3, 7: 3},
                                 'b': {0: 10, 1: 9, 2: 8, 4: 5,
                                       5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)

        result = df.nsmallest(2, 'a', keep='all')
        expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
                                 'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})
        tm.assert_frame_equal(result, expected)

    def test_series_broadcasting(self):
        # smoke test for numpy warnings
        # GH 16378, GH 16306
        df = DataFrame([1.0, 1.0, 1.0])
        df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
        s = Series([1, 1, 1])
        s_nan = Series([np.nan, np.nan, 1])

        with tm.assert_produces_warning(None):
            df_nan.clip_lower(s, axis=0)
            for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
                getattr(df, op)(s_nan, axis=0)

    def test_series_nat_conversion(self):
        # GH 18521
        # Check rank does not mutate DataFrame
        df = DataFrame(np.random.randn(10, 3), dtype='float64')
        expected = df.copy()
        df.rank()
        result = df
        tm.assert_frame_equal(result, expected)
| true | true |
f72cf916fbd8ea467321863ca89fb57083e4ec13 | 36,218 | py | Python | pydca/meanfield_dca/meanfield_dca.py | MehariBZ/pydca | 034e0707a13e6e43da1343630047d47caeca896e | [
"MIT"
] | 1 | 2021-03-28T01:57:38.000Z | 2021-03-28T01:57:38.000Z | pydca/meanfield_dca/meanfield_dca.py | MehariBZ/pydca | 034e0707a13e6e43da1343630047d47caeca896e | [
"MIT"
] | null | null | null | pydca/meanfield_dca/meanfield_dca.py | MehariBZ/pydca | 034e0707a13e6e43da1343630047d47caeca896e | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division
from . import msa_numerics
from pydca.fasta_reader import fasta_reader
import logging
import numpy as np
"""This module implements Direc Coupling Analysis (DCA) of residue coevolution
for protein and RNA sequences using the mean-field algorithm. The final
coevolution score is computed from the direct probability. The general steps
carried out are outlined as follows
For a detailed information about Direct Coupling Analysis, one can refer to the
following articles:
a) Identification of direct residue contacts in protein-protein interaction
by message-passing
Martin Weigt, Robert A White, Hendrik Szurmant, James A Hoch, Terence Hwa
Journal: Proceedings of the National Academy of Sciences
Volume: 106
Issue: 1
Pages: 67-72
b) Direct-coupling analysis of residue coevolution captures native contacts
across many protein families
Faruck Morcos, Andrea Pagnani, Bryan Lunt, Arianna Bertolino,
Debora S Marks, Chris Sander, Riccardo Zecchina, Jose N Onuchic,
Terence Hwa, Martin Weigt
Journal: Proceedings of the National Academy of Sciences
Volume: 108
Issue: 49
Pages: E1293-E1301
Author(s) Mehari B. Zerihun, Alexander Schug
"""
logger = logging.getLogger(__name__)
class MeanFieldDCAException(Exception):
    """Base exception for errors raised by the mean-field DCA module."""
class MeanFieldDCA:
"""MeanFieldDCA class. Instances of this class are used to carry out Direct
Coupling Analysis (DCA) of residue coevolution using the mean-field DCA
algorithm.
"""
def __init__(self, msa_file_name, biomolecule, pseudocount=None, seqid=None):
    """MeanFieldDCA object class initializer.

    Validates the regularization and sequence-identity parameters, reads
    the alignment into integer form, and precomputes per-sequence weights.

    Parameters
    ----------
    msa_file_name : str
        Name of the FASTA formatted file containing the alignment.
    biomolecule : str
        Type of biomolecule (must be protein or RNA, lower or upper case).
    pseudocount : float, optional
        Relative pseudocount used to regularize frequency data before DCA
        analysis. Must be in [0, 1); defaults to 0.5.
    seqid : float, optional
        Maximum similarity two or more sequences can have and still be
        considered distinct; more similar sequences are down-weighted.
        Must be in (0, 1]; defaults to 0.8.

    Raises
    ------
    ValueError
        If pseudocount, seqid, or biomolecule is invalid.

    Returns
    -------
    None : None
    """
    self.__pseudocount = pseudocount if pseudocount is not None else 0.5
    self.__seqid = seqid if seqid is not None else 0.8
    #Validate the value of pseudo count incase user provide an invalid one
    if self.__pseudocount >= 1.0 or self.__pseudocount < 0:
        logger.error('\n\tValue of relative pseudo-count must be'
            ' between 0 and 1.0. Typical value is 0.5')
        raise ValueError
    #Validate the value of sequence identity
    if self.__seqid > 1.0 or self.__seqid <= 0.0:
        logger.error('\n\tValue of sequence-identity must'
            ' not exceed 1 nor less than 0. Typical values are 0.7, 0.8., 0.9')
        raise ValueError
    # normalize the biomolecule tag so users may pass e.g. 'rna' or ' RNA '
    biomolecule = biomolecule.strip().upper()
    self.__msa_file_name = msa_file_name
    # number of states per site: 4 bases + gap for RNA, 20 residues + gap
    # for protein
    if biomolecule=='RNA':
        self.__num_site_states = 5
    elif biomolecule=='PROTEIN':
        self.__num_site_states = 21
    else:
        logger.error(
            '\n\tUnknown biomolecule ... must be protein (PROTEIN) or rna (RNA)',
        )
        raise ValueError
    # load alignment with residues mapped to integers
    self.__sequences = fasta_reader.get_alignment_int_form(
        self.__msa_file_name,
        biomolecule=biomolecule,
    )
    self.__num_sequences = len(self.__sequences)
    self.__sequences_len = len(self.__sequences[0])
    self.__biomolecule = biomolecule
    if self.__seqid < 1.0:
        # down-weight similar sequences (computed elsewhere in this class)
        self.__sequences_weight = self.compute_sequences_weight()
    else :
        # assign each sequence a weight of one
        self.__sequences_weight = np.ones((self.__num_sequences,), dtype = np.float64)
    # effective alignment depth after reweighting
    self.__effective_num_sequences = np.sum(self.__sequences_weight)
    # summarize the constructed object in the log for the user
    mf_dca_info = """\n\tCreated a MeanFieldDCA object with the following attributes
    \tbiomolecule: {}
    \ttotal states at sites: {}
    \tpseudocount: {}
    \tsequence identity: {}
    \talignment length: {}
    \ttotal number of unique sequences (excluding redundant sequences with 100 percent similarity): {}
    \teffective number of sequences (with sequence identity {}): {}
    """.format(
        biomolecule,
        self.__num_site_states,
        self.__pseudocount,
        self.__seqid,
        self.__sequences_len,
        self.__num_sequences,
        self.__seqid,
        self.__effective_num_sequences,
    )
    logger.info(mf_dca_info)
    return None
def __str__(self):
"""Describes the MeanFieldDCA object.
Parameters
----------
self: MeanFieldDCA
Instance of MeanFieldDCA class
Returns
-------
description : str
A representation about objects created from
the MeanFieldDCA class.
"""
description = '<instance of MeanFieldDCA>'
return description
def __call__(self, pseudocount = 0.5 , seqid = 0.8):
    """Reset the pseudocount and sequence identity through the instance.

    Parameters
    ----------
    pseudocount : float
        Relative pseudocount; must be in [0, 1.0). Default 0.5.
    seqid : float
        Sequence-identity threshold; must be in (0, 1.0]. Default 0.8.

    Raises
    ------
    ValueError
        If either parameter is outside its valid range (the same rules
        that __init__ enforces).

    Returns
    -------
    None : None
    """
    # Enforce the same validity checks as __init__ so that resetting can
    # never leave the instance with parameters __init__ would have rejected.
    if pseudocount >= 1.0 or pseudocount < 0:
        logger.error('\n\tValue of relative pseudo-count must be'
            ' between 0 and 1.0. Typical value is 0.5')
        raise ValueError
    if seqid > 1.0 or seqid <= 0.0:
        logger.error('\n\tValue of sequence-identity must'
            ' not exceed 1 nor less than 0. Typical values are 0.7, 0.8., 0.9')
        raise ValueError
    self.__pseudocount = pseudocount
    self.__seqid = seqid
    # Warn so the user notices the parameters changed from their defaults.
    logger.warning('\n\tYou have changed one of the parameters (pseudo count or sequence identity)'
        '\n\tfrom their default values'
        '\n\tpseudocount: {} \n\tsequence_identity: {}'.format(
            self.__pseudocount, self.__seqid,
        )
    )
    return None
@property
def alignment(self):
    """Getter for the alignment: a 2d list of sequences in integer form."""
    return self.__sequences
@property
def biomolecule(self):
    """Getter for the biomolecule type ('PROTEIN' or 'RNA')."""
    return self.__biomolecule
@property
def sequences_len(self):
    """Getter for the length (number of columns) of the aligned sequences."""
    return self.__sequences_len
@property
def num_site_states(self):
    """Getter for the number of states per site (5 for RNA, 21 for protein)."""
    return self.__num_site_states
@property
def num_sequences(self):
    """Getter for the total number of sequences read from the alignment file."""
    return self.__num_sequences
@property
def sequence_identity(self):
    """Getter for the similarity cut-off above which sequences are
    considered identical for weighting purposes."""
    return self.__seqid
@property
def pseudocount(self):
    """Getter for the relative pseudocount used for regularization."""
    return self.__pseudocount
@property
def sequences_weight(self):
    """Getter for the per-sequence weights (1d np.float64 array)."""
    return self.__sequences_weight
@property
def effective_num_sequences(self):
    """Getter for the effective number of sequences: the sum of all
    sequence weights.
    # NOTE(review): recomputes the sum on every access instead of
    # returning the cached self.__effective_num_sequences set in __init__;
    # both agree unless the weights array is mutated after construction.
    """
    return np.sum(self.__sequences_weight)
def compute_sequences_weight(self):
    """Compute a weight for every sequence in the alignment.

    Sequences whose pairwise similarity exceeds the sequence-identity
    threshold are lumped together; each member of a cluster of m similar
    sequences receives weight 1/m. With threshold 1.0 all weights are 1.

    Returns
    -------
    np.array
        1d array of size num_sequences holding each sequence's weight.
    """
    logger.info('\n\tComputing sequences weights')
    return msa_numerics.compute_sequences_weight(
        alignment_data=np.array(self.__sequences, dtype=np.int32),
        seqid=self.__seqid,
    )
def get_single_site_freqs(self):
    """Compute weighted single-site frequency counts.

    Returns
    -------
    np.array
        (L, q) array of per-site residue frequencies, where L is the
        alignment length and q the number of site states; the last
        state of each site represents a gap.
    """
    logger.info('\n\tComputing single site frequencies')
    return msa_numerics.compute_single_site_freqs(
        alignment_data=np.array(self.__sequences),
        num_site_states=self.__num_site_states,
        seqs_weight=self.__sequences_weight,
    )
def get_reg_single_site_freqs(self):
    """Return pseudocount-regularized single-site frequencies.

    Returns
    -------
    np.array
        (L, q) array of regularized single-site frequencies, with L the
        alignment length and q the number of site states.
    """
    raw_freqs = self.get_single_site_freqs()
    logger.info('\n\tRegularizing single site frequencies')
    return msa_numerics.get_reg_single_site_freqs(
        single_site_freqs=raw_freqs,
        seqs_len=self.__sequences_len,
        num_site_states=self.__num_site_states,
        pseudocount=self.__pseudocount,
    )
def get_pair_site_freqs(self):
    """Compute weighted pair-site frequency counts.

    Returns
    -------
    np.array
        (N, q-1, q-1) array where N is the number of unique site pairs
        and q the number of site states. Pairs are ordered (0, 1),
        (0, 2), ..., (L-2, L-1); any computation consuming pair-site
        frequencies must use the same ordering.
    """
    logger.info('\n\tComputing pair site frequencies')
    return msa_numerics.compute_pair_site_freqs(
        alignment_data=np.array(self.__sequences),
        num_site_states=self.__num_site_states,
        seqs_weight=self.__sequences_weight,
    )
def get_reg_pair_site_freqs(self):
    """Return pseudocount-regularized pair-site frequencies.

    Returns
    -------
    np.array
        (N, q-1, q-1) array of regularized pair-site frequencies; the
        pair ordering matches get_pair_site_freqs.
    """
    raw_pair_freqs = self.get_pair_site_freqs()
    logger.info('\n\tRegularizing pair site frequencies')
    return msa_numerics.get_reg_pair_site_freqs(
        pair_site_freqs=raw_pair_freqs,
        seqs_len=self.__sequences_len,
        num_site_states=self.__num_site_states,
        pseudocount=self.__pseudocount,
    )
def construct_corr_mat(self, reg_fi, reg_fij):
    """Build the correlation matrix from regularized frequencies.

    Parameters
    ----------
    reg_fi : np.array
        Regularized single-site frequencies.
    reg_fij : np.array
        Regularized pair-site frequencies.

    Returns
    -------
    np.array
        (M, M) correlation matrix with M = L*(q-1), where L is the
        alignment length and q the number of site states.
    """
    logger.info('\n\tConstructing the correlation matrix')
    return msa_numerics.construct_corr_mat(
        reg_fi=reg_fi,
        reg_fij=reg_fij,
        seqs_len=self.__sequences_len,
        num_site_states=self.__num_site_states,
    )
def compute_couplings(self, corr_mat):
    """Compute couplings as the negative inverse of the correlation matrix.

    Parameters
    ----------
    corr_mat : np.array
        Correlation matrix formed from regularized pair-site and
        single-site frequencies.

    Returns
    -------
    couplings : np.array
        2d array of the same shape as corr_mat. The result is also cached
        on the instance (self.__couplings) for reuse by compute_params.
    """
    logger.info('\n\tComputing couplings')
    try:
        couplings = msa_numerics.compute_couplings(corr_mat = corr_mat)
    except Exception as e:
        # Inverting an ill-conditioned correlation matrix can fail;
        # increasing the pseudocount usually resolves this.
        logger.error('\n\tCorrelation {}\n\tYou set the pseudocount {}.'
            ' You might need to increase it.'.format(e, self.__pseudocount)
        )
        raise
    # capture couplings to avoid recomputing
    self.__couplings = couplings
    logger.info('\n\tMaximum and minimum couplings: {}, {}'.format(
        np.max(couplings), np.min(couplings)))
    return couplings
def compute_two_site_model_fields(self, couplings, reg_fi):
    """Fit two-site model fields to the empirical single-site marginals.

    Parameters
    ----------
    couplings : np.array
        2d couplings matrix derived from the correlation matrix.
    reg_fi : np.array
        Regularized single-site frequencies.

    Returns
    -------
    np.array
        (N, q, q) array of two-site model fields, N being the number of
        unique site pairs (same pair ordering as pair-site frequencies).
    """
    logger.info('\n\tComputing two site model fields')
    return msa_numerics.compute_two_site_model_fields(
        couplings=couplings,
        reg_fi=reg_fi,
        seqs_len=self.__sequences_len,
        num_site_states=self.__num_site_states,
    )
def compute_fields(self, couplings=None):
    """Compute the local fields of the global probability model.

    Parameters
    ----------
    couplings : np.array, optional
        2d couplings matrix. When omitted, couplings are computed from
        the alignment's regularized frequencies.

    Returns
    -------
    fields : dict
        Maps each site index i to a 1d array of its q-1 field values.
    """
    # Regularized single-site frequencies are needed in both branches;
    # compute them once (the original duplicated this call).
    reg_fi = self.get_reg_single_site_freqs()
    if couplings is None:
        reg_fij = self.get_reg_pair_site_freqs()
        corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
        couplings = self.compute_couplings(corr_mat)
    q = self.__num_site_states
    fields = dict()
    logger.info('\n\tComputing local fields of the global probability function')
    for i in range(self.__sequences_len):
        pi = reg_fi[i]
        piq = pi[-1]
        # Accumulator for sum_{j != i} J_ij * p_j. Renamed from 'sum',
        # which shadowed the builtin.
        coupling_term = np.zeros((q - 1, 1))
        row_start = i * (q - 1)
        row_end = row_start + (q - 1)
        for j in range(self.__sequences_len):
            if j != i:
                pj = reg_fi[j]
                col_start = j * (q - 1)
                col_end = col_start + (q - 1)
                couplings_ij = couplings[row_start:row_end, col_start:col_end]
                pj_col_vec = np.reshape(pj[:-1], (q - 1, 1))
                coupling_term += np.dot(couplings_ij, pj_col_vec)
        # Mean-field relation: h_i = log(p_i / p_i(gap)) - sum_j J_ij p_j
        fields_i = np.log(pi[:-1] / piq) - np.reshape(coupling_term, (q - 1,))
        fields[i] = fields_i
    return fields
def shift_couplings(self, couplings_ij):
"""Shifts the couplings value.
Parameters
----------
self : MeanFieldDCA
An instance of MeanFieldDCA class
couplings_ij : np.array
1d array of couplings for site pair (i, j)
Returns
-------
shifted_couplings_ij : np.array
A 2d array of the couplings for site pair (i, j)
"""
qm1 = self.__num_site_states - 1
couplings_ij = np.reshape(couplings_ij, (qm1,qm1))
avx = np.mean(couplings_ij, axis=1)
avx = np.reshape(avx, (qm1, 1))
avy = np.mean(couplings_ij, axis=0)
avy = np.reshape(avy, (1, qm1))
av = np.mean(couplings_ij)
couplings_ij = couplings_ij - avx - avy + av
return couplings_ij
def compute_params(self, seqbackmapper=None, ranked_by=None, linear_dist=None, num_site_pairs=None):
    """Compute fields and couplings, with couplings ranked by DCA score.

    Parameters
    ----------
    seqbackmapper : SequenceBackmapper, optional
        When given, sites are mapped to the reference sequence.
    ranked_by : str, optional
        Ranking criterion: 'FN', 'FN_APC', 'DI' or 'DI_APC' (any case).
        Defaults to Frobenius norm with average product correction.
    linear_dist : int, optional
        Minimum separation |i - j| between reported site pairs. Default 4.
    num_site_pairs : int, optional
        Number of top-ranked site pairs whose couplings are extracted.

    Returns
    -------
    tuple
        (fields, couplings): tuples of per-site fields and of
        (site pair, flattened shifted couplings) entries.
    """
    if ranked_by is None: ranked_by = 'fn_apc'
    if linear_dist is None: linear_dist = 4
    RANKING_METHODS = ('FN', 'FN_APC', 'DI', 'DI_APC')
    ranked_by = ranked_by.strip().upper()
    if ranked_by not in RANKING_METHODS:
        logger.error('\n\tInvalid ranking criterion {}.\nChoose from {}'.format(ranked_by, RANKING_METHODS))
        raise MeanFieldDCAException
    # Computing the sorted scores below also caches self.__couplings and,
    # when seqbackmapper is given, self.__refseq_mapping_dict — both are
    # relied upon further down.
    if ranked_by == 'FN': dca_scores = self.compute_sorted_FN(seqbackmapper=seqbackmapper)
    if ranked_by == 'FN_APC': dca_scores = self.compute_sorted_FN_APC(seqbackmapper=seqbackmapper)
    if ranked_by == 'DI': dca_scores = self.compute_sorted_DI(seqbackmapper=seqbackmapper)
    if ranked_by == 'DI_APC': dca_scores = self.compute_sorted_DI_APC(seqbackmapper=seqbackmapper)
    fields = self.compute_fields(couplings=self.__couplings)
    qm1 = self.__num_site_states - 1
    if seqbackmapper is not None:
        # mapping_dict has keys from MSA sites and values from refseq sites
        # we need to reverse this mapping as the fields and couplings are from MSA sites
        mapping_dict = {
            value : key for key, value in self.__refseq_mapping_dict.items()
        }
    else:
        # no backmapper: identity mapping over all MSA sites
        mapping_dict = {
            i : i for i in range(self.__sequences_len)
        }
    # set default number of site pairs whose couplings are to be extracted
    if num_site_pairs is None :
        num_site_pairs = len(seqbackmapper.ref_sequence) if seqbackmapper is not None else len(mapping_dict.keys())
    # we need only the fields corresponding to mapped sites
    fields_mapped = list()
    logger.info('\n\tExtracting fields')
    for i in mapping_dict.keys():
        site_in_msa = mapping_dict[i]
        fields_im = fields[site_in_msa]
        site_fields = i, fields_im
        fields_mapped.append(site_fields)
    # extract couplings for the top-ranked, sufficiently separated pairs
    logger.info('\n\tExtracting couplings for top {} site pairs (i, j) with |i - j| > {} and ranked by {}'.format(
        num_site_pairs, linear_dist, ranked_by)
    )
    couplings_ranked_by_dca_score = list()
    count_pairs = 0
    for pair, score in dca_scores:
        site_1_in_refseq, site_2_in_refseq = pair[0], pair[1]
        if abs(site_1_in_refseq - site_2_in_refseq) > linear_dist:
            count_pairs += 1
            if count_pairs > num_site_pairs: break
            i, j = mapping_dict[site_1_in_refseq], mapping_dict[site_2_in_refseq]
            if(i > j):
                logger.error('\n\tInvalid site pair. Site pair (i, j) should be ordered in i < j')
                raise MeanFieldDCAException
            row_start = i * qm1
            row_end = row_start + qm1
            column_start = j * qm1
            column_end = column_start + qm1
            couplings_ij = self.__couplings[row_start:row_end, column_start:column_end]
            couplings_ij = self.shift_couplings(couplings_ij) # now couplings_ij is a 2d numpy array
            couplings_ij = np.reshape(couplings_ij, (qm1*qm1,))
            pair_couplings_ij = pair, couplings_ij
            couplings_ranked_by_dca_score.append(pair_couplings_ij)
    if count_pairs < num_site_pairs:
        logger.warning('\n\tObtained couplings for only {} ranked site pairs.'
            '\n\tThis is the maximum number of site paris we can obtain under '
            'the given criteria'.format(count_pairs)
        )
    return tuple(fields_mapped), tuple(couplings_ranked_by_dca_score)
def get_mapped_site_pairs_dca_scores(self, sorted_dca_scores, seqbackmapper):
    """Map DCA-scored site pairs from MSA columns to reference-sequence sites.

    Pairs with sites that cannot be mapped to the reference sequence are
    dropped. The MSA-to-refseq mapping is cached on the instance for
    later use by compute_params.

    Parameters
    -----------
    sorted_dca_scores : tuple of tuples
        (site pair, DCA score) tuples sorted by score in reverse order.
    seqbackmapper : SequenceBackmapper
        Backmapper providing the MSA-to-reference-sequence mapping.

    Returns
    -------
    tuple
        (mapped site pair, score) tuples sorted by score, descending.
    """
    mapping_dict = seqbackmapper.map_to_reference_sequence()
    # Add attribute __reseq_mapping_dict
    self.__refseq_mapping_dict = mapping_dict
    sorted_scores_mapped = list()
    num_mapped_pairs = 0
    for pair, score in sorted_dca_scores:
        try:
            mapped_pair = mapping_dict[pair[0]], mapping_dict[pair[1]]
        except KeyError:
            # at least one site has no reference-sequence counterpart; skip
            pass
        else:
            current_pair_score = mapped_pair, score
            sorted_scores_mapped.append(current_pair_score)
            num_mapped_pairs += 1
    # sort mapped pairs in case they were not
    sorted_scores_mapped = sorted(sorted_scores_mapped, key = lambda k : k[1], reverse=True)
    logger.info('\n\tTotal number of mapped sites: {}'.format(num_mapped_pairs))
    return tuple(sorted_scores_mapped)
def get_site_pair_di_score(self):
    """Compute direct information (DI) for all unique site pairs.

    Returns
    -------
    site_pair_di_score : dict
        Maps each site pair (i, j) with j > i to its DI score. Keys are
        built in the order (0, 1), (0, 2), ..., (L-2, L-1), matching the
        flat pair order of the backend's DI array.
    """
    reg_fi = self.get_reg_single_site_freqs()
    reg_fij = self.get_reg_pair_site_freqs()
    corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
    couplings = self.compute_couplings(corr_mat)
    fields_ij = self.compute_two_site_model_fields(couplings, reg_fi)
    logger.info('\n\tComputing direct information')
    unsorted_DI = msa_numerics.compute_direct_info(
        couplings = couplings,
        fields_ij = fields_ij,
        reg_fi = reg_fi,
        seqs_len = self.__sequences_len,
        num_site_states = self.__num_site_states,
    )
    # unsorted_DI is flat; rebuild the (i, j) keys in the backend's order
    site_pair_di_score= dict()
    pair_counter = 0
    for i in range(self.__sequences_len - 1):
        for j in range(i + 1, self.__sequences_len):
            site_pair = (i , j)
            site_pair_di_score[site_pair] = unsorted_DI[pair_counter]
            pair_counter += 1
    return site_pair_di_score
def compute_sorted_DI(self, seqbackmapper=None):
    """Compute direct information for all site pairs, sorted descending.

    Parameters
    ----------
    seqbackmapper : SequenceBackmapper, optional
        When given, site pairs are backmapped to the reference sequence.

    Returns
    -------
    sorted_DI : list or tuple
        (site pair, DI score) entries for all unique pairs (i, j) with
        j > i, sorted by score in descending order.
    """
    di_by_pair = self.get_site_pair_di_score()
    ranked = sorted(di_by_pair.items(), key=lambda item: item[1], reverse=True)
    if seqbackmapper is not None:
        return self.get_mapped_site_pairs_dca_scores(ranked, seqbackmapper)
    return ranked
def compute_sorted_DI_APC(self, seqbackmapper=None):
    """Compute DI scores with average product correction (APC).

    Parameters
    ----------
    seqbackmapper : SequenceBackmapper, optional
        When given, site pairs are backmapped to the reference sequence
        AFTER the APC step.

    Returns
    -------
    sorted_DI_APC : list or tuple
        (site pair, APC-corrected DI score) entries for all unique pairs
        (i, j) with j > i, sorted by corrected score, descending.
    """
    # Do NOT pass seqbackmapper here; backmapping is applied after APC.
    sorted_DI = self.compute_sorted_DI()
    logger.info('\n\tPerforming average product correction (APC) of DI scores')
    # Per-site mean score in a single pass over the pair list (each site
    # occurs in exactly N - 1 pairs), instead of rescanning all pairs
    # once per site.
    N = self.__sequences_len
    site_score_sum = [0.0] * N
    for (i, j), score in sorted_DI:
        site_score_sum[i] += score
        site_score_sum[j] += score
    av_score_sites = [s / float(N - 1) for s in site_score_sum]
    av_all_scores = sum(av_score_sites) / float(N)
    # APC: subtract av_i * av_j / av_all from each raw score.
    sorted_DI_APC = [
        (pair, score - av_score_sites[pair[0]] * (av_score_sites[pair[1]] / av_all_scores))
        for pair, score in sorted_DI
    ]
    # APC may have disrupted the ordering; re-sort by corrected score.
    sorted_DI_APC = sorted(sorted_DI_APC, key=lambda k: k[1], reverse=True)
    # Now we must do backmapping if seqbackmapper is provided.
    if seqbackmapper is not None:
        sorted_DI_APC = self.get_mapped_site_pairs_dca_scores(sorted_DI_APC, seqbackmapper)
    return sorted_DI_APC
def compute_sorted_FN(self, seqbackmapper=None):
    """Compute the Frobenius norm of gauge-shifted couplings per site pair.

    Parameters
    ----------
    seqbackmapper : SequenceBackmapper, optional
        When given, site pairs are backmapped to the reference sequence.

    Returns
    -------
    fn_sorted : list or tuple
        (site pair, FN score) entries for all unique pairs (i, j) with
        j > i, sorted by score in descending order.
    """
    reg_fi = self.get_reg_single_site_freqs()
    reg_fij = self.get_reg_pair_site_freqs()
    corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
    couplings = self.compute_couplings(corr_mat)
    logger.info('\n\tComputing Frobenius norm of couplings')
    num_sites = self.__sequences_len
    qm1 = self.__num_site_states - 1
    frobenius_norm = list()
    for i in range(num_sites):
        row_start = i * qm1
        row_end = row_start + qm1
        for j in range(i + 1, num_sites):
            col_start = j * qm1
            col_end = col_start + qm1
            cij = couplings[row_start:row_end, col_start:col_end]
            # Shift to the zero-sum gauge before taking the norm. Reuse
            # shift_couplings so the gauge convention lives in one place
            # (the original inlined an identical copy of that arithmetic).
            cij_shifted = self.shift_couplings(cij)
            fn_ij = np.sqrt(np.sum(cij_shifted * cij_shifted))
            frobenius_norm.append(((i, j), fn_ij))
    fn_sorted = sorted(frobenius_norm, key=lambda x: x[1], reverse=True)
    if seqbackmapper is not None:
        fn_sorted = self.get_mapped_site_pairs_dca_scores(fn_sorted, seqbackmapper)
    return fn_sorted
def compute_sorted_FN_APC(self, seqbackmapper = None):
    """Frobenius-norm scores with average product correction (APC).

    Parameters
    ----------
    seqbackmapper : SequenceBackmapper, optional
        When given, site pairs are backmapped to the reference sequence
        AFTER the APC step.

    Returns
    -------
    sorted_FN_APC : list or tuple
        (site pair, APC-corrected FN score) entries for all unique pairs
        (i, j) with j > i, sorted by corrected score, descending.
    """
    # Must not pass seqbackmapper here; APC is done in MSA coordinates.
    raw_FN = self.compute_sorted_FN()
    logger.info('\n\tPerforming average product correction (APC) to Frobenius'
        ' norm of couplings.'
    )
    # Per-site mean score in a single pass over the pair list (each site
    # occurs in exactly N - 1 pairs), instead of rescanning all pairs
    # once per site.
    N = self.__sequences_len
    site_score_sum = [0.0] * N
    for (i, j), score in raw_FN:
        site_score_sum[i] += score
        site_score_sum[j] += score
    av_score_sites = [s / float(N - 1) for s in site_score_sum]
    av_all_scores = sum(av_score_sites) / float(N)
    # APC: subtract av_i * av_j / av_all from each raw score.
    sorted_FN_APC = [
        (pair, score - av_score_sites[pair[0]] * (av_score_sites[pair[1]] / av_all_scores))
        for pair, score in raw_FN
    ]
    sorted_FN_APC = sorted(sorted_FN_APC, key=lambda x: x[1], reverse=True)
    # Backmapping (if seqbackmapper is not None) happens after APC.
    if seqbackmapper is not None:
        sorted_FN_APC = self.get_mapped_site_pairs_dca_scores(sorted_FN_APC, seqbackmapper)
    return sorted_FN_APC
# No CLI entry point: this module is intended to be used as a library.
if __name__ == '__main__':
    """
    """
from . import msa_numerics
from pydca.fasta_reader import fasta_reader
import logging
import numpy as np
logger = logging.getLogger(__name__)
class MeanFieldDCAException(Exception):
class MeanFieldDCA:
def __init__(self, msa_file_name, biomolecule, pseudocount=None, seqid=None):
self.__pseudocount = pseudocount if pseudocount is not None else 0.5
self.__seqid = seqid if seqid is not None else 0.8
if self.__pseudocount >= 1.0 or self.__pseudocount < 0:
logger.error('\n\tValue of relative pseudo-count must be'
' between 0 and 1.0. Typical value is 0.5')
raise ValueError
if self.__seqid > 1.0 or self.__seqid <= 0.0:
logger.error('\n\tValue of sequence-identity must'
' not exceed 1 nor less than 0. Typical values are 0.7, 0.8., 0.9')
raise ValueError
biomolecule = biomolecule.strip().upper()
self.__msa_file_name = msa_file_name
if biomolecule=='RNA':
self.__num_site_states = 5
elif biomolecule=='PROTEIN':
self.__num_site_states = 21
else:
logger.error(
'\n\tUnknown biomolecule ... must be protein (PROTEIN) or rna (RNA)',
)
raise ValueError
self.__sequences = fasta_reader.get_alignment_int_form(
self.__msa_file_name,
biomolecule=biomolecule,
)
self.__num_sequences = len(self.__sequences)
self.__sequences_len = len(self.__sequences[0])
self.__biomolecule = biomolecule
if self.__seqid < 1.0:
self.__sequences_weight = self.compute_sequences_weight()
else :
self.__sequences_weight = np.ones((self.__num_sequences,), dtype = np.float64)
self.__effective_num_sequences = np.sum(self.__sequences_weight)
mf_dca_info = """\n\tCreated a MeanFieldDCA object with the following attributes
\tbiomolecule: {}
\ttotal states at sites: {}
\tpseudocount: {}
\tsequence identity: {}
\talignment length: {}
\ttotal number of unique sequences (excluding redundant sequences with 100 percent similarity): {}
\teffective number of sequences (with sequence identity {}): {}
""".format(
biomolecule,
self.__num_site_states,
self.__pseudocount,
self.__seqid,
self.__sequences_len,
self.__num_sequences,
self.__seqid,
self.__effective_num_sequences,
)
logger.info(mf_dca_info)
return None
def __str__(self):
description = '<instance of MeanFieldDCA>'
return description
def __call__(self, pseudocount = 0.5 , seqid = 0.8):
self.__pseudocount = pseudocount
self.__seqid = seqid
logger.warning('\n\tYou have changed one of the parameters (pseudo count or sequence identity)'
'\n\tfrom their default values'
'\n\tpseudocount: {} \n\tsequence_identity: {}'.format(
self.__pseudocount, self.__seqid,
)
)
return None
@property
def alignment(self):
return self.__sequences
@property
def biomolecule(self):
return self.__biomolecule
@property
def sequences_len(self):
return self.__sequences_len
@property
def num_site_states(self):
return self.__num_site_states
@property
def num_sequences(self):
return self.__num_sequences
@property
def sequence_identity(self):
return self.__seqid
@property
def pseudocount(self):
return self.__pseudocount
@property
def sequences_weight(self):
return self.__sequences_weight
@property
def effective_num_sequences(self):
return np.sum(self.__sequences_weight)
def compute_sequences_weight(self):
logger.info('\n\tComputing sequences weights')
weights = msa_numerics.compute_sequences_weight(
alignment_data= np.array(self.__sequences, dtype=np.int32),
seqid = self.__seqid,
)
return weights
def get_single_site_freqs(self):
logger.info('\n\tComputing single site frequencies')
single_site_freqs = msa_numerics.compute_single_site_freqs(
alignment_data = np.array(self.__sequences),
num_site_states = self.__num_site_states,
seqs_weight = self.__sequences_weight,
)
return single_site_freqs
def get_reg_single_site_freqs(self):
single_site_freqs = self.get_single_site_freqs()
logger.info('\n\tRegularizing single site frequencies')
reg_single_site_freqs = msa_numerics.get_reg_single_site_freqs(
single_site_freqs = single_site_freqs,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
pseudocount = self.__pseudocount,
)
return reg_single_site_freqs
def get_pair_site_freqs(self):
logger.info('\n\tComputing pair site frequencies')
pair_site_freqs = msa_numerics.compute_pair_site_freqs(
alignment_data = np.array(self.__sequences),
num_site_states = self.__num_site_states,
seqs_weight = self.__sequences_weight,
)
return pair_site_freqs
def get_reg_pair_site_freqs(self):
pair_site_freqs = self.get_pair_site_freqs()
logger.info('\n\tRegularizing pair site frequencies')
reg_pair_site_freqs = msa_numerics.get_reg_pair_site_freqs(
pair_site_freqs = pair_site_freqs,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
pseudocount = self.__pseudocount,
)
return reg_pair_site_freqs
def construct_corr_mat(self, reg_fi, reg_fij):
logger.info('\n\tConstructing the correlation matrix')
corr_mat = msa_numerics.construct_corr_mat(
reg_fi = reg_fi,
reg_fij = reg_fij,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
)
return corr_mat
def compute_couplings(self, corr_mat):
logger.info('\n\tComputing couplings')
try:
couplings = msa_numerics.compute_couplings(corr_mat = corr_mat)
except Exception as e:
logger.error('\n\tCorrelation {}\n\tYou set the pseudocount {}.'
' You might need to increase it.'.format(e, self.__pseudocount)
)
raise
self.__couplings = couplings
logger.info('\n\tMaximum and minimum couplings: {}, {}'.format(
np.max(couplings), np.min(couplings)))
return couplings
def compute_two_site_model_fields(self, couplings, reg_fi):
logger.info('\n\tComputing two site model fields')
two_site_model_fields = msa_numerics.compute_two_site_model_fields(
couplings = couplings,
reg_fi = reg_fi,
seqs_len = self.__sequences_len,
num_site_states = self.__num_site_states,
)
return two_site_model_fields
def compute_fields(self, couplings=None):
if couplings is None:
reg_fi = self.get_reg_single_site_freqs()
reg_fij = self.get_reg_pair_site_freqs()
corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
couplings = self.compute_couplings(corr_mat)
else:
reg_fi = self.get_reg_single_site_freqs()
q = self.__num_site_states
fields = dict()
logger.info('\n\tComputing local fields of the global probability function')
for i in range(self.__sequences_len):
pi = reg_fi[i]
piq = pi[-1]
sum = np.zeros((q-1, 1))
row_start = i * (q - 1)
row_end = row_start + (q - 1)
for j in range(self.__sequences_len):
if j != i:
pj = reg_fi[j]
col_start = j * (q - 1)
col_end = col_start + (q - 1)
couplings_ij = couplings[row_start:row_end, col_start:col_end]
pj_col_vec = np.reshape(pj[:-1], (q-1, 1))
sum += np.dot(couplings_ij, pj_col_vec)
fields_i = np.log(pi[:-1]/piq) - np.reshape(sum, (q-1, ))
fields[i] = fields_i
return fields
def shift_couplings(self, couplings_ij):
qm1 = self.__num_site_states - 1
couplings_ij = np.reshape(couplings_ij, (qm1,qm1))
avx = np.mean(couplings_ij, axis=1)
avx = np.reshape(avx, (qm1, 1))
avy = np.mean(couplings_ij, axis=0)
avy = np.reshape(avy, (1, qm1))
av = np.mean(couplings_ij)
couplings_ij = couplings_ij - avx - avy + av
return couplings_ij
    def compute_params(self, seqbackmapper=None, ranked_by=None, linear_dist=None, num_site_pairs=None):
        """Extract local fields and couplings for the top-ranked site pairs.

        Args:
            seqbackmapper: optional SequenceBackmapper; when given, sites are
                expressed in reference-sequence coordinates.
            ranked_by (str): ranking criterion, one of 'FN', 'FN_APC', 'DI',
                'DI_APC' (case-insensitive); defaults to 'FN_APC'.
            linear_dist (int): minimum chain separation |i - j| a pair must
                have to be extracted; defaults to 4.
            num_site_pairs (int): number of top pairs to extract; defaults to
                the (mapped) sequence length.

        Returns:
            tuple: (fields_mapped, couplings_ranked_by_dca_score).

        Raises:
            MeanFieldDCAException: for an invalid ranking criterion or an
                unordered site pair (i > j).
        """
        if ranked_by is None: ranked_by = 'fn_apc'
        if linear_dist is None: linear_dist = 4
        RANKING_METHODS = ('FN', 'FN_APC', 'DI', 'DI_APC')
        ranked_by = ranked_by.strip().upper()
        if ranked_by not in RANKING_METHODS:
            logger.error('\n\tInvalid ranking criterion {}.\nChoose from {}'.format(ranked_by, RANKING_METHODS))
            raise MeanFieldDCAException
        if ranked_by == 'FN': dca_scores = self.compute_sorted_FN(seqbackmapper=seqbackmapper)
        if ranked_by == 'FN_APC': dca_scores = self.compute_sorted_FN_APC(seqbackmapper=seqbackmapper)
        if ranked_by == 'DI': dca_scores = self.compute_sorted_DI(seqbackmapper=seqbackmapper)
        if ranked_by == 'DI_APC': dca_scores = self.compute_sorted_DI_APC(seqbackmapper=seqbackmapper)
        fields = self.compute_fields(couplings=self.__couplings)
        qm1 = self.__num_site_states - 1
        # invert the mapping stored by get_mapped_site_pairs_dca_scores
        # during the ranking step above; without a backmapper the mapping is
        # the identity over all MSA columns
        if seqbackmapper is not None:
            mapping_dict = {
                value : key for key, value in self.__refseq_mapping_dict.items()
            }
        else:
            mapping_dict = {
                i : i for i in range(self.__sequences_len)
            }
        if num_site_pairs is None :
            num_site_pairs = len(seqbackmapper.ref_sequence) if seqbackmapper is not None else len(mapping_dict.keys())
        fields_mapped = list()
        logger.info('\n\tExtracting fields')
        for i in mapping_dict.keys():
            site_in_msa = mapping_dict[i]
            fields_im = fields[site_in_msa]
            site_fields = i, fields_im
            fields_mapped.append(site_fields)
        logger.info('\n\tExtracting couplings for top {} site pairs (i, j) with |i - j| > {} and ranked by {}'.format(
            num_site_pairs, linear_dist, ranked_by)
        )
        couplings_ranked_by_dca_score = list()
        count_pairs = 0
        for pair, score in dca_scores:
            site_1_in_refseq, site_2_in_refseq = pair[0], pair[1]
            # skip pairs that are too close along the chain
            if abs(site_1_in_refseq - site_2_in_refseq) > linear_dist:
                count_pairs += 1
                if count_pairs > num_site_pairs: break
                i, j = mapping_dict[site_1_in_refseq], mapping_dict[site_2_in_refseq]
                if(i > j):
                    logger.error('\n\tInvalid site pair. Site pair (i, j) should be ordered in i < j')
                    raise MeanFieldDCAException
                # slice the (q-1)x(q-1) couplings block of this pair, shift it
                # to the zero-sum gauge, then flatten it
                row_start = i * qm1
                row_end = row_start + qm1
                column_start = j * qm1
                column_end = column_start + qm1
                couplings_ij = self.__couplings[row_start:row_end, column_start:column_end]
                couplings_ij = self.shift_couplings(couplings_ij)
                couplings_ij = np.reshape(couplings_ij, (qm1*qm1,))
                pair_couplings_ij = pair, couplings_ij
                couplings_ranked_by_dca_score.append(pair_couplings_ij)
        if count_pairs < num_site_pairs:
            logger.warning('\n\tObtained couplings for only {} ranked site pairs.'
                '\n\tThis is the maximum number of site paris we can obtain under '
                'the given criteria'.format(count_pairs)
            )
        return tuple(fields_mapped), tuple(couplings_ranked_by_dca_score)
def get_mapped_site_pairs_dca_scores(self, sorted_dca_scores, seqbackmapper):
mapping_dict = seqbackmapper.map_to_reference_sequence()
self.__refseq_mapping_dict = mapping_dict
sorted_scores_mapped = list()
num_mapped_pairs = 0
for pair, score in sorted_dca_scores:
try:
mapped_pair = mapping_dict[pair[0]], mapping_dict[pair[1]]
except KeyError:
pass
else:
current_pair_score = mapped_pair, score
sorted_scores_mapped.append(current_pair_score)
num_mapped_pairs += 1
sorted_scores_mapped = sorted(sorted_scores_mapped, key = lambda k : k[1], reverse=True)
logger.info('\n\tTotal number of mapped sites: {}'.format(num_mapped_pairs))
return tuple(sorted_scores_mapped)
    def get_site_pair_di_score(self):
        """Compute the direct-information (DI) score of every site pair.

        Returns:
            dict: (i, j) site pair (i < j) -> DI score.
        """
        reg_fi = self.get_reg_single_site_freqs()
        reg_fij = self.get_reg_pair_site_freqs()
        corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
        couplings = self.compute_couplings(corr_mat)
        fields_ij = self.compute_two_site_model_fields(couplings, reg_fi)
        logger.info('\n\tComputing direct information')
        unsorted_DI = msa_numerics.compute_direct_info(
            couplings = couplings,
            fields_ij = fields_ij,
            reg_fi = reg_fi,
            seqs_len = self.__sequences_len,
            num_site_states = self.__num_site_states,
        )
        # unsorted_DI is a flat sequence ordered like the upper-triangular
        # traversal below; rebuild the pair -> score mapping from it
        site_pair_di_score= dict()
        pair_counter = 0
        for i in range(self.__sequences_len - 1):
            for j in range(i + 1, self.__sequences_len):
                site_pair = (i , j)
                site_pair_di_score[site_pair] = unsorted_DI[pair_counter]
                pair_counter += 1
        return site_pair_di_score
def compute_sorted_DI(self, seqbackmapper=None):
unsorted_DI = self.get_site_pair_di_score()
sorted_DI = sorted(unsorted_DI.items(), key = lambda k : k[1], reverse=True)
if seqbackmapper is not None:
sorted_DI = self.get_mapped_site_pairs_dca_scores(sorted_DI, seqbackmapper)
return sorted_DI
def compute_sorted_DI_APC(self, seqbackmapper=None):
sorted_DI = self.compute_sorted_DI()
logger.info('\n\tPerforming average product correction (APC) of DI scores')
av_score_sites = list()
N = self.__sequences_len
for i in range(N):
i_scores = [score for pair, score in sorted_DI if i in pair]
assert len(i_scores) == N - 1
i_scores_sum = sum(i_scores)
i_scores_ave = i_scores_sum/float(N - 1)
av_score_sites.append(i_scores_ave)
av_all_scores = sum(av_score_sites)/float(N)
sorted_DI_APC = list()
for pair, score in sorted_DI:
i, j = pair
score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)
sorted_DI_APC.append((pair, score_apc))
sorted_DI_APC = sorted(sorted_DI_APC, key = lambda k : k[1], reverse=True)
if seqbackmapper is not None:
sorted_DI_APC = self.get_mapped_site_pairs_dca_scores(sorted_DI_APC, seqbackmapper)
return sorted_DI_APC
    def compute_sorted_FN(self, seqbackmapper=None):
        """Rank all site pairs by the Frobenius norm of their couplings block.

        Args:
            seqbackmapper: optional SequenceBackmapper; when given, pairs are
                mapped back to reference-sequence coordinates.

        Returns:
            ((i, j), score) pairs sorted by Frobenius norm, descending.
        """
        reg_fi = self.get_reg_single_site_freqs()
        reg_fij = self.get_reg_pair_site_freqs()
        corr_mat = self.construct_corr_mat(reg_fi, reg_fij)
        couplings = self.compute_couplings(corr_mat)
        logger.info('\n\tComputing Frobenius norm of couplings')
        num_sites = self.__sequences_len
        q = self.__num_site_states
        frobenius_norm = list()
        for i in range(num_sites):
            row_start = i * (q - 1)
            row_end = row_start + (q - 1)
            for j in range(i + 1, num_sites):
                site_pair = (i, j)
                col_start = j * (q - 1)
                col_end = col_start + (q - 1)
                cij = couplings[row_start:row_end, col_start:col_end]
                # shift the block to the zero-sum gauge before taking the norm
                cij_mean_1 = np.reshape(np.mean(cij, axis=0), (1, q-1))
                cij_mean_2 = np.reshape(np.mean(cij, axis=1), (q-1, 1))
                cij_mean = np.mean(cij)
                cij_new = cij - cij_mean_1 - cij_mean_2 + cij_mean
                # Frobenius norm of the shifted block
                fn_ij = np.sqrt(np.sum(cij_new * cij_new))
                frobenius_norm.append((site_pair, fn_ij))
        fn_sorted = sorted(frobenius_norm, key = lambda x : x[1], reverse=True)
        if seqbackmapper is not None:
            fn_sorted = self.get_mapped_site_pairs_dca_scores(fn_sorted, seqbackmapper)
        return fn_sorted
def compute_sorted_FN_APC(self, seqbackmapper = None):
raw_FN = self.compute_sorted_FN()
logger.info('\n\tPerforming average product correction (APC) to Frobenius'
' norm of couplings.'
)
av_score_sites = list()
N = self.__sequences_len
for i in range(N):
i_scores = [score for pair, score in raw_FN if i in pair]
assert len(i_scores) == N - 1
i_scores_sum = sum(i_scores)
i_scores_ave = i_scores_sum/float(N - 1)
av_score_sites.append(i_scores_ave)
av_all_scores = sum(av_score_sites)/float(N)
sorted_FN_APC = list()
for pair, score in raw_FN:
i, j = pair
score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)
sorted_FN_APC.append((pair, score_apc))
sorted_FN_APC = sorted(sorted_FN_APC, key=lambda x : x[1], reverse=True)
if seqbackmapper is not None:
sorted_FN_APC = self.get_mapped_site_pairs_dca_scores(sorted_FN_APC, seqbackmapper)
return sorted_FN_APC
if __name__ == '__main__':
| true | true |
f72cf9518bb05b881c788963248acf6812065892 | 2,110 | py | Python | pyDist/MultiKeyData.py | alekLukanen/pyDist | ffb2c3feb20afba078fec7381c8785eb1e2b0543 | [
"MIT"
] | 5 | 2017-12-24T08:11:16.000Z | 2019-02-07T22:13:26.000Z | pyDist/MultiKeyData.py | alekLukanen/pyDist | ffb2c3feb20afba078fec7381c8785eb1e2b0543 | [
"MIT"
] | 1 | 2021-06-01T23:17:31.000Z | 2021-06-01T23:17:31.000Z | pyDist/MultiKeyData.py | alekLukanen/pyDist | ffb2c3feb20afba078fec7381c8785eb1e2b0543 | [
"MIT"
] | null | null | null |
class MultiKeyData(object):
    """Dict-like container where several keys may share a single stored value.

    Each value is stored once in ``_values`` under an integer slot index;
    ``_keys`` maps every key to a slot, and ``_links`` reference-counts how
    many keys still point at a slot so the value can be dropped when the last
    key referencing it is deleted.
    """
    def __init__(self):
        self._keys = {}    # key -> slot index
        self._values = {}  # slot index -> stored value
        self._links = {}   # slot index -> number of keys sharing the slot
        self._index = 0    # next unused slot index
    def __add_item(self, key, value):
        """Insert or update one key.

        Returns:
            int: 1 if a new slot was consumed, 0 if an existing key was updated.
        """
        if key not in self._keys:
            self._keys[key] = self._index
            self._values[self._index] = value
            self._links[self._index] = 1
            return 1
        else:
            # existing key keeps its slot; only the stored value changes
            self._values[self._keys[key]] = value
            return 0
    def multi_set(self, keys, value):
        """Bind every key in *keys* to one shared copy of *value*.

        Keys that already exist keep their old slot (only their value is
        updated); previously unknown keys share one freshly allocated slot.
        """
        count = 0
        for key in keys:
            count += self.__add_item(key, value)
        if count > 0:
            # each new key was written to slot ``_index``; merge their
            # individual link counts into that single slot entry
            self._links[self._index] += count - 1
            # advance the slot index only when a slot was actually consumed;
            # the previous version advanced unconditionally, leaking an index
            # whenever every key already existed
            self._index += 1
    def get_values(self):
        """Return the stored values (one entry per slot, not per key)."""
        return list(self._values.values())
    def get_keys(self):
        """Return all registered keys."""
        return list(self._keys.keys())
    def __getitem__(self, key):
        # missing keys yield None instead of raising KeyError
        return self._values[self._keys[key]] if key in self._keys else None
    def __setitem__(self, key, value):
        self._index += self.__add_item(key, value)
    def __delitem__(self, key):
        """Remove *key*; drop its value once no other key references it."""
        index = self._keys[key]
        self._links[index] += -1
        del self._keys[key]
        if self._links[index] == 0:
            del self._links[index]
            del self._values[index]
    def __str__(self):
        return f'keys: {self._keys}\n' \
               f'values: {self._values}\n' \
               f'links: {self._links}'
if __name__ == '__main__':
    # manual smoke test / demo of MultiKeyData
    print('MultiKeyData Test')  # fixed typo: banner previously read 'MultiKeuData Test'
    data = MultiKeyData()
    data['a'] = 101
    data['b'] = 201
    print("data['b']: ", data['b'])
    print('-------------')
    print('data: ')
    print(data)
    print('-------------')
    # first multi_set: 'a' and 'b' already exist, 'c' and 'd' share a new slot
    data.multi_set(('a', 'b', 'c', 'd'), 'hello, world!')
    print(data)
    print('-------------')
    # repeating the same multi_set only updates values in place
    data.multi_set(('a', 'b', 'c', 'd'), 'hello, world!')
    print(data)
    print('-------------')
    data.multi_set(('a', 'b', 'c', 'd', 'e'), 'hello, world!')
    print(data)
    print('-------------')
    del data['e']
    print(data)
    print('-------------')
    print('keys: ', data.get_keys())
    print('values: ', data.get_values())
| 24.534884 | 75 | 0.505687 |
class MultiKeyData(object):
    """Dict-like container where several keys may share one stored value.

    NOTE(review): this looks like a comment-stripped duplicate of the
    MultiKeyData class defined earlier in the file — confirm whether both
    copies are intended.
    """
    def __init__(self):
        self._keys = {}    # key -> slot index
        self._values = {}  # slot index -> stored value
        self._links = {}   # slot index -> number of keys sharing the slot
        self._index = 0    # next unused slot index
    def __add_item(self, key, value):
        # returns 1 when a new slot is consumed, 0 when an existing key is updated
        if key not in self._keys:
            self._keys[key] = self._index
            self._values[self._index] = value
            self._links[self._index] = 1
            return 1
        else:
            self._values[self._keys[key]] = value
            return 0
    def multi_set(self, keys, value):
        # bind all keys to one shared value; new keys share slot self._index
        count = 0
        for key in keys:
            count += self.__add_item(key, value)
        if count>0:
            # fold the link counts of all new keys into one slot entry
            self._links[self._index] += count-1
        # NOTE(review): the index advances even when count == 0, leaving an
        # unused slot number behind
        self._index += 1
    def get_values(self):
        # one entry per slot, not per key
        return list(self._values.values())
    def get_keys(self):
        return list(self._keys.keys())
    def __getitem__(self, key):
        # missing keys yield None instead of raising KeyError
        return self._values[self._keys[key]] if key in self._keys else None
    def __setitem__(self, key, value):
        self._index += self.__add_item(key, value)
    def __delitem__(self, key):
        # drop the key; free the value once no other key references the slot
        index = self._keys[key]
        self._links[index] += -1
        del self._keys[key]
        if self._links[index]==0:
            del self._links[index]
            del self._values[index]
    def __str__(self):
        return f'keys: {self._keys}\n' \
               f'values: {self._values}\n' \
               f'links: {self._links}'
if __name__ == '__main__':
    # manual smoke test / demo of MultiKeyData
    # NOTE(review): duplicate of the demo earlier in the file; the banner
    # typo ('MultiKeuData') is a runtime string and is therefore preserved here
    print('MultiKeuData Test')
    data = MultiKeyData()
    data['a'] = 101
    data['b'] = 201
    print("data['b']: ", data['b'])
    print('-------------')
    print('data: ')
    print(data)
    print('-------------')
    data.multi_set(('a', 'b', 'c', 'd'), 'hello, world!')
    print(data)
    print('-------------')
    data.multi_set(('a', 'b', 'c', 'd'), 'hello, world!')
    print(data)
    print('-------------')
    data.multi_set(('a', 'b', 'c', 'd', 'e'), 'hello, world!')
    print(data)
    print('-------------')
    del data['e']
    print(data)
    print('-------------')
    print('keys: ', data.get_keys())
    print('values: ', data.get_values())
| true | true |
f72cf995acdc249a52c5780d6bbc9e63253eff06 | 18,720 | py | Python | awesometkinter/utils.py | python-gui-application/AwesomeTkinter | 73f638ac432bafbbd4296588a3d20f27f8570577 | [
"MIT"
] | 61 | 2020-09-16T14:22:08.000Z | 2022-03-18T07:38:15.000Z | awesometkinter/utils.py | python-gui-application/AwesomeTkinter | 73f638ac432bafbbd4296588a3d20f27f8570577 | [
"MIT"
] | 10 | 2020-09-15T10:52:24.000Z | 2021-12-24T00:57:22.000Z | awesometkinter/utils.py | python-gui-application/AwesomeTkinter | 73f638ac432bafbbd4296588a3d20f27f8570577 | [
"MIT"
] | 6 | 2020-11-17T06:33:01.000Z | 2021-11-05T08:04:29.000Z | import base64
import math
import platform
import tkinter as tk
from tkinter import ttk
import PIL
from PIL import Image, ImageTk, ImageColor, ImageDraw, ImageFilter
import hashlib
import io
def identify_operating_system():
    """identify current operating system

    Returns:
        (str): 'Windows', 'Linux', or 'Darwin' for mac
    """
    os_name = platform.system()
    return os_name
def calc_md5(binary_data):
    """Return the hexadecimal md5 digest of *binary_data* (bytes-like)."""
    digest = hashlib.md5(binary_data)
    return digest.hexdigest()
def generate_unique_name(*args):
    """get md5 encoding for any arguments that have a string representation

    Returns:
        md5 string; falls back to the raw concatenated string if it cannot
        be utf-8 encoded (e.g. lone surrogates)
    """
    name = ''.join(str(x) for x in args)
    try:
        name = calc_md5(name.encode())
    except UnicodeEncodeError:
        # narrowed from a bare ``except:``: str.encode() is the only call
        # here that can realistically fail, and a bare except would also
        # swallow KeyboardInterrupt/SystemExit
        pass
    return name
def invert_color(color):
    """return inverted hex color
    """
    red, green, blue, _alpha = color_to_rgba(color)
    return rgb2hex(255 - red, 255 - green, 255 - blue)
def rgb2hex(r, g, b):
    """Return the '#rrggbb' hex string for 0-255 RGB components."""
    return f'#{r:02x}{g:02x}{b:02x}'
def change_img_color(img, new_color, old_color=None):
    """Change image color

    Args:
        img: pillow image
        new_color (str): new image color, ex: 'red', '#ff00ff', (255, 0, 0), (255, 0, 0, 255)
        old_color (str): color to be replaced, if omitted, all colors will be replaced with new color keeping
                         alpha channel.

    Returns:
        pillow image
    """
    # convert image to RGBA color scheme
    img = img.convert('RGBA')
    # load pixels data
    pixdata = img.load()
    # handle color
    new_color = color_to_rgba(new_color)
    old_color = color_to_rgba(old_color)
    for y in range(img.size[1]):
        for x in range(img.size[0]):
            # the pixel's own alpha is always preserved
            alpha = pixdata[x, y][-1]
            if old_color:
                # targeted replace: only pixels that exactly match old_color
                if pixdata[x, y] == old_color:
                    r, g, b, _ = new_color
                    pixdata[x, y] = (r, g, b, alpha)
            else:
                # blanket recolor: every pixel gets the new RGB
                r, g, b, _ = new_color
                pixdata[x, y] = (r, g, b, alpha)
    return img
def resize_img(img, size, keep_aspect_ratio=True):
    """resize image using pillow

    Args:
        img (PIL.Image): pillow image object
        size(int or tuple(in, int)): width of image or tuple of (width, height)
        keep_aspect_ratio(bool): maintain aspect ratio relative to width

    Returns:
        (PIL.Image): pillow image
    """
    # a bare int means a square bounding box
    if isinstance(size, int):
        size = (size, size)
    # get ratio
    width, height = img.size
    requested_width = size[0]
    if keep_aspect_ratio:
        # derive the height from the requested width so proportions are kept
        ratio = width / requested_width
        requested_height = height / ratio
    else:
        requested_height = size[1]
    size = (int(requested_width), int(requested_height))
    # LANCZOS resampling for best downscaling quality
    img = img.resize(size, resample=PIL.Image.LANCZOS)
    return img
def mix_images(background_img, foreground_img):
    """paste an image on top of another image

    Args:
        background_img: pillow image in background
        foreground_img: pillow image in foreground

    Returns:
        pillow image
    """
    background_img = background_img.convert('RGBA')
    foreground_img = foreground_img.convert('RGBA')
    img_w, img_h = foreground_img.size
    bg_w, bg_h = background_img.size
    # center the foreground over the background
    offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
    # use the foreground itself as paste mask so its transparency is honored
    background_img.paste(foreground_img, offset, mask=foreground_img)
    return background_img
def color_to_rgba(color):
    """Convert color names or hex notation to RGBA,

    Args:
        color (str): color e.g. 'white' or '#333' or formats like #rgb or #rrggbb

    Returns:
        (4-tuple): tuple of format (r, g, b, a) e.g. it will return (255, 0, 0, 255) for solid red
    """
    if color is None:
        return None
    if not isinstance(color, (tuple, list)):
        # string input: let pillow parse names and hex notations
        return ImageColor.getcolor(color, 'RGBA')
    if len(color) == 3:
        # plain RGB: assume a fully opaque color
        red, green, blue = color
        return (red, green, blue, 255)
    # already RGBA (or some other sequence) - pass through unchanged
    return color
def is_dark(color):
    """rough check if color is dark or light

    Returns:
        (bool): True if color is dark, False if light
    """
    r, g, b, a = color_to_rgba(color)
    # perceived luminance, reference https://stackoverflow.com/a/1855903
    lumina = (0.299 * r + 0.587 * g + 0.114 * b) / 255
    return lumina < 0.6
def calc_font_color(bg):
    """calculate font color based on given background

    Args:
        bg (str): background color

    Returns:
        (str): color name, e.g. "white" for dark background and "black" for light background
    """
    if is_dark(bg):
        return 'white'
    return 'black'
def calc_contrast_color(color, offset):
    """calculate a contrast color

    for darker colors will get a slightly lighter color depend on "offset" and for light colors will get a darker color

    Args:
        color (str): color
        offset (int): 1 to 254

    Returns:
        (str): color
    """
    r, g, b, a = color_to_rgba(color)
    if is_dark(color):
        # lighten, clamping each channel at 255
        channels = [min(c + offset, 255) for c in (r, g, b)]
    else:
        # darken, clamping each channel at 0
        channels = [max(c - offset, 0) for c in (r, g, b)]
    return rgb2hex(*channels)
def text_to_image(text, text_color, bg_color, size):
    """Render *text* onto an image - not implemented; currently returns None."""
    pass
    # sketch of a possible implementation, kept for reference:
    # img = Image.new('RGBA', size, color_to_rgba(text_color))
    # draw = ImageDraw.Draw(img)
    # font = ImageFont.truetype(current_path + "s.ttf", size - int(0.15 * width))
    # draw.text((pad, -pad), str(num), font=font, fill=color_to_rgba(bg_color))
def create_pil_image(fp=None, color=None, size=None, b64=None):
    """create pillow Image object

    Args:
        fp: A filename (string), pathlib.Path object or a file object. The file object must implement read(), seek(),
            and tell() methods, and be opened in binary mode.
        color (str): color in tkinter format, e.g. 'red', '#3300ff', also color can be a tuple or a list of RGB,
            e.g. (255, 0, 255)
        size (int or 2-tuple(int, int)): an image required size in a (width, height) tuple
        b64 (str): base64 hex representation of an image, if "fp" is given this parameter will be ignored

    Returns:
        pillow image object
    """
    # decode the base64 payload into an in-memory file when no fp is given
    if not fp and b64:
        fp = io.BytesIO(base64.b64decode(b64))
    img = Image.open(fp)
    # change color
    if color:
        img = change_img_color(img, color)
    # resize
    if size:
        if isinstance(size, int):
            size = (size, size)
        img = resize_img(img, size)
    return img
def create_image(fp=None, img=None, color=None, size=None, b64=None):
    """create tkinter PhotoImage object

    it can modify size and color of original image

    Args:
        fp: A filename (string), pathlib.Path object or a file object. The file object must implement read(), seek(),
            and tell() methods, and be opened in binary mode.
        img (pillow image): if exist fp or b64 arguments will be ignored
        color (str): color in tkinter format, e.g. 'red', '#3300ff', also color can be a tuple or a list of RGB,
            e.g. (255, 0, 255)
        size (int or 2-tuple(int, int)): an image required size in a (width, height) tuple
        b64 (str): base64 hex representation of an image, if "fp" is given this parameter will be ignored

    Returns:
        tkinter PhotoImage object
    """
    # create pillow image
    if not img:
        img = create_pil_image(fp, color, size, b64)
    # create tkinter images using pillow ImageTk
    img = ImageTk.PhotoImage(img)
    return img
def create_circle(size=100, thickness=None, color='black', fill=None, antialias=4, offset=0):
    """create high quality circle

    the idea to smooth circle line is to draw a bigger size circle and then resize it to the requested size
    inspired from  https://stackoverflow.com/a/34926008

    Args:
        size (tuple or list, or int): outer diameter of the circle or width of bounding box
        thickness (int): outer line thickness in pixels
        color (str): outer line color
        fill (str): fill color, default is a transparent fill
        antialias (int): used to enhance outer line quality and make it smoother
        offset (int): correct cut edges of circle outline

    Returns:
        PIL image: a circle on a transparent image
    """
    if isinstance(size, int):
        size = (size, size)
    else:
        size = size
    fill_color = color_to_rgba(fill) or '#0000'
    requested_size = size
    # calculate thickness to be 2% of circle diameter
    thickness = thickness or max(size[0] * 2 // 100, 2)
    offset = offset or thickness // 2
    # make things bigger
    size = [x * antialias for x in requested_size]
    thickness *= antialias
    # create a transparent image with a big size
    img = Image.new(size=size, mode='RGBA', color='#0000')
    draw = ImageDraw.Draw(img)
    # draw circle with a required color
    draw.ellipse([offset, offset, size[0] - offset, size[1] - offset], outline=color, fill=fill_color, width=thickness)
    # soften edges before downscaling
    img = img.filter(ImageFilter.BLUR)
    # resize image back to the requested size
    img = img.resize(requested_size, Image.LANCZOS)
    # change color again will enhance quality (weird)
    if fill:
        img = change_img_color(img, color, old_color=color)
        img = change_img_color(img, fill, old_color=fill)
    else:
        img = change_img_color(img, color)
    return img
def apply_gradient(img, gradient='vertical', colors=None, keep_transparency=True):
    """apply gradient color for pillow image

    Args:
        img: pillow image
        gradient (str): vertical, horizontal, diagonal, radial
        colors (iterable): 2-colors for the gradient
        keep_transparency (bool): keep original transparency

    Returns:
        pillow image
    """
    size = img.size
    colors = colors or ['black', 'white']
    color1 = color_to_rgba(colors[0])
    color2 = color_to_rgba(colors[1])
    # load pixels data
    pixdata = img.load()

    def _blend_pixel(x, y, ratio1):
        """blend color1/color2 at ratio1 and write the pixel at (x, y)"""
        ratio2 = 1 - ratio1
        r = ratio1 * color2[0] + ratio2 * color1[0]
        g = ratio1 * color2[1] + ratio2 * color1[1]
        b = ratio1 * color2[2] + ratio2 * color1[2]
        if keep_transparency:
            a = pixdata[x, y][-1]
        else:
            a = ratio1 * color2[3] + ratio2 * color1[3]
        # clamp to the valid 0-255 range: putpixel silently wraps
        # out-of-range values, which produced artifacts when a ratio
        # exceeded 1 (e.g. diagonal on non-square images)
        r, g, b, a = (min(max(int(v), 0), 255) for v in (r, g, b, a))
        # Place the pixel
        img.putpixel((x, y), (r, g, b, a))

    if gradient in ('horizontal', 'vertical', 'diagonal'):
        for x in range(0, size[0]):
            for y in range(0, size[1]):
                if gradient == 'horizontal':
                    # normalize by the width (previously divided x by the
                    # height, giving a wrong scale for non-square images)
                    ratio1 = x / size[0]
                elif gradient == 'vertical':
                    ratio1 = y / size[1]
                else:  # diagonal
                    # normalize by the full diagonal travel so the ratio
                    # stays within [0, 1] (previously divided by the
                    # height only)
                    ratio1 = (y + x) / (size[0] + size[1])
                _blend_pixel(x, y, ratio1)
    elif gradient == 'radial':  # inspired by https://stackoverflow.com/a/30669765
        d = min(size)
        radius = d // 2
        for x in range(0, size[0]):
            for y in range(0, size[1]):
                # Find the distance to the center
                distance_to_center = math.sqrt((x - size[0] / 2) ** 2 + (y - size[1] / 2) ** 2)
                _blend_pixel(x, y, distance_to_center / radius)
    return img
def scroll_with_mousewheel(widget, target=None, modifier='Shift', apply_to_children=False):
    """scroll a widget with mouse wheel

    Args:
        widget: tkinter widget
        target: scrollable tkinter widget, in case you need "widget" to catch mousewheel event and make another widget
            to scroll, useful for child widget in a scrollable frame
        modifier (str): Modifier to use with mousewheel to scroll horizontally, default is shift key
        apply_to_children (bool): bind all children

    Examples:
        scroll_with_mousewheel(my_text_widget, target='my_scrollable_frame')

        to make a scrollable canvas:
            for w in my_canvas:
                scroll_with_mousewheel(w, target=my_canvas)
    """
    def _scroll_with_mousewheel(widget):
        target_widget = target if target else widget
        def scroll_vertically(event):
            # scroll vertically ----------------------------------
            # Button-4/Button-5 come from Linux; event.delta from Windows/mac
            if event.num == 4 or event.delta > 0:
                target_widget.yview_scroll(-1, "unit")
            elif event.num == 5 or event.delta < 0:
                target_widget.yview_scroll(1, "unit")
            # 'break' stops the event propagating to other handlers
            return 'break'
        # bind events for vertical scroll ----------------------------------------------
        if hasattr(target_widget, 'yview_scroll'):
            # linux
            widget.bind("<Button-4>", scroll_vertically, add='+')
            widget.bind("<Button-5>", scroll_vertically, add='+')
            # windows and mac
            widget.bind("<MouseWheel>", scroll_vertically, add='+')
        # scroll horizontally ---------------------------------------
        def scroll_horizontally(event):
            # scroll horizontally
            if event.num == 4 or event.delta > 0:
                target_widget.xview_scroll(-10, "unit")
            elif event.num == 5 or event.delta < 0:
                target_widget.xview_scroll(10, "unit")
            return 'break'
        # bind events for horizontal scroll ----------------------------------------------
        if hasattr(target_widget, 'xview_scroll'):
            # linux
            widget.bind(f"<{modifier}-Button-4>", scroll_horizontally, add='+')
            widget.bind(f"<{modifier}-Button-5>", scroll_horizontally, add='+')
            # windows and mac
            widget.bind(f"<{modifier}-MouseWheel>", scroll_horizontally, add='+')
    _scroll_with_mousewheel(widget)
    def handle_children(w):
        for child in w.winfo_children():
            _scroll_with_mousewheel(child)
            # recursive call
            if child.winfo_children():
                handle_children(child)
    if apply_to_children:
        handle_children(widget)
def unbind_mousewheel(widget):
    """unbind mousewheel for a specific widget, e.g. combobox which have mouswheel scroll by default"""
    # Button-4 / Button-5 are the wheel events on Linux; MouseWheel covers
    # Windows and mac
    for sequence in ("<Button-4>", "<Button-5>", "<MouseWheel>"):
        widget.unbind(sequence)
def get_widget_attribute(widget, attr):
    """get an attribute of a widget

    Args:
        widget: tkinter widget "tk or ttk"
        attr (str): attribute or property e.g. 'background'

    Returns:
        attribute value, e.g. '#ffffff' for a background color, or None when
        it cannot be determined
    """
    # if it is ttk based will get style applied, it will raise an error if the widget not a ttk
    try:
        style_name = widget.cget('style') or widget.winfo_class()
        s = ttk.Style()
        value = s.lookup(style_name, attr)
        return value
    except Exception:
        # narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the best-effort fallthrough is kept
        pass
    try:
        # if it's a tk widget will use cget
        return widget.cget(attr)
    except Exception:
        pass
    return None
def configure_widget(widget, **kwargs):
    """configure widget's attributes

    Works for classic tk widgets (via ``config``) and ttk widgets (via their
    style). Attributes neither mechanism accepts are silently ignored,
    keeping the original best-effort behavior.

    Args:
        widget: tkinter widget "tk or ttk"
        **kwargs: attribute/value pairs, e.g. background='red'
    """
    for k, v in kwargs.items():
        # set widget attribute
        try:
            # treat as a "tk" widget, it will raise if widget is a "ttk"
            widget.config(**{k: v})
            continue
        except Exception:
            # narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed
            pass
        try:
            # in case above failed, it might be a ttk widget
            style_name = widget.cget('style') or widget.winfo_class()
            s = ttk.Style()
            s.configure(style_name, **{k: v})
        except Exception:
            pass
def set_default_theme():
    """Switch ttk to the 'default' theme.

    Required for things to render right on Windows: only 'alt', 'default',
    or 'classic' work fine on Windows 10.
    """
    ttk.Style().theme_use('default')
def theme_compatibility_check(print_warning=False):
    """check if current theme is compatible

    Return:
        bool: True or False
    """
    s = ttk.Style()
    current_theme = s.theme_use()
    if current_theme in ('alt', 'default', 'classic'):
        return True
    if print_warning:
        print(f'AwesomeTkinter Warning: Widgets might not work properly under current theme ({current_theme})\n'
              f"compatible_themes are ['alt', 'default', 'classic']\n"
              f"you can set default theme using atk.set_default_theme() or style.theme_use('default')")
    return False
def center_window(window, width=None, height=None, set_geometry_wh=True, reference=None):
    """center a tkinter window on screen's center and set its geometry if width and height given

    Args:
        window (tk.root or tk.Toplevel): a window to be centered
        width (int): window's width
        height (int): window's height
        set_geometry_wh (bool): include width and height in geometry
        reference: tk window e.g parent window as a reference
    """
    # update_idletasks will cause a window to show early at the top left corner
    # then change position to center in non-proffesional way
    # window.update_idletasks()
    if width and height:
        # center relative to the reference window when given, otherwise
        # relative to the whole screen
        if reference:
            refx = reference.winfo_x() + reference.winfo_width() // 2
            refy = reference.winfo_y() + reference.winfo_height() // 2
        else:
            refx = window.winfo_screenwidth() // 2
            refy = window.winfo_screenheight() // 2
        x = refx - width // 2
        y = refy - height // 2
        if set_geometry_wh:
            window.geometry(f'{width}x{height}+{x}+{y}')
        else:
            window.geometry(f'+{x}+{y}')
    else:
        # no explicit size: let tk place the window at the screen center
        window.eval('tk::PlaceWindow . center')
__all__ = ['identify_operating_system', 'calc_md5', 'generate_unique_name', 'invert_color', 'rgb2hex',
'change_img_color', 'resize_img', 'mix_images', 'color_to_rgba', 'is_dark', 'calc_font_color',
'calc_contrast_color', 'text_to_image', 'create_pil_image', 'create_image', 'create_circle',
'scroll_with_mousewheel', 'unbind_mousewheel', 'get_widget_attribute', 'ImageTk', 'set_default_theme',
'theme_compatibility_check', 'configure_widget', 'center_window']
| 30.291262 | 119 | 0.595459 | import base64
import math
import platform
import tkinter as tk
from tkinter import ttk
import PIL
from PIL import Image, ImageTk, ImageColor, ImageDraw, ImageFilter
import hashlib
import io
def identify_operating_system():
return platform.system()
def calc_md5(binary_data):
return hashlib.md5(binary_data).hexdigest()
def generate_unique_name(*args):
name = ''.join([str(x) for x in args])
try:
name = calc_md5(name.encode())
except:
pass
return name
def invert_color(color):
color = color_to_rgba(color)
r, g, b, a = color
inverted_color = rgb2hex(255 - r, 255 - g, 255 - b)
return inverted_color
def rgb2hex(r, g, b):
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
def change_img_color(img, new_color, old_color=None):
img = img.convert('RGBA')
pixdata = img.load()
new_color = color_to_rgba(new_color)
old_color = color_to_rgba(old_color)
for y in range(img.size[1]):
for x in range(img.size[0]):
alpha = pixdata[x, y][-1]
if old_color:
if pixdata[x, y] == old_color:
r, g, b, _ = new_color
pixdata[x, y] = (r, g, b, alpha)
else:
r, g, b, _ = new_color
pixdata[x, y] = (r, g, b, alpha)
return img
def resize_img(img, size, keep_aspect_ratio=True):
if isinstance(size, int):
size = (size, size)
width, height = img.size
requested_width = size[0]
if keep_aspect_ratio:
ratio = width / requested_width
requested_height = height / ratio
else:
requested_height = size[1]
size = (int(requested_width), int(requested_height))
img = img.resize(size, resample=PIL.Image.LANCZOS)
return img
def mix_images(background_img, foreground_img):
background_img = background_img.convert('RGBA')
foreground_img = foreground_img.convert('RGBA')
img_w, img_h = foreground_img.size
bg_w, bg_h = background_img.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
background_img.paste(foreground_img, offset, mask=foreground_img)
return background_img
def color_to_rgba(color):
if color is None:
return None
if isinstance(color, (tuple, list)):
if len(color) == 3:
r, g, b = color
color = (r, g, b, 255)
return color
else:
return ImageColor.getcolor(color, 'RGBA')
def is_dark(color):
r, g, b, a = color_to_rgba(color)
lumina = (0.299 * r + 0.587 * g + 0.114 * b) / 255
return True if lumina < 0.6 else False
def calc_font_color(bg):
return 'white' if is_dark(bg) else 'black'
def calc_contrast_color(color, offset):
r, g, b, a = color_to_rgba(color)
if is_dark(color):
new_color = [x + offset if x + offset <= 255 else 255 for x in (r, g, b)]
else:
new_color = [x - offset if x - offset >= 0 else 0 for x in (r, g, b)]
return rgb2hex(*new_color)
def text_to_image(text, text_color, bg_color, size):
pass
def create_pil_image(fp=None, color=None, size=None, b64=None):
if not fp and b64:
fp = io.BytesIO(base64.b64decode(b64))
img = Image.open(fp)
if color:
img = change_img_color(img, color)
if size:
if isinstance(size, int):
size = (size, size)
img = resize_img(img, size)
return img
def create_image(fp=None, img=None, color=None, size=None, b64=None):
if not img:
img = create_pil_image(fp, color, size, b64)
img = ImageTk.PhotoImage(img)
return img
def create_circle(size=100, thickness=None, color='black', fill=None, antialias=4, offset=0):
if isinstance(size, int):
size = (size, size)
else:
size = size
fill_color = color_to_rgba(fill) or '#0000'
requested_size = size
thickness = thickness or max(size[0] * 2 // 100, 2)
offset = offset or thickness // 2
size = [x * antialias for x in requested_size]
thickness *= antialias
img = Image.new(size=size, mode='RGBA', color='#0000')
draw = ImageDraw.Draw(img)
draw.ellipse([offset, offset, size[0] - offset, size[1] - offset], outline=color, fill=fill_color, width=thickness)
img = img.filter(ImageFilter.BLUR)
img = img.resize(requested_size, Image.LANCZOS)
if fill:
img = change_img_color(img, color, old_color=color)
img = change_img_color(img, fill, old_color=fill)
else:
img = change_img_color(img, color)
return img
def apply_gradient(img, gradient='vertical', colors=None, keep_transparency=True):
size = img.size
colors = colors or ['black', 'white']
color1 = color_to_rgba(colors[0])
color2 = color_to_rgba(colors[1])
pixdata = img.load()
if gradient in ('horizontal', 'vertical', 'diagonal'):
for x in range(0, size[0]):
for y in range(0, size[1]):
if gradient == 'horizontal':
ratio1 = x / size[1]
elif gradient == 'vertical':
ratio1 = y / size[1]
elif gradient == 'diagonal':
ratio1 = (y + x) / size[1]
ratio2 = 1 - ratio1
r = ratio1 * color2[0] + ratio2 * color1[0]
g = ratio1 * color2[1] + ratio2 * color1[1]
b = ratio1 * color2[2] + ratio2 * color1[2]
if keep_transparency:
a = pixdata[x, y][-1]
else:
a = ratio1 * color2[3] + ratio2 * color1[3]
r, g, b, a = (int(x) for x in (r, g, b, a))
img.putpixel((x, y), (r, g, b, a))
elif gradient == 'radial':
d = min(size)
radius = d // 2
for x in range(0, size[0]):
for y in range(0, size[1]):
distance_to_center = math.sqrt((x - size[0] / 2) ** 2 + (y - size[1] / 2) ** 2)
ratio1 = distance_to_center / radius
ratio2 = 1 - ratio1
r = ratio1 * color2[0] + ratio2 * color1[0]
g = ratio1 * color2[1] + ratio2 * color1[1]
b = ratio1 * color2[2] + ratio2 * color1[2]
if keep_transparency:
a = pixdata[x, y][-1]
else:
a = ratio1 * color2[3] + ratio2 * color1[3]
r, g, b, a = (int(x) for x in (r, g, b, a))
img.putpixel((x, y), (r, g, b, a))
return img
def scroll_with_mousewheel(widget, target=None, modifier='Shift', apply_to_children=False):
    """Bind mouse-wheel scrolling on *widget* (and optionally its children).

    Plain wheel events scroll *target* (or the widget itself) vertically;
    wheel events with *modifier* held scroll it horizontally. Handlers are
    installed only when the scrollee exposes yview_scroll / xview_scroll.
    """

    def _bind_one(w):
        scrollee = target if target else w

        def _on_vertical(event):
            # Button-4/5 is the X11 wheel; delta is the Windows/mac wheel
            if event.num == 4 or event.delta > 0:
                scrollee.yview_scroll(-1, 'unit')
            elif event.num == 5 or event.delta < 0:
                scrollee.yview_scroll(1, 'unit')
            return 'break'

        def _on_horizontal(event):
            if event.num == 4 or event.delta > 0:
                scrollee.xview_scroll(-10, 'unit')
            elif event.num == 5 or event.delta < 0:
                scrollee.xview_scroll(10, 'unit')
            return 'break'

        if hasattr(scrollee, 'yview_scroll'):
            for sequence in ('<Button-4>', '<Button-5>', '<MouseWheel>'):
                w.bind(sequence, _on_vertical, add='+')

        if hasattr(scrollee, 'xview_scroll'):
            for sequence in (f'<{modifier}-Button-4>',
                             f'<{modifier}-Button-5>',
                             f'<{modifier}-MouseWheel>'):
                w.bind(sequence, _on_horizontal, add='+')

    _bind_one(widget)

    if apply_to_children:
        # depth-first walk over the whole widget subtree
        pending = list(widget.winfo_children())
        while pending:
            child = pending.pop()
            _bind_one(child)
            pending.extend(child.winfo_children())
def unbind_mousewheel(widget):
    """Remove the wheel-scroll bindings installed by scroll_with_mousewheel()."""
    for sequence in ('<Button-4>', '<Button-5>', '<MouseWheel>'):
        widget.unbind(sequence)
def get_widget_attribute(widget, attr):
    """Return the value of a widget attribute, or None if unavailable.

    Tries the ttk style lookup first (ttk widgets keep their options in a
    named style), then falls back to the plain tk option database via
    cget(). Returns None when neither path yields a value.
    """
    try:
        # ttk path: resolve the widget's style name, then look the option up
        style_name = widget.cget('style') or widget.winfo_class()
        s = ttk.Style()
        value = s.lookup(style_name, attr)
        return value
    except Exception:
        # bug fix: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt
        pass
    try:
        # plain tk path: options are exposed directly on the widget
        return widget.cget(attr)
    except Exception:
        pass
    return None
def configure_widget(widget, **kwargs):
    """Set widget options, working for both tk and ttk widgets.

    Each keyword is first applied through config() (tk widgets); if that
    fails the option is routed through the widget's ttk style instead.
    Options that neither path accepts are silently ignored (best effort,
    preserved from the original design).
    """
    for k, v in kwargs.items():
        # set widget attribute
        try:
            # treat as a "tk" widget, it will raise if widget is a "ttk"
            widget.config(**{k: v})
            continue
        except Exception:
            # bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt
            pass
        try:
            # in case above failed, it might be a ttk widget
            style_name = widget.cget('style') or widget.winfo_class()
            s = ttk.Style()
            s.configure(style_name, **{k: v})
        except Exception:
            pass
def set_default_theme():
    """Switch ttk to the 'default' theme.

    Required for these widgets to render correctly on Windows, where only
    the 'alt', 'default', and 'classic' themes behave properly.
    """
    ttk.Style().theme_use('default')
def theme_compatibility_check(print_warning=False):
    """Return True if the active ttk theme is one these widgets support.

    Only 'alt', 'default' and 'classic' are known-good; for any other
    theme False is returned and, when *print_warning* is set, a warning
    with remediation hints is printed.
    """
    supported = ['alt', 'default', 'classic']
    current_theme = ttk.Style().theme_use()
    if current_theme in supported:
        return True
    if print_warning:
        print(f'AwesomeTkinter Warning: Widgets might not work properly under current theme ({current_theme})\n'
              f"compatible_themes are ['alt', 'default', 'classic']\n"
              f"you can set default theme using atk.set_default_theme() or style.theme_use('default')")
    return False
def center_window(window, width=None, height=None, set_geometry_wh=True, reference=None):
    """Center *window* on *reference* (another widget) or on the screen.

    When both *width* and *height* are given the geometry is computed
    explicitly (including the size unless *set_geometry_wh* is False);
    otherwise Tk's built-in `tk::PlaceWindow . center` is used.
    """
    # Calling update_idletasks() here would flash the window at the top-left
    # corner before it jumps to the center, so it is deliberately avoided.
    if not (width and height):
        window.eval('tk::PlaceWindow . center')
        return

    if reference:
        center_x = reference.winfo_x() + reference.winfo_width() // 2
        center_y = reference.winfo_y() + reference.winfo_height() // 2
    else:
        center_x = window.winfo_screenwidth() // 2
        center_y = window.winfo_screenheight() // 2

    left = center_x - width // 2
    top = center_y - height // 2
    geometry = f'{width}x{height}+{left}+{top}' if set_geometry_wh else f'+{left}+{top}'
    window.geometry(geometry)
# Names exported when this module is star-imported (`from ... import *`).
__all__ = ['identify_operating_system', 'calc_md5', 'generate_unique_name', 'invert_color', 'rgb2hex',
           'change_img_color', 'resize_img', 'mix_images', 'color_to_rgba', 'is_dark', 'calc_font_color',
           'calc_contrast_color', 'text_to_image', 'create_pil_image', 'create_image', 'create_circle',
           'scroll_with_mousewheel', 'unbind_mousewheel', 'get_widget_attribute', 'ImageTk', 'set_default_theme',
           'theme_compatibility_check', 'configure_widget', 'center_window']
| true | true |
f72cfc43e5ce6dc5144f575469244b87366239db | 16,996 | py | Python | arguments.py | wakafengfan/CPM-1-Finetune | b2c30bd94df31bcd6ee75ba90c347113563d4075 | [
"MIT"
] | 60 | 2020-12-14T01:51:49.000Z | 2021-06-14T05:54:45.000Z | arguments.py | wakafengfan/CPM-1-Finetune | b2c30bd94df31bcd6ee75ba90c347113563d4075 | [
"MIT"
] | 29 | 2020-12-16T13:04:52.000Z | 2021-06-10T12:29:11.000Z | arguments.py | wakafengfan/CPM-1-Finetune | b2c30bd94df31bcd6ee75ba90c347113563d4075 | [
"MIT"
] | 11 | 2020-12-24T07:17:39.000Z | 2021-06-11T07:37:22.000Z | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""argparser configuration"""
import argparse
import os
import torch
import deepspeed
def add_model_config_args(parser):
    """Add model-architecture arguments to *parser* and return it.

    Covers transformer geometry (layers, hidden size, attention heads),
    regularization (dropout, layer-norm epsilon), embedding/vocab sizing,
    and CPU-optimizer switches.
    """
    group = parser.add_argument_group('model', 'model configuration')
    group.add_argument('--pretrained-bert', action='store_true',
                       help='use a pretrained bert-large-uncased model instead'
                            'of initializing from scratch. See '
                            '--tokenizer-model-type to specify which pretrained '
                            'BERT model to use')
    group.add_argument('--attention-dropout', type=float, default=0.1,
                       help='dropout probability for attention weights')
    group.add_argument('--num-attention-heads', type=int, default=16,
                       help='num of transformer attention heads')
    group.add_argument('--hidden-size', type=int, default=1024,
                       help='tansformer hidden size')
    group.add_argument('--intermediate-size', type=int, default=None,
                       help='transformer embedding dimension for FFN'
                            'set to 4*`--hidden-size` if it is None')
    group.add_argument('--num-layers', type=int, default=24,
                       help='num decoder layers')
    group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
                       help='layer norm epsilon')
    group.add_argument('--hidden-dropout', type=float, default=0.1,
                       help='dropout probability for hidden state transformer')
    group.add_argument('--max-position-embeddings', type=int, default=512,
                       help='maximum number of position embeddings to use')
    group.add_argument('--vocab-size', type=int, default=30522,
                       help='vocab size to use for non-character-level '
                            'tokenization. This value will only be used when '
                            'creating a tokenizer')
    group.add_argument('--deep-init', action='store_true',
                       help='initialize bert model similar to gpt2 model.'
                            'scales initialization of projection layers by a '
                            'factor of 1/sqrt(2N). Necessary to train bert '
                            'models larger than BERT-Large.')
    group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this value.'
                            'This is added for computational efficieny reasons.')
    # CPU-offloaded optimizer switches
    group.add_argument('--cpu-optimizer', action='store_true',
                       help='Run optimizer on CPU')
    group.add_argument('--cpu_torch_adam', action='store_true',
                       help='Use Torch Adam as optimizer on CPU.')
    return parser
def add_fp16_config_args(parser):
    """Add mixed-precision (fp16) and loss-scaling arguments to *parser*
    and return it."""
    group = parser.add_argument_group('fp16', 'fp16 configurations')

    # simple on/off precision switches: (flag, help text)
    precision_flags = (
        ('--fp16', 'Run model in fp16 mode'),
        ('--fp32-embedding', 'embedding in fp32'),
        ('--fp32-layernorm', 'layer norm in fp32'),
        ('--fp32-tokentypes', 'embedding token types in fp32'),
        ('--fp32-allreduce', 'all-reduce in fp32'),
    )
    for flag, help_text in precision_flags:
        group.add_argument(flag, action='store_true', help=help_text)

    # loss-scaling knobs
    group.add_argument('--hysteresis', type=int, default=2,
                       help='hysteresis for dynamic loss scaling')
    group.add_argument('--loss-scale', type=float, default=None,
                       help='Static loss scaling, positive power of 2 '
                            'values can improve fp16 convergence. If None, dynamic'
                            'loss scaling is used.')
    group.add_argument('--loss-scale-window', type=float, default=1000,
                       help='Window over which to raise/lower dynamic scale')
    group.add_argument('--min-scale', type=float, default=1,
                       help='Minimum loss scale for dynamic loss scale')
    return parser
def add_training_args(parser):
    """Add training arguments to *parser* and return it.

    Covers the optimization schedule (lr, warmup, decay), activation
    checkpointing, model checkpoint save/load, and distributed-launch
    options.
    """
    group = parser.add_argument_group('train', 'training configurations')
    group.add_argument('--do_train', action='store_true',
                       help="Do training")
    group.add_argument('--do_eval', action='store_true',
                       help="Do evaluation")
    group.add_argument('--zero_shot', action="store_true",
                       help="do zero-shot")
    group.add_argument('--batch-size', type=int, default=4,
                       help='Data Loader batch size')
    group.add_argument('--weight-decay', type=float, default=0.01,
                       help='weight decay coefficient for L2 regularization')
    group.add_argument('--checkpoint-activations', action='store_true',
                       help='checkpoint activation to allow for training '
                            'with larger models and sequences')
    group.add_argument('--checkpoint-num-layers', type=int, default=1,
                       help='chunk size (number of layers) for checkpointing')
    group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
                       help='uses activation checkpointing from deepspeed')
    group.add_argument('--clip-grad', type=float, default=1.0,
                       help='gradient clipping')
    group.add_argument('--epoch', type=int, default=10,
                       help='total number of iterations to train over all training runs')
    group.add_argument('--log-interval', type=int, default=100,
                       help='report interval')
    group.add_argument('--exit-interval', type=int, default=None,
                       help='Exit the program after this many new iterations.')
    group.add_argument('--seed', type=int, default=1234,
                       help='random seed')
    # Batch producer arguments
    group.add_argument('--reset-position-ids', action='store_true',
                       help='Reset posistion ids after end-of-document token.')
    group.add_argument('--reset-attention-mask', action='store_true',
                       help='Reset self attention maske after '
                            'end-of-document token.')
    # Learning rate.
    group.add_argument('--lr-decay-iters', type=int, default=None,
                       help='number of iterations to decay LR over,'
                            ' If None defaults to `--train-iters`*`--epochs`')
    group.add_argument('--lr-decay-style', type=str, default='linear',
                       choices=['constant', 'linear', 'cosine', 'exponential'],
                       help='learning rate decay function')
    group.add_argument('--lr', type=float, default=1.0e-4,
                       help='initial learning rate')
    # bug fix: argparse expands help strings with %-formatting, so a literal
    # percent sign must be escaped as "%%"; the unescaped "1%" made
    # --help / format_help() raise ValueError.
    group.add_argument('--warmup', type=float, default=0.01,
                       help='percentage of data to warmup on (.01 = 1%% of all '
                            'training iters). Default 0.01')
    # model checkpointing
    group.add_argument('--save', type=str, default=None,
                       help='Output directory to save checkpoints to.')
    group.add_argument('--save-interval', type=int, default=5000,
                       help='number of iterations between saves')
    group.add_argument('--no-save-optim', action='store_true',
                       help='Do not save current optimizer.')
    group.add_argument('--no-save-rng', action='store_true',
                       help='Do not save current rng state.')
    group.add_argument('--load', type=str, default=None,
                       help='Path to a directory containing a model checkpoint.')
    group.add_argument('--no-load-optim', action='store_true',
                       help='Do not load optimizer when loading checkpoint.')
    group.add_argument('--no-load-rng', action='store_true',
                       help='Do not load rng state when loading checkpoint.')
    group.add_argument('--finetune', action='store_true',
                       help='Load model for finetuning. Do not load optimizer '
                            'or rng state from checkpoint and set iteration to 0. '
                            'Assumed when loading a release checkpoint.')
    # distributed training args
    group.add_argument('--distributed-backend', default='nccl',
                       help='which backend to use for distributed '
                            'training. One of [gloo, nccl]')
    group.add_argument('--local_rank', type=int, default=None,
                       help='local rank passed from distributed launcher.')
    group.add_argument('--results_dir', type=str, default=None,
                       help='The dir to save the model.')
    group.add_argument('--model_name', type=str, default="test",
                       help="The name you give to the model.")
    # eval
    group.add_argument('--eval_ckpt_path', type=str, default=None,
                       help='The checkpoint path used for evaluation')
    return parser
def add_evaluation_args(parser):
    """Add validation/test-time arguments to *parser* and return it.

    Controls evaluation batch sizing, cadence (iterations and interval),
    sequence lengths, and legacy cloze / OpenAI-weights evaluation modes.
    """
    group = parser.add_argument_group('validation', 'validation configurations')
    group.add_argument('--eval-batch-size', type=int, default=None,
                       help='Data Loader batch size for evaluation datasets.'
                            'Defaults to `--batch-size`')
    group.add_argument('--eval-iters', type=int, default=100,
                       help='number of iterations to run for evaluation'
                            'validation/test for')
    group.add_argument('--eval-interval', type=int, default=1000,
                       help='interval between running evaluation on validation set')
    group.add_argument('--eval-seq-length', type=int, default=None,
                       help='Maximum sequence length to process for '
                            'evaluation. Defaults to `--seq-length`')
    group.add_argument('--eval-max-preds-per-seq', type=int, default=None,
                       help='Maximum number of predictions to use for '
                            'evaluation. Defaults to '
                            'math.ceil(`--eval-seq-length`*.15/10)*10')
    group.add_argument('--overlapping-eval', type=int, default=32,
                       help='sliding window for overlapping eval ')
    group.add_argument('--cloze-eval', action='store_true',
                       help='Evaluation dataset from `--valid-data` is a cloze task')
    # legacy OpenAI / HuggingFace interop options
    group.add_argument('--eval-hf', action='store_true',
                       help='perform evaluation with huggingface openai model.'
                            'use `--load` to specify weights path to be loaded')
    group.add_argument('--load-openai', action='store_true',
                       help='load openai weights into our model. Use `--load` '
                            'to specify weights path to be loaded')
    return parser
def add_text_generate_args(parser):
    """Add sampling arguments for text generation to *parser* and return it."""
    group = parser.add_argument_group('Text generation', 'configurations')
    # (flag, type, default) triples for the sampling knobs
    sampling_options = (
        ("--temperature", float, 1.0),
        ("--top_p", float, 0.0),
        ("--top_k", int, 0),
        ("--out-seq-length", int, 256),
    )
    for flag, value_type, default in sampling_options:
        group.add_argument(flag, type=value_type, default=default)
    return parser
def add_data_args(parser):
    """Add train/valid/test data-pipeline arguments to *parser* and return it.

    Covers the data directory, loader behavior (shuffle, workers, npy/tfrecord
    loaders), tokenizer selection, and sequence-length limits.
    """
    group = parser.add_argument_group('data', 'data configurations')
    # the only required argument in the whole parser
    group.add_argument('--data_dir', type=str, required=True,
                       help="Training data dir")
    group.add_argument('--mmap-warmup', action='store_true',
                       help='Warm up mmap files.')
    group.add_argument('--model-parallel-size', type=int, default=1,
                       help='size of the model parallel.')
    group.add_argument('--shuffle', action='store_true',
                       help='Shuffle data. Shuffling is deterministic '
                            'based on seed and current epoch.')
    group.add_argument('--use-npy-data-loader', action='store_true',
                       help='Use the numpy data loader. If set, then'
                            'train-data-path, val-data-path, and test-data-path'
                            'should also be provided.')
    group.add_argument('--num-workers', type=int, default=2,
                       help="""Number of workers to use for dataloading""")
    # tokenizer selection and storage
    group.add_argument('--tokenizer-model-type', type=str,
                       default='bert-large-uncased',
                       help="Model type to use for sentencepiece tokenization \
                       (one of ['bpe', 'char', 'unigram', 'word']) or \
                       bert vocab to use for BertWordPieceTokenizer (one of \
                       ['bert-large-uncased', 'bert-large-cased', etc.])")
    group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',
                       help='path used to save/load sentencepiece tokenization '
                            'models')
    group.add_argument('--tokenizer-type', type=str,
                       default='BertWordPieceTokenizer',
                       choices=['CharacterLevelTokenizer',
                                'SentencePieceTokenizer',
                                'BertWordPieceTokenizer',
                                'GPT2BPETokenizer'],
                       help='what type of tokenizer to use')
    group.add_argument("--cache-dir", default=None, type=str,
                       help="Where to store pre-trained BERT downloads")
    group.add_argument('--use-tfrecords', action='store_true',
                       help='load `--train-data`, `--valid-data`, '
                            '`--test-data` from BERT tf records instead of '
                            'normal data pipeline')
    group.add_argument('--seq-length', type=int, default=512,
                       help="Maximum sequence length to process")
    group.add_argument('--max-preds-per-seq', type=int, default=None,
                       help='Maximum number of predictions to use per sequence.'
                            'Defaults to math.ceil(`--seq-length`*.15/10)*10.'
                            'MUST BE SPECIFIED IF `--use-tfrecords` is True.')
    return parser
def get_args():
    """Parse all command-line arguments and derive distributed-run settings.

    Chains every add_*_args() group plus DeepSpeed's own arguments, then:
    reads RANK/WORLD_SIZE (or OpenMPI + Slurm env vars) to set up the
    distributed topology, clamps model-parallel size to the world size,
    enables dynamic loss scaling when no static --loss-scale is given,
    and forces the fp32_* fallbacks off when fp16 is disabled.
    """
    parser = argparse.ArgumentParser(description='PyTorch BERT Model')
    parser = add_model_config_args(parser)
    parser = add_fp16_config_args(parser)
    parser = add_training_args(parser)
    parser = add_evaluation_args(parser)
    parser = add_text_generate_args(parser)
    parser = add_data_args(parser)
    # Include DeepSpeed configuration arguments
    parser = deepspeed.add_config_arguments(parser)
    args = parser.parse_args()
    # NOTE(review): --data_dir is required=True in add_data_args, so this
    # warning is unreachable unless that requirement is relaxed.
    if not args.data_dir:
        print('WARNING: No data specified')
    args.cuda = torch.cuda.is_available()
    # Defaults for torch.distributed-style launchers; overridden below
    # when an OpenMPI environment is detected.
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
        # We are using (OpenMPI) mpirun for launching distributed data parallel processes
        local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
        local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
        # Possibly running with Slurm
        num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))
        nodeid = int(os.getenv('SLURM_NODEID', '0'))
        # global rank = node index * ranks-per-node + local rank
        args.local_rank = local_rank
        args.rank = nodeid*local_size + local_rank
        args.world_size = num_nodes*local_size
    # model parallelism cannot exceed the number of processes
    args.model_parallel_size = min(args.model_parallel_size, args.world_size)
    if args.rank == 0:
        print('using world size: {} and model-parallel size: {} '.format(
            args.world_size, args.model_parallel_size))
    # Use dynamic loss scaling unless a static --loss-scale was given.
    args.dynamic_loss_scale = False
    if args.loss_scale is None:
        args.dynamic_loss_scale = True
        if args.rank == 0:
            print(' > using dynamic loss scaling')
    # The args fp32_* or fp16_* meant to be active when the
    # args fp16 is set. So the default behaviour should all
    # be false.
    if not args.fp16:
        args.fp32_embedding = False
        args.fp32_tokentypes = False
        args.fp32_layernorm = False
    return args
| 49.695906 | 89 | 0.600435 |
import argparse
import os
import torch
import deepspeed
def add_model_config_args(parser):
group = parser.add_argument_group('model', 'model configuration')
group.add_argument('--pretrained-bert', action='store_true',
help='use a pretrained bert-large-uncased model instead'
'of initializing from scratch. See '
'--tokenizer-model-type to specify which pretrained '
'BERT model to use')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='dropout probability for attention weights')
group.add_argument('--num-attention-heads', type=int, default=16,
help='num of transformer attention heads')
group.add_argument('--hidden-size', type=int, default=1024,
help='tansformer hidden size')
group.add_argument('--intermediate-size', type=int, default=None,
help='transformer embedding dimension for FFN'
'set to 4*`--hidden-size` if it is None')
group.add_argument('--num-layers', type=int, default=24,
help='num decoder layers')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='layer norm epsilon')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='dropout probability for hidden state transformer')
group.add_argument('--max-position-embeddings', type=int, default=512,
help='maximum number of position embeddings to use')
group.add_argument('--vocab-size', type=int, default=30522,
help='vocab size to use for non-character-level '
'tokenization. This value will only be used when '
'creating a tokenizer')
group.add_argument('--deep-init', action='store_true',
help='initialize bert model similar to gpt2 model.'
'scales initialization of projection layers by a '
'factor of 1/sqrt(2N). Necessary to train bert '
'models larger than BERT-Large.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
help='Pad the vocab size to be divisible by this value.'
'This is added for computational efficieny reasons.')
group.add_argument('--cpu-optimizer', action='store_true',
help='Run optimizer on CPU')
group.add_argument('--cpu_torch_adam', action='store_true',
help='Use Torch Adam as optimizer on CPU.')
return parser
def add_fp16_config_args(parser):
group = parser.add_argument_group('fp16', 'fp16 configurations')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode')
group.add_argument('--fp32-embedding', action='store_true',
help='embedding in fp32')
group.add_argument('--fp32-layernorm', action='store_true',
help='layer norm in fp32')
group.add_argument('--fp32-tokentypes', action='store_true',
help='embedding token types in fp32')
group.add_argument('--fp32-allreduce', action='store_true',
help='all-reduce in fp32')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic'
'loss scaling is used.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale')
group.add_argument('--min-scale', type=float, default=1,
help='Minimum loss scale for dynamic loss scale')
return parser
def add_training_args(parser):
group = parser.add_argument_group('train', 'training configurations')
group.add_argument('--do_train', action='store_true',
help="Do training")
group.add_argument('--do_eval', action='store_true',
help="Do evaluation")
group.add_argument('--zero_shot', action="store_true",
help="do zero-shot")
group.add_argument('--batch-size', type=int, default=4,
help='Data Loader batch size')
group.add_argument('--weight-decay', type=float, default=0.01,
help='weight decay coefficient for L2 regularization')
group.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activation to allow for training '
'with larger models and sequences')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='uses activation checkpointing from deepspeed')
group.add_argument('--clip-grad', type=float, default=1.0,
help='gradient clipping')
group.add_argument('--epoch', type=int, default=10,
help='total number of iterations to train over all training runs')
group.add_argument('--log-interval', type=int, default=100,
help='report interval')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after this many new iterations.')
group.add_argument('--seed', type=int, default=1234,
help='random seed')
group.add_argument('--reset-position-ids', action='store_true',
help='Reset posistion ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention maske after '
'end-of-document token.')
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay LR over,'
' If None defaults to `--train-iters`*`--epochs`')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine', 'exponential'],
help='learning rate decay function')
group.add_argument('--lr', type=float, default=1.0e-4,
help='initial learning rate')
group.add_argument('--warmup', type=float, default=0.01,
help='percentage of data to warmup on (.01 = 1% of all '
'training iters). Default 0.01')
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=5000,
help='number of iterations between saves')
group.add_argument('--no-save-optim', action='store_true',
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true',
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Path to a directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true',
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true',
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
group.add_argument('--distributed-backend', default='nccl',
help='which backend to use for distributed '
'training. One of [gloo, nccl]')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--results_dir', type=str, default=None,
help='The dir to save the model.')
group.add_argument('--model_name', type=str, default="test",
help="The name you give to the model.")
group.add_argument('--eval_ckpt_path', type=str, default=None,
help='The checkpoint path used for evaluation')
return parser
def add_evaluation_args(parser):
group = parser.add_argument_group('validation', 'validation configurations')
group.add_argument('--eval-batch-size', type=int, default=None,
help='Data Loader batch size for evaluation datasets.'
'Defaults to `--batch-size`')
group.add_argument('--eval-iters', type=int, default=100,
help='number of iterations to run for evaluation'
'validation/test for')
group.add_argument('--eval-interval', type=int, default=1000,
help='interval between running evaluation on validation set')
group.add_argument('--eval-seq-length', type=int, default=None,
help='Maximum sequence length to process for '
'evaluation. Defaults to `--seq-length`')
group.add_argument('--eval-max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use for '
'evaluation. Defaults to '
'math.ceil(`--eval-seq-length`*.15/10)*10')
group.add_argument('--overlapping-eval', type=int, default=32,
help='sliding window for overlapping eval ')
group.add_argument('--cloze-eval', action='store_true',
help='Evaluation dataset from `--valid-data` is a cloze task')
group.add_argument('--eval-hf', action='store_true',
help='perform evaluation with huggingface openai model.'
'use `--load` to specify weights path to be loaded')
group.add_argument('--load-openai', action='store_true',
help='load openai weights into our model. Use `--load` '
'to specify weights path to be loaded')
return parser
def add_text_generate_args(parser):
group = parser.add_argument_group('Text generation', 'configurations')
group.add_argument("--temperature", type=float, default=1.0)
group.add_argument("--top_p", type=float, default=0.0)
group.add_argument("--top_k", type=int, default=0)
group.add_argument("--out-seq-length", type=int, default=256)
return parser
def add_data_args(parser):
group = parser.add_argument_group('data', 'data configurations')
group.add_argument('--data_dir', type=str, required=True,
help="Training data dir")
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--model-parallel-size', type=int, default=1,
help='size of the model parallel.')
group.add_argument('--shuffle', action='store_true',
help='Shuffle data. Shuffling is deterministic '
'based on seed and current epoch.')
group.add_argument('--use-npy-data-loader', action='store_true',
help='Use the numpy data loader. If set, then'
'train-data-path, val-data-path, and test-data-path'
'should also be provided.')
group.add_argument('--num-workers', type=int, default=2,
help="""Number of workers to use for dataloading""")
group.add_argument('--tokenizer-model-type', type=str,
default='bert-large-uncased',
help="Model type to use for sentencepiece tokenization \
(one of ['bpe', 'char', 'unigram', 'word']) or \
bert vocab to use for BertWordPieceTokenizer (one of \
['bert-large-uncased', 'bert-large-cased', etc.])")
group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',
help='path used to save/load sentencepiece tokenization '
'models')
group.add_argument('--tokenizer-type', type=str,
default='BertWordPieceTokenizer',
choices=['CharacterLevelTokenizer',
'SentencePieceTokenizer',
'BertWordPieceTokenizer',
'GPT2BPETokenizer'],
help='what type of tokenizer to use')
group.add_argument("--cache-dir", default=None, type=str,
help="Where to store pre-trained BERT downloads")
group.add_argument('--use-tfrecords', action='store_true',
help='load `--train-data`, `--valid-data`, '
'`--test-data` from BERT tf records instead of '
'normal data pipeline')
group.add_argument('--seq-length', type=int, default=512,
help="Maximum sequence length to process")
group.add_argument('--max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use per sequence.'
'Defaults to math.ceil(`--seq-length`*.15/10)*10.'
'MUST BE SPECIFIED IF `--use-tfrecords` is True.')
return parser
def get_args():
parser = argparse.ArgumentParser(description='PyTorch BERT Model')
parser = add_model_config_args(parser)
parser = add_fp16_config_args(parser)
parser = add_training_args(parser)
parser = add_evaluation_args(parser)
parser = add_text_generate_args(parser)
parser = add_data_args(parser)
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
if not args.data_dir:
print('WARNING: No data specified')
args.cuda = torch.cuda.is_available()
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))
nodeid = int(os.getenv('SLURM_NODEID', '0'))
args.local_rank = local_rank
args.rank = nodeid*local_size + local_rank
args.world_size = num_nodes*local_size
args.model_parallel_size = min(args.model_parallel_size, args.world_size)
if args.rank == 0:
print('using world size: {} and model-parallel size: {} '.format(
args.world_size, args.model_parallel_size))
args.dynamic_loss_scale = False
if args.loss_scale is None:
args.dynamic_loss_scale = True
if args.rank == 0:
print(' > using dynamic loss scaling')
if not args.fp16:
args.fp32_embedding = False
args.fp32_tokentypes = False
args.fp32_layernorm = False
return args
| true | true |
f72cfd482c15f282554b38514a3e096adee885e0 | 22,294 | py | Python | configs/video_detect.py | me714/Dwin_Transformer | 825a63869c46db4ef83ccc31d479bbd971ffd47c | [
"Apache-2.0"
] | null | null | null | configs/video_detect.py | me714/Dwin_Transformer | 825a63869c46db4ef83ccc31d479bbd971ffd47c | [
"Apache-2.0"
] | null | null | null | configs/video_detect.py | me714/Dwin_Transformer | 825a63869c46db4ef83ccc31d479bbd971ffd47c | [
"Apache-2.0"
] | 1 | 2022-03-15T06:21:57.000Z | 2022-03-15T06:21:57.000Z | import argparse
import math
import os
import shutil
import time
import numpy as np
from pathlib import Path
from ensemble_boxes import *
import copy
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import matplotlib.pyplot as plt
from itertools import combinations
import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, xywh2xyxy, plot_one_box, strip_optimizer, set_logging)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from mmdet.apis import init_detector, inference_detector
# Video to sample frames from and detector assets (hard-coded paths).
fcap = cv2.VideoCapture('/root/Swin-Transformer-Object-Detection/demo/VID_20210909_164000.mp4')
data_root = '/root/Swin-Transformer-Object-Detection/'
config_file = data_root + 'configs/swin.py'  # mmdet model config
checkpoint_file = data_root + '2021_7_28/epoch_50.pth'  # trained weights
# build the model from a config file and a checkpoint file
swin_model = init_detector(config_file, checkpoint_file, device='cuda:0')
# keep one frame out of every `framerate` frames (see get_image below)
framerate = 10
def get_image(fcap, framerate, save_dir=None):
    """Sample frames from an open ``cv2.VideoCapture`` and save them as JPEGs.

    Every ``framerate``-th frame is written to ``save_dir`` as
    ``<frame_index>.jpg``. Reads until the capture is exhausted, then
    releases it.

    :param fcap: opened cv2.VideoCapture (consumed and released here)
    :param framerate: keep one frame out of every `framerate` frames
    :param save_dir: output directory prefix (must already exist and end
        with a path separator); defaults to the original hard-coded
        `<data_root>/demo/video_frame/` location
    """
    if save_dir is None:
        save_dir = data_root + 'demo/video_frame/'
    c = 1
    while True:
        ret, frame = fcap.read()
        if not ret:
            print('the task is end')
            break
        if (c % framerate == 0):
            cv2.imwrite(save_dir + str(c) + '.jpg', frame)
        c += 1
        # NOTE(review): waitKey(0) blocks for a key press when a HighGUI
        # window exists; with no window it returns immediately. Probably
        # intended as waitKey(1) — confirm before removing.
        cv2.waitKey(0)
    fcap.release()
def filterbox_iou(rec1, rec2):
    """Return the intersection-over-union of two axis-aligned boxes.

    Each box is (y0, x0, y1, x1), i.e. (top, left, bottom, right).  The
    computation is symmetric in the two coordinate axes, so an
    (x0, y0, x1, y1) ordering yields the same value.

    :param rec1: first box
    :param rec2: second box
    :return: IoU in [0, 1]; 0 when the boxes do not overlap
    """
    # Individual box areas.
    area1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    area2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
    # Signed extents of the intersection rectangle; non-positive means
    # the boxes are disjoint along that axis.
    overlap_w = min(rec1[3], rec2[3]) - max(rec1[1], rec2[1])
    overlap_h = min(rec1[2], rec2[2]) - max(rec1[0], rec2[0])
    if overlap_w <= 0 or overlap_h <= 0:
        return 0
    intersection = overlap_w * overlap_h
    return (intersection / (area1 + area2 - intersection)) * 1.0
def detect(save_img=False):
    """Run two-model (YOLO + Swin) detection over ``opt.source``.

    For each image: runs the YOLO model (loaded here from ``opt.weights``)
    and the module-level ``swin_model``, maps the Swin class indices onto
    the YOLO class ordering, keeps Swin predictions for one subset of
    classes and YOLO predictions for the rest, then draws/saves annotated
    images and example comparison plots, and finally writes a summary
    report figure.

    Relies on module-level globals: ``opt`` (parsed CLI args),
    ``swin_model``, ``cv2``, ``plt``. NOTE(review): this function mixes
    inference, ensembling, plotting and reporting; code kept byte-identical
    here and only documented.
    """
    out, source, weights, view_img, save_txt, imgsz = \
        opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')
    # Initialize
    set_logging()
    device = select_device(opt.device)
    if os.path.exists(out):  # output dir
        shutil.rmtree(out)  # delete dir
    os.makedirs(out)  # make new dir
    half = device.type != 'cpu'  # half precision only supported on CUDA
    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    if half:
        model.half()  # to FP16
    # Second-stage classifier (disabled)
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
        modelc.to(device).eval()
    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)
    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
    # Run inference
    t0 = time.time()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    # Counters for the final report / saved example plots.
    f_detect = 0
    counting_img = 0
    full_detect = 0
    full_truth = 0
    img_dict = {}
    frame_key = 0
    dict2 = {}
    for path, img, im0s, vid_cap in dataset:
        img_before = img
        img = torch.from_numpy(img).to(device)
        # img_before = img
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        # Apply NMS
        nms_pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=1,
                                       agnostic=opt.agnostic_nms)
        # nms_pred = cross_class_nms(nms_pred, opt.conf_thres, 0.9, agnostic=opt.agnostic_nms)
        t2 = time_synchronized()
        # Process detections
        for i, det in enumerate(nms_pred):  # detections per image
            print(det)
            dict1 = {'total': 0}
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
            else:
                p, s, im0 = path, '', im0s
            save_path = str(Path(out) / Path(p).name)
            txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            # Run the Swin detector on the same image (it also writes its
            # own visualisation to save_path).
            swin_img = cv2.imread(p)
            result = inference_detector(swin_model, swin_img)
            swin_bbox_list, swin_score_list, swin_label_list = swin_model.show_result(swin_img, result,
                                                                                     out_file=save_path)
            yolo_bbox_list = det[:, 0:4].cpu().detach().numpy().tolist()
            yolo_score_list = det[:, 4].cpu().detach().numpy().tolist()
            yolo_label_list = det[:, 5].cpu().detach().numpy().tolist()
            # The two models use different class orderings; translate Swin
            # label indices to YOLO indices via the shared class names.
            # NOTE(review): the inner loops below reuse loop variable `i`,
            # shadowing the enumerate index (harmless here since `i` is
            # not read afterwards in this iteration).
            swin_list = ['txd', 'jgc', 'xbs', 'wbs', 'c-pg', 'lwz', 'tc', 'a-pg', 'b-pg', 'g-pg', 'z-pg', 'bbt', 'lxb',
                         'xgg', 'lsd', 'wt']
            yolo_list = ['wt', 'jgc', 'lsd', 'lxb', 'bbt', 'xgg', 'txd', 'lwz', 'tc', 'xbs', 'wbs', 'a-pg', 'b-pg',
                         'c-pg', 'g-pg', 'z-pg']
            swin_trueLabel_list = []
            for i in swin_label_list:
                swin_trueLabel_list.append(yolo_list.index(swin_list[i]))
            # NMS for different class, high thresh
            # nms_bbox, nms_score, nms_label = yolo_bbox_list, yolo_score_list, yolo_label_list
            # nms_bbox, nms_score, nms_label = torch.from_numpy(np.array(nms_bbox)).reshape(-1, 4), torch.from_numpy(
            #     np.array(nms_score)).reshape(-1, 1), torch.from_numpy(np.array(nms_label)).reshape(-1, 1)
            # two_det = torch.cat((torch.cat((nms_bbox, nms_score), 1), nms_label), 1)
            # normalize
            # boxes would need to be normalized here (for WBF below)
            # for i, single in enumerate(swin_bbox_list):
            #     swin_bbox_list[i] = [single[0] / 640, single[1] / 480, single[2] / 640, single[3] / 480]
            #
            # for i, single in enumerate(yolo_bbox_list):
            #     yolo_bbox_list[i] = [single[0] / 640, single[1] / 480, single[2] / 640, single[3] / 480]
            # Classes for which the Swin model's predictions are trusted;
            # YOLO predictions for these classes are dropped, and vice versa.
            swin_object = [0, 1, 2, 3, 6, 7, 8, 9, 10]  # from yolo_list:wt lsd lwz tc xbs wbs
            # yolo_list = ['0wt', 'jgc', '2lsd', 'lxb', '4bbt', 'xgg', '6txd', 'lwz', '8tc', 'xbs', '10wbs', 'a-pg', '12b-pg',
            #              'c-pg', '14g-pg', 'z-pg']
            yolo_label_list_copy = yolo_label_list.copy()
            swin_trueLabel_list_copy = swin_trueLabel_list.copy()
            # NOTE(review): deleting by .index(label) removes the FIRST box
            # with that label each time; with duplicate labels the counts
            # come out right but box/score pairings can shift.
            for i in yolo_label_list_copy:
                if i in swin_object:
                    index1 = yolo_label_list.index(i)
                    del yolo_bbox_list[index1]
                    del yolo_score_list[index1]
                    del yolo_label_list[index1]
            # label_filter = [4, 5, 11, 12, 13, 14, 15]
            # filer_box = {}
            # filter_list = []
            # filter_label_list = []
            # for i in range(len(yolo_label_list)):
            #     if yolo_label_list_copy[i] in label_filter:
            #         filter_list.append(i)
            #         filter_label_list.append(yolo_label_list_copy[i])
            # yolo_bbox_list_copy = yolo_bbox_list
            # yolo_score_list_copy = yolo_score_list
            #
            #
            # for pair in combinations(filter_list, 2):
            #     box1 = yolo_bbox_list_copy[pair[0]]
            #     box2 = yolo_bbox_list_copy[pair[1]]
            #     b_iou = filterbox_iou(box1, box2)
            #     if b_iou >= 0.9:
            #         if box1 in yolo_bbox_list and box2 in yolo_bbox_list:
            #             index_0 = yolo_bbox_list.index(box1)
            #             index_1 = yolo_bbox_list.index(box2)
            #             index = index_0 if yolo_score_list[pair[0]] <= yolo_score_list[pair[1]] else index_1
            #             del yolo_bbox_list[index]
            #             del yolo_score_list[index]
            #             del yolo_label_list[index]
            for i in swin_trueLabel_list_copy:
                if i not in swin_object:
                    index2 = swin_trueLabel_list.index(i)
                    del swin_bbox_list[index2]
                    del swin_score_list[index2]
                    del swin_trueLabel_list[index2]
            # Concatenate the surviving Swin and YOLO predictions.
            two_bbox, two_score, two_label = copy.deepcopy(swin_bbox_list), copy.deepcopy(swin_score_list), copy.deepcopy(swin_trueLabel_list)
            for i in range(len(yolo_bbox_list)):
                two_bbox.append(yolo_bbox_list[i])
                two_score.append(yolo_score_list[i])
                two_label.append(yolo_label_list[i])
            two_bbox, two_score, two_label = torch.from_numpy(np.array(two_bbox)).reshape(-1, 4), torch.from_numpy(
                np.array(two_score)).reshape(-1, 1), torch.from_numpy(np.array(two_label)).reshape(-1, 1)
            yolo_bbox_list, yolo_score_list, yolo_label_list = torch.from_numpy(np.array(yolo_bbox_list)).reshape(-1,
                                                                                                                  4), torch.from_numpy(
                np.array(yolo_score_list)).reshape(-1, 1), torch.from_numpy(np.array(yolo_label_list)).reshape(-1, 1)
            swin_bbox_list, swin_score_list, swin_trueLabel_list = torch.from_numpy(np.array(swin_bbox_list)).reshape(
                -1,
                4), torch.from_numpy(
                np.array(swin_score_list)).reshape(-1, 1), torch.from_numpy(np.array(swin_trueLabel_list)).reshape(-1,
                                                                                                                   1)
            # det = torch.cat((torch.cat((swin_bbox_list, swin_score_list), 1), swin_trueLabel_list), 1)  # only show swin_model inference result
            # det = torch.cat((torch.cat((yolo_bbox_list, yolo_score_list), 1), yolo_label_list),1)  # only show yolo_model inference result
            det = torch.cat((torch.cat((two_bbox, two_score), 1), two_label), 1)  # show two_model inference result
            # bbox_list = [swin_bbox_list, yolo_bbox_list]
            # score_list = [swin_score_list, yolo_score_list]
            # label_list = [swin_trueLabel_list, yolo_label_list]
            #
            # wbf_weight = [1, 1]
            # iou_thr = 0.55
            # skip_box_thr = 0.0001
            #
            # boxes, scores, labels = weighted_boxes_fusion(bbox_list, score_list, label_list, weights=wbf_weight,
            #                                               iou_thr=iou_thr, skip_box_thr=skip_box_thr)
            # for in_file in boxes:
            #     in_file[0], in_file[1], in_file[2], in_file[3] = int(in_file[0] * 640), int(in_file[1] * 480), int(
            #         in_file[2] * 640), int(in_file[3] * 480)
            # boxes, scores, labels = boxes.reshape(-1, 4), scores.reshape(-1, 1), labels.reshape(-1, 1)
            # boxes, scores, labels = torch.from_numpy(boxes), torch.from_numpy(scores), torch.from_numpy(labels)
            # det2model = torch.cat((torch.cat((boxes, scores), 1), labels), 1)
            # det = det2model
            if det is not None and len(det):
                numers = len(det)
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string
                # Write results: bounding box, confidence, class
                for *xyxy, conf, cls in reversed(det):
                    # Per-class tally for this image.
                    if dict1.__contains__(cls):
                        dict1[cls] = dict1[cls] + 1
                        dict1['total'] = dict1['total'] + 1
                    else:
                        dict1[cls] = 0
                        dict1['total'] = dict1['total'] + 1
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line) + '\n') % line)
                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        img1 = im0.copy()
                        # if cv2.waitKey(1)==32:
                        #     count = 0
                        #     for filename in os.listdir('new_image/'):
                        #         if filename.endswith('.jpg'):
                        #             count += 1
                        #     # print(count)
                        #     print(f"saving image number {count + 1}")
                        #     # save the image into the imgs folder one level up, named 1, 2, 3, 4...
                        #     cv2.imwrite('new_image/{}.jpg'.format(count + 1), img1)
                        # plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=0.5)  # line thickness
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2)  # line thickness
                        # print(f"\n{names[int(cls)]} bounding box: {int(xyxy[0]),int(xyxy[1]),int(xyxy[2]),int(xyxy[3])}")
                        # print(f"\n{names[int(cls)]} center: {(int(xyxy[0])+int(xyxy[2]))/2, (int(xyxy[1])+int(xyxy[3]))/2}")
                # Print time (inference + NMS)
                # print('%sDone. (%.3fs)' % (s, t2 - t1))
                print(f"{s}")
                print(f"s")  # NOTE(review): prints the literal "s" — leftover debug output
                # print coordinates and class
                # print('%s' % (names[int(cls)]))
                # Stream results
                # view_img = True
                if view_img:
                    cv2.imshow(p, im0)
                    if cv2.waitKey(1) == ord('q'):  # q to quit
                        raise StopIteration
                # Save results (image with detections)
                if save_img:
                    if dataset.mode == 'images':
                        txt = f".numers={len(det)}"
                        cv2.putText(im0, txt,
                                    (50, 100),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, (34, 157, 255), 2)
                        cv2.imwrite(save_path, im0)
                    else:
                        if vid_path != save_path:  # new video
                            vid_path = save_path
                            if isinstance(vid_writer, cv2.VideoWriter):
                                vid_writer.release()  # release previous video writer
                            fourcc = 'mp4v'  # output video codec
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                            vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
                        vid_writer.write(im0)
            im_after = im0
            img_dict[frame_key] = dict1
            frame_key += 1
            detected = len(det)
            # Expected object count depends on the image category encoded in
            # the file name prefix ("body", "op", or other).
            img_category = save_path.split('/')[-1].split('_')[0]
            if img_category == 'body':
                true = 17
            elif img_category =='op':
                true = 12
            else:
                true = 29
            root_path = '/root/results/'
            if detected == true:
                # Perfect count: save a side-by-side original/detected plot.
                plt.figure()
                plt.subplot(1, 3, 1)
                plt.title('original image', size=10)
                plt.axis([0, 640, 0, 480])
                plt.xticks([])
                plt.yticks([])
                plt.imshow(img_before.transpose(1, 2, 0))
                plt.subplot(1, 3, 2)
                plt.title('detected image', size=10)
                plt.axis([0, 640, 0, 480])
                plt.xticks([])
                plt.yticks([])
                plt.imshow(im_after)
                plt.text(700, 300, f"Original:{true}", size=10)
                plt.text(700, 100, f"Detected:{detected}", size=10)
                # plt.text(700, 100, f"Average confidence:{conf}%")
                plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,
                            dpi=800)
                counting_img += 1
                full_detect += detected
                full_truth += true
            elif detected != true and f_detect <= 7 and random.uniform(0, 1) > 0.65:
                # Miscount: randomly sample up to 8 failure examples.
                plt.figure()
                plt.subplot(1, 3, 1)
                plt.title(f'original image', size=10)
                plt.axis([0, 640, 0, 480])
                plt.xticks([])
                plt.yticks([])
                plt.imshow(img_before.transpose(1, 2, 0))
                plt.subplot(1, 3, 2)
                plt.title(f'detected image', size=10)
                plt.axis([0, 640, 0, 480])
                plt.xticks([])
                plt.yticks([])
                plt.imshow(im_after)
                plt.text(700, 300, f"Original:{true}", size=10)
                plt.text(700, 100, f"Detected:{detected}", size=10)
                plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,
                            dpi=800)
                counting_img += 1
                f_detect+=1
                full_detect += detected
                full_truth += true
            else:
                # print('wrong-------', save_path)
                pass
            # plt.show()
            # plt.figure()
            # plt.axis([0, 640, 0, 480])
            # plt.text(700, 300, f"Origina:{count_acc}%")
            # plt.text(700, 200, f"Detected:{classify_acc}%")
            # plt.text(700, 100, f"Average confidence:{conf}%")
        # break
    if save_txt or save_img:
        print('Results saved to %s' % Path(out))
    full_time = time.time() - t0
    print('Done. (%.3fs)' % full_time)
    # Build the summary report figure from the per-frame tallies.
    merege = math.ceil(full_detect/frame_key)
    for i in img_dict:
        if img_dict[i]['total'] == merege:
            dict2 = img_dict[i]
    plt.figure()
    plt.xticks([])
    plt.yticks([])
    plt.axis([0, 640, 0, 680])
    plt.text(50, 620, f"Calming detection report:{dict2}", color='blue', size=5)
    plt.text(50, 520, f"Calming detection report", color='blue', size=10)
    plt.text(50, 420, f"the detect: {merege}", color='blue', size=10)
    plt.text(50, 320, f"All equipment Detected: {full_detect}", size=10)
    plt.text(50, 220, f"All equipment manually counted: {full_truth}", size=10)
    plt.text(50, 120, f"Counting Accuracy: %.2f" % (full_detect*100/full_truth) + '%', size=10)
    plt.text(50, 40, f"Average time: %.2f" % (full_time/counting_img) + " s", size=10)
    print('dfddddddddddddddddddddddddddddddddddddddddd')
    plt.savefig('/root/Downloads/report.jpg')
if __name__ == '__main__':
    # Extract frames from the input video first, then run detection on them.
    get_image(fcap,framerate)
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='super_yolo.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='/root/Swin-Transformer-Object-Detection/demo/video_frame', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.85, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-dir', type=str, default='/root/Calming_final_test/results', help='directory to save results')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    opt = parser.parse_args()  # stored in a module-level global read by detect()
    print(opt)
    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()
| 44.322068 | 157 | 0.540056 | import argparse
import math
import os
import shutil
import time
import numpy as np
from pathlib import Path
from ensemble_boxes import *
import copy
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import matplotlib.pyplot as plt
from itertools import combinations
import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords,
xyxy2xywh, xywh2xyxy, plot_one_box, strip_optimizer, set_logging)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from mmdet.apis import init_detector, inference_detector
fcap = cv2.VideoCapture('/root/Swin-Transformer-Object-Detection/demo/VID_20210909_164000.mp4')
data_root = '/root/Swin-Transformer-Object-Detection/'
config_file = data_root + 'configs/swin.py'
checkpoint_file = data_root + '2021_7_28/epoch_50.pth'
swin_model = init_detector(config_file, checkpoint_file, device='cuda:0')
framerate = 10
def get_image(fcap, framerate):
c = 1
while True:
ret, frame = fcap.read()
if ret:
if (c % framerate == 0):
cv2.imwrite(data_root + 'demo/video_frame/' + str(c) + '.jpg', frame)
c += 1
cv2.waitKey(0)
else:
print('the task is end')
break
fcap.release()
def filterbox_iou(rec1, rec2):
S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])
sum_area = S_rec1 + S_rec2
left_line = max(rec1[1], rec2[1])
right_line = min(rec1[3], rec2[3])
top_line = max(rec1[0], rec2[0])
bottom_line = min(rec1[2], rec2[2])
if left_line >= right_line or top_line >= bottom_line:
return 0
else:
intersect = (right_line - left_line) * (bottom_line - top_line)
return (intersect / (sum_area - intersect)) * 1.0
def detect(save_img=False):
out, source, weights, view_img, save_txt, imgsz = \
opt.save_dir, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
webcam = source.isnumeric() or source.startswith(('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')
set_logging()
device = select_device(opt.device)
if os.path.exists(out):
shutil.rmtree(out)
os.makedirs(out)
half = device.type != 'cpu'
model = attempt_load(weights, map_location=device)
imgsz = check_img_size(imgsz, s=model.stride.max())
if half:
model.half()
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2)
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
modelc.to(device).eval()
vid_path, vid_writer = None, None
if webcam:
view_img = True
cudnn.benchmark = True
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device)
_ = model(img.half() if half else img) if device.type != 'cpu' else None
f_detect = 0
counting_img = 0
full_detect = 0
full_truth = 0
img_dict = {}
frame_key = 0
dict2 = {}
for path, img, im0s, vid_cap in dataset:
img_before = img
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float()
img /= 255.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
nms_pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=1,
agnostic=opt.agnostic_nms)
t2 = time_synchronized()
for i, det in enumerate(nms_pred):
print(det)
dict1 = {'total': 0}
if webcam:
p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
else:
p, s, im0 = path, '', im0s
save_path = str(Path(out) / Path(p).name)
txt_path = str(Path(out) / Path(p).stem) + ('_%g' % dataset.frame if dataset.mode == 'video' else '')
s += '%gx%g ' % img.shape[2:]
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]
swin_img = cv2.imread(p)
result = inference_detector(swin_model, swin_img)
swin_bbox_list, swin_score_list, swin_label_list = swin_model.show_result(swin_img, result,
out_file=save_path)
yolo_bbox_list = det[:, 0:4].cpu().detach().numpy().tolist()
yolo_score_list = det[:, 4].cpu().detach().numpy().tolist()
yolo_label_list = det[:, 5].cpu().detach().numpy().tolist()
swin_list = ['txd', 'jgc', 'xbs', 'wbs', 'c-pg', 'lwz', 'tc', 'a-pg', 'b-pg', 'g-pg', 'z-pg', 'bbt', 'lxb',
'xgg', 'lsd', 'wt']
yolo_list = ['wt', 'jgc', 'lsd', 'lxb', 'bbt', 'xgg', 'txd', 'lwz', 'tc', 'xbs', 'wbs', 'a-pg', 'b-pg',
'c-pg', 'g-pg', 'z-pg']
swin_trueLabel_list = []
for i in swin_label_list:
swin_trueLabel_list.append(yolo_list.index(swin_list[i]))
swin_object = [0, 1, 2, 3, 6, 7, 8, 9, 10]
yolo_label_list_copy = yolo_label_list.copy()
swin_trueLabel_list_copy = swin_trueLabel_list.copy()
for i in yolo_label_list_copy:
if i in swin_object:
index1 = yolo_label_list.index(i)
del yolo_bbox_list[index1]
del yolo_score_list[index1]
del yolo_label_list[index1]
for i in swin_trueLabel_list_copy:
if i not in swin_object:
index2 = swin_trueLabel_list.index(i)
del swin_bbox_list[index2]
del swin_score_list[index2]
del swin_trueLabel_list[index2]
two_bbox, two_score, two_label = copy.deepcopy(swin_bbox_list), copy.deepcopy(swin_score_list), copy.deepcopy(swin_trueLabel_list)
for i in range(len(yolo_bbox_list)):
two_bbox.append(yolo_bbox_list[i])
two_score.append(yolo_score_list[i])
two_label.append(yolo_label_list[i])
two_bbox, two_score, two_label = torch.from_numpy(np.array(two_bbox)).reshape(-1, 4), torch.from_numpy(
np.array(two_score)).reshape(-1, 1), torch.from_numpy(np.array(two_label)).reshape(-1, 1)
yolo_bbox_list, yolo_score_list, yolo_label_list = torch.from_numpy(np.array(yolo_bbox_list)).reshape(-1,
4), torch.from_numpy(
np.array(yolo_score_list)).reshape(-1, 1), torch.from_numpy(np.array(yolo_label_list)).reshape(-1, 1)
swin_bbox_list, swin_score_list, swin_trueLabel_list = torch.from_numpy(np.array(swin_bbox_list)).reshape(
-1,
4), torch.from_numpy(
np.array(swin_score_list)).reshape(-1, 1), torch.from_numpy(np.array(swin_trueLabel_list)).reshape(-1,
1)
, two_label), 1)
if det is not None and len(det):
numers = len(det)
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum()
s += '%g %ss, ' % (n, names[int(c)])
for *xyxy, conf, cls in reversed(det):
if dict1.__contains__(cls):
dict1[cls] = dict1[cls] + 1
dict1['total'] = dict1['total'] + 1
else:
dict1[cls] = 0
dict1['total'] = dict1['total'] + 1
if save_txt:
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
line = (cls, conf, *xywh) if opt.save_conf else (cls, *xywh)
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line) + '\n') % line)
if save_img or view_img:
label = '%s %.2f' % (names[int(cls)], conf)
img1 = im0.copy()
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=2)
print(f"{s}")
print(f"s")
if view_img:
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'):
raise StopIteration
if save_img:
if dataset.mode == 'images':
txt = f".numers={len(det)}"
cv2.putText(im0, txt,
(50, 100),
cv2.FONT_HERSHEY_SIMPLEX, 1.2, (34, 157, 255), 2)
cv2.imwrite(save_path, im0)
else:
if vid_path != save_path:
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release()
fourcc = 'mp4v'
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
vid_writer.write(im0)
im_after = im0
img_dict[frame_key] = dict1
frame_key += 1
detected = len(det)
img_category = save_path.split('/')[-1].split('_')[0]
if img_category == 'body':
true = 17
elif img_category =='op':
true = 12
else:
true = 29
root_path = '/root/results/'
if detected == true:
plt.figure()
plt.subplot(1, 3, 1)
plt.title('original image', size=10)
plt.axis([0, 640, 0, 480])
plt.xticks([])
plt.yticks([])
plt.imshow(img_before.transpose(1, 2, 0))
plt.subplot(1, 3, 2)
plt.title('detected image', size=10)
plt.axis([0, 640, 0, 480])
plt.xticks([])
plt.yticks([])
plt.imshow(im_after)
plt.text(700, 300, f"Original:{true}", size=10)
plt.text(700, 100, f"Detected:{detected}", size=10)
plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,
dpi=800)
counting_img += 1
full_detect += detected
full_truth += true
elif detected != true and f_detect <= 7 and random.uniform(0, 1) > 0.65:
plt.figure()
plt.subplot(1, 3, 1)
plt.title(f'original image', size=10)
plt.axis([0, 640, 0, 480])
plt.xticks([])
plt.yticks([])
plt.imshow(img_before.transpose(1, 2, 0))
plt.subplot(1, 3, 2)
plt.title(f'detected image', size=10)
plt.axis([0, 640, 0, 480])
plt.xticks([])
plt.yticks([])
plt.imshow(im_after)
plt.text(700, 300, f"Original:{true}", size=10)
plt.text(700, 100, f"Detected:{detected}", size=10)
plt.savefig(root_path + f'{img_category}_{counting_img}.jpg', bbox_inches='tight', pad_inches=0.1,
dpi=800)
counting_img += 1
f_detect+=1
full_detect += detected
full_truth += true
else:
pass
if save_txt or save_img:
print('Results saved to %s' % Path(out))
full_time = time.time() - t0
print('Done. (%.3fs)' % full_time)
merege = math.ceil(full_detect/frame_key)
for i in img_dict:
if img_dict[i]['total'] == merege:
dict2 = img_dict[i]
plt.figure()
plt.xticks([])
plt.yticks([])
plt.axis([0, 640, 0, 680])
plt.text(50, 620, f"Calming detection report:{dict2}", color='blue', size=5)
plt.text(50, 520, f"Calming detection report", color='blue', size=10)
plt.text(50, 420, f"the detect: {merege}", color='blue', size=10)
plt.text(50, 320, f"All equipment Detected: {full_detect}", size=10)
plt.text(50, 220, f"All equipment manually counted: {full_truth}", size=10)
plt.text(50, 120, f"Counting Accuracy: %.2f" % (full_detect*100/full_truth) + '%', size=10)
plt.text(50, 40, f"Average time: %.2f" % (full_time/counting_img) + " s", size=10)
print('dfddddddddddddddddddddddddddddddddddddddddd')
plt.savefig('/root/Downloads/report.jpg')
if __name__ == '__main__':
get_image(fcap,framerate)
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='super_yolo.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='/root/Swin-Transformer-Object-Detection/demo/video_frame', help='source')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.85, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-dir', type=str, default='/root/Calming_final_test/results', help='directory to save results')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
opt = parser.parse_args()
print(opt)
with torch.no_grad():
if opt.update:
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect()
strip_optimizer(opt.weights)
else:
detect()
| true | true |
f72cfd7e4073d6621ae92411769b73ecd011c187 | 3,768 | py | Python | api/vk_methods.py | greenjew/deeploma | 499de7ad844546acf0760aa00096d66216fd3ee9 | [
"MIT"
] | null | null | null | api/vk_methods.py | greenjew/deeploma | 499de7ad844546acf0760aa00096d66216fd3ee9 | [
"MIT"
] | null | null | null | api/vk_methods.py | greenjew/deeploma | 499de7ad844546acf0760aa00096d66216fd3ee9 | [
"MIT"
] | 1 | 2020-07-08T16:26:18.000Z | 2020-07-08T16:26:18.000Z | import requests as r
import pandas as pd
import time
from datetime import datetime
import re
# NOTE(review): hard-coded credential — this token has been committed to
# version control; it should be rotated and loaded from an env var/config.
TOKEN_VK = '23acc95023acc95023acc9504023c092a1223ac23acc9507ef4dc240205bcafea27244d'  # vk service token
version = 5.101  # VK API version passed as the 'v' parameter
def get_members(group_id):
    """Return the member count of a VK community via ``groups.getById``.

    Makes up to two attempts, pausing briefly between them (rate-limit
    courtesy, matching the original 0.06 s sleep).

    :param group_id: community screen name or numeric id
    :return: member count as an int, or None if both attempts fail

    Fixes: the original used a bare ``except:`` which also swallows
    ``KeyboardInterrupt``/``SystemExit``; narrowed to ``Exception``.
    The implicit ``None`` fall-through return is now explicit.
    """
    for _attempt in range(2):
        try:
            response = r.get('https://api.vk.com/method/groups.getById',
                             params={
                                 'v': version,
                                 'access_token': TOKEN_VK,
                                 'group_ids': group_id,
                                 'fields': 'members_count'
                             })
            return response.json()['response'][0]['members_count']
        except Exception:
            # Network error or an API 'error' payload (missing 'response'
            # key): pause, then retry once.
            time.sleep(0.06)
    return None
def cleanText(raw_text):
    """Strip HTML-like tags and VK mention markup from *raw_text*.

    Removes ``<...>`` tags and rewrites ``[id123|Name]`` mentions to just
    ``Name`` (the ``[id...|`` prefix and ``]`` suffix are deleted).
    """
    markup = re.compile(r'<.*?>|(\[.*?\|)|\]')
    return markup.sub('', raw_text)
def load_from_vk(group_id, date_from, date_to):
    """Download wall posts of a VK group posted between two dates.

    Pages through ``wall.get`` (100 posts per request) until all posts in
    a page are older than *date_from*, then builds a DataFrame with one
    row per post and adds an ``is_anomaly`` column ('Да'/'Нет') flagging
    posts whose view count exceeds a 3-sigma band computed from per-day
    view statistics.

    :param group_id: community screen name (used as the ``domain`` param)
    :param date_from: lower date bound (exclusive), a ``datetime.date``
    :param date_to: upper date bound (exclusive), a ``datetime.date``
    :raises Exception: (group_id, 'channel_not_found') on an API error
    :return: pandas DataFrame of posts
    """
    headers = ['group_name', 'members', 'post_date', 'post_link', 'text', 'views', 'likes', 'reposts', 'comments']
    posts_in_group = []
    offset = 0
    members = get_members(group_id)
    date_ok = True
    last_try = 0
    # Pull wall posts until we move past the lower date bound
    while date_ok or last_try <= 1:
        res = r.get('https://api.vk.com/method/wall.get',
                    params={
                        'v': version,
                        'access_token': TOKEN_VK,
                        'domain': group_id,
                        'offset': offset,
                        'count': '100',
                        'extended': '1',
                        'fields': 'name'
                    })
        try:
            response = res.json()['response']
        except:
            if res.json()['error']['error_code'] != 0:
                raise Exception(group_id, 'channel_not_found')
        if response['count'] == 0:  # if the dump is empty, move on to the next group
            date_ok = False
            last_try = 2
            continue
        # count the posts that satisfy the date condition
        all_posts = response['items']
        group_name = response['groups'][0]['name']
        if all(datetime.fromtimestamp(post['date']).date() < date_from
               for post in all_posts):
            date_ok = False
            last_try += 1
        else:
            for post in all_posts:
                post_info = []
                post_date = datetime.fromtimestamp(post['date'])
                if date_from < post_date.date() < date_to:
                    print(post_date)
                    post_link = 'https://vk.com/wall' + str(post['owner_id']) + '_' + str(post['id'])
                    post_text = cleanText(post['text'])
                    post_info.append((group_name, members, post_date, post_link, post_text,
                                      post['views']['count'], post['likes']['count'], post['reposts']['count'],
                                      post['comments']['count']))
                    posts_in_group.extend(post_info)
        offset += len(all_posts)
        time.sleep(0.06)
    posts_data = pd.DataFrame(posts_in_group, columns=headers)
    # Per-day mean/std of views, averaged over days, defines the anomaly band.
    mean_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).mean()['views'].mean())
    std_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).std()['views'].mean())
    def three_sigma_anomaly(views):
        # Returns 'Да' (yes) when views exceed mean + 3*std, else 'Нет' (no);
        # the Russian strings are part of the output schema and kept as-is.
        ano_cut_off = 3 * std_
        upper_cut = mean_ + ano_cut_off
        if views > upper_cut:
            return 'Да'
        else:
            return 'Нет'
    anomalies = posts_data.views.apply(three_sigma_anomaly)
    posts_data['is_anomaly'] = anomalies
    return posts_data
import pandas as pd
import time
from datetime import datetime
import re
TOKEN_VK = '23acc95023acc95023acc9504023c092a1223ac23acc9507ef4dc240205bcafea27244d'
version = 5.101
def get_members(group_id):
try_count = 0
while try_count < 2:
try:
response = r.get('https://api.vk.com/method/groups.getById',
params={
'v': version,
'access_token': TOKEN_VK,
'group_ids': group_id,
'fields': 'members_count'
})
return response.json()['response'][0]['members_count']
except:
try_count += 1
time.sleep(0.06)
def cleanText(raw_text):
cleanr = re.compile('<.*?>|(\[.*?\|)|\]')
cleantext = re.sub(cleanr, '', raw_text)
return cleantext
def load_from_vk(group_id, date_from, date_to):
    """Download wall posts of a VK group between two dates.

    Pages through ``wall.get`` 100 posts at a time, collects per-post
    metadata for posts whose date lies strictly inside
    (date_from, date_to), and returns a DataFrame with a three-sigma
    views-anomaly flag column.

    NOTE(review): relies on module globals `r` (presumably the `requests`
    module), `version` and `TOKEN_VK` — confirm against the file header.
    """
    headers = ['group_name', 'members', 'post_date', 'post_link', 'text', 'views', 'likes', 'reposts', 'comments']
    posts_in_group = []
    offset = 0
    members = get_members(group_id)
    # Pagination stops after the page dates fall before date_from; one extra
    # pass is allowed (last_try) before the loop exits.
    date_ok = True
    last_try = 0
    while date_ok or last_try <= 1:
        res = r.get('https://api.vk.com/method/wall.get',
                    params={
                        'v': version,
                        'access_token': TOKEN_VK,
                        'domain': group_id,
                        'offset': offset,
                        'count': '100',
                        'extended': '1',
                        'fields': 'name'
                    })
        try:
            response = res.json()['response']
        except:
            # No 'response' key means the API returned an error payload.
            # NOTE(review): if this branch runs on the very first page and
            # error_code == 0, `response` is still unbound below — confirm
            # whether that case can occur in practice.
            if res.json()['error']['error_code'] != 0:
                raise Exception(group_id, 'channel_not_found')
        if response['count'] == 0:
            # Empty wall: force immediate loop termination.
            date_ok = False
            last_try = 2
            continue
        all_posts = response['items']
        group_name = response['groups'][0]['name']
        if all(datetime.fromtimestamp(post['date']).date() < date_from
               for post in all_posts):
            # Whole page is older than the window — start winding down.
            date_ok = False
            last_try += 1
        else:
            for post in all_posts:
                post_info = []
                post_date = datetime.fromtimestamp(post['date'])
                if date_from < post_date.date() < date_to:
                    print(post_date)
                    post_link = 'https://vk.com/wall' + str(post['owner_id']) + '_' + str(post['id'])
                    post_text = cleanText(post['text'])
                    post_info.append((group_name, members, post_date, post_link, post_text,
                                      post['views']['count'], post['likes']['count'], post['reposts']['count'],
                                      post['comments']['count']))
                    posts_in_group.extend(post_info)
        offset += len(all_posts)
        # Throttle to stay under the VK API request-rate limit.
        time.sleep(0.06)
    posts_data = pd.DataFrame(posts_in_group, columns=headers)
    # Daily-mean statistics of view counts used for the 3-sigma rule below.
    mean_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).mean()['views'].mean())
    std_ = int(posts_data.groupby(posts_data['post_date'].dt.to_period('d')).std()['views'].mean())
    def three_sigma_anomaly(views):
        # Flags a post as anomalous when views exceed mean + 3*std.
        # Return values are the Russian strings for "Yes"/"No".
        ano_cut_off = 3 * std_
        upper_cut = mean_ + ano_cut_off
        if views > upper_cut:
            return 'Да'
        else:
            return 'Нет'
    anomalies = posts_data.views.apply(three_sigma_anomaly)
    posts_data['is_anomaly'] = anomalies
    return posts_data
f72cfe472a8204ac2f26dd570050027f127c9500 | 956 | py | Python | examples/OGLE-BLG-ECL-234840/plot_v8.py | NewCPM/MCPM | 9fb9b7725ccc4452701be47d103ab61f81b4595b | [
"MIT"
] | 2 | 2018-04-10T22:35:11.000Z | 2018-05-16T21:00:40.000Z | examples/OGLE-BLG-ECL-234840/plot_v8.py | CPM-project/MCPM | 9fb9b7725ccc4452701be47d103ab61f81b4595b | [
"MIT"
] | null | null | null | examples/OGLE-BLG-ECL-234840/plot_v8.py | CPM-project/MCPM | 9fb9b7725ccc4452701be47d103ab61f81b4595b | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
# ---- Settings -------------------------------------------------------------
# Input photometry (time, flux, error) and fitted model residual files.
in_data = "run_6/run_6_e2_phot_prf_limit.dat"
in_model = "run_6/run_6_e2_phot.res"
out_file = "run_6/plot_eb234840_v8.png"
# Plot styling for data points, model curve, axis limits and labels.
kwargs = {'color': 'red', 'marker': '.', 'ls': 'none'}
x_lim = [7500., 7528.]
y_lim = [-4000., 500.]
kwargs_1 = {'color': 'blue', 'ls': ':', 'lw': 2, 'zorder': 10}
xlabel = 'BJD - 2450000'
ylabel = 'delta flux'
# Time span drawn as a flat zero-flux band (model excluded in this range).
band = np.arange(7500, 7508.0001)
kwargs_band = {'color': 'blue', 'lw': 2, 'zorder': 10}
################
# End of settings
(times, values, errors) = np.loadtxt(in_data, unpack=True)
(times_model, _, _, values_model) = np.loadtxt(in_model, unpack=True)
plt.errorbar(times, values, yerr=errors, **kwargs)
# Plot the model only after the band interval ends.
mask = (times_model > band[-1])
plt.plot(times_model[mask], values_model[mask], **kwargs_1)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim(x_lim)
plt.ylim(y_lim)
# Zero-level line across the band interval.
plt.plot(band, band*0., **kwargs_band)
plt.savefig(out_file)
| 23.9 | 69 | 0.676778 | import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
in_data = "run_6/run_6_e2_phot_prf_limit.dat"
in_model = "run_6/run_6_e2_phot.res"
out_file = "run_6/plot_eb234840_v8.png"
kwargs = {'color': 'red', 'marker': '.', 'ls': 'none'}
x_lim = [7500., 7528.]
y_lim = [-4000., 500.]
kwargs_1 = {'color': 'blue', 'ls': ':', 'lw': 2, 'zorder': 10}
xlabel = 'BJD - 2450000'
ylabel = 'delta flux'
band = np.arange(7500, 7508.0001)
kwargs_band = {'color': 'blue', 'lw': 2, 'zorder': 10}
npack=True)
plt.errorbar(times, values, yerr=errors, **kwargs)
mask = (times_model > band[-1])
plt.plot(times_model[mask], values_model[mask], **kwargs_1)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.plot(band, band*0., **kwargs_band)
plt.savefig(out_file)
| true | true |
f72cfecb9a75e28d76c6235057fe3ad2011e3f3f | 4,092 | py | Python | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import argparse
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from joblib import delayed,Parallel
import os
def whichKeep(est_params):
    """Boolean mask of parameter rows with trustworthy (kon, koff, ksyn).

    A row is kept when kon and koff sit strictly inside the optimizer
    bounds [2e-3, 999], the burst size ksyn/koff exceeds 1, and ksyn > 1.
    """
    params = np.array(est_params)
    on_rate = params[:, 0]
    off_rate = params[:, 1]
    syn_rate = params[:, 2]
    on_ok = ~(on_rate < 2*1e-3)*~(on_rate > 1e3 - 1)
    off_ok = ~(off_rate < 2*1e-3)*~(off_rate > 1e3 - 1)
    bursty = syn_rate/off_rate > 1
    syn_ok = syn_rate > 1
    return bursty*off_ok*on_ok*syn_ok
def MaximumLikelihood(vals, export_asymp_ci = False, fix = 0, metod = 'L-BFGS-B'):
    """Maximum-likelihood fit of beta-Poisson bursting kinetics.

    Fits (kon, koff, ksyn) to the transcript counts in *vals* by
    minimizing the negative log-likelihood of the beta-Poisson model,
    seeded with the method-of-moments estimate.

    Returns a length-3 array of estimates, or an all-NaN array when
    *vals* is empty or the optimizer fails.
    (`export_asymp_ci` and `fix` are accepted for interface
    compatibility but unused.)
    """
    # Imports are local so the function is self-contained when shipped to
    # parallel workers (joblib/ipyparallel).
    from scipy.optimize import minimize
    from scipy.stats import poisson, norm
    from scipy.special import j_roots
    from scipy.special import beta as beta_fun
    import numpy as np

    if len(vals) == 0:
        return np.array([np.nan, np.nan, np.nan])

    def dBP(at, alpha, bet, lam):
        # Beta-Poisson density via 50-point Gauss-Jacobi quadrature.
        at.shape = (len(at), 1)
        np.repeat(at, 50, axis=1)

        def fun(at, m):
            if (max(m) < 1e6):
                return (poisson.pmf(at, m))
            else:
                # Normal approximation for very large means.
                # BUG FIX: the original called the undefined name `sqrt`
                # here (NameError whenever max(m) >= 1e6); use np.sqrt.
                return (norm.pdf(at, loc=m, scale=np.sqrt(m)))

        x, w = j_roots(50, alpha=bet - 1, beta=alpha - 1)
        gs = np.sum(w * fun(at, m=lam * (1 + x) / 2), axis=1)
        prob = 1 / beta_fun(alpha, bet) * 2 ** (-alpha - bet + 1) * gs
        return (prob)

    def LogLikelihood(x, vals):
        kon = x[0]
        koff = x[1]
        ksyn = x[2]
        # Small constant avoids log(0) for zero-probability bins.
        return (-np.sum(np.log(dBP(vals, kon, koff, ksyn) + 1e-10)))

    # Seed from the moment estimates; fall back to a generic start point
    # when they are degenerate (NaN) or negative.
    x0 = MomentInference(vals)
    if np.isnan(x0).any() or any(x0 < 0):
        x0 = np.array([10, 10, 10])
    bnds = ((1e-3, 1e3), (1e-3, 1e3), (1, 1e4))
    vals_ = np.copy(vals)  # Otherwise the structure is violated.
    try:
        ll = minimize(LogLikelihood, x0, args=(vals_), method=metod, bounds=bnds)
    except Exception:
        # Optimizer failure for this gene: report NaNs instead of aborting
        # the whole parallel run.
        return np.array([np.nan, np.nan, np.nan])
    return ll.x
# moment-based inference
def MomentInference(vals, export_moments=False):
    """Method-of-moments estimate of the beta-Poisson parameters.

    Port of Anton Larsson's R implementation.  Returns ``np.nan`` when
    the factorial moments are degenerate, otherwise an array
    ``[lambda, mu, nu]`` (with the three moment ratios appended when
    *export_moments* is true).
    """
    from scipy import stats  # needs imports inside function when run in ipyparallel
    import numpy as np

    n_obs = len(vals)
    # First three factorial moments of the counts.
    m1 = float(np.mean(vals))
    m2 = float(sum(vals*(vals - 1))/n_obs)
    m3 = float(sum(vals*(vals - 1)*(vals - 2))/n_obs)

    # Sanity check on input (e.g. need at least one expression level).
    if sum(vals) == 0: return np.nan
    if m1 == 0: return np.nan
    if m2 == 0: return np.nan

    r1 = m1
    r2 = m2/m1
    r3 = m3/m2

    # Guard every denominator used by the closed-form estimators.
    denom_a = r1*r2-2*r1*r3 + r2*r3
    denom_b = r1-2*r2+r3
    if denom_a == 0: return np.nan
    if denom_a*denom_b == 0: return np.nan
    if denom_b == 0: return np.nan

    lambda_est = (2*r1*(r3-r2))/denom_a
    mu_est = (2*(r3-r2)*(r1-r3)*(r2-r1))/(denom_a*denom_b)
    v_est = (2*r1*r3 - r1*r2 - r2*r3)/denom_b
    if export_moments:
        return np.array([lambda_est, mu_est, v_est, r1, r2, r3])
    return np.array([lambda_est, mu_est, v_est])
# ---- Command-line driver ---------------------------------------------------
parser = argparse.ArgumentParser(description='Maximum likelihood inference of bursting kinetics from scRNA-seq data')
parser.add_argument('file', metavar='file', type=str, nargs=1,help='.csv file with allelic-resolution transcript counts' )
parser.add_argument('--njobs', default=[50], nargs=1, type=int, help='Number of jobs for the parallelization, default 50')
args = parser.parse_args()
filename = args.file[0]
njobs = args.njobs[0]
print('Reading file ' + filename)
# Rows are genes, columns are cells; the index column holds gene names.
rpkm = pd.read_csv(filename, index_col=0)
print('Inferring kinetics:')
# One ML fit per gene row, run in parallel.  The loop variable `rpkm`
# shadows the DataFrame only inside the generator expression; the outer
# DataFrame is untouched and reused below for the index.
params = Parallel(n_jobs=njobs, verbose = 3)(delayed(MaximumLikelihood)(np.around(rpkm[pd.notnull(rpkm)])) for i,rpkm in rpkm.iterrows())
# Keep only genes whose estimates lie inside the trusted parameter region.
keep = whichKeep(params)
print('Inferred kinetics of {} genes out of {} total'.format(np.sum(keep), len(keep)))
# Output pickle is named after the input file: <basename>_ML.pkl (CWD).
base = os.path.splitext(os.path.basename(filename))[0]
base = base + '_ML.pkl'
print('Saving result to ' + base)
pd.to_pickle(pd.DataFrame([ params, list(keep)], columns=rpkm.index).T, base)
| 35.582609 | 137 | 0.631232 |
import argparse
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from joblib import delayed,Parallel
import os
def whichKeep(est_params):
kon = np.array(est_params)[:,0]
koff = np.array(est_params)[:,1]
ksyn = np.array(est_params)[:,2]
which_kon = ~(kon < 2*1e-3)*~(kon > 1e3 - 1)
which_koff = ~(koff < 2*1e-3)*~(koff > 1e3 - 1)
which_burst = ksyn/koff > 1
which_ksyn = ksyn > 1
which = which_burst*which_koff*which_kon*which_ksyn
return which
def MaximumLikelihood(vals, export_asymp_ci = False, fix = 0, metod = 'L-BFGS-B'):
from scipy.interpolate import interp1d
from scipy.optimize import minimize
from scipy import special
from scipy.stats import poisson,norm
from scipy.special import j_roots
from scipy.special import beta as beta_fun
import numpy as np
if len(vals) == 0:
return np.array([np.nan, np.nan, np.nan])
def dBP(at, alpha, bet, lam):
at.shape = (len(at), 1)
np.repeat(at, 50, axis = 1)
def fun(at, m):
if(max(m) < 1e6):
return(poisson.pmf(at,m))
else:
return(norm.pdf(at,loc=m,scale=sqrt(m)))
x,w = j_roots(50,alpha = bet - 1, beta = alpha - 1)
gs = np.sum(w*fun(at, m = lam*(1+x)/2), axis=1)
prob = 1/beta_fun(alpha, bet)*2**(-alpha-bet+1)*gs
return(prob)
def LogLikelihood(x, vals):
kon = x[0]
koff = x[1]
ksyn = x[2]
return(-np.sum(np.log( dBP(vals,kon,koff,ksyn) + 1e-10) ) )
x0 = MomentInference(vals)
if np.isnan(x0).any() or any(x0 < 0):
x0 = np.array([10,10,10])
bnds = ((1e-3,1e3),(1e-3,1e3), (1, 1e4))
vals_ = np.copy(vals)
try:
ll = minimize(LogLikelihood, x0, args = (vals_), method=metod, bounds=bnds)
except:
return np.array([np.nan,np.nan,np.nan])
estim = ll.x
return estim
def MomentInference(vals, export_moments=False):
from scipy import stats # needs imports inside function when run in ipyparallel
import numpy as np
m1 = float(np.mean(vals))
m2 = float(sum(vals*(vals - 1))/len(vals))
m3 = float(sum(vals*(vals - 1)*(vals - 2))/len(vals))
# sanity check on input (e.g. need at least on expression level)
if sum(vals) == 0: return np.nan
if m1 == 0: return np.nan
if m2 == 0: return np.nan
r1=m1
r2=m2/m1
r3=m3/m2
if (r1*r2-2*r1*r3 + r2*r3) == 0: return np.nan
if ((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3)) == 0: return np.nan
if (r1 - 2*r2 + r3) == 0: return np.nan
lambda_est = (2*r1*(r3-r2))/(r1*r2-2*r1*r3 + r2*r3)
mu_est = (2*(r3-r2)*(r1-r3)*(r2-r1))/((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3))
v_est = (2*r1*r3 - r1*r2 - r2*r3)/(r1 - 2*r2 + r3)
if export_moments:
return np.array([lambda_est, mu_est, v_est, r1, r2, r3])
return np.array([lambda_est, mu_est, v_est])
parser = argparse.ArgumentParser(description='Maximum likelihood inference of bursting kinetics from scRNA-seq data')
parser.add_argument('file', metavar='file', type=str, nargs=1,help='.csv file with allelic-resolution transcript counts' )
parser.add_argument('--njobs', default=[50], nargs=1, type=int, help='Number of jobs for the parallelization, default 50')
args = parser.parse_args()
filename = args.file[0]
njobs = args.njobs[0]
print('Reading file ' + filename)
rpkm = pd.read_csv(filename, index_col=0)
print('Inferring kinetics:')
params = Parallel(n_jobs=njobs, verbose = 3)(delayed(MaximumLikelihood)(np.around(rpkm[pd.notnull(rpkm)])) for i,rpkm in rpkm.iterrows())
keep = whichKeep(params)
print('Inferred kinetics of {} genes out of {} total'.format(np.sum(keep), len(keep)))
base = os.path.splitext(os.path.basename(filename))[0]
base = base + '_ML.pkl'
print('Saving result to ' + base)
pd.to_pickle(pd.DataFrame([ params, list(keep)], columns=rpkm.index).T, base)
| true | true |
f72cffe0eeccd3aa5694823b8d218f07ea6e87a0 | 1,413 | py | Python | AI hack/Codes and Samples/image_process.py | AdarshSrivatsa98/AIhackathon | 147f6f2ada2ebf1ba6e87df3c3d1d6ee964ac7ee | [
"BSD-3-Clause"
] | 1 | 2021-03-29T04:27:27.000Z | 2021-03-29T04:27:27.000Z | codes and samples/image_process.py | SrivatsaAdarsh/Obstacle-Detection-using-CNN | 008940faffb8a9977b8dcc7a21dda4f328f0a81f | [
"MIT"
] | null | null | null | codes and samples/image_process.py | SrivatsaAdarsh/Obstacle-Detection-using-CNN | 008940faffb8a9977b8dcc7a21dda4f328f0a81f | [
"MIT"
] | null | null | null | import time
import cv2
import sys
import os,os.path
# ---- CLI arguments ---------------------------------------------------------
# argv[1]: input video file; argv[2]: output path prefix for saved crops.
path = sys.argv[1]
data_path = sys.argv[2]

fpsLimit = 0.8          # minimum whole seconds between sampled frames
index = 0               # frame position used to seek within the video
currentFrame = 0        # running counter used to name the saved crops
intframe = 0
startTime = time.time()

# Playing video from file:
cap = cv2.VideoCapture(str(path))

try:
    # BUG FIX: the original checked/created the literal directory name
    # 'data_path' instead of the path given on the command line.
    if not os.path.exists(data_path):
        os.makedirs(data_path)
except OSError:
    print('Error: Creating directory of data')

while True:
    ret = cap.set(1, index)      # seek to frame `index`
    ret1, frame = cap.read()
    if ret == False or ret1 == False:
        break
    nowTime = time.time()
    # Rate-limit saving: only act when > fpsLimit seconds have elapsed
    # (int truncation means this effectively requires >= 1 second).
    if (int(nowTime - startTime)) > fpsLimit:
        temp = cv2.resize(frame, (400, 400))
        # Split the 400x400 frame into its four 200x200 quadrants.
        for intframe in range(4):
            if intframe == 0:
                t = temp[0:200, 0:200]
            if intframe == 1:
                t = temp[200:400, 0:200]
            if intframe == 2:
                t = temp[0:200, 200:400]
            if intframe == 3:
                t = temp[200:400, 200:400]
            # Saves image of the current quadrant in a jpg file.
            cv2.waitKey(2)
            name = str(data_path) + str(currentFrame) + '.jpg'
            print('Creating...image' + str(currentFrame))
            cv2.imwrite(name, t)
            currentFrame += 1
    intframe = 0
    index += 100                 # skip ahead 100 frames per iteration
    startTime = time.time()      # reset time
cap.release()
cv2.destroyAllWindows()
| 25.690909 | 64 | 0.521585 | import time
import cv2
import sys
import os,os.path
path = sys.argv[1]
data_path=sys.argv[2]
fpsLimit = 0.8
index = 0
currentFrame=0
intframe =0
startTime = time.time()
cap = cv2.VideoCapture(str(path))
try:
if not os.path.exists('data_path'):
os.makedirs('data_path')
except OSError:
print ('Error: Creating directory of data')
while(True):
ret = cap.set(1,index)
ret1,frame = cap.read()
if ret == False or ret1 == False:
break
nowTime = time.time()
if (int(nowTime - startTime)) > fpsLimit:
temp = cv2.resize(frame,(400,400))
for intframe in range(4):
if intframe == 0:
t = temp[0:200,0:200]
if intframe == 1:
t = temp[200:400,0:200]
if intframe ==2:
t = temp[0:200,200:400]
if intframe == 3:
t = temp[200:400,200:400]
cv2.waitKey(2)
name = str(data_path) + str(currentFrame) + '.jpg'
print ('Creating...image' + str(currentFrame) )
cv2.imwrite(name, t)
currentFrame += 1
intframe=0
index+=100
startTime = time.time()
cap.release()
cv2.destroyAllWindows()
| true | true |
f72cffe6fb7e02f2604f6918e08414a09ad9a4c2 | 2,529 | py | Python | recommendations_system/ffm/ffm.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
] | 4 | 2019-12-04T08:42:21.000Z | 2020-06-07T07:22:08.000Z | recommendations_system/ffm/ffm.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
] | null | null | null | recommendations_system/ffm/ffm.py | mmikolajczak/recommendation_system_hetrec2011_movielens | 3ae13e62605ffbf5517bc2079e086a400de48748 | [
"MIT"
] | null | null | null | import subprocess
import warnings
import os.path as osp
import os
import numpy as np
# Note: libffm doesn't handle relative paths very well, hence abspath used.
class FFM:
    """Thin wrapper around the libffm `ffm-train`/`ffm-predict` binaries.

    Training and prediction shell out to the binaries; inputs are paths
    to files already in libffm's text format.
    """
    def __init__(self, train_binary_path, predict_binary_path, model_path=None):
        # libffm mishandles relative paths, so everything is made absolute.
        self.train_binary_path = osp.abspath(train_binary_path)
        self.predict_binary_path = osp.abspath(predict_binary_path)
        self.model_path = osp.abspath(model_path) if model_path is not None else None

    def fit(self, X, model_path='model', l=0.00002, k=4, t=15, r=0.2, s=1):
        """
        Train a model on the libffm-format file at path *X*.

        -l <lambda>: regularization parameter (default 0.00002)
        -k <factor>: number of latent factors (default 4)
        -t <iteration>: number of iterations (default 15)
        -r <eta>: learning rate (default 0.2)
        -s <nr_threads>: number of threads (default 1)
        """
        # validation support?
        warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +
                      ' Windows (CR LF) will cause the issues.')
        if type(X) != str:
            raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')
        self.model_path = osp.abspath(model_path)
        train_data_abspath = osp.abspath(X)
        # NOTE(review): Popen is given a single command string without
        # shell=True — this works on Windows but not on POSIX; confirm the
        # intended platform.
        cmd = f'{self.train_binary_path} -l {l} -k {k} -t {t} -r {r} -s {s} {train_data_abspath} {self.model_path}'
        proc = subprocess.Popen(cmd)
        proc.wait()
        # Remove the binary cache file libffm leaves next to the input.
        os.remove(f'{train_data_abspath}.bin')

    def predict(self, X, output_file):
        """Predict for the libffm-format file *X*, writing to *output_file*."""
        warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +
                      ' Windows (CR LF) will cause the issues.')
        if self.model_path is None:
            raise RuntimeError('Model must be fitted first!')
        if type(X) != str:
            raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')
        predicted_data_abspath = osp.abspath(X)
        output_file_abspath = osp.abspath(output_file)
        cmd = f'{self.predict_binary_path} {predicted_data_abspath} {self.model_path} {output_file_abspath}'
        proc = subprocess.Popen(cmd)
        proc.wait()

    @classmethod
    def pred_file_to_numpy(cls, preds_file):
        # One prediction per line -> 1-D float array.
        return np.loadtxt(preds_file)

    @classmethod
    def ground_truth_from_ffm_file(cls, ffm_file):
        # The label is the first whitespace-separated token of each line.
        with open(ffm_file, 'r') as f:
            labels = [line.split(' ')[0] for line in f]
        return np.array(labels).astype(float)
| 41.459016 | 118 | 0.644128 | import subprocess
import warnings
import os.path as osp
import os
import numpy as np
class FFM:
def __init__(self, train_binary_path, predict_binary_path, model_path=None):
self.train_binary_path = osp.abspath(train_binary_path)
self.predict_binary_path = osp.abspath(predict_binary_path)
self.model_path = osp.abspath(model_path) if model_path is not None else None
def fit(self, X, model_path='model', l=0.00002, k=4, t=15, r=0.2, s=1):
# validation support?
warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +
' Windows (CR LF) will cause the issues.')
if type(X) != str:
raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')
self.model_path = osp.abspath(model_path)
train_data_abspath = osp.abspath(X)
cmd = f'{self.train_binary_path} -l {l} -k {k} -t {t} -r {r} -s {s} {train_data_abspath} {self.model_path}'
proc = subprocess.Popen(cmd)
proc.wait()
os.remove(f'{train_data_abspath}.bin')
def predict(self, X, output_file):
warnings.warn('Please note that unix newline format (LF) is required for libffm binaries to work correctly.' +
' Windows (CR LF) will cause the issues.')
if self.model_path is None:
raise RuntimeError('Model must be fitted first!')
if type(X) != str:
raise ValueError(f'Improper input type {type(X)}.X must be a path to ffm file.')
predicted_data_abspath = osp.abspath(X)
output_file_abspath = osp.abspath(output_file)
cmd = f'{self.predict_binary_path} {predicted_data_abspath} {self.model_path} {output_file_abspath}'
proc = subprocess.Popen(cmd)
proc.wait()
@classmethod
def pred_file_to_numpy(cls, preds_file):
return np.loadtxt(preds_file)
@classmethod
def ground_truth_from_ffm_file(cls, ffm_file):
with open(ffm_file, 'r') as f:
labels = [line.split(' ')[0] for line in f]
return np.array(labels).astype(float)
| true | true |
f72d0142e44b378e8893afaa8b5bbafa3e81c8da | 2,259 | py | Python | src/EmailAlert/fancymail.py | JyotiSunkara/Energy-Monitoring-And-Control | efba4ac611e7054b78492ccf5e758a81621c8d6d | [
"MIT"
] | 1 | 2020-06-27T03:25:11.000Z | 2020-06-27T03:25:11.000Z | src/EmailAlert/fancymail.py | JyotiSunkara/Energy-Monitoring-And-Control | efba4ac611e7054b78492ccf5e758a81621c8d6d | [
"MIT"
] | null | null | null | src/EmailAlert/fancymail.py | JyotiSunkara/Energy-Monitoring-And-Control | efba4ac611e7054b78492ccf5e758a81621c8d6d | [
"MIT"
] | null | null | null | from smtplib import SMTP
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from jinja2 import Environment, FileSystemLoader
import os
# SECURITY(review): sender credentials are hard-coded in source; move them
# to environment variables or a secrets store before sharing this code.
from_email = 'krbdashboard@outlook.com'
password = 'ArushiSinghal'
# Jinja2 environment rooted at ./templates/ (used by send_movie_list).
env = Environment(
    loader=FileSystemLoader('./templates/'))
def get_contacts(filename):
    """Read a whitespace-delimited contacts file.

    Each line holds a name followed by an email address; the two columns
    are returned as parallel lists ``(names, emails)``.
    """
    names, emails = [], []
    with open(filename, mode='r', encoding='utf-8') as contacts_file:
        for line in contacts_file:
            fields = line.split()
            names.append(fields[0])
            emails.append(fields[1])
    return names, emails
def get_data():
    """Return the template context: a one-element list holding the demo
    movie entries rendered by the email template."""
    movies = [
        {"title": 'Gone Girl', "description": 'This is a fancy email'},
        {"title": 'Delhi 6', "description": 'Good movie'},
        {"title": 'The Lion King', "description": 'Roar'},
        {"title": 'The Great Gatsby', "description": ':o'},
    ]
    return [{"movies": movies}]
def send_mail(bodyContent):
    """Send *bodyContent* as an HTML email to every contact in mycontacts.txt.

    Opens a single SMTP session against Outlook (STARTTLS) and sends one
    individual message per recipient.
    """
    names, emails = get_contacts('mycontacts.txt') # Read contacts
    subject = 'Testing CSS/HTML again!'
    server = SMTP('smtp-mail.outlook.com', 587)
    server.starttls()
    # Credentials come from the module-level globals defined above.
    server.login(from_email, password)
    for name, email in zip(names, emails):
        message = MIMEMultipart()
        message['Subject'] = subject
        message['From'] = from_email
        message['To'] = email
        # Attach as HTML so the rendered template keeps its styling.
        message.attach(MIMEText(bodyContent, "html"))
        msgBody = message.as_string()
        server.sendmail(from_email, email, msgBody)
        del message
    server.quit()
def send_movie_list():
    """Render the movie-list template with the demo data and email it."""
    json_data = get_data()
    # 'child.html' lives under ./templates/ (see the module-level env).
    template = env.get_template('child.html')
    output = template.render(data=json_data[0])
    send_mail(output)
    return "Mail sent successfully."
if __name__ == '__main__':
    # Script entry point: render and send the demo mailing, echo the status.
    print(send_movie_list())
| 25.670455 | 71 | 0.555556 | from smtplib import SMTP
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from jinja2 import Environment, FileSystemLoader
import os
from_email = 'krbdashboard@outlook.com'
password = 'ArushiSinghal'
env = Environment(
loader=FileSystemLoader('./templates/'))
def get_contacts(filename):
names = []
emails = []
with open(filename, mode='r', encoding='utf-8') as contacts_file:
for a_contact in contacts_file:
names.append(a_contact.split()[0])
emails.append(a_contact.split()[1])
return names, emails
def get_data():
data = []
data.append(
{
"movies": [
{
"title": 'Gone Girl',
"description": 'This is a fancy email'
},
{
"title": 'Delhi 6',
"description": 'Good movie'
},
{
"title": 'The Lion King',
"description": 'Roar'
},
{
"title": 'The Great Gatsby',
"description": ':o'
}
]
})
return data
def send_mail(bodyContent):
names, emails = get_contacts('mycontacts.txt')
subject = 'Testing CSS/HTML again!'
server = SMTP('smtp-mail.outlook.com', 587)
server.starttls()
server.login(from_email, password)
for name, email in zip(names, emails):
message = MIMEMultipart()
message['Subject'] = subject
message['From'] = from_email
message['To'] = email
message.attach(MIMEText(bodyContent, "html"))
msgBody = message.as_string()
server.sendmail(from_email, email, msgBody)
del message
server.quit()
def send_movie_list():
json_data = get_data()
template = env.get_template('child.html')
output = template.render(data=json_data[0])
send_mail(output)
return "Mail sent successfully."
if __name__ == '__main__':
print(send_movie_list())
| true | true |
f72d017e70b5e4176196a0457bdf724775bda1b5 | 5,587 | py | Python | sdk/python/pulumi_azure_nextgen/apimanagement/latest/get_api_release.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/apimanagement/latest/get_api_release.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/apimanagement/latest/get_api_release.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApiReleaseResult',
'AwaitableGetApiReleaseResult',
'get_api_release',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:apimanagement:getApiRelease'.""", DeprecationWarning)
# Auto-generated Pulumi output type; edit the code generator, not this class.
@pulumi.output_type
class GetApiReleaseResult:
    """
    ApiRelease details.
    """
    def __init__(__self__, api_id=None, created_date_time=None, id=None, name=None, notes=None, type=None, updated_date_time=None):
        # Codegen pattern: each argument is type-checked, then stored via
        # pulumi.set.  Note the checks are skipped for falsy values
        # (None / empty string), so only truthy non-str inputs raise.
        if api_id and not isinstance(api_id, str):
            raise TypeError("Expected argument 'api_id' to be a str")
        pulumi.set(__self__, "api_id", api_id)
        if created_date_time and not isinstance(created_date_time, str):
            raise TypeError("Expected argument 'created_date_time' to be a str")
        pulumi.set(__self__, "created_date_time", created_date_time)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if notes and not isinstance(notes, str):
            raise TypeError("Expected argument 'notes' to be a str")
        pulumi.set(__self__, "notes", notes)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if updated_date_time and not isinstance(updated_date_time, str):
            raise TypeError("Expected argument 'updated_date_time' to be a str")
        pulumi.set(__self__, "updated_date_time", updated_date_time)

    @property
    @pulumi.getter(name="apiId")
    def api_id(self) -> Optional[str]:
        """
        Identifier of the API the release belongs to.
        """
        return pulumi.get(self, "api_id")

    @property
    @pulumi.getter(name="createdDateTime")
    def created_date_time(self) -> str:
        """
        The time the API was released. The date conforms to the following format: yyyy-MM-ddTHH:mm:ssZ as specified by the ISO 8601 standard.
        """
        return pulumi.get(self, "created_date_time")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def notes(self) -> Optional[str]:
        """
        Release Notes
        """
        return pulumi.get(self, "notes")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="updatedDateTime")
    def updated_date_time(self) -> str:
        """
        The time the API release was updated.
        """
        return pulumi.get(self, "updated_date_time")
class AwaitableGetApiReleaseResult(GetApiReleaseResult):
    """Awaitable shim so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` marks this method as a generator, which
        # is what makes instances awaitable; awaiting returns a plain
        # GetApiReleaseResult copy immediately.
        if False:
            yield self
        return GetApiReleaseResult(
            api_id=self.api_id,
            created_date_time=self.created_date_time,
            id=self.id,
            name=self.name,
            notes=self.notes,
            type=self.type,
            updated_date_time=self.updated_date_time)
def get_api_release(api_id: Optional[str] = None,
                    release_id: Optional[str] = None,
                    resource_group_name: Optional[str] = None,
                    service_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiReleaseResult:
    """
    ApiRelease details.
    Latest API Version: 2019-12-01.


    :param str api_id: API identifier. Must be unique in the current API Management service instance.
    :param str release_id: Release identifier within an API. Must be unique in the current API Management service instance.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # Deprecation shim: this 'latest' module delegates to the same provider
    # invoke the top-level module uses.
    pulumi.log.warn("get_api_release is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:apimanagement:getApiRelease'.")
    __args__ = dict()
    __args__['apiId'] = api_id
    __args__['releaseId'] = release_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/latest:getApiRelease', __args__, opts=opts, typ=GetApiReleaseResult).value

    return AwaitableGetApiReleaseResult(
        api_id=__ret__.api_id,
        created_date_time=__ret__.created_date_time,
        id=__ret__.id,
        name=__ret__.name,
        notes=__ret__.notes,
        type=__ret__.type,
        updated_date_time=__ret__.updated_date_time)
| 36.279221 | 190 | 0.65026 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApiReleaseResult',
'AwaitableGetApiReleaseResult',
'get_api_release',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:apimanagement:getApiRelease'.""", DeprecationWarning)
@pulumi.output_type
class GetApiReleaseResult:
def __init__(__self__, api_id=None, created_date_time=None, id=None, name=None, notes=None, type=None, updated_date_time=None):
if api_id and not isinstance(api_id, str):
raise TypeError("Expected argument 'api_id' to be a str")
pulumi.set(__self__, "api_id", api_id)
if created_date_time and not isinstance(created_date_time, str):
raise TypeError("Expected argument 'created_date_time' to be a str")
pulumi.set(__self__, "created_date_time", created_date_time)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notes and not isinstance(notes, str):
raise TypeError("Expected argument 'notes' to be a str")
pulumi.set(__self__, "notes", notes)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_date_time and not isinstance(updated_date_time, str):
raise TypeError("Expected argument 'updated_date_time' to be a str")
pulumi.set(__self__, "updated_date_time", updated_date_time)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> Optional[str]:
return pulumi.get(self, "api_id")
@property
@pulumi.getter(name="createdDateTime")
def created_date_time(self) -> str:
return pulumi.get(self, "created_date_time")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> Optional[str]:
return pulumi.get(self, "notes")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedDateTime")
def updated_date_time(self) -> str:
return pulumi.get(self, "updated_date_time")
class AwaitableGetApiReleaseResult(GetApiReleaseResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiReleaseResult(
api_id=self.api_id,
created_date_time=self.created_date_time,
id=self.id,
name=self.name,
notes=self.notes,
type=self.type,
updated_date_time=self.updated_date_time)
def get_api_release(api_id: Optional[str] = None,
release_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiReleaseResult:
pulumi.log.warn("get_api_release is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:apimanagement:getApiRelease'.")
__args__ = dict()
__args__['apiId'] = api_id
__args__['releaseId'] = release_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/latest:getApiRelease', __args__, opts=opts, typ=GetApiReleaseResult).value
return AwaitableGetApiReleaseResult(
api_id=__ret__.api_id,
created_date_time=__ret__.created_date_time,
id=__ret__.id,
name=__ret__.name,
notes=__ret__.notes,
type=__ret__.type,
updated_date_time=__ret__.updated_date_time)
| true | true |
f72d018e06b47ce1c3e6edeff90355eca52ef202 | 15,137 | py | Python | apps/resume/views.py | ozet-team/ozet-server | 4772d37339634adee6ace65a5e2380df4bd22bbb | [
"MIT"
] | null | null | null | apps/resume/views.py | ozet-team/ozet-server | 4772d37339634adee6ace65a5e2380df4bd22bbb | [
"MIT"
] | 4 | 2021-11-27T14:15:55.000Z | 2021-12-10T12:59:44.000Z | apps/resume/views.py | ozet-team/ozet-server | 4772d37339634adee6ace65a5e2380df4bd22bbb | [
"MIT"
] | null | null | null | from django.utils.functional import cached_property
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema, OpenApiExample, OpenApiParameter
from rest_framework.exceptions import NotFound
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateAPIView,
RetrieveUpdateDestroyAPIView,
RetrieveAPIView,
)
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, AllowAny
from apps.member.models import User
from apps.resume import models
from apps.resume import serializers
from apps.resume.models import Career, Certificate, AcademicBackground, MilitaryService, Resume
from utils.django.rest_framework.mixins import UserContextMixin, QuerySerializerMixin
from commons.contrib.drf_spectacular import tags as api_tags
class ResumeDetailView(UserContextMixin, RetrieveAPIView):
    """Public read-only endpoint returning a resume by its primary key.

    The resume is resolved from the ``resume_id`` URL kwarg against
    ``Resume.id``; anonymous access is allowed.
    """
    permission_classes = (AllowAny, )
    serializer_class = serializers.ResumeSerializer
    queryset = Resume.objects
    # Map the ``resume_id`` URL kwarg onto the model's ``id`` field.
    lookup_field = 'id'
    lookup_url_kwarg = 'resume_id'
    @extend_schema(
        tags=[api_tags.RESUME],
        summary="회원 이력서 가져오기 API @AllowAny",
        description="회원 이력서 가져오기 API 입니다.",
        responses=serializers.ResumeSerializer,
    )
    def get(self, request, *args, **kwargs):
        # Delegates to RetrieveAPIView.get; the decorator only adds OpenAPI metadata.
        return super(ResumeDetailView, self).get(request, *args, **kwargs)
class UserResumeDetailView(UserContextMixin, RetrieveAPIView):
    """Public read-only endpoint returning the resume owned by a given user.

    The user is identified by the ``user_id`` URL kwarg; anonymous access
    is allowed.
    """
    permission_classes = (AllowAny, )
    serializer_class = serializers.UserResumeSerializer
    def get_object(self):
        """Resolve the resume via the ``user_id`` URL kwarg, 404 when absent."""
        user_id = self.kwargs.get('user_id', None)
        # Falsy kwarg (missing or 0) is treated the same as a missing resume.
        if not user_id:
            raise NotFound()
        return get_object_or_404(Resume, user_id=user_id)
    @extend_schema(
        tags=[api_tags.RESUME],
        summary="회원 이력서 가져오기 API @AllowAny",
        description="회원 이력서 가져오기 API 입니다.",
        responses=serializers.UserResumeSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(UserResumeDetailView, self).get(request, *args, **kwargs)
class UserMeResumeDetailView(UserContextMixin, RetrieveAPIView):
    """Return (lazily creating) the authenticated caller's own resume."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.ResumeSerializer
    def get_object(self):
        # Create the resume on first access so the endpoint never 404s for
        # an authenticated user.  ``self.user`` is supplied by UserContextMixin
        # (presumably the request user — confirm against the mixin).
        resume, is_created = Resume.objects.get_or_create(user_id=self.user.id)
        return resume
    @extend_schema(
        tags=[api_tags.USER_ME],
        summary="회원 이력서 가져오기 API @IsAuthenticated",
        description="회원 이력서 가져오기 API 입니다.",
        responses=serializers.ResumeSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(UserMeResumeDetailView, self).get(request, *args, **kwargs)
class UserMeResumeDetailPDFView(UserContextMixin, RetrieveUpdateAPIView):
    """Update the PDF attached to the authenticated caller's resume.

    Both PUT and GET are stripped in ``__init__``, leaving PATCH as the
    only verb this route answers.
    """
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.ResumePDFSerializer
    def get_object(self):
        # Lazily create the resume so a first-time PATCH still succeeds.
        resume, is_created = Resume.objects.get_or_create(user_id=self.user.id)
        return resume
    def __init__(self, *args, **kwargs):
        # Disable PUT and GET; this endpoint is intentionally PATCH-only.
        self.http_method_names = [method for method in self.http_method_names if method != "put" and method != "get"]
        super(UserMeResumeDetailPDFView, self).__init__(*args, **kwargs)
    @extend_schema(
        tags=[api_tags.USER_ME],
        summary="회원 이력서 PDF 업데이트 API @IsAuthenticated",
        description="회원 이력서 PDF 업데이트 API 입니다.",
        responses=serializers.ResumePDFSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(UserMeResumeDetailPDFView, self).patch(request, *args, **kwargs)
class ResumeCareerDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve/patch/delete one career entry of the caller's resume.

    PUT is disabled; PATCH is the only update verb.  Objects are scoped to
    the authenticated user's resume, so callers cannot touch other resumes.
    """
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.CareerSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'id'
    def __init__(self, *args, **kwargs):
        # Drop PUT so partial update (PATCH) is the only mutation verb.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeCareerDetailView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # drf-spectacular inspects views without a real request; short-circuit
        # with an empty queryset so schema generation never touches self.user.
        if getattr(self, 'swagger_fake_view', False):
            return Career.objects.none()
        # Most recent position first.
        return Career.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-join_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 가져오기 API @IsAuthenticated",
        description="회원 커리어 가져오기 API 입니다.",
        responses=serializers.CareerSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeCareerDetailView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 업데이트 API @IsAuthenticated",
        description="회원 커리어 업데이트 API 입니다.",
        responses=serializers.CareerSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(ResumeCareerDetailView, self).patch(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 삭제 API @IsAuthenticated",
        description="회원 커리어 삭제 API 입니다.",
    )
    def delete(self, request, *args, **kwargs):
        return super(ResumeCareerDetailView, self).delete(request, *args, **kwargs)
class ResumeCareerListView(UserContextMixin, ListCreateAPIView):
    """List and create career entries on the caller's resume (GET/POST only)."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.CareerSerializer
    def __init__(self, *args, **kwargs):
        # Drop PUT so the route only exposes GET (list) and POST (create).
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeCareerListView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # Empty queryset during drf-spectacular schema generation — there is
        # no authenticated user to scope by in that context.
        if getattr(self, 'swagger_fake_view', False):
            return Career.objects.none()
        # Careers scoped to the caller's resume, most recent first.
        return Career.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-join_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 가져오기 API @IsAuthenticated",
        description="회원 커리어 가져오기 API 입니다.\n"
                    "* **Position**\n"
                    "    * **STAFF** - 스탭(인턴)\n"
                    "    * **MANAGER** - 매니저\n"
                    "    * **DESIGNER** - 디자이너\n"
                    "    * **DIRECTOR** - 원장",
        responses=serializers.CareerSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeCareerListView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CAREER],
        summary="회원 커리어 추가 API @IsAuthenticated",
        description="회원 커리어 추가 API 입니다.\n"
                    "* **Position**\n"
                    "    * **STAFF** - 스탭(인턴)\n"
                    "    * **MANAGER** - 매니저\n"
                    "    * **DESIGNER** - 디자이너\n"
                    "    * **DIRECTOR** - 원장",
        responses=serializers.CareerSerializer,
    )
    def post(self, request, *args, **kwargs):
        return super(ResumeCareerListView, self).post(request, *args, **kwargs)
class ResumeCertificateDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve/patch/delete one certificate of the caller's resume.

    PUT is disabled; PATCH is the only update verb.
    """
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.CertificateSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'id'
    def __init__(self, *args, **kwargs):
        # Drop PUT so partial update (PATCH) is the only mutation verb.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeCertificateDetailView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # Empty queryset during drf-spectacular schema generation.
        if getattr(self, 'swagger_fake_view', False):
            return Certificate.objects.none()
        # Certificates scoped to the caller's resume, newest first.
        return Certificate.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-certificate_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 가져오기 API @IsAuthenticated",
        description="회원 자격증 가져오기 API 입니다.",
        responses=serializers.CertificateSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeCertificateDetailView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 업데이트 API @IsAuthenticated",
        description="회원 자격증 업데이트 API 입니다.",
        responses=serializers.CertificateSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(ResumeCertificateDetailView, self).patch(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 삭제 API @IsAuthenticated",
        description="회원 자격증 삭제 API 입니다.",
    )
    def delete(self, request, *args, **kwargs):
        return super(ResumeCertificateDetailView, self).delete(request, *args, **kwargs)
class ResumeCertificateListView(UserContextMixin, ListCreateAPIView):
    """List and create certificates on the caller's resume (GET/POST only)."""
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.CertificateSerializer
    def __init__(self, *args, **kwargs):
        # Drop PUT so the route only exposes GET (list) and POST (create).
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeCertificateListView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # Empty queryset during drf-spectacular schema generation.
        if getattr(self, 'swagger_fake_view', False):
            return Certificate.objects.none()
        # Certificates scoped to the caller's resume, newest first.
        return Certificate.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-certificate_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 목록 가져오기 API @IsAuthenticated",
        description="회원 자격증 목록 가져오기 API 입니다.",
        responses=serializers.CertificateSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeCertificateListView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_CERTIFICATE],
        summary="회원 자격증 추가 API @IsAuthenticated",
        description="회원 자격증 추가 API 입니다.",
        responses=serializers.CertificateSerializer,
    )
    def post(self, request, *args, **kwargs):
        return super(ResumeCertificateListView, self).post(request, *args, **kwargs)
class ResumeAcademicBackgroundDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve/patch/delete one academic-background entry of the caller's resume.

    PUT is disabled; PATCH is the only update verb.
    """
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.AcademicBackgroundSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'id'
    def __init__(self, *args, **kwargs):
        # Drop PUT so partial update (PATCH) is the only mutation verb.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeAcademicBackgroundDetailView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # Empty queryset during drf-spectacular schema generation.
        if getattr(self, 'swagger_fake_view', False):
            return AcademicBackground.objects.none()
        # Entries scoped to the caller's resume, most recent first.
        return AcademicBackground.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-join_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 가져오기 API @IsAuthenticated",
        description="회원 학력 가져오기 API 입니다.",
        responses=serializers.AcademicBackgroundSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundDetailView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 업데이트 API @IsAuthenticated",
        description="회원 학력 업데이트 API 입니다.",
        responses=serializers.AcademicBackgroundSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundDetailView, self).patch(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 삭제 API @IsAuthenticated",
        description="회원 학력 삭제 API 입니다.",
    )
    def delete(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundDetailView, self).delete(request, *args, **kwargs)
class ResumeAcademicBackgroundListView(UserContextMixin, ListCreateAPIView):
    """List and create academic-background entries on the caller's resume.

    PUT is disabled in ``__init__``; the route exposes GET (list) and
    POST (create) only.
    """
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.AcademicBackgroundSerializer
    def __init__(self, *args, **kwargs):
        # Drop PUT so the route only exposes GET/POST.  Defined before
        # get_queryset for consistency with the other list views in this
        # module (ResumeCareerListView, ResumeCertificateListView).
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeAcademicBackgroundListView, self).__init__(*args, **kwargs)
    def get_queryset(self):
        # Empty queryset during drf-spectacular schema generation — there is
        # no authenticated user to scope by in that context.
        if getattr(self, 'swagger_fake_view', False):
            return AcademicBackground.objects.none()
        # Entries scoped to the caller's resume, most recent first.
        return AcademicBackground.objects \
            .filter(resume_id=self.user.resume.id) \
            .order_by('-join_at') \
            .all()
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 목록 가져오기 API @IsAuthenticated",
        description="회원 학력 목록 가져오기 API 입니다.",
        responses=serializers.AcademicBackgroundSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundListView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_ACADEMIC],
        summary="회원 학력 추가 API @IsAuthenticated",
        description="회원 학력 추가 API 입니다.",
        responses=serializers.AcademicBackgroundSerializer,
    )
    def post(self, request, *args, **kwargs):
        return super(ResumeAcademicBackgroundListView, self).post(request, *args, **kwargs)
class ResumeMilitaryServiceView(UserContextMixin, RetrieveUpdateAPIView):
    """Retrieve/patch the single military-service record on the caller's resume.

    The record is created lazily on first access; PUT is disabled so PATCH
    is the only update verb.
    """
    permission_classes = (IsAuthenticated, )
    serializer_class = serializers.MilitaryServiceSerializer
    def get_object(self):
        # One military-service row per resume; create it on first access.
        military, is_created = MilitaryService.objects.get_or_create(resume_id=self.user.resume.id)
        return military
    def __init__(self, *args, **kwargs):
        # Drop PUT so partial update (PATCH) is the only mutation verb.
        self.http_method_names = [method for method in self.http_method_names if method != "put"]
        super(ResumeMilitaryServiceView, self).__init__(*args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_MILITARY],
        summary="회원 병역 가져오기 API @IsAuthenticated",
        description="회원 병역 가져오기 API 입니다.\n"
                    "* **Service Status**\n"
                    "    * **NA** - 해당없음\n"
                    "    * **EXEMPTION** - 면제\n"
                    "    * **UNFINISHED** - 미필\n"
                    "    * **FINISHED** - 군필",
        responses=serializers.MilitaryServiceSerializer,
    )
    def get(self, request, *args, **kwargs):
        return super(ResumeMilitaryServiceView, self).get(request, *args, **kwargs)
    @extend_schema(
        tags=[api_tags.RESUME_MILITARY],
        summary="회원 병역 업데이트 API @IsAuthenticated",
        description="회원 병역 업데이트 API 입니다.\n"
                    "* **Service Status**\n"
                    "    * **NA** - 해당없음\n"
                    "    * **EXEMPTION** - 면제\n"
                    "    * **UNFINISHED** - 미필\n"
                    "    * **FINISHED** - 군필",
        responses=serializers.MilitaryServiceSerializer,
    )
    def patch(self, request, *args, **kwargs):
        return super(ResumeMilitaryServiceView, self).patch(request, *args, **kwargs)
| 37.375309 | 117 | 0.658321 | from django.utils.functional import cached_property
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema, OpenApiExample, OpenApiParameter
from rest_framework.exceptions import NotFound
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateAPIView,
RetrieveUpdateDestroyAPIView,
RetrieveAPIView,
)
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, AllowAny
from apps.member.models import User
from apps.resume import models
from apps.resume import serializers
from apps.resume.models import Career, Certificate, AcademicBackground, MilitaryService, Resume
from utils.django.rest_framework.mixins import UserContextMixin, QuerySerializerMixin
from commons.contrib.drf_spectacular import tags as api_tags
class ResumeDetailView(UserContextMixin, RetrieveAPIView):
permission_classes = (AllowAny, )
serializer_class = serializers.ResumeSerializer
queryset = Resume.objects
lookup_field = 'id'
lookup_url_kwarg = 'resume_id'
@extend_schema(
tags=[api_tags.RESUME],
summary="회원 이력서 가져오기 API @AllowAny",
description="회원 이력서 가져오기 API 입니다.",
responses=serializers.ResumeSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeDetailView, self).get(request, *args, **kwargs)
class UserResumeDetailView(UserContextMixin, RetrieveAPIView):
permission_classes = (AllowAny, )
serializer_class = serializers.UserResumeSerializer
def get_object(self):
user_id = self.kwargs.get('user_id', None)
if not user_id:
raise NotFound()
return get_object_or_404(Resume, user_id=user_id)
@extend_schema(
tags=[api_tags.RESUME],
summary="회원 이력서 가져오기 API @AllowAny",
description="회원 이력서 가져오기 API 입니다.",
responses=serializers.UserResumeSerializer,
)
def get(self, request, *args, **kwargs):
return super(UserResumeDetailView, self).get(request, *args, **kwargs)
class UserMeResumeDetailView(UserContextMixin, RetrieveAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.ResumeSerializer
def get_object(self):
resume, is_created = Resume.objects.get_or_create(user_id=self.user.id)
return resume
@extend_schema(
tags=[api_tags.USER_ME],
summary="회원 이력서 가져오기 API @IsAuthenticated",
description="회원 이력서 가져오기 API 입니다.",
responses=serializers.ResumeSerializer,
)
def get(self, request, *args, **kwargs):
return super(UserMeResumeDetailView, self).get(request, *args, **kwargs)
class UserMeResumeDetailPDFView(UserContextMixin, RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.ResumePDFSerializer
def get_object(self):
resume, is_created = Resume.objects.get_or_create(user_id=self.user.id)
return resume
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put" and method != "get"]
super(UserMeResumeDetailPDFView, self).__init__(*args, **kwargs)
@extend_schema(
tags=[api_tags.USER_ME],
summary="회원 이력서 PDF 업데이트 API @IsAuthenticated",
description="회원 이력서 PDF 업데이트 API 입니다.",
responses=serializers.ResumePDFSerializer,
)
def patch(self, request, *args, **kwargs):
return super(UserMeResumeDetailPDFView, self).patch(request, *args, **kwargs)
class ResumeCareerDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.CareerSerializer
lookup_field = 'id'
lookup_url_kwarg = 'id'
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeCareerDetailView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return Career.objects.none()
return Career.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-join_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 가져오기 API @IsAuthenticated",
description="회원 커리어 가져오기 API 입니다.",
responses=serializers.CareerSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeCareerDetailView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 업데이트 API @IsAuthenticated",
description="회원 커리어 업데이트 API 입니다.",
responses=serializers.CareerSerializer,
)
def patch(self, request, *args, **kwargs):
return super(ResumeCareerDetailView, self).patch(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 삭제 API @IsAuthenticated",
description="회원 커리어 삭제 API 입니다.",
)
def delete(self, request, *args, **kwargs):
return super(ResumeCareerDetailView, self).delete(request, *args, **kwargs)
class ResumeCareerListView(UserContextMixin, ListCreateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.CareerSerializer
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeCareerListView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return Career.objects.none()
return Career.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-join_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 가져오기 API @IsAuthenticated",
description="회원 커리어 가져오기 API 입니다.\n"
"* **Position**\n"
" * **STAFF** - 스탭(인턴)\n"
" * **MANAGER** - 매니저\n"
" * **DESIGNER** - 디자이너\n"
" * **DIRECTOR** - 원장",
responses=serializers.CareerSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeCareerListView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CAREER],
summary="회원 커리어 추가 API @IsAuthenticated",
description="회원 커리어 추가 API 입니다.\n"
"* **Position**\n"
" * **STAFF** - 스탭(인턴)\n"
" * **MANAGER** - 매니저\n"
" * **DESIGNER** - 디자이너\n"
" * **DIRECTOR** - 원장",
responses=serializers.CareerSerializer,
)
def post(self, request, *args, **kwargs):
return super(ResumeCareerListView, self).post(request, *args, **kwargs)
class ResumeCertificateDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.CertificateSerializer
lookup_field = 'id'
lookup_url_kwarg = 'id'
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeCertificateDetailView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return Certificate.objects.none()
return Certificate.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-certificate_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 가져오기 API @IsAuthenticated",
description="회원 자격증 가져오기 API 입니다.",
responses=serializers.CertificateSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeCertificateDetailView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 업데이트 API @IsAuthenticated",
description="회원 자격증 업데이트 API 입니다.",
responses=serializers.CertificateSerializer,
)
def patch(self, request, *args, **kwargs):
return super(ResumeCertificateDetailView, self).patch(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 삭제 API @IsAuthenticated",
description="회원 자격증 삭제 API 입니다.",
)
def delete(self, request, *args, **kwargs):
return super(ResumeCertificateDetailView, self).delete(request, *args, **kwargs)
class ResumeCertificateListView(UserContextMixin, ListCreateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.CertificateSerializer
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeCertificateListView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return Certificate.objects.none()
return Certificate.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-certificate_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 목록 가져오기 API @IsAuthenticated",
description="회원 자격증 목록 가져오기 API 입니다.",
responses=serializers.CertificateSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeCertificateListView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_CERTIFICATE],
summary="회원 자격증 추가 API @IsAuthenticated",
description="회원 자격증 추가 API 입니다.",
responses=serializers.CertificateSerializer,
)
def post(self, request, *args, **kwargs):
return super(ResumeCertificateListView, self).post(request, *args, **kwargs)
class ResumeAcademicBackgroundDetailView(UserContextMixin, RetrieveUpdateDestroyAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.AcademicBackgroundSerializer
lookup_field = 'id'
lookup_url_kwarg = 'id'
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeAcademicBackgroundDetailView, self).__init__(*args, **kwargs)
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return AcademicBackground.objects.none()
return AcademicBackground.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-join_at') \
.all()
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 가져오기 API @IsAuthenticated",
description="회원 학력 가져오기 API 입니다.",
responses=serializers.AcademicBackgroundSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundDetailView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 업데이트 API @IsAuthenticated",
description="회원 학력 업데이트 API 입니다.",
responses=serializers.AcademicBackgroundSerializer,
)
def patch(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundDetailView, self).patch(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 삭제 API @IsAuthenticated",
description="회원 학력 삭제 API 입니다.",
)
def delete(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundDetailView, self).delete(request, *args, **kwargs)
class ResumeAcademicBackgroundListView(UserContextMixin, ListCreateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.AcademicBackgroundSerializer
def get_queryset(self):
if getattr(self, 'swagger_fake_view', False):
return AcademicBackground.objects.none()
return AcademicBackground.objects \
.filter(resume_id=self.user.resume.id) \
.order_by('-join_at') \
.all()
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeAcademicBackgroundListView, self).__init__(*args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 목록 가져오기 API @IsAuthenticated",
description="회원 학력 목록 가져오기 API 입니다.",
responses=serializers.AcademicBackgroundSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundListView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_ACADEMIC],
summary="회원 학력 추가 API @IsAuthenticated",
description="회원 학력 추가 API 입니다.",
responses=serializers.AcademicBackgroundSerializer,
)
def post(self, request, *args, **kwargs):
return super(ResumeAcademicBackgroundListView, self).post(request, *args, **kwargs)
class ResumeMilitaryServiceView(UserContextMixin, RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated, )
serializer_class = serializers.MilitaryServiceSerializer
def get_object(self):
military, is_created = MilitaryService.objects.get_or_create(resume_id=self.user.resume.id)
return military
def __init__(self, *args, **kwargs):
self.http_method_names = [method for method in self.http_method_names if method != "put"]
super(ResumeMilitaryServiceView, self).__init__(*args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_MILITARY],
summary="회원 병역 가져오기 API @IsAuthenticated",
description="회원 병역 가져오기 API 입니다.\n"
"* **Service Status**\n"
" * **NA** - 해당없음\n"
" * **EXEMPTION** - 면제\n"
" * **UNFINISHED** - 미필\n"
" * **FINISHED** - 군필",
responses=serializers.MilitaryServiceSerializer,
)
def get(self, request, *args, **kwargs):
return super(ResumeMilitaryServiceView, self).get(request, *args, **kwargs)
@extend_schema(
tags=[api_tags.RESUME_MILITARY],
summary="회원 병역 업데이트 API @IsAuthenticated",
description="회원 병역 업데이트 API 입니다.\n"
"* **Service Status**\n"
" * **NA** - 해당없음\n"
" * **EXEMPTION** - 면제\n"
" * **UNFINISHED** - 미필\n"
" * **FINISHED** - 군필",
responses=serializers.MilitaryServiceSerializer,
)
def patch(self, request, *args, **kwargs):
return super(ResumeMilitaryServiceView, self).patch(request, *args, **kwargs)
| true | true |
f72d027eb1356111f2daf107008ee00d025ad541 | 2,954 | py | Python | find_deathdomains.py | caspase-like-homolog-identifier/c14_witcher | e2c481607b85fed749daec0e9b3b29b65d6b448f | [
"MIT"
] | null | null | null | find_deathdomains.py | caspase-like-homolog-identifier/c14_witcher | e2c481607b85fed749daec0e9b3b29b65d6b448f | [
"MIT"
] | null | null | null | find_deathdomains.py | caspase-like-homolog-identifier/c14_witcher | e2c481607b85fed749daec0e9b3b29b65d6b448f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from run_hmmer import RunHmmer
from Bio import SearchIO
import pandas as pd
import collections
import random
import tempfile
import argparse
import pprint
import glob
import sys
class FindDeathDomains(RunHmmer):
    """Search a sequence file against a set of death-domain HMM profiles.

    Subclasses the ``RunHmmer`` command-line wrapper: one ``hmmsearch`` run is
    executed per HMM matched by the ``dd_hmm_path`` glob, and every profile
    with at least one hit is recorded in ``self.deathdomain_hits``.
    """

    def __init__(self, seqfile, dd_hmm_path, *hmmersearch_args):
        """Collect HMM paths matching *dd_hmm_path* and configure hmmsearch."""
        self.dd_hmm_paths = glob.glob(dd_hmm_path)
        super().__init__("hmmsearch", None, seqfile, None, None, *hmmersearch_args)
        self.deathdomain_hits = {}  # HMM query id -> list of Bio.SearchIO hits
        self.dd_dict = None

    def deathdomains_iter(self):
        """Run one hmmsearch per HMM profile, recording per-profile hits."""
        self.dd_names = []
        for hmm_file in self.dd_hmm_paths:
            self.hmmfile = hmm_file
            # Temporary output files for the alignment and domain table.
            # delete=False keeps them on disk for hmmsearch to write into;
            # close the Python handles right away so file descriptors are
            # not leaked across iterations.
            # NOTE(review): the files themselves are never removed — consider
            # deleting them after parsing.
            tmp1, tmp2 = [tempfile.NamedTemporaryFile(delete=False) for _ in range(2)]
            tmp1.close()
            tmp2.close()
            self.align_out = tmp1.name
            self.domtblout = tmp2.name
            std, stderr = self()  # execute hmmsearch via the RunHmmer wrapper
            deathdomain = self.has_deathdomain(self.domtblout)
            if deathdomain:
                self.deathdomain_hits[deathdomain[0].id] = deathdomain[0].hits
                self.dd_names.append(deathdomain[0].id)

    def has_deathdomain(self, domtab):
        """Parse a hmmsearch domain table; a non-empty list means a hit."""
        return list(SearchIO.parse(domtab, "hmmsearch3-domtab"))

    def DeathDomains(self, feature):
        """Tabulate *feature* (e.g. ``'evalue'``, ``'bitscore'``) per sequence.

        Runs the searches lazily on first call, then returns a DataFrame with
        one row per hit sequence and one column per death-domain profile,
        also cached on ``self.deathdomains``.
        """
        if not self.deathdomain_hits:
            self.deathdomains_iter()
        # sequence id -> feature value per profile, in self.dd_names order
        dd_dict = collections.defaultdict(list)
        for dd in self.deathdomain_hits:
            for hit in self.deathdomain_hits[dd]:
                dd_dict[hit.id].append(vars(hit)[feature])
        # Build the frame in one shot: DataFrame.append was deprecated in
        # pandas 1.4 and removed in 2.0, and row-by-row appending is quadratic.
        columns = ['Seq_ID'] + self.dd_names
        rows = [pd.Series([seq_id] + values, index=columns, name=seq_id)
                for seq_id, values in dd_dict.items()]
        self.deathdomains = pd.DataFrame(rows, columns=columns)
        return self.deathdomains
if __name__ == '__main__':
    # CLI entry point: search a sequence file against a glob of HMM profiles
    # and print a table of e-values per hit sequence and profile.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('seqfile', action='store', type=str)
    # Default glob points at the Ig HMM profiles of a local Pfam mirror.
    parser.add_argument('-g','--hmm_glob', default="/opt/DB_REF/Pfam/Ig*hmm")
    args = parser.parse_args()
    dd = FindDeathDomains(args.seqfile, args.hmm_glob)
    dd.deathdomains_iter()
    print("\n\n\n\n")
    print(dd.DeathDomains('evalue'))
| 31.094737 | 133 | 0.58497 |
from run_hmmer import RunHmmer
from Bio import SearchIO
import pandas as pd
import collections
import random
import tempfile
import argparse
import pprint
import glob
import sys
class FindDeathDomains(RunHmmer):
def __init__(self, seqfile, dd_hmm_path, *hmmersearch_args):
self.dd_hmm_paths = glob.glob(dd_hmm_path)
super().__init__("hmmsearch", None, seqfile, None, None, *hmmersearch_args)
self.deathdomain_hits = {}
self.dd_dict = None
def deathdomains_iter(self):
self.dd_names = []
for hmm_file in self.dd_hmm_paths:
self.hmmfile = hmm_file
tmp1, tmp2 = [ tempfile.NamedTemporaryFile(delete=False) for _ in range(2) ]
self.align_out = tmp1.name
self.domtblout = tmp2.name
std, stderr = self()
deathdomain = self.has_deathdomain(self.domtblout)
if deathdomain:
self.deathdomain_hits[deathdomain[0].id] = deathdomain[0].hits
self.dd_names.append(deathdomain[0].id)
def has_deathdomain(self, domtab):
return list(SearchIO.parse(domtab, "hmmsearch3-domtab"))
def DeathDomains(self, feature):
if not self.deathdomain_hits:
self.deathdomains_iter()
dd_dict = collections.defaultdict(list)
for dd in self.deathdomain_hits:
for hit in self.deathdomain_hits[dd]:
dd_dict[hit.id].append(vars(hit)[feature])
self.deathdomains = pd.DataFrame(columns = ['Seq_ID']+self.dd_names)
for seq_id, values in dd_dict.items():
self.deathdomains = self.deathdomains.append(pd.Series([seq_id]+values, index= ['Seq_ID']+self.dd_names, name = seq_id))
return self.deathdomains
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument('seqfile', action='store', type=str)
parser.add_argument('-g','--hmm_glob', default="/opt/DB_REF/Pfam/Ig*hmm")
args = parser.parse_args()
dd = FindDeathDomains(args.seqfile, args.hmm_glob)
dd.deathdomains_iter()
print("\n\n\n\n")
print(dd.DeathDomains('evalue'))
| true | true |
f72d032ec7455ded65fafe668268d74e9cbda5cc | 2,306 | py | Python | models.py | phpwizz/SimpelApi | c2a5f28fff752fb84e99568a3e0dab5c37e03c94 | [
"MIT"
] | 1 | 2018-07-14T08:43:25.000Z | 2018-07-14T08:43:25.000Z | models.py | phpwizz/SimpelApi | c2a5f28fff752fb84e99568a3e0dab5c37e03c94 | [
"MIT"
] | null | null | null | models.py | phpwizz/SimpelApi | c2a5f28fff752fb84e99568a3e0dab5c37e03c94 | [
"MIT"
] | null | null | null | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
import random, string
from itsdangerous import(TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
Base = declarative_base()
secret_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
class User(Base):
    """SQLAlchemy model for an application user with password + token auth."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key = True)
    username = Column(String)
    picture = Column (String)
    description = Column(String)
    name = Column(String)
    # passlib hash of the password; the plaintext is never stored.
    password_hash = Column(String(64))
    def hash_password(self,password):
        """Hash *password* with passlib and store it on the instance."""
        self.password_hash = pwd_context.hash(password)
    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return pwd_context.verify(password, self.password_hash)
    def generate_auth_token(self, expiration = 600):
        """Return a signed, time-limited token embedding this user's id.

        Tokens are signed with the module-level per-process ``secret_key``,
        so they become invalid whenever the server restarts.
        """
        s = Serializer(secret_key, expires_in = expiration)
        return s.dumps({'id': self.id})
    # Validate a token produced by generate_auth_token.
    @staticmethod
    def verify_auth_token(token):
        """Return the embedded user id, or None for an expired/invalid token."""
        s = Serializer(secret_key)
        try:
            data = s.loads(token)
        except SignatureExpired:
            # Signature was valid but the token has expired.
            return None
        except BadSignature:
            # Token was tampered with or signed with a different key.
            return None
        user_id = data['id']
        return user_id
    @property
    def serialize(self):
        """Return a JSON-serializable dict of the public user fields."""
        return {
            'id': self.id,
            'user_about': self.description,
            'username': self.username,
            'picture': self.picture,
            'name' : self.name
        }
class Post(Base):
    """SQLAlchemy model for a user-authored post."""
    __tablename__ = 'posts'
    id = Column(Integer, primary_key = True)
    content = Column(String(250))
    likes = Column(Integer)
    # Author foreign key into the 'user' table.
    user_id = Column(Integer,ForeignKey('user.id'))
    user = relationship(User)
    @property
    def serialize(self):
        """Return a JSON-serializable dict of the post.

        Key names keep the existing 'z' prefix (zcontent/zlikes/zauthor)
        because API clients depend on them.
        """
        return {
            'id': self.id,
            'zcontent': self.content,
            'zlikes': self.likes,
            'zauthor': self.user_id
        }
# Create (or open) the SQLite database file and materialise all tables
# declared on Base (user, posts) as a side effect of importing this module.
engine = create_engine('sqlite:///simpelapi.db')
Base.metadata.create_all(engine)
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
import random, string
from itsdangerous import(TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
Base = declarative_base()
secret_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key = True)
username = Column(String)
picture = Column (String)
description = Column(String)
name = Column(String)
password_hash = Column(String(64))
def hash_password(self,password):
self.password_hash = pwd_context.hash(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
def generate_auth_token(self, expiration = 600):
s = Serializer(secret_key, expires_in = expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(secret_key)
try:
data = s.loads(token)
except SignatureExpired:
return None
except BadSignature:
return None
user_id = data['id']
return user_id
@property
def serialize(self):
return {
'id': self.id,
'user_about': self.description,
'username': self.username,
'picture': self.picture,
'name' : self.name
}
class Post(Base):
__tablename__ = 'posts'
id = Column(Integer, primary_key = True)
content = Column(String(250))
likes = Column(Integer)
user_id = Column(Integer,ForeignKey('user.id'))
user = relationship(User)
@property
def serialize(self):
return {
'id': self.id,
'zcontent': self.content,
'zlikes': self.likes,
'zauthor': self.user_id
}
engine = create_engine('sqlite:///simpelapi.db')
Base.metadata.create_all(engine) | true | true |
f72d04129209f13907fb9ece50d3696c445c1bc3 | 11,522 | py | Python | gammapy/catalog/tests/test_hess.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | gammapy/catalog/tests/test_hess.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | gammapy/catalog/tests/test_hess.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import Counter
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import SkyCoord, Angle
from astropy.table import Table
from ...utils.testing import assert_quantity_allclose
from ...utils.testing import requires_data, requires_dependency
from ...spectrum.models import PowerLaw, ExponentialCutoffPowerLaw
from ..hess import SourceCatalogHGPS, SourceCatalogLargeScaleHGPS
@pytest.fixture(scope="session")
def cat():
return SourceCatalogHGPS("$GAMMAPY_DATA/catalogs/hgps_catalog_v1.fits.gz")
@requires_data("gammapy-data")
class TestSourceCatalogHGPS:
@staticmethod
def test_source_table(cat):
assert cat.name == "hgps"
assert len(cat.table) == 78
@staticmethod
def test_table_components(cat):
assert len(cat.table_components) == 98
@staticmethod
def test_table_associations(cat):
assert len(cat.table_associations) == 223
@staticmethod
def test_table_identifications(cat):
assert len(cat.table_identifications) == 31
@staticmethod
def test_gaussian_component(cat):
# Row index starts at 0, component numbers at 1
# Thus we expect `HGPSC 084` at row 83
c = cat.gaussian_component(83)
assert c.name == "HGPSC 084"
@staticmethod
def test_large_scale_component(cat):
assert isinstance(cat.large_scale_component, SourceCatalogLargeScaleHGPS)
@requires_data("gammapy-data")
class TestSourceCatalogObjectHGPS:
@pytest.fixture(scope="class")
def source(self, cat):
return cat["HESS J1843-033"]
@staticmethod
@pytest.mark.slow
def test_all_sources(cat):
"""Check that properties and methods work for all sources,
i.e. don't raise an error."""
for source in cat:
str(source)
source.energy_range
source.spectral_model_type
source.spectral_model()
source.spatial_model_type
source.is_pointlike
source.sky_model()
source.flux_points
@staticmethod
def test_basics(source):
assert source.name == "HESS J1843-033"
assert source.index == 64
data = source.data
assert data["Source_Class"] == "Unid"
assert "SourceCatalogObjectHGPS" in repr(source)
ss = str(source)
assert "Source name : HESS J1843-033" in ss
assert "Component HGPSC 083:" in ss
@staticmethod
def test_str(cat):
source = cat["HESS J1930+188"]
assert source.data["Spatial_Model"] == "Gaussian"
assert "Spatial components : HGPSC 097" in str(source)
source = cat["HESS J1825-137"]
assert source.data["Spatial_Model"] == "3-Gaussian"
assert "Spatial components : HGPSC 065, HGPSC 066, HGPSC 067" in str(source)
source = cat["HESS J1713-397"]
assert source.data["Spatial_Model"] == "Shell"
assert "Source name : HESS J1713-397" in str(source)
@staticmethod
def test_components(source):
components = source.components
assert len(components) == 2
c = components[1]
assert c.name == "HGPSC 084"
@staticmethod
def test_energy_range(source):
energy_range = source.energy_range
assert energy_range.unit == "TeV"
assert_allclose(energy_range.value, [0.21544346, 61.89658356])
@staticmethod
def test_spectral_model_type(cat):
spec_types = Counter([_.spectral_model_type for _ in cat])
assert spec_types == {"pl": 66, "ecpl": 12}
@staticmethod
@requires_dependency("uncertainties")
def test_spectral_model_pl(cat):
source = cat["HESS J1843-033"]
model = source.spectral_model()
assert isinstance(model, PowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 9.140179932365378e-13)
assert_allclose(pars["index"].value, 2.1513476371765137)
assert_allclose(pars["reference"].value, 1.867810606956482)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_Int_1TeV_Err"].value, rtol=0.01)
@staticmethod
@requires_dependency("uncertainties")
def test_spectral_model_ecpl(cat):
source = cat["HESS J0835-455"]
model = source.spectral_model()
assert isinstance(model, ExponentialCutoffPowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 6.408420542586617e-12)
assert_allclose(pars["index"].value, 1.3543991614920847)
assert_allclose(pars["reference"].value, 1.696938754239)
assert_allclose(pars["lambda_"].value, 0.081517637)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_Int_1TeV_Err"].value, rtol=0.01)
model = source.spectral_model("pl")
assert isinstance(model, PowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 1.833056926733856e-12)
assert_allclose(pars["index"].value, 1.8913707)
assert_allclose(pars["reference"].value, 3.0176312923431396)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_PL_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_PL_Int_1TeV_Err"].value, rtol=0.01)
@staticmethod
def test_spatial_model_type(cat):
morph_types = Counter([_.spatial_model_type for _ in cat])
assert morph_types == {
"gaussian": 52,
"2-gaussian": 8,
"shell": 7,
"point-like": 6,
"3-gaussian": 5,
}
@staticmethod
def test_sky_model_point(cat):
model = cat["HESS J1826-148"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 9.815771242691063e-13)
assert_allclose(p["lon_0"].value, 16.882482528686523)
assert_allclose(p["lat_0"].value, -1.2889292240142822)
@staticmethod
def test_sky_model_gaussian(cat):
model = cat["HESS J1119-614"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 7.959899015960725e-13)
assert_allclose(p["lon_0"].value, 292.1280822753906)
assert_allclose(p["lat_0"].value, -0.5332353711128235)
assert_allclose(p["sigma"].value, 0.09785966575145721)
@staticmethod
def test_sky_model_gaussian2(cat):
model = cat["HESS J1843-033"].sky_model()
p = model.skymodels[0].parameters
assert_allclose(p["amplitude"].value, 4.259815e-13, rtol=1e-5)
assert_allclose(p["lon_0"].value, 29.047216415405273)
assert_allclose(p["lat_0"].value, 0.24389676749706268)
assert_allclose(p["sigma"].value, 0.12499100714921951)
p = model.skymodels[1].parameters
assert_allclose(p["amplitude"].value, 4.880365e-13, rtol=1e-5)
assert_allclose(p["lon_0"].value, 28.77037811279297)
assert_allclose(p["lat_0"].value, -0.0727819949388504)
assert_allclose(p["sigma"].value, 0.2294706553220749)
@staticmethod
def test_sky_model_gaussian3(cat):
model = cat["HESS J1825-137"].sky_model()
p = model.skymodels[0].parameters
assert_allclose(p["amplitude"].value, 1.8952104218765842e-11)
assert_allclose(p["lon_0"].value, 16.988601684570312)
assert_allclose(p["lat_0"].value, -0.4913068115711212)
assert_allclose(p["sigma"].value, 0.47650089859962463)
p = model.skymodels[1].parameters
assert_allclose(p["amplitude"].value, 4.4639763971527836e-11)
assert_allclose(p["lon_0"].value, 17.71169090270996)
assert_allclose(p["lat_0"].value, -0.6598004102706909)
assert_allclose(p["sigma"].value, 0.3910967707633972)
p = model.skymodels[2].parameters
assert_allclose(p["amplitude"].value, 5.870712920658374e-12)
assert_allclose(p["lon_0"].value, 17.840524673461914)
assert_allclose(p["lat_0"].value, -0.7057178020477295)
assert_allclose(p["sigma"].value, 0.10932201147079468)
@staticmethod
def test_sky_model_gaussian_extern(cat):
# special test for the only extern source with a gaussian morphology
model = cat["HESS J1801-233"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 7.499999970031479e-13)
assert_allclose(p["lon_0"].value, 6.656888961791992)
assert_allclose(p["lat_0"].value, -0.267688125371933)
assert_allclose(p["sigma"].value, 0.17)
@staticmethod
def test_sky_model_shell(cat):
model = cat["Vela Junior"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 3.2163001428830995e-11)
assert_allclose(p["lon_0"].value, 266.2873840332031)
assert_allclose(p["lat_0"].value, -1.243260383605957)
assert_allclose(p["radius"].value, 0.95)
assert_allclose(p["width"].value, 0.05)
@requires_data("gammapy-data")
class TestSourceCatalogObjectHGPSComponent:
@pytest.fixture(scope="class")
def component(self, cat):
return cat.gaussian_component(83)
@staticmethod
def test_repr(component):
assert "SourceCatalogObjectHGPSComponent" in repr(component)
@staticmethod
def test_str(component):
assert "Component HGPSC 084" in str(component)
@staticmethod
def test_name(component):
assert component.name == "HGPSC 084"
@staticmethod
def test_index(component):
assert component.index == 83
@staticmethod
def test_spatial_model(component):
model = component.spatial_model
p = model.parameters
assert_allclose(p["lon_0"].value, 28.77037811279297)
assert_allclose(p.error("lon_0"), 0.058748625218868256)
assert_allclose(p["lat_0"].value, -0.0727819949388504)
assert_allclose(p.error("lat_0"), 0.06880396604537964)
assert_allclose(p["sigma"].value, 0.2294706553220749)
assert_allclose(p.error("sigma"), 0.04618723690509796)
class TestSourceCatalogLargeScaleHGPS:
def setup(self):
table = Table()
table["GLON"] = [-30, -10, 10, 20] * u.deg
table["Surface_Brightness"] = [0, 1, 10, 0] * u.Unit("cm-2 s-1 sr-1")
table["GLAT"] = [-1, 0, 1, 0] * u.deg
table["Width"] = [0.4, 0.5, 0.3, 1.0] * u.deg
self.table = table
self.model = SourceCatalogLargeScaleHGPS(table)
def test_evaluate(self):
x = np.linspace(-100, 20, 5)
y = np.linspace(-2, 2, 7)
x, y = np.meshgrid(x, y)
coords = SkyCoord(x, y, unit="deg", frame="galactic")
image = self.model.evaluate(coords)
desired = 1.223962643740966 * u.Unit("cm-2 s-1 sr-1")
assert_quantity_allclose(image.sum(), desired)
def test_parvals(self):
glon = Angle(10, unit="deg")
assert_quantity_allclose(
self.model.peak_brightness(glon), 10 * u.Unit("cm-2 s-1 sr-1")
)
assert_quantity_allclose(self.model.peak_latitude(glon), 1 * u.deg)
assert_quantity_allclose(self.model.width(glon), 0.3 * u.deg)
| 37.167742 | 87 | 0.661083 |
from collections import Counter
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import SkyCoord, Angle
from astropy.table import Table
from ...utils.testing import assert_quantity_allclose
from ...utils.testing import requires_data, requires_dependency
from ...spectrum.models import PowerLaw, ExponentialCutoffPowerLaw
from ..hess import SourceCatalogHGPS, SourceCatalogLargeScaleHGPS
@pytest.fixture(scope="session")
def cat():
return SourceCatalogHGPS("$GAMMAPY_DATA/catalogs/hgps_catalog_v1.fits.gz")
@requires_data("gammapy-data")
class TestSourceCatalogHGPS:
@staticmethod
def test_source_table(cat):
assert cat.name == "hgps"
assert len(cat.table) == 78
@staticmethod
def test_table_components(cat):
assert len(cat.table_components) == 98
@staticmethod
def test_table_associations(cat):
assert len(cat.table_associations) == 223
@staticmethod
def test_table_identifications(cat):
assert len(cat.table_identifications) == 31
@staticmethod
def test_gaussian_component(cat):
c = cat.gaussian_component(83)
assert c.name == "HGPSC 084"
@staticmethod
def test_large_scale_component(cat):
assert isinstance(cat.large_scale_component, SourceCatalogLargeScaleHGPS)
@requires_data("gammapy-data")
class TestSourceCatalogObjectHGPS:
@pytest.fixture(scope="class")
def source(self, cat):
return cat["HESS J1843-033"]
@staticmethod
@pytest.mark.slow
def test_all_sources(cat):
for source in cat:
str(source)
source.energy_range
source.spectral_model_type
source.spectral_model()
source.spatial_model_type
source.is_pointlike
source.sky_model()
source.flux_points
@staticmethod
def test_basics(source):
assert source.name == "HESS J1843-033"
assert source.index == 64
data = source.data
assert data["Source_Class"] == "Unid"
assert "SourceCatalogObjectHGPS" in repr(source)
ss = str(source)
assert "Source name : HESS J1843-033" in ss
assert "Component HGPSC 083:" in ss
@staticmethod
def test_str(cat):
source = cat["HESS J1930+188"]
assert source.data["Spatial_Model"] == "Gaussian"
assert "Spatial components : HGPSC 097" in str(source)
source = cat["HESS J1825-137"]
assert source.data["Spatial_Model"] == "3-Gaussian"
assert "Spatial components : HGPSC 065, HGPSC 066, HGPSC 067" in str(source)
source = cat["HESS J1713-397"]
assert source.data["Spatial_Model"] == "Shell"
assert "Source name : HESS J1713-397" in str(source)
@staticmethod
def test_components(source):
components = source.components
assert len(components) == 2
c = components[1]
assert c.name == "HGPSC 084"
@staticmethod
def test_energy_range(source):
energy_range = source.energy_range
assert energy_range.unit == "TeV"
assert_allclose(energy_range.value, [0.21544346, 61.89658356])
@staticmethod
def test_spectral_model_type(cat):
spec_types = Counter([_.spectral_model_type for _ in cat])
assert spec_types == {"pl": 66, "ecpl": 12}
@staticmethod
@requires_dependency("uncertainties")
def test_spectral_model_pl(cat):
source = cat["HESS J1843-033"]
model = source.spectral_model()
assert isinstance(model, PowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 9.140179932365378e-13)
assert_allclose(pars["index"].value, 2.1513476371765137)
assert_allclose(pars["reference"].value, 1.867810606956482)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_Int_1TeV_Err"].value, rtol=0.01)
@staticmethod
@requires_dependency("uncertainties")
def test_spectral_model_ecpl(cat):
source = cat["HESS J0835-455"]
model = source.spectral_model()
assert isinstance(model, ExponentialCutoffPowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 6.408420542586617e-12)
assert_allclose(pars["index"].value, 1.3543991614920847)
assert_allclose(pars["reference"].value, 1.696938754239)
assert_allclose(pars["lambda_"].value, 0.081517637)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_Int_1TeV_Err"].value, rtol=0.01)
model = source.spectral_model("pl")
assert isinstance(model, PowerLaw)
pars = model.parameters
assert_allclose(pars["amplitude"].value, 1.833056926733856e-12)
assert_allclose(pars["index"].value, 1.8913707)
assert_allclose(pars["reference"].value, 3.0176312923431396)
val, err = model.integral_error(1 * u.TeV, 1e5 * u.TeV).value
assert_allclose(val, source.data["Flux_Spec_PL_Int_1TeV"].value, rtol=0.01)
assert_allclose(err, source.data["Flux_Spec_PL_Int_1TeV_Err"].value, rtol=0.01)
@staticmethod
def test_spatial_model_type(cat):
morph_types = Counter([_.spatial_model_type for _ in cat])
assert morph_types == {
"gaussian": 52,
"2-gaussian": 8,
"shell": 7,
"point-like": 6,
"3-gaussian": 5,
}
@staticmethod
def test_sky_model_point(cat):
model = cat["HESS J1826-148"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 9.815771242691063e-13)
assert_allclose(p["lon_0"].value, 16.882482528686523)
assert_allclose(p["lat_0"].value, -1.2889292240142822)
@staticmethod
def test_sky_model_gaussian(cat):
model = cat["HESS J1119-614"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 7.959899015960725e-13)
assert_allclose(p["lon_0"].value, 292.1280822753906)
assert_allclose(p["lat_0"].value, -0.5332353711128235)
assert_allclose(p["sigma"].value, 0.09785966575145721)
@staticmethod
def test_sky_model_gaussian2(cat):
model = cat["HESS J1843-033"].sky_model()
p = model.skymodels[0].parameters
assert_allclose(p["amplitude"].value, 4.259815e-13, rtol=1e-5)
assert_allclose(p["lon_0"].value, 29.047216415405273)
assert_allclose(p["lat_0"].value, 0.24389676749706268)
assert_allclose(p["sigma"].value, 0.12499100714921951)
p = model.skymodels[1].parameters
assert_allclose(p["amplitude"].value, 4.880365e-13, rtol=1e-5)
assert_allclose(p["lon_0"].value, 28.77037811279297)
assert_allclose(p["lat_0"].value, -0.0727819949388504)
assert_allclose(p["sigma"].value, 0.2294706553220749)
@staticmethod
def test_sky_model_gaussian3(cat):
model = cat["HESS J1825-137"].sky_model()
p = model.skymodels[0].parameters
assert_allclose(p["amplitude"].value, 1.8952104218765842e-11)
assert_allclose(p["lon_0"].value, 16.988601684570312)
assert_allclose(p["lat_0"].value, -0.4913068115711212)
assert_allclose(p["sigma"].value, 0.47650089859962463)
p = model.skymodels[1].parameters
assert_allclose(p["amplitude"].value, 4.4639763971527836e-11)
assert_allclose(p["lon_0"].value, 17.71169090270996)
assert_allclose(p["lat_0"].value, -0.6598004102706909)
assert_allclose(p["sigma"].value, 0.3910967707633972)
p = model.skymodels[2].parameters
assert_allclose(p["amplitude"].value, 5.870712920658374e-12)
assert_allclose(p["lon_0"].value, 17.840524673461914)
assert_allclose(p["lat_0"].value, -0.7057178020477295)
assert_allclose(p["sigma"].value, 0.10932201147079468)
@staticmethod
def test_sky_model_gaussian_extern(cat):
model = cat["HESS J1801-233"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 7.499999970031479e-13)
assert_allclose(p["lon_0"].value, 6.656888961791992)
assert_allclose(p["lat_0"].value, -0.267688125371933)
assert_allclose(p["sigma"].value, 0.17)
@staticmethod
def test_sky_model_shell(cat):
model = cat["Vela Junior"].sky_model()
p = model.parameters
assert_allclose(p["amplitude"].value, 3.2163001428830995e-11)
assert_allclose(p["lon_0"].value, 266.2873840332031)
assert_allclose(p["lat_0"].value, -1.243260383605957)
assert_allclose(p["radius"].value, 0.95)
assert_allclose(p["width"].value, 0.05)
@requires_data("gammapy-data")
class TestSourceCatalogObjectHGPSComponent:
@pytest.fixture(scope="class")
def component(self, cat):
return cat.gaussian_component(83)
@staticmethod
def test_repr(component):
assert "SourceCatalogObjectHGPSComponent" in repr(component)
@staticmethod
def test_str(component):
assert "Component HGPSC 084" in str(component)
@staticmethod
def test_name(component):
assert component.name == "HGPSC 084"
@staticmethod
def test_index(component):
assert component.index == 83
@staticmethod
def test_spatial_model(component):
model = component.spatial_model
p = model.parameters
assert_allclose(p["lon_0"].value, 28.77037811279297)
assert_allclose(p.error("lon_0"), 0.058748625218868256)
assert_allclose(p["lat_0"].value, -0.0727819949388504)
assert_allclose(p.error("lat_0"), 0.06880396604537964)
assert_allclose(p["sigma"].value, 0.2294706553220749)
assert_allclose(p.error("sigma"), 0.04618723690509796)
class TestSourceCatalogLargeScaleHGPS:
def setup(self):
table = Table()
table["GLON"] = [-30, -10, 10, 20] * u.deg
table["Surface_Brightness"] = [0, 1, 10, 0] * u.Unit("cm-2 s-1 sr-1")
table["GLAT"] = [-1, 0, 1, 0] * u.deg
table["Width"] = [0.4, 0.5, 0.3, 1.0] * u.deg
self.table = table
self.model = SourceCatalogLargeScaleHGPS(table)
def test_evaluate(self):
x = np.linspace(-100, 20, 5)
y = np.linspace(-2, 2, 7)
x, y = np.meshgrid(x, y)
coords = SkyCoord(x, y, unit="deg", frame="galactic")
image = self.model.evaluate(coords)
desired = 1.223962643740966 * u.Unit("cm-2 s-1 sr-1")
assert_quantity_allclose(image.sum(), desired)
def test_parvals(self):
glon = Angle(10, unit="deg")
assert_quantity_allclose(
self.model.peak_brightness(glon), 10 * u.Unit("cm-2 s-1 sr-1")
)
assert_quantity_allclose(self.model.peak_latitude(glon), 1 * u.deg)
assert_quantity_allclose(self.model.width(glon), 0.3 * u.deg)
| true | true |
f72d04c38d826a7bc9753a2f6f269707d60b54c5 | 2,335 | py | Python | nmmo/entity/player.py | jsuarez5341/neural-mmo | 0828982e8a30641986fdd947ab82f34c008fafde | [
"MIT"
] | 230 | 2019-07-03T06:52:29.000Z | 2021-12-10T18:47:37.000Z | nmmo/entity/player.py | jsuarez5341/neural-mmo | 0828982e8a30641986fdd947ab82f34c008fafde | [
"MIT"
] | 16 | 2019-10-11T16:51:27.000Z | 2021-12-06T14:32:31.000Z | nmmo/entity/player.py | jsuarez5341/neural-mmo | 0828982e8a30641986fdd947ab82f34c008fafde | [
"MIT"
] | 40 | 2019-08-02T19:36:38.000Z | 2021-12-02T09:59:08.000Z | import numpy as np
from pdb import set_trace as T
import nmmo
from nmmo.systems import ai, equipment
from nmmo.lib import material
from nmmo.systems.skill import Skills
from nmmo.systems.achievement import Diary
from nmmo.entity import entity
class Player(entity.Entity):
def __init__(self, realm, pos, agent, color, pop):
super().__init__(realm, pos, agent.iden, agent.name, color, pop)
self.agent = agent
self.pop = pop
#Scripted hooks
self.target = None
self.food = None
self.water = None
self.vision = 7
#Submodules
self.skills = Skills(self)
self.achievements = Diary(realm.config)
self.dataframe.init(nmmo.Serialized.Entity, self.entID, self.pos)
@property
def serial(self):
return self.population, self.entID
@property
def isPlayer(self) -> bool:
return True
@property
def population(self):
return self.pop
def applyDamage(self, dmg, style):
self.resources.food.increment(dmg)
self.resources.water.increment(dmg)
self.skills.applyDamage(dmg, style)
def receiveDamage(self, source, dmg):
if not super().receiveDamage(source, dmg):
if source:
source.history.playerKills += 1
return
self.resources.food.decrement(dmg)
self.resources.water.decrement(dmg)
self.skills.receiveDamage(dmg)
def receiveLoot(self, loadout):
if loadout.chestplate.level > self.loadout.chestplate.level:
self.loadout.chestplate = loadout.chestplate
if loadout.platelegs.level > self.loadout.platelegs.level:
self.loadout.platelegs = loadout.platelegs
def packet(self):
data = super().packet()
data['entID'] = self.entID
data['annID'] = self.population
data['base'] = self.base.packet()
data['resource'] = self.resources.packet()
data['skills'] = self.skills.packet()
return data
def update(self, realm, actions):
'''Post-action update. Do not include history'''
super().update(realm, actions)
if not self.alive:
return
self.resources.update(realm, self, actions)
self.skills.update(realm, self, actions)
self.achievements.update(realm, self)
#self.inventory.update(world, actions)
| 26.83908 | 71 | 0.651392 | import numpy as np
from pdb import set_trace as T
import nmmo
from nmmo.systems import ai, equipment
from nmmo.lib import material
from nmmo.systems.skill import Skills
from nmmo.systems.achievement import Diary
from nmmo.entity import entity
class Player(entity.Entity):
def __init__(self, realm, pos, agent, color, pop):
super().__init__(realm, pos, agent.iden, agent.name, color, pop)
self.agent = agent
self.pop = pop
self.target = None
self.food = None
self.water = None
self.vision = 7
self.skills = Skills(self)
self.achievements = Diary(realm.config)
self.dataframe.init(nmmo.Serialized.Entity, self.entID, self.pos)
@property
def serial(self):
return self.population, self.entID
@property
def isPlayer(self) -> bool:
return True
@property
def population(self):
return self.pop
def applyDamage(self, dmg, style):
self.resources.food.increment(dmg)
self.resources.water.increment(dmg)
self.skills.applyDamage(dmg, style)
def receiveDamage(self, source, dmg):
if not super().receiveDamage(source, dmg):
if source:
source.history.playerKills += 1
return
self.resources.food.decrement(dmg)
self.resources.water.decrement(dmg)
self.skills.receiveDamage(dmg)
def receiveLoot(self, loadout):
if loadout.chestplate.level > self.loadout.chestplate.level:
self.loadout.chestplate = loadout.chestplate
if loadout.platelegs.level > self.loadout.platelegs.level:
self.loadout.platelegs = loadout.platelegs
def packet(self):
data = super().packet()
data['entID'] = self.entID
data['annID'] = self.population
data['base'] = self.base.packet()
data['resource'] = self.resources.packet()
data['skills'] = self.skills.packet()
return data
def update(self, realm, actions):
super().update(realm, actions)
if not self.alive:
return
self.resources.update(realm, self, actions)
self.skills.update(realm, self, actions)
self.achievements.update(realm, self)
| true | true |
f72d0563aa56e9c5a65d9965f487213c64d35e84 | 3,423 | py | Python | newchain_keys/main.py | newtonproject/newchain-keys.py | d8a053d78787dfc4403b57e60d54d0472d59787c | [
"MIT"
] | null | null | null | newchain_keys/main.py | newtonproject/newchain-keys.py | d8a053d78787dfc4403b57e60d54d0472d59787c | [
"MIT"
] | null | null | null | newchain_keys/main.py | newtonproject/newchain-keys.py | d8a053d78787dfc4403b57e60d54d0472d59787c | [
"MIT"
] | null | null | null | from typing import (Any, Union, Type) # noqa: F401
from newchain_keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from newchain_keys.exceptions import (
ValidationError,
)
from newchain_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
#
# datatype shortcuts
#
PublicKey = PublicKey # type: Type[_PublicKey]
PrivateKey = PrivateKey # type: Type[_PrivateKey]
Signature = Signature # type: Type[_Signature]
#
# Proxy method calls to the backends
#
def ecdsa_sign(self,
message_hash: bytes,
private_key: _PrivateKey) -> _Signature:
validate_message_hash(message_hash)
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `newchain_keys.datatypes.PrivateKey`"
)
signature = self.backend.ecdsa_sign(message_hash, private_key)
if not isinstance(signature, Signature):
raise ValidationError(
"Backend returned an invalid signature. Return value must be "
"an instance of `newchain_keys.datatypes.Signature`"
)
return signature
def ecdsa_verify(self,
message_hash: bytes,
signature: _Signature,
public_key: _PublicKey) -> bool:
if not isinstance(public_key, PublicKey):
raise ValidationError(
"The `public_key` must be an instance of `newchain_keys.datatypes.PublicKey`"
)
return self.ecdsa_recover(message_hash, signature) == public_key
def ecdsa_recover(self,
message_hash: bytes,
signature: _Signature) -> _PublicKey:
validate_message_hash(message_hash)
if not isinstance(signature, Signature):
raise ValidationError(
"The `signature` must be an instance of `newchain_keys.datatypes.Signature`"
)
public_key = self.backend.ecdsa_recover(message_hash, signature)
if not isinstance(public_key, _PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `newchain_keys.datatypes.PublicKey`"
)
return public_key
def private_key_to_public_key(self, private_key: _PrivateKey) -> _PublicKey:
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `newchain_keys.datatypes.PrivateKey`"
)
public_key = self.backend.private_key_to_public_key(private_key)
if not isinstance(public_key, PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `newchain_keys.datatypes.PublicKey`"
)
return public_key
# This creates an easy to import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to import or instantiation time).
lazy_key_api = KeyAPI(backend=None)
| 36.414894 | 95 | 0.643003 | from typing import (Any, Union, Type)
from newchain_keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from newchain_keys.exceptions import (
ValidationError,
)
from newchain_keys.validation import (
validate_message_hash,
)
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
PublicKey = PublicKey
PrivateKey = PrivateKey
Signature = Signature
def ecdsa_sign(self,
message_hash: bytes,
private_key: _PrivateKey) -> _Signature:
validate_message_hash(message_hash)
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `newchain_keys.datatypes.PrivateKey`"
)
signature = self.backend.ecdsa_sign(message_hash, private_key)
if not isinstance(signature, Signature):
raise ValidationError(
"Backend returned an invalid signature. Return value must be "
"an instance of `newchain_keys.datatypes.Signature`"
)
return signature
def ecdsa_verify(self,
message_hash: bytes,
signature: _Signature,
public_key: _PublicKey) -> bool:
if not isinstance(public_key, PublicKey):
raise ValidationError(
"The `public_key` must be an instance of `newchain_keys.datatypes.PublicKey`"
)
return self.ecdsa_recover(message_hash, signature) == public_key
def ecdsa_recover(self,
message_hash: bytes,
signature: _Signature) -> _PublicKey:
validate_message_hash(message_hash)
if not isinstance(signature, Signature):
raise ValidationError(
"The `signature` must be an instance of `newchain_keys.datatypes.Signature`"
)
public_key = self.backend.ecdsa_recover(message_hash, signature)
if not isinstance(public_key, _PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `newchain_keys.datatypes.PublicKey`"
)
return public_key
def private_key_to_public_key(self, private_key: _PrivateKey) -> _PublicKey:
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `newchain_keys.datatypes.PrivateKey`"
)
public_key = self.backend.private_key_to_public_key(private_key)
if not isinstance(public_key, PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `newchain_keys.datatypes.PublicKey`"
)
return public_key
lazy_key_api = KeyAPI(backend=None)
| true | true |
f72d059384ed8eef053b45a327b0c27c29814ac1 | 11,111 | py | Python | opentelemetry-sdk/tests/metrics/test_metrics.py | 3tilley/opentelemetry-python | 4ed4fd08db67de69369f87862e43562c2e43fed5 | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/tests/metrics/test_metrics.py | 3tilley/opentelemetry-python | 4ed4fd08db67de69369f87862e43562c2e43fed5 | [
"Apache-2.0"
] | null | null | null | opentelemetry-sdk/tests/metrics/test_metrics.py | 3tilley/opentelemetry-python | 4ed4fd08db67de69369f87862e43562c2e43fed5 | [
"Apache-2.0"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import WARNING
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from opentelemetry._metrics import NoOpMeter
from opentelemetry.sdk._metrics import Meter, MeterProvider
from opentelemetry.sdk._metrics.instrument import (
Counter,
Histogram,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
from opentelemetry.sdk._metrics.metric_reader import MetricReader
from opentelemetry.sdk._metrics.point import AggregationTemporality
from opentelemetry.sdk.resources import Resource
from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc
class DummyMetricReader(MetricReader):
    """Minimal concrete ``MetricReader`` used as a test double.

    Uses cumulative aggregation temporality, silently discards any received
    metrics, and always reports a successful shutdown.
    """
    def __init__(self):
        super().__init__(AggregationTemporality.CUMULATIVE)
    def _receive_metrics(self, metrics):
        # Metrics are intentionally dropped; the tests only assert on calls.
        pass
    def shutdown(self):
        return True
class TestMeterProvider(ConcurrencyTestBase):
    """Behavioral tests for the SDK ``MeterProvider`` (resource sharing,
    meter caching, shutdown semantics, and measurement plumbing)."""
    def test_resource(self):
        """
        `MeterProvider` provides a way to allow a `Resource` to be specified.
        """
        meter_provider_0 = MeterProvider()
        meter_provider_1 = MeterProvider()
        # Providers created without an explicit resource share the default one.
        self.assertIs(
            meter_provider_0._sdk_config.resource,
            meter_provider_1._sdk_config.resource,
        )
        self.assertIsInstance(meter_provider_0._sdk_config.resource, Resource)
        self.assertIsInstance(meter_provider_1._sdk_config.resource, Resource)
        resource = Resource({"key": "value"})
        self.assertIs(
            MeterProvider(resource=resource)._sdk_config.resource, resource
        )
    def test_get_meter(self):
        """
        `MeterProvider.get_meter` arguments are used to create an
        `InstrumentationInfo` object on the created `Meter`.
        """
        meter = MeterProvider().get_meter(
            "name",
            version="version",
            schema_url="schema_url",
        )
        self.assertEqual(meter._instrumentation_info.name, "name")
        self.assertEqual(meter._instrumentation_info.version, "version")
        self.assertEqual(meter._instrumentation_info.schema_url, "schema_url")
    def test_get_meter_empty(self):
        """
        `MeterProvider.get_meter` called with None or empty string as name
        should return a NoOpMeter.
        """
        meter = MeterProvider().get_meter(
            None,
            version="version",
            schema_url="schema_url",
        )
        self.assertIsInstance(meter, NoOpMeter)
        self.assertEqual(meter._name, None)
        meter = MeterProvider().get_meter(
            "",
            version="version",
            schema_url="schema_url",
        )
        self.assertIsInstance(meter, NoOpMeter)
        self.assertEqual(meter._name, "")
    def test_get_meter_duplicate(self):
        """
        Subsequent calls to `MeterProvider.get_meter` with the same arguments
        should return the same `Meter` instance.
        """
        mp = MeterProvider()
        meter1 = mp.get_meter(
            "name",
            version="version",
            schema_url="schema_url",
        )
        meter2 = mp.get_meter(
            "name",
            version="version",
            schema_url="schema_url",
        )
        meter3 = mp.get_meter(
            "name2",
            version="version",
            schema_url="schema_url",
        )
        self.assertIs(meter1, meter2)
        self.assertIsNot(meter1, meter3)
    def test_shutdown(self):
        """shutdown() returns False and warns when any reader fails to stop."""
        # First reader fails to shut down; its str() feeds the warning message.
        mock_metric_reader_0 = MagicMock(
            **{
                "shutdown.return_value": False,
                "__str__.return_value": "mock_metric_reader_0",
            }
        )
        mock_metric_reader_1 = Mock(**{"shutdown.return_value": True})
        meter_provider = MeterProvider(
            metric_readers=[mock_metric_reader_0, mock_metric_reader_1]
        )
        with self.assertLogs(level=WARNING) as log:
            self.assertFalse(meter_provider.shutdown())
            self.assertEqual(
                log.records[0].getMessage(),
                "MetricReader mock_metric_reader_0 failed to shutdown",
            )
        mock_metric_reader_0.shutdown.assert_called_once()
        mock_metric_reader_1.shutdown.assert_called_once()
        # With every reader shutting down cleanly, shutdown() reports success.
        mock_metric_reader_0 = Mock(**{"shutdown.return_value": True})
        mock_metric_reader_1 = Mock(**{"shutdown.return_value": True})
        meter_provider = MeterProvider(
            metric_readers=[mock_metric_reader_0, mock_metric_reader_1]
        )
        self.assertTrue(meter_provider.shutdown())
        mock_metric_reader_0.shutdown.assert_called_once()
        mock_metric_reader_1.shutdown.assert_called_once()
    def test_shutdown_subsequent_calls(self):
        """
        No subsequent attempts to get a `Meter` are allowed after calling
        `MeterProvider.shutdown`
        """
        meter_provider = MeterProvider()
        # First shutdown emits no warning, so assertLogs itself raises.
        with self.assertRaises(AssertionError):
            with self.assertLogs(level=WARNING):
                meter_provider.shutdown()
        # A repeated shutdown is expected to log a warning.
        with self.assertLogs(level=WARNING):
            meter_provider.shutdown()
    @patch("opentelemetry.sdk._metrics._logger")
    def test_shutdown_race(self, mock_logger):
        """Concurrent shutdown calls: exactly one wins, the rest warn."""
        mock_logger.warning = MockFunc()
        meter_provider = MeterProvider()
        num_threads = 70
        self.run_with_many_threads(
            meter_provider.shutdown, num_threads=num_threads
        )
        self.assertEqual(mock_logger.warning.call_count, num_threads - 1)
    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_measurement_collect_callback(
        self, mock_sync_measurement_consumer
    ):
        """Each reader's collect() is routed to the measurement consumer."""
        # NOTE(review): `[DummyMetricReader()] * 5` repeats the SAME reader
        # instance five times — presumably fine for this count-based check,
        # but verify if per-reader registration ever matters.
        metric_readers = [DummyMetricReader()] * 5
        sync_consumer_instance = mock_sync_measurement_consumer()
        sync_consumer_instance.collect = MockFunc()
        MeterProvider(metric_readers=metric_readers)
        for reader in metric_readers:
            reader.collect()
        self.assertEqual(
            sync_consumer_instance.collect.call_count, len(metric_readers)
        )
    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_creates_sync_measurement_consumer(
        self, mock_sync_measurement_consumer
    ):
        """Constructing a provider instantiates the measurement consumer."""
        MeterProvider()
        mock_sync_measurement_consumer.assert_called()
    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_register_asynchronous_instrument(
        self, mock_sync_measurement_consumer
    ):
        """Async instruments register themselves with the consumer on creation."""
        meter_provider = MeterProvider()
        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_counter(
                "name", Mock()
            )
        )
        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_up_down_counter(
                "name", Mock()
            )
        )
        meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
            meter_provider.get_meter("name").create_observable_gauge(
                "name", Mock()
            )
        )
    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_counter(self, mock_sync_measurement_consumer):
        """Counter.add forwards a measurement to the consumer."""
        sync_consumer_instance = mock_sync_measurement_consumer()
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_counter("name")
        counter.add(1)
        sync_consumer_instance.consume_measurement.assert_called()
    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_up_down_counter(
        self, mock_sync_measurement_consumer
    ):
        """UpDownCounter.add forwards a measurement to the consumer."""
        sync_consumer_instance = mock_sync_measurement_consumer()
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_up_down_counter(
            "name"
        )
        counter.add(1)
        sync_consumer_instance.consume_measurement.assert_called()
    @patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
    def test_consume_measurement_histogram(
        self, mock_sync_measurement_consumer
    ):
        """Histogram.record forwards a measurement to the consumer."""
        sync_consumer_instance = mock_sync_measurement_consumer()
        meter_provider = MeterProvider()
        counter = meter_provider.get_meter("name").create_histogram("name")
        counter.record(1)
        sync_consumer_instance.consume_measurement.assert_called()
class TestMeter(TestCase):
    """Checks that ``Meter``'s factory methods build the right instrument
    types and propagate the instrument name."""

    def setUp(self):
        # The measurement consumer and instrumentation info are irrelevant
        # to these checks, so plain mocks stand in for them.
        self.meter = Meter(Mock(), Mock())

    def test_create_counter(self):
        instrument = self.meter.create_counter(
            "name", unit="unit", description="description"
        )
        self.assertIsInstance(instrument, Counter)
        self.assertEqual(instrument.name, "name")

    def test_create_up_down_counter(self):
        instrument = self.meter.create_up_down_counter(
            "name", unit="unit", description="description"
        )
        self.assertIsInstance(instrument, UpDownCounter)
        self.assertEqual(instrument.name, "name")

    def test_create_observable_counter(self):
        # Observable instruments additionally take a callback (mocked here).
        instrument = self.meter.create_observable_counter(
            "name", Mock(), unit="unit", description="description"
        )
        self.assertIsInstance(instrument, ObservableCounter)
        self.assertEqual(instrument.name, "name")

    def test_create_histogram(self):
        instrument = self.meter.create_histogram(
            "name", unit="unit", description="description"
        )
        self.assertIsInstance(instrument, Histogram)
        self.assertEqual(instrument.name, "name")

    def test_create_observable_gauge(self):
        instrument = self.meter.create_observable_gauge(
            "name", Mock(), unit="unit", description="description"
        )
        self.assertIsInstance(instrument, ObservableGauge)
        self.assertEqual(instrument.name, "name")

    def test_create_observable_up_down_counter(self):
        instrument = self.meter.create_observable_up_down_counter(
            "name", Mock(), unit="unit", description="description"
        )
        self.assertIsInstance(instrument, ObservableUpDownCounter)
        self.assertEqual(instrument.name, "name")
| 34.187692 | 97 | 0.671587 |
from logging import WARNING
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from opentelemetry._metrics import NoOpMeter
from opentelemetry.sdk._metrics import Meter, MeterProvider
from opentelemetry.sdk._metrics.instrument import (
Counter,
Histogram,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
from opentelemetry.sdk._metrics.metric_reader import MetricReader
from opentelemetry.sdk._metrics.point import AggregationTemporality
from opentelemetry.sdk.resources import Resource
from opentelemetry.test.concurrency_test import ConcurrencyTestBase, MockFunc
class DummyMetricReader(MetricReader):
def __init__(self):
super().__init__(AggregationTemporality.CUMULATIVE)
def _receive_metrics(self, metrics):
pass
def shutdown(self):
return True
class TestMeterProvider(ConcurrencyTestBase):
def test_resource(self):
meter_provider_0 = MeterProvider()
meter_provider_1 = MeterProvider()
self.assertIs(
meter_provider_0._sdk_config.resource,
meter_provider_1._sdk_config.resource,
)
self.assertIsInstance(meter_provider_0._sdk_config.resource, Resource)
self.assertIsInstance(meter_provider_1._sdk_config.resource, Resource)
resource = Resource({"key": "value"})
self.assertIs(
MeterProvider(resource=resource)._sdk_config.resource, resource
)
def test_get_meter(self):
meter = MeterProvider().get_meter(
"name",
version="version",
schema_url="schema_url",
)
self.assertEqual(meter._instrumentation_info.name, "name")
self.assertEqual(meter._instrumentation_info.version, "version")
self.assertEqual(meter._instrumentation_info.schema_url, "schema_url")
def test_get_meter_empty(self):
meter = MeterProvider().get_meter(
None,
version="version",
schema_url="schema_url",
)
self.assertIsInstance(meter, NoOpMeter)
self.assertEqual(meter._name, None)
meter = MeterProvider().get_meter(
"",
version="version",
schema_url="schema_url",
)
self.assertIsInstance(meter, NoOpMeter)
self.assertEqual(meter._name, "")
def test_get_meter_duplicate(self):
mp = MeterProvider()
meter1 = mp.get_meter(
"name",
version="version",
schema_url="schema_url",
)
meter2 = mp.get_meter(
"name",
version="version",
schema_url="schema_url",
)
meter3 = mp.get_meter(
"name2",
version="version",
schema_url="schema_url",
)
self.assertIs(meter1, meter2)
self.assertIsNot(meter1, meter3)
def test_shutdown(self):
mock_metric_reader_0 = MagicMock(
**{
"shutdown.return_value": False,
"__str__.return_value": "mock_metric_reader_0",
}
)
mock_metric_reader_1 = Mock(**{"shutdown.return_value": True})
meter_provider = MeterProvider(
metric_readers=[mock_metric_reader_0, mock_metric_reader_1]
)
with self.assertLogs(level=WARNING) as log:
self.assertFalse(meter_provider.shutdown())
self.assertEqual(
log.records[0].getMessage(),
"MetricReader mock_metric_reader_0 failed to shutdown",
)
mock_metric_reader_0.shutdown.assert_called_once()
mock_metric_reader_1.shutdown.assert_called_once()
mock_metric_reader_0 = Mock(**{"shutdown.return_value": True})
mock_metric_reader_1 = Mock(**{"shutdown.return_value": True})
meter_provider = MeterProvider(
metric_readers=[mock_metric_reader_0, mock_metric_reader_1]
)
self.assertTrue(meter_provider.shutdown())
mock_metric_reader_0.shutdown.assert_called_once()
mock_metric_reader_1.shutdown.assert_called_once()
def test_shutdown_subsequent_calls(self):
meter_provider = MeterProvider()
with self.assertRaises(AssertionError):
with self.assertLogs(level=WARNING):
meter_provider.shutdown()
with self.assertLogs(level=WARNING):
meter_provider.shutdown()
@patch("opentelemetry.sdk._metrics._logger")
def test_shutdown_race(self, mock_logger):
mock_logger.warning = MockFunc()
meter_provider = MeterProvider()
num_threads = 70
self.run_with_many_threads(
meter_provider.shutdown, num_threads=num_threads
)
self.assertEqual(mock_logger.warning.call_count, num_threads - 1)
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_measurement_collect_callback(
self, mock_sync_measurement_consumer
):
metric_readers = [DummyMetricReader()] * 5
sync_consumer_instance = mock_sync_measurement_consumer()
sync_consumer_instance.collect = MockFunc()
MeterProvider(metric_readers=metric_readers)
for reader in metric_readers:
reader.collect()
self.assertEqual(
sync_consumer_instance.collect.call_count, len(metric_readers)
)
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_creates_sync_measurement_consumer(
self, mock_sync_measurement_consumer
):
MeterProvider()
mock_sync_measurement_consumer.assert_called()
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_register_asynchronous_instrument(
self, mock_sync_measurement_consumer
):
meter_provider = MeterProvider()
meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
meter_provider.get_meter("name").create_observable_counter(
"name", Mock()
)
)
meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
meter_provider.get_meter("name").create_observable_up_down_counter(
"name", Mock()
)
)
meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with(
meter_provider.get_meter("name").create_observable_gauge(
"name", Mock()
)
)
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_consume_measurement_counter(self, mock_sync_measurement_consumer):
sync_consumer_instance = mock_sync_measurement_consumer()
meter_provider = MeterProvider()
counter = meter_provider.get_meter("name").create_counter("name")
counter.add(1)
sync_consumer_instance.consume_measurement.assert_called()
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_consume_measurement_up_down_counter(
self, mock_sync_measurement_consumer
):
sync_consumer_instance = mock_sync_measurement_consumer()
meter_provider = MeterProvider()
counter = meter_provider.get_meter("name").create_up_down_counter(
"name"
)
counter.add(1)
sync_consumer_instance.consume_measurement.assert_called()
@patch("opentelemetry.sdk._metrics.SynchronousMeasurementConsumer")
def test_consume_measurement_histogram(
self, mock_sync_measurement_consumer
):
sync_consumer_instance = mock_sync_measurement_consumer()
meter_provider = MeterProvider()
counter = meter_provider.get_meter("name").create_histogram("name")
counter.record(1)
sync_consumer_instance.consume_measurement.assert_called()
class TestMeter(TestCase):
def setUp(self):
self.meter = Meter(Mock(), Mock())
def test_create_counter(self):
counter = self.meter.create_counter(
"name", unit="unit", description="description"
)
self.assertIsInstance(counter, Counter)
self.assertEqual(counter.name, "name")
def test_create_up_down_counter(self):
up_down_counter = self.meter.create_up_down_counter(
"name", unit="unit", description="description"
)
self.assertIsInstance(up_down_counter, UpDownCounter)
self.assertEqual(up_down_counter.name, "name")
def test_create_observable_counter(self):
observable_counter = self.meter.create_observable_counter(
"name", Mock(), unit="unit", description="description"
)
self.assertIsInstance(observable_counter, ObservableCounter)
self.assertEqual(observable_counter.name, "name")
def test_create_histogram(self):
histogram = self.meter.create_histogram(
"name", unit="unit", description="description"
)
self.assertIsInstance(histogram, Histogram)
self.assertEqual(histogram.name, "name")
def test_create_observable_gauge(self):
observable_gauge = self.meter.create_observable_gauge(
"name", Mock(), unit="unit", description="description"
)
self.assertIsInstance(observable_gauge, ObservableGauge)
self.assertEqual(observable_gauge.name, "name")
def test_create_observable_up_down_counter(self):
observable_up_down_counter = (
self.meter.create_observable_up_down_counter(
"name", Mock(), unit="unit", description="description"
)
)
self.assertIsInstance(
observable_up_down_counter, ObservableUpDownCounter
)
self.assertEqual(observable_up_down_counter.name, "name")
| true | true |
f72d0628837767bf0423dbc36d367e46cce6f424 | 3,323 | py | Python | src/logistic_regression.py | MehdiAbbanaBennani/statistical-optimisation | 0de96661ca7ab857639ad14127b97af39321762e | [
"MIT"
] | null | null | null | src/logistic_regression.py | MehdiAbbanaBennani/statistical-optimisation | 0de96661ca7ab857639ad14127b97af39321762e | [
"MIT"
] | null | null | null | src/logistic_regression.py | MehdiAbbanaBennani/statistical-optimisation | 0de96661ca7ab857639ad14127b97af39321762e | [
"MIT"
] | null | null | null | import numpy as np
from tqdm import tqdm
from gradient import Gradient
class LogisticRegression:
    def __init__(self, type, mu, gradient_param, data, d=100, theta=None):
        """Linear classifier trained with SGD/SAG.

        :param type: loss type, "square" or "logistic".
        :param mu: scalar hyperparameter stored for the gradient routines —
            presumably a regularization strength; TODO(review): confirm.
        :param gradient_param: configuration forwarded to ``Gradient``.
        :param data: dataset mapping; assumed to hold "Xtrain"/"ytrain"/
            "Xtest"/"ytest" arrays — TODO(review): confirm against the loader.
        :param d: weight dimension used when ``theta`` is not supplied.
        :param theta: optional initial weight vector.
        """
        if theta is None:
            # Random initialization, uniform in [-1, 1).
            self.theta = np.random.rand(d) * 2 - 1
        else:
            self.theta = theta
        self.type = type
        self.gradient = Gradient(gradient_param)
        self.mat = data
        self.n_samples = data["Xtrain"].shape[0]
        self.mu = mu
@staticmethod
def sigmoid(z):
return 1 / (1 + np.exp(- z))
def error(self, X, y_true):
N = len(y_true)
return sum([self.single_error(X[i], y_true[i])
for i in range(N)]) / N
    def single_error(self, X, y_true):
        """0/1 error on one sample: 0 if the predicted ±1 label matches, else 1."""
        # y_pred = round(self.predict(X))
        y_pred = self.predict_label(X)
        # Labels are ±1, so |y_true - y_pred| is 0 or 2; halving maps to {0, 1}.
        return abs(y_true - y_pred) / 2
def loss(self, X, y_true):
N = len(y_true)
return sum([self.single_loss(X[i], y_true[i])
for i in range(N)]) / N
    def single_loss(self, X, y_true):
        """Loss of the current model on one sample.

        Supported ``self.type`` values: "square" (squared error) and
        "logistic" (margin form log(1 + exp(-y * f(x))), which assumes
        labels in {-1, +1}).  Any other type falls through and returns None.
        """
        y_pred = self.predict(X)
        if self.type == "square":
            return (y_pred - y_true) ** 2
        if self.type == "logistic":
            return np.log(1 + np.exp(- y_true * y_pred))
        # return - y_true * np.log(y_pred) - (1 - y_true) * np.log(1 - y_pred)
    def predict(self, X):
        """Raw linear score <X, theta>; no sigmoid squashing is applied."""
        # return self.sigmoid(np.dot(X, self.theta))
        return np.dot(X, self.theta)
def predict_label(self, X):
y_pred = self.predict(X)
if y_pred < 0 :
return -1
else :
return 1
    def log(self, log_dict, it, log_freq):
        """Append current train/test loss and error figures to ``log_dict``.

        The iteration axis is recorded in epochs when ``log_freq == "epoch"``
        (iteration count divided by the sample count), otherwise in raw
        iteration counts.
        """
        log_dict["train_losses"].append(self.loss(X=self.mat["Xtrain"],
                                                  y_true=self.mat["ytrain"]))
        log_dict["test_losses"].append(self.loss(X=self.mat["Xtest"],
                                                 y_true=self.mat["ytest"]))
        log_dict["train_errors"].append(self.error(X=self.mat["Xtrain"],
                                                   y_true=self.mat["ytrain"]))
        log_dict["test_errors"].append(self.error(X=self.mat["Xtest"],
                                                  y_true=self.mat["ytest"]))
        if log_freq == "epoch" :
            log_dict["iterations"].append(it / self.n_samples)
        else :
            log_dict["iterations"].append(it)
    def compute_n_iter(self, n_epoch):
        # One epoch corresponds to n_samples // batch_size optimizer steps.
        return n_epoch * (self.n_samples // self.gradient.batch_size)
def log_freq_to_iter(self, log_freq):
if log_freq == "epoch" :
return self.n_samples
else :
return log_freq
def run_optimizer(self, n_epoch, log_freq, optimizer):
log_dict = {"train_losses": [],
"test_losses": [],
"iterations": [],
"train_errors": [],
"test_errors": []}
n_iter = self.compute_n_iter(n_epoch)
for it in tqdm(range(n_iter)):
if optimizer == "sgd" :
self.gradient.sgd_step(model=self, it=it)
if optimizer == "sag":
self.gradient.sag_step(model=self, it=it)
if it % self.log_freq_to_iter(log_freq) == 0:
self.log(log_dict, it, log_freq)
return log_dict | 33.908163 | 82 | 0.520012 | import numpy as np
from tqdm import tqdm
from gradient import Gradient
class LogisticRegression:
def __init__(self, type, mu, gradient_param, data, d=100, theta=None):
if theta is None:
self.theta = np.random.rand(d) * 2 - 1
else:
self.theta = theta
self.type = type
self.gradient = Gradient(gradient_param)
self.mat = data
self.n_samples = data["Xtrain"].shape[0]
self.mu = mu
@staticmethod
def sigmoid(z):
return 1 / (1 + np.exp(- z))
def error(self, X, y_true):
N = len(y_true)
return sum([self.single_error(X[i], y_true[i])
for i in range(N)]) / N
def single_error(self, X, y_true):
y_pred = self.predict_label(X)
return abs(y_true - y_pred) / 2
def loss(self, X, y_true):
N = len(y_true)
return sum([self.single_loss(X[i], y_true[i])
for i in range(N)]) / N
def single_loss(self, X, y_true):
y_pred = self.predict(X)
if self.type == "square":
return (y_pred - y_true) ** 2
if self.type == "logistic":
return np.log(1 + np.exp(- y_true * y_pred))
def predict(self, X):
return np.dot(X, self.theta)
def predict_label(self, X):
y_pred = self.predict(X)
if y_pred < 0 :
return -1
else :
return 1
def log(self, log_dict, it, log_freq):
log_dict["train_losses"].append(self.loss(X=self.mat["Xtrain"],
y_true=self.mat["ytrain"]))
log_dict["test_losses"].append(self.loss(X=self.mat["Xtest"],
y_true=self.mat["ytest"]))
log_dict["train_errors"].append(self.error(X=self.mat["Xtrain"],
y_true=self.mat["ytrain"]))
log_dict["test_errors"].append(self.error(X=self.mat["Xtest"],
y_true=self.mat["ytest"]))
if log_freq == "epoch" :
log_dict["iterations"].append(it / self.n_samples)
else :
log_dict["iterations"].append(it)
def compute_n_iter(self, n_epoch):
return n_epoch * (self.n_samples // self.gradient.batch_size)
def log_freq_to_iter(self, log_freq):
if log_freq == "epoch" :
return self.n_samples
else :
return log_freq
def run_optimizer(self, n_epoch, log_freq, optimizer):
log_dict = {"train_losses": [],
"test_losses": [],
"iterations": [],
"train_errors": [],
"test_errors": []}
n_iter = self.compute_n_iter(n_epoch)
for it in tqdm(range(n_iter)):
if optimizer == "sgd" :
self.gradient.sgd_step(model=self, it=it)
if optimizer == "sag":
self.gradient.sag_step(model=self, it=it)
if it % self.log_freq_to_iter(log_freq) == 0:
self.log(log_dict, it, log_freq)
return log_dict | true | true |
f72d0638d4148f7ac1513563b596e331e42a1b8a | 1,461 | py | Python | Advance_Python/Linked_List/Search_Ele_In_SLL.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | Advance_Python/Linked_List/Search_Ele_In_SLL.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | Advance_Python/Linked_List/Search_Ele_In_SLL.py | siddharth-143/Python | 293f4643a3a13e3b82d23fd8922db54dbb0f12bc | [
"MIT"
] | null | null | null | """
Python program to search for an element in the linked list using recursion
"""
class Node:
    """A single element of a singly linked list."""
    def __init__(self, data):
        self.data = data      # payload stored in this node
        self.next = None      # following node; None marks the end


class Linked_list:
    """Singly linked list with O(1) tail append and recursive search."""

    def __init__(self):
        # head: first node of the list; last_node: tail, kept so that
        # append never has to walk the list.
        self.head = None
        self.last_node = None

    def append(self, data):
        """Attach *data* at the tail of the list."""
        fresh = Node(data)
        if self.last_node is None:
            # Empty list: the new node becomes both head and tail.
            self.head = fresh
        else:
            self.last_node.next = fresh
        self.last_node = fresh

    def display(self):
        """Print every element on one line, separated by single spaces."""
        cursor = self.head
        while cursor is not None:
            print(cursor.data, end=" ")
            cursor = cursor.next

    def find_index(self, key):
        """Return the 0-based index of *key* in the list, or -1 if absent."""
        return self.find_index_helper(key, 0, self.head)

    def find_index_helper(self, key, start, node):
        """Recursive worker for find_index: *start* is the index of *node*."""
        if node is None:
            return -1
        if node.data == key:
            return start
        return self.find_index_helper(key, start + 1, node.next)
# --- Interactive driver ---------------------------------------------------
# Reads n integers into the list, shows it, then searches for a value.
a_llist = Linked_list()
n = int(input("How many elements would you like to add : "))
for _ in range(n):
    data = int(input("Enter data item : "))
    a_llist.append(data)
print("The linked list : ", end=" ")  # fixed typo: was "lined list"
a_llist.display()
print()
key = int(input("What data item would you like to search : "))
index = a_llist.find_index(key)
if index == -1:
    print(f"{key} was not found")
else:
    # Fixed missing spaces: the original printed e.g. "5is at index2."
    print(f"{key} is at index {index}.")
| 26.563636 | 78 | 0.590691 |
class Node:
def __init__(self, data):
self.data = data
self.next = None
class Linked_list:
def __init__(self):
self.head = None
self.last_node = None
def append(self, data):
if self.last_node is None:
self.head = Node(data)
self.last_node = self.head
else:
self.last_node.next = Node(data)
self.last_node = self.last_node.next
def display(self):
current = self.head
while current is not None:
print(current.data, end=" ")
current = current.next
def find_index(self, key):
return self.find_index_helper(key, 0, self.head)
def find_index_helper(self, key, start, node):
if node is None:
return -1
if node.data == key:
return start
else:
return self.find_index_helper(key, start + 1, node.next)
a_llist = Linked_list()
n = int(input("How many elements would you like to add : "))
for i in range(n):
data = int(input("Enter data item : "))
a_llist.append(data)
print("The lined list : ", end=" ")
a_llist.display()
print()
key = int(input("What data item would you like to search : "))
index = a_llist.find_index(key)
if index == -1:
print(str(key) + ' was not found')
else:
print(str(key) + "is at index" + str(index) + ".")
| true | true |
f72d0862c33d21e1bd9da9c2c2fa5d10f09f06f4 | 4,140 | py | Python | pystratis/api/federation/tests/test_federation.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | pystratis/api/federation/tests/test_federation.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | pystratis/api/federation/tests/test_federation.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | import pytest
from pytest_mock import MockerFixture
from pystratis.api.federation.responsemodels import *
from pystratis.api.federation import Federation
from pystratis.core.networks import StraxMain, CirrusMain
def test_all_strax_endpoints_implemented(strax_swagger_json):
    """Every Federation route documented in the Strax swagger must be implemented."""
    documented = [route.lower() for route in strax_swagger_json['paths']]
    federation_prefix = Federation.route + '/'
    for route in documented:
        if federation_prefix in route:
            assert route in Federation.endpoints
def test_all_cirrus_endpoints_implemented(cirrus_swagger_json):
    """Every Federation route documented in the Cirrus swagger must be implemented."""
    documented = [route.lower() for route in cirrus_swagger_json['paths']]
    federation_prefix = Federation.route + '/'
    for route in documented:
        if federation_prefix in route:
            assert route in Federation.endpoints
def test_all_interfluxstrax_endpoints_implemented(interfluxstrax_swagger_json):
    """Every Federation route in the InterFlux Strax swagger must be implemented."""
    documented = [route.lower() for route in interfluxstrax_swagger_json['paths']]
    federation_prefix = Federation.route + '/'
    for route in documented:
        if federation_prefix in route:
            assert route in Federation.endpoints
def test_all_interfluxcirrus_endpoints_implemented(interfluxcirrus_swagger_json):
    """Every Federation route in the InterFlux Cirrus swagger must be implemented."""
    documented = [route.lower() for route in interfluxcirrus_swagger_json['paths']]
    federation_prefix = Federation.route + '/'
    for route in documented:
        if federation_prefix in route:
            assert route in Federation.endpoints
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_reconstruct(mocker: MockerFixture, network):
    """reconstruct() relays the node's PUT response verbatim."""
    expected = "Reconstruction flag set, please restart the node."
    mocker.patch.object(Federation, 'put', return_value=expected)
    federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
    assert federation.reconstruct() == expected
    # noinspection PyUnresolvedReferences
    federation.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_members_current(mocker: MockerFixture, network, generate_compressed_pubkey, get_datetime):
    """members_current() deserializes the node payload into a
    FederationMemberDetailedModel without altering any field."""
    # Canned payload mirroring the node's JSON response for the current member.
    data = {
        "pollStartBlockHeight": None,
        "pollNumberOfVotesAcquired": None,
        "pollFinishedBlockHeight": None,
        "pollWillFinishInBlocks": None,
        "pollExecutedBlockHeight": None,
        "memberWillStartMiningAtBlockHeight": None,
        "memberWillStartEarningRewardsEstimateHeight": None,
        "pollType": None,
        "rewardEstimatePerBlock": 0.05,
        "pubkey": generate_compressed_pubkey,
        "collateralAmount": 50000,
        "lastActiveTime": get_datetime(5),
        "periodOfInactivity": "00:02:32.9200000"
    }
    # Stub the HTTP layer so no node is contacted.
    mocker.patch.object(Federation, 'get', return_value=data)
    federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
    response = federation.members_current()
    assert response == FederationMemberDetailedModel(**data)
    # noinspection PyUnresolvedReferences
    federation.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_member(mocker: MockerFixture, network, generate_compressed_pubkey, get_datetime):
    """members() maps each payload entry to a FederationMemberModel.

    NOTE(review): the function name is singular but it exercises the plural
    ``members()`` endpoint — presumably just a naming slip; confirm before
    renaming anything test collectors depend on.
    """
    # Three canned member records, distinguished only by inactivity period.
    data = [
        {
            "pubkey": generate_compressed_pubkey,
            "collateralAmount": 50000,
            "lastActiveTime": get_datetime(5),
            "periodOfInactivity": "00:02:32.9200000"
        },
        {
            "pubkey": generate_compressed_pubkey,
            "collateralAmount": 50000,
            "lastActiveTime": get_datetime(5),
            "periodOfInactivity": "00:02:33.9200000"
        },
        {
            "pubkey": generate_compressed_pubkey,
            "collateralAmount": 50000,
            "lastActiveTime": get_datetime(5),
            "periodOfInactivity": "00:02:34.9200000"
        }
    ]
    # Stub the HTTP layer so no node is contacted.
    mocker.patch.object(Federation, 'get', return_value=data)
    federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
    response = federation.members()
    assert response == [FederationMemberModel(**x) for x in data]
    # noinspection PyUnresolvedReferences
    federation.get.assert_called_once()
| 38.333333 | 100 | 0.700966 | import pytest
from pytest_mock import MockerFixture
from pystratis.api.federation.responsemodels import *
from pystratis.api.federation import Federation
from pystratis.core.networks import StraxMain, CirrusMain
def test_all_strax_endpoints_implemented(strax_swagger_json):
paths = [key.lower() for key in strax_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_cirrus_endpoints_implemented(cirrus_swagger_json):
paths = [key.lower() for key in cirrus_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_interfluxstrax_endpoints_implemented(interfluxstrax_swagger_json):
paths = [key.lower() for key in interfluxstrax_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_interfluxcirrus_endpoints_implemented(interfluxcirrus_swagger_json):
paths = [key.lower() for key in interfluxcirrus_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_reconstruct(mocker: MockerFixture, network):
data = "Reconstruction flag set, please restart the node."
mocker.patch.object(Federation, 'put', return_value=data)
federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = federation.reconstruct()
assert response == data
federation.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_members_current(mocker: MockerFixture, network, generate_compressed_pubkey, get_datetime):
data = {
"pollStartBlockHeight": None,
"pollNumberOfVotesAcquired": None,
"pollFinishedBlockHeight": None,
"pollWillFinishInBlocks": None,
"pollExecutedBlockHeight": None,
"memberWillStartMiningAtBlockHeight": None,
"memberWillStartEarningRewardsEstimateHeight": None,
"pollType": None,
"rewardEstimatePerBlock": 0.05,
"pubkey": generate_compressed_pubkey,
"collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:32.9200000"
}
mocker.patch.object(Federation, 'get', return_value=data)
federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = federation.members_current()
assert response == FederationMemberDetailedModel(**data)
federation.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_member(mocker: MockerFixture, network, generate_compressed_pubkey, get_datetime):
    """members() must deserialize each entry of the GET payload into a FederationMemberModel.

    NOTE(review): the function is named ``test_member`` (singular) but exercises
    ``federation.members()`` -- consider renaming to ``test_members`` for clarity.
    """
    # Three members differing only in periodOfInactivity, to prove element-wise mapping.
    data = [
        {
            "pubkey": generate_compressed_pubkey,
            "collateralAmount": 50000,
            "lastActiveTime": get_datetime(5),
            "periodOfInactivity": "00:02:32.9200000"
        },
        {
            "pubkey": generate_compressed_pubkey,
            "collateralAmount": 50000,
            "lastActiveTime": get_datetime(5),
            "periodOfInactivity": "00:02:33.9200000"
        },
        {
            "pubkey": generate_compressed_pubkey,
            "collateralAmount": 50000,
            "lastActiveTime": get_datetime(5),
            "periodOfInactivity": "00:02:34.9200000"
        }
    ]
    # Stub the HTTP layer so only the model mapping is under test.
    mocker.patch.object(Federation, 'get', return_value=data)
    federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
    response = federation.members()
    assert response == [FederationMemberModel(**x) for x in data]
    federation.get.assert_called_once()
| true | true |
f72d089f873a7c0b075b4dd30a15b63980100580 | 20,269 | py | Python | tools/ami-creator/scripts/win2019_cuda11_installer.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | 10 | 2019-08-19T17:12:52.000Z | 2021-11-07T21:25:32.000Z | tools/ami-creator/scripts/win2019_cuda11_installer.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | 16 | 2019-10-22T17:07:40.000Z | 2022-02-08T23:33:27.000Z | tools/ami-creator/scripts/win2019_cuda11_installer.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | 15 | 2019-08-25T18:44:54.000Z | 2021-11-07T21:25:25.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Dependency installer for Windows"""
__author__ = 'Pedro Larroy, Chance Bair, Joe Evans'
__version__ = '0.4'
import argparse
import errno
import logging
import os
import psutil
import shutil
import subprocess
import urllib
import stat
import tempfile
import zipfile
from time import sleep
from urllib.error import HTTPError
import logging
from subprocess import check_output, check_call, call
import re
import sys
import urllib.request
import contextlib
import glob
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
log = logging.getLogger(__name__)
# Direct-download URLs for the prebuilt binary dependencies installed below.
DEPS = {
    'openblas': 'https://windows-post-install.s3-us-west-2.amazonaws.com/OpenBLAS-windows-v0_2_19.zip',
    'opencv': 'https://windows-post-install.s3-us-west-2.amazonaws.com/opencv-windows-4.1.2-vc14_vc15.zip',
    'cudnn7': 'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-10.2-windows10-x64-v7.6.5.32.zip',
    'cudnn8': 'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-11.0-windows-x64-v8.0.3.33.zip',
    'perl': 'http://strawberryperl.com/download/5.30.1.1/strawberry-perl-5.30.1.1-64bit.msi',
    'clang': 'https://github.com/llvm/llvm-project/releases/download/llvmorg-9.0.1/LLVM-9.0.1-win64.exe',
}
# Upper bound (seconds) for any single subprocess launched via run_command().
DEFAULT_SUBPROCESS_TIMEOUT = 3600
@contextlib.contextmanager
def remember_cwd():
    """Context manager that restores the original working directory on exit."""
    saved_dir = os.getcwd()
    try:
        yield
    finally:
        os.chdir(saved_dir)
def retry(target_exception, tries=4, delay_s=1, backoff=2):
    """Decorator: retry the wrapped callable with exponential backoff.

    Based on http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    (original: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry).

    :param target_exception: exception (or tuple of exceptions) that triggers a retry
    :param tries: total number of attempts (not retries) before giving up
    :param delay_s: initial pause between attempts, in seconds
    :param backoff: multiplier applied to the pause after each failed attempt
    """
    import time
    from functools import wraps

    def decorated_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            pause = delay_s
            # tries - 1 guarded attempts; the final attempt below may raise freely.
            for _ in range(tries - 1):
                try:
                    return f(*args, **kwargs)
                except target_exception as e:
                    logging.warning("Exception: %s, Retrying in %d seconds...", str(e), pause)
                    time.sleep(pause)
                    pause *= backoff
            return f(*args, **kwargs)
        return f_retry
    return decorated_retry
@retry((ValueError, OSError, HTTPError), tries=5, delay_s=2, backoff=5)
def download(url, dest=None, progress=False) -> str:
    """Download *url* and return the local file path, retrying on transient errors.

    :param url: URL to fetch.
    :param dest: target file path, or an existing directory (the remote file
        name is appended), or None for an auto-named temporary file.
    :param progress: when True, render a progressbar during the transfer.
    :returns: path of the downloaded (or already-present) file.
    """
    from urllib.request import urlopen
    from urllib.parse import (urlparse, urlunparse)
    import progressbar  # third-party; only needed for the optional progress bar
    import http.client

    # Reporthook adapter for urlretrieve: lazily creates a progressbar once the
    # total size is known, then updates/finishes it per chunk.
    class ProgressCB():
        def __init__(self):
            self.pbar = None

        def __call__(self, block_num, block_size, total_size):
            if not self.pbar and total_size > 0:
                self.pbar = progressbar.bar.ProgressBar(max_value=total_size)
            downloaded = block_num * block_size
            if self.pbar:
                if downloaded < total_size:
                    self.pbar.update(downloaded)
                else:
                    self.pbar.finish()
    # Resolve the destination: directory -> dir/<remote file name>; else as given.
    if dest and os.path.isdir(dest):
        local_file = os.path.split(urlparse(url).path)[1]
        local_path = os.path.normpath(os.path.join(dest, local_file))
    else:
        local_path = dest
    # Skip the transfer when a local file already matches the remote size.
    with urlopen(url) as c:
        content_length = c.getheader('content-length')
        length = int(content_length) if content_length and isinstance(c, http.client.HTTPResponse) else None
        if length and local_path and os.path.exists(local_path) and os.stat(local_path).st_size == length:
            log.debug(f"download('{url}'): Already downloaded.")
            return local_path
    log.debug(f"download({url}, {local_path}): downloading {length} bytes")
    if local_path:
        # Download to a temp file first, then move into place atomically-ish,
        # so a partially transferred file never sits at local_path.
        with tempfile.NamedTemporaryFile(delete=False) as tmpfd:
            urllib.request.urlretrieve(url, filename=tmpfd.name, reporthook=ProgressCB() if progress else None)
            shutil.move(tmpfd.name, local_path)
    else:
        (local_path, _) = urllib.request.urlretrieve(url, reporthook=ProgressCB())
    log.debug(f"download({url}, {local_path}'): done.")
    return local_path
# Runs a command on the host and returns its decoded stdout; shell is off by default.
# TODO: Move timeout to args
def run_command(*args, shell=False, timeout=DEFAULT_SUBPROCESS_TIMEOUT, **kwargs):
    """Run a command via subprocess.check_output and return its stdout.

    CRLF line endings are normalized to LF.  A non-zero exit status is
    re-raised as RuntimeError with the command, code and captured output.
    """
    logging.info("Issuing command: {}".format(args))
    try:
        raw = subprocess.check_output(*args, shell=shell, timeout=timeout)
    except subprocess.CalledProcessError as e:
        raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    res = raw.decode("utf-8").replace("\r\n", "\n")
    logging.info("Output: {}".format(res))
    return res
# Copies a source path to a destination: recursively for directories, plain copy for files.
def copy(src, dest):
    """Copy *src* to *dest*, handling both directory trees and single files."""
    try:
        shutil.copytree(src, dest)
    except OSError as e:
        if e.errno != errno.ENOTDIR:
            raise RuntimeError("copy return with error: {}".format(e))
        # Source turned out to be a single file rather than a directory.
        shutil.copy(src, dest)
    logging.info("Moved {} to {}".format(src, dest))
# Workaround for windows readonly attribute error.
# Intended as an onerror hook for shutil.rmtree.
def on_rm_error(func, path, exc_info):
    # path contains the path of the file that couldn't be removed
    # let's just assume that it's read-only and unlink it.
    os.chmod(path, stat.S_IWRITE)
    os.unlink(path)
def reboot_system():
    """Schedule a reboot in 5 seconds and terminate this script successfully."""
    logging.info("Rebooting system now...")
    # Windows shutdown.exe: -r = reboot, -t 5 = after 5 seconds.
    run_command("shutdown -r -t 5")
    exit(0)
def shutdown_system():
    """Schedule a power-off and terminate this script successfully."""
    logging.info("Shutting down system now...")
    # wait 20 sec so we can capture the install logs
    run_command("shutdown -s -t 20")
    exit(0)
def install_vs():
    """Install Visual Studio 2019 Community with the workloads required by CI.

    Returns False when VS 2019 is already present, True after an install.
    Installer exit code 3010 ("success, restart required") is treated as success.
    Because the bootstrapper's --wait can return while component installs are
    still running, the function additionally polls for installer processes.
    """
    if os.path.exists("C:\\Program Files (x86)\\Microsoft Visual Studio\\2019"):
        logging.info("MSVS already installed, skipping.")
        return False
    # Visual Studio 2019
    # Components: https://docs.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-community?view=vs-2019#visual-studio-core-editor-included-with-visual-studio-community-2019
    logging.info("Installing Visual Studio 2019...")
    vs_file_path = download('https://windows-post-install.s3-us-west-2.amazonaws.com/vs_community__1246179388.1585201415.exe')
    # download() saves without an extension; the bootstrapper must end in .exe to run.
    run_command("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(vs_file_path,
                                                                             vs_file_path.split('\\')[-1]), shell=True)
    vs_file_path = vs_file_path + '.exe'
    logging.info("Installing VisualStudio 2019.....")
    ret = call(vs_file_path +
               ' --add Microsoft.VisualStudio.Workload.ManagedDesktop'
               ' --add Microsoft.VisualStudio.Workload.NetCoreTools'
               ' --add Microsoft.VisualStudio.Workload.NetWeb'
               ' --add Microsoft.VisualStudio.Workload.Node'
               ' --add Microsoft.VisualStudio.Workload.Office'
               ' --add Microsoft.VisualStudio.Component.TypeScript.2.0'
               ' --add Microsoft.VisualStudio.Component.TestTools.WebLoadTest'
               ' --add Component.GitHub.VisualStudio'
               ' --add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core'
               ' --add Microsoft.VisualStudio.Component.Static.Analysis.Tools'
               ' --add Microsoft.VisualStudio.Component.VC.CMake.Project'
               ' --add Microsoft.VisualStudio.Component.VC.140'
               ' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.Desktop'
               ' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.UWP'
               ' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.UWP.Native'
               ' --add Microsoft.VisualStudio.ComponentGroup.Windows10SDK.18362'
               ' --add Microsoft.VisualStudio.Component.Windows10SDK.16299'
               ' --wait'
               ' --passive'
               ' --norestart'
               )
    if ret == 3010 or ret == 0:
        # 3010 is restart required
        logging.info("VS install successful.")
    else:
        raise RuntimeError("VS failed to install, exit status {}".format(ret))

    # Workaround for --wait sometimes ignoring the subprocesses doing component installs
    def vs_still_installing():
        # Truthy while any known VS installer process is alive.
        return {'vs_installer.exe', 'vs_installershell.exe', 'vs_setup_bootstrapper.exe'} & set(map(lambda process: process.name(), psutil.process_iter()))
    timer = 0
    while vs_still_installing() and timer < DEFAULT_SUBPROCESS_TIMEOUT:
        logging.warning("VS installers still running for %d s", timer)
        if timer % 60 == 0:
            logging.info("Waiting for Visual Studio to install for the last {} seconds".format(str(timer)))
        sleep(1)
        timer += 1
    if vs_still_installing():
        logging.warning("VS install still running after timeout (%d)", DEFAULT_SUBPROCESS_TIMEOUT)
    else:
        logging.info("Visual studio install complete.")
    return True
def install_perl():
    """Install Strawberry Perl from its MSI unless it is already present.

    :returns: True when an install was performed, False when skipped.
    """
    if os.path.exists("C:\\Strawberry\\perl\\bin\\perl.exe"):
        logging.info("Perl already installed, skipping.")
        return False
    logging.info("Installing Perl")
    with tempfile.TemporaryDirectory() as tmpdir:
        perl_file_path = download(DEPS['perl'], tmpdir)
        # BUG FIX: the executable was previously spelled 'msiexec ' (trailing
        # space), which Windows cannot resolve to msiexec.exe.
        # NOTE(review): '/n' is not a documented msiexec switch ('/qn' is the
        # quiet-install flag) -- confirm the intended option before changing it.
        check_call(['msiexec', '/n', '/passive', '/i', perl_file_path])
    logging.info("Perl install complete")
    return True
def install_clang():
    """Silently install LLVM/Clang 9.0.1 to C:\\Program Files\\LLVM (idempotent).

    :returns: True when an install was performed, False when skipped.
    """
    if os.path.exists("C:\\Program Files\\LLVM"):
        logging.info("Clang already installed, skipping.")
        return False
    logging.info("Installing Clang")
    with tempfile.TemporaryDirectory() as tmpdir:
        clang_file_path = download(DEPS['clang'], tmpdir)
        # NSIS installer flags: /S = silent, /D=<dir> sets the install directory.
        run_command(clang_file_path + " /S /D=C:\\Program Files\\LLVM")
    logging.info("Clang install complete")
    return True
def install_openblas():
    """Unzip prebuilt OpenBLAS under C:\\Program Files and persist OpenBLAS_HOME.

    :returns: True when an install was performed, False when skipped.
    """
    if os.path.exists("C:\\Program Files\\OpenBLAS-windows-v0_2_19"):
        logging.info("OpenBLAS already installed, skipping.")
        return False
    logging.info("Installing OpenBLAS")
    local_file = download(DEPS['openblas'])
    with zipfile.ZipFile(local_file, 'r') as zip:
        zip.extractall("C:\\Program Files")
    # Set OpenBLAS_HOME machine-wide (registry) so build jobs can locate the libs.
    run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name OpenBLAS_HOME -Value 'C:\\Program Files\\OpenBLAS-windows-v0_2_19'")
    logging.info("Openblas Install complete")
    return True
def install_mkl():
    """Silently install Intel MKL 2019.3.203 (idempotent).

    :returns: True when an install was performed, False when skipped.
    """
    if os.path.exists("C:\\Program Files (x86)\\IntelSWTools"):
        logging.info("Intel MKL already installed, skipping.")
        return False
    logging.info("Installing MKL 2019.3.203...")
    file_path = download("http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/15247/w_mkl_2019.3.203.exe")
    # Silent install; the installer writes its own log to C:\mkl-install-log.txt.
    run_command("{} --silent --remove-extracted-files yes --a install -output=C:\mkl-install-log.txt -eula=accept".format(file_path))
    logging.info("MKL Install complete")
    return True
def install_opencv():
    """Extract the prebuilt OpenCV bundle to C:\\Program Files\\opencv and set OpenCV_DIR.

    :returns: True when an install was performed, False when skipped.
    """
    if os.path.exists("C:\\Program Files\\opencv"):
        logging.info("OpenCV already installed, skipping.")
        return False
    logging.info("Installing OpenCV")
    with tempfile.TemporaryDirectory() as tmpdir:
        local_file = download(DEPS['opencv'])
        with zipfile.ZipFile(local_file, 'r') as zip:
            zip.extractall(tmpdir)
        # Only the prebuilt 'build' subtree is needed.
        copy(f'{tmpdir}\\opencv\\build', r'c:\Program Files\opencv')
    # Set OpenCV_DIR machine-wide (registry) so CMake can find the package.
    run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name OpenCV_DIR -Value 'C:\\Program Files\\opencv'")
    logging.info("OpenCV install complete")
    return True
def install_cudnn7():
    """Copy cuDNN 7.6.5 binaries/headers/libs into the CUDA 10.2 toolkit tree.

    :returns: True when an install was performed, False when skipped.
    """
    if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin\\cudnn64_7.dll"):
        logging.info("cuDNN7 already installed, skipping.")
        return False
    # cuDNN
    logging.info("Installing cuDNN7")
    with tempfile.TemporaryDirectory() as tmpdir:
        local_file = download(DEPS['cudnn7'])
        with zipfile.ZipFile(local_file, 'r') as zip:
            zip.extractall(tmpdir)
        # Merge the archive's cuda/{bin,include,lib/x64} into the installed toolkit.
        for f in glob.glob(tmpdir+"\\cuda\\bin\\*"):
            copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin")
        for f in glob.glob(tmpdir+"\\cuda\\include\\*.h"):
            copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\include")
        for f in glob.glob(tmpdir+"\\cuda\\lib\\x64\\*"):
            copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\lib\\x64")
    logging.info("cuDNN7 install complete")
    return True
def install_cudnn8():
    """Copy cuDNN 8.0.3 binaries/headers/libs into the CUDA 11.0 toolkit tree.

    :returns: True when an install was performed, False when skipped.
    """
    if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin\\cudnn64_8.dll"):
        # BUG FIX: message previously said "cuDNN7" in the cuDNN8 installer.
        logging.info("cuDNN8 already installed, skipping.")
        return False
    # cuDNN
    logging.info("Installing cuDNN8")
    with tempfile.TemporaryDirectory() as tmpdir:
        local_file = download(DEPS['cudnn8'])
        with zipfile.ZipFile(local_file, 'r') as zip:
            zip.extractall(tmpdir)
        # Merge the archive's cuda/{bin,include,lib/x64} into the installed toolkit.
        for f in glob.glob(tmpdir+"\\cuda\\bin\\*"):
            copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin")
        for f in glob.glob(tmpdir+"\\cuda\\include\\*.h"):
            copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\include")
        for f in glob.glob(tmpdir+"\\cuda\\lib\\x64\\*"):
            copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\lib\\x64")
    logging.info("cuDNN8 install complete")
    return True
def instance_family():
    """Return the EC2 instance family (e.g. 'p3' from 'p3.2xlarge') via instance metadata.

    NOTE(review): requires the EC2 metadata host 'instance-data' to resolve --
    only works when running on an AWS instance.
    """
    return urllib.request.urlopen('http://instance-data/latest/meta-data/instance-type').read().decode().split('.')[0]
# CUDA sub-package names accepted by the network installer's silent command line.
# Currently unused: the component-selective '-s <pkgs>' invocations below are
# commented out in favor of a full '-s' install.
CUDA_COMPONENTS=[
    'nvcc', 'cublas', 'cublas_dev', 'cudart', 'cufft', 'cufft_dev', 'curand', 'curand_dev', 'cusolver',
    'cusolver_dev', 'cusparse', 'cusparse_dev', 'npp', 'npp_dev', 'nvrtc', 'nvrtc_dev', 'nvml_dev'
]
def install_cuda110():
    """Silently install the CUDA 11.0.3 toolkit via the network installer.

    :returns: True when an install was performed (caller should reboot),
        False when CUDA 11.0 is already present.
    """
    if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin"):
        logging.info("CUDA 11.0 already installed, skipping.")
        return False
    # BUG FIX: log message previously read "Downloadinng".
    logging.info("Downloading CUDA 11.0...")
    cuda_file_path = download(
        'http://developer.download.nvidia.com/compute/cuda/11.0.3/network_installers/cuda_11.0.3_win10_network.exe')
    # download() saves without an extension; the installer must end in .exe to run.
    try:
        check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(cuda_file_path,
                                                                                cuda_file_path.split('\\')[-1]), shell=True)
    except subprocess.CalledProcessError as e:
        # NOTE(review): the failure is logged but the '.exe' suffix is appended
        # regardless, so a failed rename makes the next call fail -- confirm intent.
        logging.exception("Rename file failed")
    cuda_file_path = cuda_file_path + '.exe'
    logging.info("Installing CUDA 11.0...")
    check_call(cuda_file_path + ' -s')
    #check_call(cuda_file_path + ' -s ' + " ".join([p + "_11.0" for p in CUDA_COMPONENTS]))
    logging.info("Done installing CUDA 11.0.")
    return True
def install_cuda102():
    """Silently install the CUDA 10.2 toolkit plus its patch 1 via network installers.

    :returns: True when an install was performed (caller should reboot),
        False when CUDA 10.2 is already present.
    """
    if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin"):
        logging.info("CUDA 10.2 already installed, skipping.")
        return False
    logging.info("Downloading CUDA 10.2...")
    cuda_file_path = download(
        'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/network_installers/cuda_10.2.89_win10_network.exe')
    # download() saves without an extension; the installer must end in .exe to run.
    try:
        check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(cuda_file_path,
                                                                                cuda_file_path.split('\\')[-1]), shell=True)
    except subprocess.CalledProcessError as e:
        logging.exception("Rename file failed")
    cuda_file_path = cuda_file_path + '.exe'
    logging.info("Installing CUDA 10.2...")
    check_call(cuda_file_path + ' -s')
    #check_call(cuda_file_path + ' -s ' + " ".join([p + "_10.2" for p in CUDA_COMPONENTS]))
    logging.info("Downloading CUDA 10.2 patch...")
    patch_file_path = download(
        'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/patches/1/cuda_10.2.1_win10.exe')
    try:
        check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(patch_file_path,
                                                                                patch_file_path.split('\\')[-1]), shell=True)
    except subprocess.CalledProcessError as e:
        logging.exception("Rename patch failed")
    patch_file_path = patch_file_path + '.exe'
    logging.info("Installing CUDA patch...")
    check_call(patch_file_path + ' -s ')
    logging.info("Done installing CUDA 10.2 and patches.")
    return True
def schedule_aws_userdata():
    """Re-arm EC2Launch so instance userdata executes again on the next boot."""
    logging.info("Scheduling AWS init so userdata will run on next boot...")
    run_command("PowerShell C:\\ProgramData\\Amazon\\EC2-Windows\\Launch\\Scripts\\InitializeInstance.ps1 -Schedule")
def add_paths():
    """Append toolchain directories (Windows Kits, OpenBLAS, LLVM, OpenCV) to the machine PATH."""
    # TODO: Add python paths (python -> C:\\Python37\\python.exe, python2 -> C:\\Python27\\python.exe)
    logging.info("Adding Windows Kits to PATH...")
    # Read the current machine-wide PATH from the registry.
    current_path = run_command(
        "PowerShell (Get-Itemproperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name Path).Path")
    current_path = current_path.rstrip()
    logging.debug("current_path: {}".format(current_path))
    new_path = current_path + \
        ";C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0.16299.0\\x86;C:\\Program Files\\OpenBLAS-windows-v0_2_19\\bin;C:\\Program Files\\LLVM\\bin;C:\\Program Files\\opencv\\bin;C:\\Program Files\\opencv\\x64\\vc15\\bin"
    logging.debug("new_path: {}".format(new_path))
    # Write the amended PATH back to the registry (takes effect for new sessions).
    run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name Path -Value '" + new_path + "'")
def script_name() -> str:
    """:returns: script name with leading paths removed"""
    return os.path.basename(sys.argv[0])
def remove_install_task():
    """Unregister the 'Stage2Install' scheduled task so this script won't run again."""
    logging.info("Removing stage2 startup task...")
    run_command("PowerShell Unregister-ScheduledTask -TaskName 'Stage2Install' -Confirm:$false")
def main():
    """Stage-2 AMI setup: install the full toolchain, fix PATH, then power off for imaging."""
    logging.getLogger().setLevel(os.environ.get('LOGLEVEL', logging.DEBUG))
    logging.basicConfig(filename="C:\\install.log", format='{}: %(asctime)sZ %(levelname)s %(message)s'.format(script_name()))
    # install all necessary software and reboot after some components
    # for CUDA, the last version you install will be the default, based on PATH variable
    if install_cuda110():
        reboot_system()
    install_cudnn8()
    #if install_cuda102():
    #    reboot_system()
    #install_cudnn7()
    if install_vs():
        reboot_system()
    install_openblas()
    install_mkl()
    install_opencv()
    install_perl()
    install_clang()
    add_paths()
    remove_install_task()
    schedule_aws_userdata()
    # Power off so the instance can be snapshotted into an AMI.
    shutdown_system()
if __name__ == "__main__":
    # Propagate main()'s return value (None -> exit code 0) to the OS.
    exit(main())
| 42.852008 | 227 | 0.660763 |
__author__ = 'Pedro Larroy, Chance Bair, Joe Evans'
__version__ = '0.4'
import argparse
import errno
import logging
import os
import psutil
import shutil
import subprocess
import urllib
import stat
import tempfile
import zipfile
from time import sleep
from urllib.error import HTTPError
import logging
from subprocess import check_output, check_call, call
import re
import sys
import urllib.request
import contextlib
import glob
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
log = logging.getLogger(__name__)
DEPS = {
'openblas': 'https://windows-post-install.s3-us-west-2.amazonaws.com/OpenBLAS-windows-v0_2_19.zip',
'opencv': 'https://windows-post-install.s3-us-west-2.amazonaws.com/opencv-windows-4.1.2-vc14_vc15.zip',
'cudnn7': 'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-10.2-windows10-x64-v7.6.5.32.zip',
'cudnn8': 'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-11.0-windows-x64-v8.0.3.33.zip',
'perl': 'http://strawberryperl.com/download/5.30.1.1/strawberry-perl-5.30.1.1-64bit.msi',
'clang': 'https://github.com/llvm/llvm-project/releases/download/llvmorg-9.0.1/LLVM-9.0.1-win64.exe',
}
DEFAULT_SUBPROCESS_TIMEOUT = 3600
@contextlib.contextmanager
def remember_cwd():
curdir = os.getcwd()
try:
yield
finally:
os.chdir(curdir)
def retry(target_exception, tries=4, delay_s=1, backoff=2):
import time
from functools import wraps
def decorated_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay_s
while mtries > 1:
try:
return f(*args, **kwargs)
except target_exception as e:
logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry
return decorated_retry
@retry((ValueError, OSError, HTTPError), tries=5, delay_s=2, backoff=5)
def download(url, dest=None, progress=False) -> str:
from urllib.request import urlopen
from urllib.parse import (urlparse, urlunparse)
import progressbar
import http.client
class ProgressCB():
def __init__(self):
self.pbar = None
def __call__(self, block_num, block_size, total_size):
if not self.pbar and total_size > 0:
self.pbar = progressbar.bar.ProgressBar(max_value=total_size)
downloaded = block_num * block_size
if self.pbar:
if downloaded < total_size:
self.pbar.update(downloaded)
else:
self.pbar.finish()
if dest and os.path.isdir(dest):
local_file = os.path.split(urlparse(url).path)[1]
local_path = os.path.normpath(os.path.join(dest, local_file))
else:
local_path = dest
with urlopen(url) as c:
content_length = c.getheader('content-length')
length = int(content_length) if content_length and isinstance(c, http.client.HTTPResponse) else None
if length and local_path and os.path.exists(local_path) and os.stat(local_path).st_size == length:
log.debug(f"download('{url}'): Already downloaded.")
return local_path
log.debug(f"download({url}, {local_path}): downloading {length} bytes")
if local_path:
with tempfile.NamedTemporaryFile(delete=False) as tmpfd:
urllib.request.urlretrieve(url, filename=tmpfd.name, reporthook=ProgressCB() if progress else None)
shutil.move(tmpfd.name, local_path)
else:
(local_path, _) = urllib.request.urlretrieve(url, reporthook=ProgressCB())
log.debug(f"download({url}, {local_path}'): done.")
return local_path
# Takes arguments and runs command on host. Shell is disabled by default.
# TODO: Move timeout to args
def run_command(*args, shell=False, timeout=DEFAULT_SUBPROCESS_TIMEOUT, **kwargs):
try:
logging.info("Issuing command: {}".format(args))
res = subprocess.check_output(*args, shell=shell, timeout=timeout).decode("utf-8").replace("\r\n", "\n")
logging.info("Output: {}".format(res))
except subprocess.CalledProcessError as e:
raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
return res
# Copies source directory recursively to destination.
def copy(src, dest):
try:
shutil.copytree(src, dest)
logging.info("Moved {} to {}".format(src, dest))
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, dest)
logging.info("Moved {} to {}".format(src, dest))
else:
raise RuntimeError("copy return with error: {}".format(e))
def on_rm_error(func, path, exc_info):
# let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
def reboot_system():
logging.info("Rebooting system now...")
run_command("shutdown -r -t 5")
exit(0)
def shutdown_system():
logging.info("Shutting down system now...")
# wait 20 sec so we can capture the install logs
run_command("shutdown -s -t 20")
exit(0)
def install_vs():
if os.path.exists("C:\\Program Files (x86)\\Microsoft Visual Studio\\2019"):
logging.info("MSVS already installed, skipping.")
return False
# Visual Studio 2019
# Components: https://docs.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-community?view=vs-2019#visual-studio-core-editor-included-with-visual-studio-community-2019
logging.info("Installing Visual Studio 2019...")
vs_file_path = download('https://windows-post-install.s3-us-west-2.amazonaws.com/vs_community__1246179388.1585201415.exe')
run_command("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(vs_file_path,
vs_file_path.split('\\')[-1]), shell=True)
vs_file_path = vs_file_path + '.exe'
logging.info("Installing VisualStudio 2019.....")
ret = call(vs_file_path +
' --add Microsoft.VisualStudio.Workload.ManagedDesktop'
' --add Microsoft.VisualStudio.Workload.NetCoreTools'
' --add Microsoft.VisualStudio.Workload.NetWeb'
' --add Microsoft.VisualStudio.Workload.Node'
' --add Microsoft.VisualStudio.Workload.Office'
' --add Microsoft.VisualStudio.Component.TypeScript.2.0'
' --add Microsoft.VisualStudio.Component.TestTools.WebLoadTest'
' --add Component.GitHub.VisualStudio'
' --add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core'
' --add Microsoft.VisualStudio.Component.Static.Analysis.Tools'
' --add Microsoft.VisualStudio.Component.VC.CMake.Project'
' --add Microsoft.VisualStudio.Component.VC.140'
' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.Desktop'
' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.UWP'
' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.UWP.Native'
' --add Microsoft.VisualStudio.ComponentGroup.Windows10SDK.18362'
' --add Microsoft.VisualStudio.Component.Windows10SDK.16299'
' --wait'
' --passive'
' --norestart'
)
if ret == 3010 or ret == 0:
# 3010 is restart required
logging.info("VS install successful.")
else:
raise RuntimeError("VS failed to install, exit status {}".format(ret))
# Workaround for --wait sometimes ignoring the subprocesses doing component installs
def vs_still_installing():
return {'vs_installer.exe', 'vs_installershell.exe', 'vs_setup_bootstrapper.exe'} & set(map(lambda process: process.name(), psutil.process_iter()))
timer = 0
while vs_still_installing() and timer < DEFAULT_SUBPROCESS_TIMEOUT:
logging.warning("VS installers still running for %d s", timer)
if timer % 60 == 0:
logging.info("Waiting for Visual Studio to install for the last {} seconds".format(str(timer)))
sleep(1)
timer += 1
if vs_still_installing():
logging.warning("VS install still running after timeout (%d)", DEFAULT_SUBPROCESS_TIMEOUT)
else:
logging.info("Visual studio install complete.")
return True
def install_perl():
if os.path.exists("C:\\Strawberry\\perl\\bin\\perl.exe"):
logging.info("Perl already installed, skipping.")
return False
logging.info("Installing Perl")
with tempfile.TemporaryDirectory() as tmpdir:
perl_file_path = download(DEPS['perl'], tmpdir)
check_call(['msiexec ', '/n', '/passive', '/i', perl_file_path])
logging.info("Perl install complete")
return True
def install_clang():
if os.path.exists("C:\\Program Files\\LLVM"):
logging.info("Clang already installed, skipping.")
return False
logging.info("Installing Clang")
with tempfile.TemporaryDirectory() as tmpdir:
clang_file_path = download(DEPS['clang'], tmpdir)
run_command(clang_file_path + " /S /D=C:\\Program Files\\LLVM")
logging.info("Clang install complete")
return True
def install_openblas():
if os.path.exists("C:\\Program Files\\OpenBLAS-windows-v0_2_19"):
logging.info("OpenBLAS already installed, skipping.")
return False
logging.info("Installing OpenBLAS")
local_file = download(DEPS['openblas'])
with zipfile.ZipFile(local_file, 'r') as zip:
zip.extractall("C:\\Program Files")
run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name OpenBLAS_HOME -Value 'C:\\Program Files\\OpenBLAS-windows-v0_2_19'")
logging.info("Openblas Install complete")
return True
def install_mkl():
if os.path.exists("C:\\Program Files (x86)\\IntelSWTools"):
logging.info("Intel MKL already installed, skipping.")
return False
logging.info("Installing MKL 2019.3.203...")
file_path = download("http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/15247/w_mkl_2019.3.203.exe")
run_command("{} --silent --remove-extracted-files yes --a install -output=C:\mkl-install-log.txt -eula=accept".format(file_path))
logging.info("MKL Install complete")
return True
def install_opencv():
if os.path.exists("C:\\Program Files\\opencv"):
logging.info("OpenCV already installed, skipping.")
return False
logging.info("Installing OpenCV")
with tempfile.TemporaryDirectory() as tmpdir:
local_file = download(DEPS['opencv'])
with zipfile.ZipFile(local_file, 'r') as zip:
zip.extractall(tmpdir)
copy(f'{tmpdir}\\opencv\\build', r'c:\Program Files\opencv')
run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name OpenCV_DIR -Value 'C:\\Program Files\\opencv'")
logging.info("OpenCV install complete")
return True
def install_cudnn7():
if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin\\cudnn64_7.dll"):
logging.info("cuDNN7 already installed, skipping.")
return False
# cuDNN
logging.info("Installing cuDNN7")
with tempfile.TemporaryDirectory() as tmpdir:
local_file = download(DEPS['cudnn7'])
with zipfile.ZipFile(local_file, 'r') as zip:
zip.extractall(tmpdir)
for f in glob.glob(tmpdir+"\\cuda\\bin\\*"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin")
for f in glob.glob(tmpdir+"\\cuda\\include\\*.h"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\include")
for f in glob.glob(tmpdir+"\\cuda\\lib\\x64\\*"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\lib\\x64")
logging.info("cuDNN7 install complete")
return True
def install_cudnn8():
if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin\\cudnn64_8.dll"):
logging.info("cuDNN7 already installed, skipping.")
return False
# cuDNN
logging.info("Installing cuDNN8")
with tempfile.TemporaryDirectory() as tmpdir:
local_file = download(DEPS['cudnn8'])
with zipfile.ZipFile(local_file, 'r') as zip:
zip.extractall(tmpdir)
for f in glob.glob(tmpdir+"\\cuda\\bin\\*"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin")
for f in glob.glob(tmpdir+"\\cuda\\include\\*.h"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\include")
for f in glob.glob(tmpdir+"\\cuda\\lib\\x64\\*"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\lib\\x64")
logging.info("cuDNN8 install complete")
return True
def instance_family():
return urllib.request.urlopen('http://instance-data/latest/meta-data/instance-type').read().decode().split('.')[0]
CUDA_COMPONENTS=[
'nvcc', 'cublas', 'cublas_dev', 'cudart', 'cufft', 'cufft_dev', 'curand', 'curand_dev', 'cusolver',
'cusolver_dev', 'cusparse', 'cusparse_dev', 'npp', 'npp_dev', 'nvrtc', 'nvrtc_dev', 'nvml_dev'
]
def install_cuda110():
if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin"):
logging.info("CUDA 11.0 already installed, skipping.")
return False
logging.info("Downloadinng CUDA 11.0...")
cuda_file_path = download(
'http://developer.download.nvidia.com/compute/cuda/11.0.3/network_installers/cuda_11.0.3_win10_network.exe')
try:
check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(cuda_file_path,
cuda_file_path.split('\\')[-1]), shell=True)
except subprocess.CalledProcessError as e:
logging.exception("Rename file failed")
cuda_file_path = cuda_file_path + '.exe'
logging.info("Installing CUDA 11.0...")
check_call(cuda_file_path + ' -s')
#check_call(cuda_file_path + ' -s ' + " ".join([p + "_11.0" for p in CUDA_COMPONENTS]))
logging.info("Done installing CUDA 11.0.")
return True
def install_cuda102():
    """Silently install the CUDA 10.2 toolkit plus its first patch release.

    Returns True when an install was performed, False when CUDA 10.2 is
    already present.
    """
    if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin"):
        logging.info("CUDA 10.2 already installed, skipping.")
        return False
    logging.info("Downloading CUDA 10.2...")
    cuda_file_path = download(
        'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/network_installers/cuda_10.2.89_win10_network.exe')
    # The download has no .exe suffix; rename it so Windows will execute it.
    try:
        check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(cuda_file_path,
                                                                               cuda_file_path.split('\\')[-1]), shell=True)
    except subprocess.CalledProcessError as e:
        # Best effort: log and continue; the install below fails loudly if
        # the rename really did not happen.
        logging.exception("Rename file failed")
    cuda_file_path = cuda_file_path + '.exe'
    logging.info("Installing CUDA 10.2...")
    # "-s" runs the NVIDIA installer silently with default components.
    check_call(cuda_file_path + ' -s')
    #check_call(cuda_file_path + ' -s ' + " ".join([p + "_10.2" for p in CUDA_COMPONENTS]))
    logging.info("Downloading CUDA 10.2 patch...")
    patch_file_path = download(
        'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/patches/1/cuda_10.2.1_win10.exe')
    # Same rename dance for the patch installer.
    try:
        check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(patch_file_path,
                                                                               patch_file_path.split('\\')[-1]), shell=True)
    except subprocess.CalledProcessError as e:
        logging.exception("Rename patch failed")
    patch_file_path = patch_file_path + '.exe'
    logging.info("Installing CUDA patch...")
    check_call(patch_file_path + ' -s ')
    logging.info("Done installing CUDA 10.2 and patches.")
    return True
def schedule_aws_userdata():
    """Re-arm EC2Launch instance initialization so userdata runs on next boot.

    By default EC2Launch only runs the userdata script on first boot; the
    -Schedule flag schedules it to run again at the next startup.
    """
    logging.info("Scheduling AWS init so userdata will run on next boot...")
    run_command("PowerShell C:\\ProgramData\\Amazon\\EC2-Windows\\Launch\\Scripts\\InitializeInstance.ps1 -Schedule")
def add_paths():
    """Append build-tool directories to the machine-wide PATH.

    Reads the system PATH from the registry via PowerShell, appends the
    Windows Kits, OpenBLAS, LLVM and OpenCV bin directories, and writes it
    back. Takes effect for new processes after the environment is reloaded.
    """
    # TODO: Add python paths (python -> C:\\Python37\\python.exe, python2 -> C:\\Python27\\python.exe)
    logging.info("Adding Windows Kits to PATH...")
    current_path = run_command(
        "PowerShell (Get-Itemproperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name Path).Path")
    # run_command output ends with a newline; strip it before appending.
    current_path = current_path.rstrip()
    logging.debug("current_path: {}".format(current_path))
    new_path = current_path + \
        ";C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0.16299.0\\x86;C:\\Program Files\\OpenBLAS-windows-v0_2_19\\bin;C:\\Program Files\\LLVM\\bin;C:\\Program Files\\opencv\\bin;C:\\Program Files\\opencv\\x64\\vc15\\bin"
    logging.debug("new_path: {}".format(new_path))
    run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name Path -Value '" + new_path + "'")
def script_name() -> str:
    """Return the filename component of the currently running script."""
    return os.path.basename(sys.argv[0])
def remove_install_task():
    """Unregister the 'Stage2Install' scheduled task so this script does not
    run again on subsequent boots.

    -Confirm:$false suppresses the interactive confirmation prompt.
    """
    logging.info("Removing stage2 startup task...")
    run_command("PowerShell Unregister-ScheduledTask -TaskName 'Stage2Install' -Confirm:$false")
def main():
    """Install the full Windows GPU build toolchain, rebooting as needed.

    Presumably this script is re-run after each reboot by the 'Stage2Install'
    scheduled task (removed at the end) so the install resumes where the
    idempotent install_* checks left off — TODO confirm against the stage1
    setup.
    """
    # LOGLEVEL may be a level name from the environment; default is DEBUG.
    logging.getLogger().setLevel(os.environ.get('LOGLEVEL', logging.DEBUG))
    logging.basicConfig(filename="C:\\install.log", format='{}: %(asctime)sZ %(levelname)s %(message)s'.format(script_name()))
    # install all necessary software and reboot after some components
    # for CUDA, the last version you install will be the default, based on PATH variable
    if install_cuda110():
        reboot_system()
    install_cudnn8()
    #if install_cuda102():
    #    reboot_system()
    #install_cudnn7()
    if install_vs():
        reboot_system()
    install_openblas()
    install_mkl()
    install_opencv()
    install_perl()
    install_clang()
    add_paths()
    # Clean up the startup task, re-arm userdata, and power down.
    remove_install_task()
    schedule_aws_userdata()
    shutdown_system()
if __name__ == "__main__":
    exit(main())
| true | true |
f72d09e2fff6278a2ca4a3ec916f0240a3598469 | 2,143 | py | Python | tests/examples/test_examples.py | ibraheemmmoosa/lightning-flash | c60fef81b27174543d7ad3a4d841faf71ad8536c | [
"Apache-2.0"
] | 2 | 2021-06-25T08:42:36.000Z | 2021-06-25T08:49:29.000Z | tests/examples/test_examples.py | edenlightning/lightning-flash | 841986aa0081bdeaf785d1ed4c48dd108fa69a78 | [
"Apache-2.0"
] | null | null | null | tests/examples/test_examples.py | edenlightning/lightning-flash | 841986aa0081bdeaf785d1ed4c48dd108fa69a78 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Tuple
import pytest
root = Path(__file__).parent.parent.parent
def call_script(filepath: str,
                args: Optional[List[str]] = None,
                timeout: Optional[int] = 60 * 5) -> Tuple[int, str, str]:
    """Run ``filepath`` as a script using the current Python interpreter.

    Args:
        filepath: Path of the Python script to execute.
        args: Extra command-line arguments; each one is converted with ``str``.
        timeout: Seconds to wait before killing the child process.

    Returns:
        A ``(returncode, stdout, stderr)`` tuple with both streams decoded as
        UTF-8. On timeout the child is killed and whatever output it produced
        so far is returned.
    """
    extra_args = [str(arg) for arg in (args or [])]
    command = [sys.executable, filepath] + extra_args
    print(" ".join(command))
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        raw_out, raw_err = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        process.kill()
        raw_out, raw_err = process.communicate()
    return process.returncode, raw_out.decode("utf-8"), raw_err.decode("utf-8")
def run_test(filepath):
    """Execute the example script at ``filepath`` and fail on a non-zero exit."""
    exit_code, out, err = call_script(filepath)
    assert not exit_code
    print(f"{filepath} STDOUT: {out}")
    print(f"{filepath} STDERR: {err}")
# Smoke-test each bundled example end-to-end: success means the script
# exits with status 0 within call_script's timeout.
@pytest.mark.parametrize(
    "step,file",
    [
        ("finetuning", "image_classification.py"),
        ("finetuning", "tabular_classification.py"),
        ("predict", "classify_image.py"),
        ("predict", "classify_tabular.py"),
        # "classify_text.py" TODO: takes too long
    ]
)
def test_finetune_example(tmpdir, step, file):
    # Run inside a temp dir so downloaded data/checkpoints do not pollute the repo.
    with tmpdir.as_cwd():
        run_test(str(root / "flash_examples" / step / file))
# Smoke-test the generic task example; run in a temp dir to avoid polluting
# the repo with any files the example writes.
def test_generic_example(tmpdir):
    with tmpdir.as_cwd():
        run_test(str(root / "flash_examples" / "generic_task.py"))
| 31.514706 | 81 | 0.674755 |
import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Tuple
import pytest
root = Path(__file__).parent.parent.parent
def call_script(filepath: str,
args: Optional[List[str]] = None,
timeout: Optional[int] = 60 * 5) -> Tuple[int, str, str]:
if args is None:
args = []
args = [str(a) for a in args]
command = [sys.executable, filepath] + args
print(" ".join(command))
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
stdout, stderr = p.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
p.kill()
stdout, stderr = p.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
return p.returncode, stdout, stderr
def run_test(filepath):
code, stdout, stderr = call_script(filepath)
assert not code
print(f"{filepath} STDOUT: {stdout}")
print(f"{filepath} STDERR: {stderr}")
@pytest.mark.parametrize(
"step,file",
[
("finetuning", "image_classification.py"),
("finetuning", "tabular_classification.py"),
("predict", "classify_image.py"),
("predict", "classify_tabular.py"),
]
)
def test_finetune_example(tmpdir, step, file):
with tmpdir.as_cwd():
run_test(str(root / "flash_examples" / step / file))
def test_generic_example(tmpdir):
with tmpdir.as_cwd():
run_test(str(root / "flash_examples" / "generic_task.py"))
| true | true |
f72d0a505a4a11b305a59432ae84c13cac527366 | 1,848 | py | Python | tests/functional/tests/rbd-mirror/test_rbd_mirror.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
] | 1,570 | 2015-01-03T08:38:22.000Z | 2022-03-31T09:24:37.000Z | tests/functional/tests/rbd-mirror/test_rbd_mirror.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
] | 4,964 | 2015-01-05T10:41:44.000Z | 2022-03-31T07:59:49.000Z | tests/functional/tests/rbd-mirror/test_rbd_mirror.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
] | 1,231 | 2015-01-04T11:48:16.000Z | 2022-03-31T12:15:28.000Z | import pytest
import json
class TestRbdMirrors(object):
    """Testinfra checks for rbd-mirror hosts deployed by ceph-ansible."""

    @pytest.mark.no_docker
    def test_rbd_mirror_is_installed(self, node, host):
        """On non-containerized deployments the rbd-mirror package is installed."""
        assert host.package("rbd-mirror").is_installed

    def test_rbd_mirror_service_enabled_and_running(self, node, host):
        """The per-host ceph-rbd-mirror systemd unit is enabled and active."""
        service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
            hostname=node["vars"]["inventory_hostname"]
        )
        s = host.service(service_name)
        assert s.is_enabled
        assert s.is_running

    def test_rbd_mirror_is_up(self, node, host, setup):
        """The rbd-mirror daemon for this host is registered in the service map."""
        hostname = node["vars"]["inventory_hostname"]
        cluster = setup["cluster_name"]
        container_binary = setup["container_binary"]
        daemons = []
        if node['docker']:
            container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(  # noqa E501
                hostname=hostname, container_binary=container_binary)
        else:
            container_exec_cmd = ''
        # Cleanup: hostname/cluster were previously re-assigned here with the
        # same values, and an unused hostname= kwarg was passed to format().
        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
            container_exec_cmd=container_exec_cmd,
            cluster=cluster
        )
        output = host.check_output(cmd)
        status = json.loads(output)
        # "summary" is a sibling of the per-daemon entries; skip it.
        daemon_ids = [i for i in status["servicemap"]["services"]
                      ["rbd-mirror"]["daemons"].keys() if i != "summary"]
        for daemon_id in daemon_ids:
            daemons.append(status["servicemap"]["services"]["rbd-mirror"]
                           ["daemons"][daemon_id]["metadata"]["hostname"])
        assert hostname in daemons
| 42 | 216 | 0.62987 | import pytest
import json
class TestRbdMirrors(object):
@pytest.mark.no_docker
def test_rbd_mirror_is_installed(self, node, host):
assert host.package("rbd-mirror").is_installed
def test_rbd_mirror_service_enabled_and_running(self, node, host):
service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
s = host.service(service_name)
assert s.is_enabled
assert s.is_running
def test_rbd_mirror_is_up(self, node, host, setup):
hostname = node["vars"]["inventory_hostname"]
cluster = setup["cluster_name"]
container_binary = setup["container_binary"]
daemons = []
if node['docker']:
container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(
hostname=hostname, container_binary=container_binary)
else:
container_exec_cmd = ''
hostname = node["vars"]["inventory_hostname"]
cluster = setup['cluster_name']
cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
container_exec_cmd=container_exec_cmd,
hostname=hostname,
cluster=cluster
)
output = host.check_output(cmd)
status = json.loads(output)
daemon_ids = [i for i in status["servicemap"]["services"]
["rbd-mirror"]["daemons"].keys() if i != "summary"]
for daemon_id in daemon_ids:
daemons.append(status["servicemap"]["services"]["rbd-mirror"]
["daemons"][daemon_id]["metadata"]["hostname"])
assert hostname in daemons
| true | true |
f72d0b8d703a8988d4c6fdaaab16ad16480f62ab | 10,785 | py | Python | src/summarycode/summarizer.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | src/summarycode/summarizer.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | src/summarycode/summarizer.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
from collections import defaultdict
import attr
import fingerprints
from commoncode.cliutils import POST_SCAN_GROUP, PluggableCommandLineOption
from license_expression import Licensing
from plugincode.post_scan import PostScanPlugin, post_scan_impl
from cluecode.copyrights import CopyrightDetector
from packagedcode.utils import combine_expressions
from packagedcode import models
from summarycode.score import compute_license_score
from summarycode.score import get_field_values_from_codebase_resources
from summarycode.score import unique
from summarycode.tallies import compute_codebase_tallies
# Tracing flags
TRACE = False
TRACE_LIGHT = False
def logger_debug(*args):
pass
if TRACE or TRACE_LIGHT:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Create summarized scan data.
"""
@post_scan_impl
class ScanSummary(PostScanPlugin):
    """
    Summarize a scan at the codebase level.

    Populates ``codebase.attributes.summary`` with the declared license
    expression, a license clarity score, the declared holder, the primary
    programming language, and the "other" detected values for each.
    """
    sort_order = 10
    codebase_attributes = dict(summary=attr.ib(default=attr.Factory(dict)))
    options = [
        PluggableCommandLineOption(
            ('--summary',),
            is_flag=True,
            default=False,
            help='Summarize scans by providing declared origin '
                 'information and other detected origin info at the '
                 'codebase attribute level.',
            help_group=POST_SCAN_GROUP,
            required_options=['classify'],
        )
    ]
    def is_enabled(self, summary, **kwargs):
        # Run only when the --summary CLI flag was passed.
        return summary
    def process_codebase(self, codebase, summary, **kwargs):
        """Compute the summary mapping and store it on ``codebase.attributes``."""
        if TRACE_LIGHT:
            logger_debug('ScanSummary:process_codebase')
        # Get tallies
        tallies = compute_codebase_tallies(codebase, keep_details=False, **kwargs)
        license_expressions_tallies = tallies.get('license_expressions') or []
        holders_tallies = tallies.get('holders') or []
        programming_language_tallies = tallies.get('programming_language') or []
        # Determine declared license expression, declared holder, and primary
        # language from Package data at the top level.
        declared_license_expression = None
        declared_holders = None
        primary_language = None
        # use top level packages
        if hasattr(codebase.attributes, 'packages'):
            top_level_packages = codebase.attributes.packages
            (
                declared_license_expression,
                declared_holders,
                primary_language,
            ) = get_origin_info_from_top_level_packages(
                top_level_packages=top_level_packages,
                codebase=codebase,
            )
        if declared_license_expression:
            scoring_elements, _ = compute_license_score(codebase)
        else:
            # If we did not get a declared license expression from detected
            # package data, then we use the results from `compute_license_score`
            scoring_elements, declared_license_expression = compute_license_score(codebase)
        # Everything that is not the declared value becomes an "other" entry.
        other_license_expressions = remove_from_tallies(
            declared_license_expression, license_expressions_tallies
        )
        if not declared_holders:
            declared_holders = get_declared_holders(codebase, holders_tallies)
        other_holders = remove_from_tallies(declared_holders, holders_tallies)
        declared_holder = ', '.join(declared_holders)
        if not primary_language:
            primary_language = get_primary_language(programming_language_tallies)
        other_languages = remove_from_tallies(primary_language, programming_language_tallies)
        # Save summary info to codebase
        codebase.attributes.summary['declared_license_expression'] = declared_license_expression
        codebase.attributes.summary['license_clarity_score'] = scoring_elements.to_dict()
        codebase.attributes.summary['declared_holder'] = declared_holder
        codebase.attributes.summary['primary_language'] = primary_language
        codebase.attributes.summary['other_license_expressions'] = other_license_expressions
        codebase.attributes.summary['other_holders'] = other_holders
        codebase.attributes.summary['other_languages'] = other_languages
def remove_from_tallies(entry, tallies):
    """
    Return a list with the elements of ``tallies``, excluding every tally
    that matches ``entry``.

    ``entry`` may be a tally mapping (matched by equality), a list/tuple
    (a tally or its 'value' contained in it matches), or a plain value
    (matched against the tally's 'value').
    """
    def matches(tally):
        if isinstance(entry, dict) and tally == entry:
            return True
        if isinstance(entry, (list, tuple)):
            if tally in entry or tally.get('value') in entry:
                return True
        return tally.get('value') == entry
    return [tally for tally in tallies if not matches(tally)]
def get_declared_holders(codebase, holders_tallies):
    """
    Return a list of declared holders from a codebase using the holders
    detected from key files.
    A declared holder is a copyright holder present in the key files who has the
    highest amount of references throughout the codebase.
    """
    # Index codebase-wide holder tallies by a normalized fingerprint of the
    # holder name so near-identical spellings collapse together.
    entry_by_holders = {
        fingerprints.generate(entry['value']): entry for entry in holders_tallies if entry['value']
    }
    key_file_holders = get_field_values_from_codebase_resources(
        codebase, 'holders', key_files_only=True
    )
    entry_by_key_file_holders = {
        fingerprints.generate(entry['holder']): entry
        for entry in key_file_holders
        if entry['holder']
    }
    unique_key_file_holders = unique(entry_by_key_file_holders.keys())
    # Map each key-file holder fingerprint back to its codebase-wide tally.
    unique_key_file_holders_entries = [
        entry_by_holders[holder] for holder in unique_key_file_holders
    ]
    # Group holder names by their reference count across the codebase.
    holder_by_counts = defaultdict(list)
    for holder_entry in unique_key_file_holders_entries:
        count = holder_entry.get('count')
        if count:
            holder = holder_entry.get('value')
            holder_by_counts[count].append(holder)
    declared_holders = []
    if holder_by_counts:
        highest_count = max(holder_by_counts)
        declared_holders = holder_by_counts[highest_count]
    # If we could not determine a holder, then we return a list of all the
    # unique key file holders
    if not declared_holders:
        declared_holders = [entry['value'] for entry in unique_key_file_holders_entries]
    return declared_holders
def get_primary_language(programming_language_tallies):
    """
    Return the most frequently detected programming language as the primary
    language, or an empty string when there are no tallies.
    """
    # Map count -> language; on duplicate counts the later tally wins,
    # matching the original dict-comprehension behavior.
    language_for_count = {}
    for tally in programming_language_tallies:
        language_for_count[tally['count']] = tally['value']
    if not language_for_count:
        return ''
    return language_for_count[max(language_for_count)] or ''
def get_origin_info_from_top_level_packages(top_level_packages, codebase):
    """
    Return a 3-tuple of (declared license expression string, list of declared
    holder names, primary programming language string) computed from a
    ``top_level_packages`` list of detected top-level package mappings and a
    ``codebase``. Each element is empty ('' or []) when undetermined.
    """
    if not top_level_packages:
        return '', '', ''
    license_expressions = []
    programming_languages = []
    copyrights = []
    parties = []
    for package_mapping in top_level_packages:
        package = models.Package.from_dict(package_mapping)
        # we are only interested in key packages
        if not is_key_package(package, codebase):
            continue
        license_expression = package.license_expression
        if license_expression:
            license_expressions.append(license_expression)
        programming_language = package.primary_language
        if programming_language:
            programming_languages.append(programming_language)
        copyright_statement = package.copyright
        if copyright_statement:
            copyrights.append(copyright_statement)
        parties.extend(package.parties or [])
    # Combine license expressions
    unique_license_expressions = unique(license_expressions)
    combined_declared_license_expression = combine_expressions(
        expressions=unique_license_expressions,
        relation='AND',
    )
    declared_license_expression = ''
    if combined_declared_license_expression:
        # Simplify the AND-combined expression to a canonical minimal form.
        declared_license_expression = str(
            Licensing().parse(combined_declared_license_expression).simplify()
        )
    # Get holders: prefer holders parsed from copyright statements, then
    # fall back to package party names.
    holders = list(get_holders_from_copyright(copyrights))
    declared_holders = []
    if holders:
        declared_holders = holders
    elif parties:
        declared_holders = [party.name for party in parties or []]
    declared_holders = unique(declared_holders)
    # Programming language: only declared when all key packages agree.
    unique_programming_languages = unique(programming_languages)
    primary_language = ''
    if len(unique_programming_languages) == 1:
        primary_language = unique_programming_languages[0]
    return declared_license_expression, declared_holders, primary_language
def get_holders_from_copyright(copyright):
    """
    Yield the copyright holders detected in ``copyright``, which is either a
    single copyright string or a list of them.
    """
    # CopyrightDetector expects (line_number, text) pairs.
    if isinstance(copyright, list):
        numbered_lines = list(enumerate(copyright))
    else:
        numbered_lines = [(0, copyright)]
    detections = CopyrightDetector().detect(
        numbered_lines,
        include_copyrights=False,
        include_holders=True,
        include_authors=False,
    )
    for detection in detections:
        yield detection.holder
def is_key_package(package, codebase):
    """
    Return True if the ``package`` Package is a key, top-level package, i.e.
    one of its datafiles is a top-level resource of ``codebase``.
    """
    key_datafile_paths = set(package.datafile_paths or [])
    # Top-down walk yields all top-level resources first; stop at the first
    # non-top-level resource.
    for resource in codebase.walk(topdown=True):
        if not resource.is_top_level:
            return False
        if resource.path in key_datafile_paths:
            return True
    return False
| 33.915094 | 99 | 0.697636 |
from collections import defaultdict
import attr
import fingerprints
from commoncode.cliutils import POST_SCAN_GROUP, PluggableCommandLineOption
from license_expression import Licensing
from plugincode.post_scan import PostScanPlugin, post_scan_impl
from cluecode.copyrights import CopyrightDetector
from packagedcode.utils import combine_expressions
from packagedcode import models
from summarycode.score import compute_license_score
from summarycode.score import get_field_values_from_codebase_resources
from summarycode.score import unique
from summarycode.tallies import compute_codebase_tallies
TRACE = False
TRACE_LIGHT = False
def logger_debug(*args):
pass
if TRACE or TRACE_LIGHT:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
@post_scan_impl
class ScanSummary(PostScanPlugin):
sort_order = 10
codebase_attributes = dict(summary=attr.ib(default=attr.Factory(dict)))
options = [
PluggableCommandLineOption(
('--summary',),
is_flag=True,
default=False,
help='Summarize scans by providing declared origin '
'information and other detected origin info at the '
'codebase attribute level.',
help_group=POST_SCAN_GROUP,
required_options=['classify'],
)
]
def is_enabled(self, summary, **kwargs):
return summary
def process_codebase(self, codebase, summary, **kwargs):
if TRACE_LIGHT:
logger_debug('ScanSummary:process_codebase')
tallies = compute_codebase_tallies(codebase, keep_details=False, **kwargs)
license_expressions_tallies = tallies.get('license_expressions') or []
holders_tallies = tallies.get('holders') or []
programming_language_tallies = tallies.get('programming_language') or []
declared_license_expression = None
declared_holders = None
primary_language = None
if hasattr(codebase.attributes, 'packages'):
top_level_packages = codebase.attributes.packages
(
declared_license_expression,
declared_holders,
primary_language,
) = get_origin_info_from_top_level_packages(
top_level_packages=top_level_packages,
codebase=codebase,
)
if declared_license_expression:
scoring_elements, _ = compute_license_score(codebase)
else:
scoring_elements, declared_license_expression = compute_license_score(codebase)
other_license_expressions = remove_from_tallies(
declared_license_expression, license_expressions_tallies
)
if not declared_holders:
declared_holders = get_declared_holders(codebase, holders_tallies)
other_holders = remove_from_tallies(declared_holders, holders_tallies)
declared_holder = ', '.join(declared_holders)
if not primary_language:
primary_language = get_primary_language(programming_language_tallies)
other_languages = remove_from_tallies(primary_language, programming_language_tallies)
codebase.attributes.summary['declared_license_expression'] = declared_license_expression
codebase.attributes.summary['license_clarity_score'] = scoring_elements.to_dict()
codebase.attributes.summary['declared_holder'] = declared_holder
codebase.attributes.summary['primary_language'] = primary_language
codebase.attributes.summary['other_license_expressions'] = other_license_expressions
codebase.attributes.summary['other_holders'] = other_holders
codebase.attributes.summary['other_languages'] = other_languages
def remove_from_tallies(entry, tallies):
pruned_tallies = []
for t in tallies:
if (
isinstance(entry, dict)
and t == entry
or isinstance(entry, (list, tuple))
and t in entry
or isinstance(entry, (list, tuple))
and t.get('value') in entry
or t.get('value') == entry
):
continue
pruned_tallies.append(t)
return pruned_tallies
def get_declared_holders(codebase, holders_tallies):
entry_by_holders = {
fingerprints.generate(entry['value']): entry for entry in holders_tallies if entry['value']
}
key_file_holders = get_field_values_from_codebase_resources(
codebase, 'holders', key_files_only=True
)
entry_by_key_file_holders = {
fingerprints.generate(entry['holder']): entry
for entry in key_file_holders
if entry['holder']
}
unique_key_file_holders = unique(entry_by_key_file_holders.keys())
unique_key_file_holders_entries = [
entry_by_holders[holder] for holder in unique_key_file_holders
]
holder_by_counts = defaultdict(list)
for holder_entry in unique_key_file_holders_entries:
count = holder_entry.get('count')
if count:
holder = holder_entry.get('value')
holder_by_counts[count].append(holder)
declared_holders = []
if holder_by_counts:
highest_count = max(holder_by_counts)
declared_holders = holder_by_counts[highest_count]
if not declared_holders:
declared_holders = [entry['value'] for entry in unique_key_file_holders_entries]
return declared_holders
def get_primary_language(programming_language_tallies):
programming_languages_by_count = {
entry['count']: entry['value'] for entry in programming_language_tallies
}
primary_language = ''
if programming_languages_by_count:
highest_count = max(programming_languages_by_count)
primary_language = programming_languages_by_count[highest_count] or ''
return primary_language
def get_origin_info_from_top_level_packages(top_level_packages, codebase):
if not top_level_packages:
return '', '', ''
license_expressions = []
programming_languages = []
copyrights = []
parties = []
for package_mapping in top_level_packages:
package = models.Package.from_dict(package_mapping)
if not is_key_package(package, codebase):
continue
license_expression = package.license_expression
if license_expression:
license_expressions.append(license_expression)
programming_language = package.primary_language
if programming_language:
programming_languages.append(programming_language)
copyright_statement = package.copyright
if copyright_statement:
copyrights.append(copyright_statement)
parties.extend(package.parties or [])
unique_license_expressions = unique(license_expressions)
combined_declared_license_expression = combine_expressions(
expressions=unique_license_expressions,
relation='AND',
)
declared_license_expression = ''
if combined_declared_license_expression:
declared_license_expression = str(
Licensing().parse(combined_declared_license_expression).simplify()
)
holders = list(get_holders_from_copyright(copyrights))
declared_holders = []
if holders:
declared_holders = holders
elif parties:
declared_holders = [party.name for party in parties or []]
declared_holders = unique(declared_holders)
unique_programming_languages = unique(programming_languages)
primary_language = ''
if len(unique_programming_languages) == 1:
primary_language = unique_programming_languages[0]
return declared_license_expression, declared_holders, primary_language
def get_holders_from_copyright(copyright):
numbered_lines = []
if isinstance(copyright, list):
for i, c in enumerate(copyright):
numbered_lines.append((i, c))
else:
numbered_lines.append((0, copyright))
holder_detections = CopyrightDetector().detect(
numbered_lines,
include_copyrights=False,
include_holders=True,
include_authors=False,
)
for holder_detection in holder_detections:
yield holder_detection.holder
def is_key_package(package, codebase):
datafile_paths = set(package.datafile_paths or [])
for resource in codebase.walk(topdown=True):
if not resource.is_top_level:
break
if resource.path in datafile_paths:
return True
return False
| true | true |
f72d0b98b7272d4c524f74d442bb17b81bda3d2a | 1,257 | py | Python | salt/pillar/varstack_pillar.py | preoctopus/salt | aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d | [
"Apache-2.0"
] | 3 | 2015-04-16T18:42:35.000Z | 2017-10-30T16:57:49.000Z | salt/pillar/varstack_pillar.py | preoctopus/salt | aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d | [
"Apache-2.0"
] | 16 | 2015-11-18T00:44:03.000Z | 2018-10-29T20:48:27.000Z | salt/pillar/varstack_pillar.py | preoctopus/salt | aceaaa0e2f2f2ff29a694393bd82bba0d88fa44d | [
"Apache-2.0"
] | 4 | 2020-11-04T06:28:05.000Z | 2022-02-09T10:54:49.000Z | # -*- coding: utf-8 -*-
'''
Use `Varstack <https://github.com/conversis/varstack>`_ data as a Pillar source
Configuring Varstack
====================
Using varstack in Salt is fairly simple. Just put the following into the
config file of your master:
.. code-block:: yaml
ext_pillar:
- varstack: /etc/varstack.yaml
Varstack will then use /etc/varstack.yaml to determine which configuration
data to return as pillar information. From there you can take a look at the
`README <https://github.com/conversis/varstack/blob/master/README.md>`_ of
varstack on how this file is evaluated.
'''
from __future__ import absolute_import
# Import python libs
import logging
# Probe for the optional varstack dependency; __virtual__() uses this flag
# to disable the whole module when the library is not importable.
HAS_VARSTACK = False
try:
    import varstack
    HAS_VARSTACK = True
except ImportError:
    # varstack is not installed; leave HAS_VARSTACK False.
    pass
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'varstack'
def __virtual__():
    """Expose this ext_pillar only when the varstack library is importable."""
    return __virtualname__ if HAS_VARSTACK else False
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               conf):
    '''
    Evaluate the varstack configuration file at ``conf`` against this
    minion's grains and return the resulting data as pillar.

    minion_id and pillar are part of the ext_pillar interface but unused
    here; varstack derives everything from the __grains__ dunder that Salt
    injects into this module.
    '''
    vs = varstack.Varstack(config_filename=conf)
    return vs.evaluate(__grains__)
| 22.854545 | 79 | 0.703262 |
from __future__ import absolute_import
import logging
HAS_VARSTACK = False
try:
import varstack
HAS_VARSTACK = True
except ImportError:
pass
log = logging.getLogger(__name__)
__virtualname__ = 'varstack'
def __virtual__():
if not HAS_VARSTACK:
return False
return __virtualname__
def ext_pillar(minion_id, # pylint: disable=W0613
pillar, # pylint: disable=W0613
conf):
vs = varstack.Varstack(config_filename=conf)
return vs.evaluate(__grains__)
| true | true |
f72d0c4a82e81d1e634b40b84dd34a80b3db88c1 | 113 | py | Python | Codeforces/637B Nastya and Door.py | a3X3k/Competitive-programing-hacktoberfest-2021 | bc3997997318af4c5eafad7348abdd9bf5067b4f | [
"Unlicense"
] | 12 | 2021-06-05T09:40:10.000Z | 2021-10-07T17:59:51.000Z | Codeforces/637B Nastya and Door.py | a3X3k/Competitive-programing-hacktoberfest-2021 | bc3997997318af4c5eafad7348abdd9bf5067b4f | [
"Unlicense"
] | 21 | 2020-10-10T10:41:03.000Z | 2020-10-31T10:41:23.000Z | Codeforces/637B Nastya and Door.py | a3X3k/Competitive-programing-hacktoberfest-2021 | bc3997997318af4c5eafad7348abdd9bf5067b4f | [
"Unlicense"
] | 67 | 2021-08-01T10:04:52.000Z | 2021-10-10T00:25:04.000Z | t = int(input())
for t in range(t):
m,d=map(int,input().split())
l = list(map(int,input().split()))
| 22.6 | 38 | 0.530973 | t = int(input())
for t in range(t):
m,d=map(int,input().split())
l = list(map(int,input().split()))
| true | true |
f72d0cd7918505dd6a72b02b05dfa02f870e64c9 | 710 | py | Python | News163_Spider/News163_Spider/pipelines.py | a904919863/Spiders_Collection | 970aef563971eba5c9dca5dfe1cd8790561941be | [
"MIT"
] | 3 | 2022-02-23T02:44:47.000Z | 2022-02-28T06:41:26.000Z | News163_Spider/News163_Spider/pipelines.py | a904919863/Spiders_Collection | 970aef563971eba5c9dca5dfe1cd8790561941be | [
"MIT"
] | null | null | null | News163_Spider/News163_Spider/pipelines.py | a904919863/Spiders_Collection | 970aef563971eba5c9dca5dfe1cd8790561941be | [
"MIT"
] | null | null | null | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class News163SpiderPipeline:
    """Scrapy pipeline that appends scraped news items to a plain-text file.

    Writes each item as "<title>:<content>" on one line of ./news163.txt.
    """

    # Output file handle; opened when the spider starts.
    fp = None

    def open_spider(self, spider):
        """Open the output file once at spider startup."""
        print('开始爬虫......')
        self.fp = open('./news163.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Write one item and pass it through unchanged."""
        # Note: item['title'] really is the title (the old local was
        # misleadingly named "author").
        title = item['title']
        content = item['content']
        self.fp.write("".join((title, ":", content, "\n")))
        return item

    def close_spider(self, spider):
        """Close the output file when the spider finishes."""
        print('结束爬虫!')
        self.fp.close()
| 25.357143 | 66 | 0.64507 |
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class News163SpiderPipeline:
fp = None
def open_spider(self,spider):
print('开始爬虫......')
self.fp = open('./news163.txt','w',encoding='utf-8')
def process_item(self, item, spider):
author = item['title']
content = item['content']
self.fp.write(author+":"+content+"\n")
return item
def close_spider(self,spider):
print('结束爬虫!')
self.fp.close()
| true | true |
f72d0f27a1d1b5199bb5a8833ffec4a28cd377c8 | 2,104 | py | Python | PasswordManager/LoginDialog.py | whymatter/PasswordManager | 86070a1f998362cfa026e6e6e9b820a2d7ad5f06 | [
"MIT"
] | null | null | null | PasswordManager/LoginDialog.py | whymatter/PasswordManager | 86070a1f998362cfa026e6e6e9b820a2d7ad5f06 | [
"MIT"
] | null | null | null | PasswordManager/LoginDialog.py | whymatter/PasswordManager | 86070a1f998362cfa026e6e6e9b820a2d7ad5f06 | [
"MIT"
] | null | null | null | import sys
import os.path
from PyQt5 import QtWidgets, uic
from PyQt5.QtWidgets import QApplication, QTableWidgetItem
from Crypto.Cipher import AES
import json
from constants import FILE_NAME, KEY_ENCODING
from PwWindow import PwWindow
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 13:53:56 2017
@author: seitz
"""
class LoginDialog(QtWidgets.QDialog):
    """Dialog that asks for the master key and unlocks the password store.

    On first run (no store file yet) an empty, AES-EAX-encrypted store is
    created with the entered key. On a successful decryption the dialog
    hides itself and opens the main password window (``PwWindow``).
    """

    def __init__(self, parent=None, key=None):
        # NOTE(review): the `key` parameter is accepted but never used —
        # the key is always taken from the line edit via get_key().
        super(LoginDialog, self).__init__(parent)
        self.pw_window = None
        # load and show the user interface created with the designer.
        uic.loadUi('../login_dialog.ui', self)
        self.login_button.clicked.connect(self.login)
        self.show()

    def get_key(self):
        # The AES key material is the raw text of the key field, encoded
        # with the project-wide KEY_ENCODING.
        return self.key_lineedit.text().encode(KEY_ENCODING)

    def login(self):
        """Create the store on first use, then try to unlock it."""
        if not os.path.isfile(FILE_NAME):
            # First run: write an empty JSON list encrypted with the new
            # key. EAX mode yields (nonce, tag, ciphertext); they are
            # written to the file in exactly that order.
            cipher = AES.new(self.get_key(), AES.MODE_EAX)
            ciphertext, tag = cipher.encrypt_and_digest(json.dumps([]).encode(KEY_ENCODING))
            with open(FILE_NAME, "wb") as file_out:
                [ file_out.write(x) for x in (cipher.nonce, tag, ciphertext) ]
        if self.load_data(FILE_NAME, self.get_key()):
            self.hide()
            self.pw_window = PwWindow(key=self.get_key())

    def load_data(self, filename, key):
        """Return True if *filename* decrypts and parses as JSON with *key*.

        Any failure (missing file, wrong key / failed MAC verification,
        corrupt JSON) is reported on stdout and yields False.
        """
        try:
            with open(filename, 'rb') as file_in:
                print(file_in)
                # File layout: 16-byte nonce, 16-byte tag, rest ciphertext.
                nonce, tag, ciphertext = [ file_in.read(x) for x in (16, 16, -1) ]
                # let's assume that the key is somehow available again
                cipher = AES.new(key, AES.MODE_EAX, nonce)
                jsontext = cipher.decrypt_and_verify(ciphertext, tag)
                data = json.loads(jsontext)
                return True
        except Exception as e:
            print("Your file contains errors")
            print(e)
            return False
def _main():
    """Start the Qt event loop with the login dialog as entry window."""
    application = QApplication(sys.argv)
    dialog = LoginDialog()  # keep a reference so the dialog is not collected
    sys.exit(application.exec_())
if __name__ == '__main__':
_main()
| 30.492754 | 93 | 0.586502 | import sys
import os.path
from PyQt5 import QtWidgets, uic
from PyQt5.QtWidgets import QApplication, QTableWidgetItem
from Crypto.Cipher import AES
import json
from constants import FILE_NAME, KEY_ENCODING
from PwWindow import PwWindow
class LoginDialog(QtWidgets.QDialog):
def __init__(self, parent=None, key=None):
super(LoginDialog, self).__init__(parent)
self.pw_window = None
uic.loadUi('../login_dialog.ui', self)
self.login_button.clicked.connect(self.login)
self.show()
def get_key(self):
return self.key_lineedit.text().encode(KEY_ENCODING)
def login(self):
if not os.path.isfile(FILE_NAME):
cipher = AES.new(self.get_key(), AES.MODE_EAX)
ciphertext, tag = cipher.encrypt_and_digest(json.dumps([]).encode(KEY_ENCODING))
with open(FILE_NAME, "wb") as file_out:
[ file_out.write(x) for x in (cipher.nonce, tag, ciphertext) ]
if self.load_data(FILE_NAME, self.get_key()):
self.hide()
self.pw_window = PwWindow(key=self.get_key())
def load_data(self, filename, key):
try:
with open(filename, 'rb') as file_in:
print(file_in)
nonce, tag, ciphertext = [ file_in.read(x) for x in (16, 16, -1) ]
cipher = AES.new(key, AES.MODE_EAX, nonce)
jsontext = cipher.decrypt_and_verify(ciphertext, tag)
data = json.loads(jsontext)
return True
except Exception as e:
print("Your file contains errors")
print(e)
return False
def _main():
app = QApplication(sys.argv)
m = LoginDialog()
sys.exit(app.exec_())
if __name__ == '__main__':
_main()
| true | true |
f72d0f3414639adbfb45b04d9d66268327301c51 | 380 | py | Python | apps/db_data/migrations/0018_auto_20200723_2120.py | rhu2001/csua-backend | d2464c7eacfd1d675e0cc08f93f3b5083fa591dc | [
"MIT"
] | null | null | null | apps/db_data/migrations/0018_auto_20200723_2120.py | rhu2001/csua-backend | d2464c7eacfd1d675e0cc08f93f3b5083fa591dc | [
"MIT"
] | null | null | null | apps/db_data/migrations/0018_auto_20200723_2120.py | rhu2001/csua-backend | d2464c7eacfd1d675e0cc08f93f3b5083fa591dc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.12 on 2020-07-24 04:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db_data', '0017_notice'),
]
operations = [
migrations.AlterField(
model_name='officer',
name='officer_since',
field=models.DateField(blank=True),
),
]
| 20 | 48 | 0.592105 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('db_data', '0017_notice'),
]
operations = [
migrations.AlterField(
model_name='officer',
name='officer_since',
field=models.DateField(blank=True),
),
]
| true | true |
f72d0f3e5dbf9fe0132a52a219eaa07ffb21b9cc | 4,297 | py | Python | npy_append_array/npy_append_array.py | synapticarbors/npy-append-array | bf33483e7c2c50e13c9e55940878ca8217f4d4ad | [
"MIT"
] | null | null | null | npy_append_array/npy_append_array.py | synapticarbors/npy-append-array | bf33483e7c2c50e13c9e55940878ca8217f4d4ad | [
"MIT"
] | null | null | null | npy_append_array/npy_append_array.py | synapticarbors/npy-append-array | bf33483e7c2c50e13c9e55940878ca8217f4d4ad | [
"MIT"
] | null | null | null | import numpy as np
import os.path
from struct import pack, unpack
from io import BytesIO
def header_tuple_dict(tuple_in):
    """Convert a ``(shape, fortran_order, dtype)`` header tuple into the
    dict form expected by numpy's array-header writers."""
    shape, fortran_order, dtype = tuple_in
    return {
        'shape': shape,
        'fortran_order': fortran_order,
        'descr': np.lib.format.dtype_to_descr(dtype),
    }
def has_fortran_order(arr):
    """Return True for genuinely column-major arrays: Fortran-contiguous
    but not C-contiguous (1-D arrays are both, so they return False)."""
    flags = arr.flags
    return flags.f_contiguous and not flags.c_contiguous
def peek(fp, length):
    """Read up to *length* bytes from *fp* without advancing its position."""
    start = fp.tell()
    data = fp.read(length)
    fp.seek(start)
    return data
class NpyAppendArray:
    """Append-mode writer for ``.npy`` files.

    The first :meth:`append` creates the file (npy format 2.0 with 64
    bytes of spare header padding so the shape field can grow in place);
    later appends write the new data at the end and rewrite the header
    with the enlarged zero-axis length. Only C-contiguous, non-Fortran-
    ordered arrays are supported, and every appended array must match the
    file's dtype and all dimensions except the first.
    """

    def __init__(self, filename):
        self.filename = filename
        self.fp = None            # open handle, set once the file exists
        self.__is_init = False
        if os.path.isfile(filename):
            self.__init()

    def __init(self):
        """Open an existing file, validate its header, seek to the end."""
        self.fp = open(self.filename, mode="rb+")
        fp = self.fp

        magic = np.lib.format.read_magic(fp)
        self.is_version_1 = magic[0] == 1 and magic[1] == 0
        self.is_version_2 = magic[0] == 2 and magic[1] == 0

        if not self.is_version_1 and not self.is_version_2:
            raise NotImplementedError(
                "version (%d, %d) not implemented" % magic
            )

        # Header-length field: uint16 for format 1.0, uint32 for 2.0.
        self.header_length, = unpack("<H", peek(fp, 2)) if self.is_version_1 \
            else unpack("<I", peek(fp, 4))

        self.header = np.lib.format.read_array_header_1_0(fp) if \
            self.is_version_1 else np.lib.format.read_array_header_2_0(fp)

        if self.header[1]:  # fortran_order flag
            raise NotImplementedError("fortran_order not implemented")

        # Keep the raw bytes (magic + length field + header) so a rewritten
        # header can be size-checked against them on append.
        fp.seek(0)
        self.header_bytes = fp.read(self.header_length + (
            10 if self.is_version_1 else 12
        ))

        fp.seek(0, 2)  # position at end of file for appending
        self.__is_init = True

    def __create_header_bytes(self, header_map, spare_space=False):
        """Serialize a format-2.0 header; with *spare_space*, pad it with
        64 spaces so a later, larger shape still fits in the same bytes."""
        buf = BytesIO()
        np.lib.format.write_array_header_2_0(buf, header_map)

        if spare_space:
            # Grow the declared header length (uint32 at offset 8) by 64,
            # turn the terminating newline into a space, append 64 spaces
            # and restore the trailing newline.
            buf.getbuffer()[8:12] = pack("<I", int(
                buf.getbuffer().nbytes - 12 + 64
            ))
            buf.getbuffer()[-1] = 32
            buf.write(b" " * 64)
            buf.getbuffer()[-1] = 10

        return buf.getbuffer()

    def append(self, arr):
        """Append *arr* along axis 0, creating the file on first use.

        :raises NotImplementedError: for non-C-contiguous or Fortran arrays
        :raises TypeError: on dtype/shape mismatch or header-size overflow
        """
        if not arr.flags.c_contiguous:
            raise NotImplementedError("ndarray needs to be c_contiguous")

        if has_fortran_order(arr):
            raise NotImplementedError("fortran_order not implemented")

        arr_descr = np.lib.format.dtype_to_descr(arr.dtype)

        if not self.__is_init:
            # First append: write a fresh file with spare header space,
            # then re-open it through the normal initialization path.
            with open(self.filename, "wb") as fp0:
                fp0.write(self.__create_header_bytes({
                    'descr': arr_descr,
                    'fortran_order': False,
                    'shape': arr.shape
                }, True))
                arr.tofile(fp0)
            self.__init()
            return

        descr = self.header[2]
        if arr_descr != descr:
            raise TypeError("incompatible ndarrays types %s and %s" % (
                arr_descr, descr
            ))

        shape = self.header[0]
        if len(arr.shape) != len(shape):
            raise TypeError("incompatible ndarrays shape lengths %s and %s" % (
                len(arr.shape), len(shape)
            ))

        for i, e in enumerate(shape):
            if i > 0 and e != arr.shape[i]:
                raise TypeError("ndarray shapes can only differ on zero axis")

        new_shape = list(shape)
        new_shape[0] += arr.shape[0]
        self.header = (tuple(new_shape), self.header[1], self.header[2])

        # Rewrite the header in place; it must occupy exactly as many
        # bytes as before (the spare padding normally guarantees this).
        self.fp.seek(0)
        new_header_map = header_tuple_dict(self.header)
        new_header_bytes = self.__create_header_bytes(new_header_map, True)
        header_length = len(self.header_bytes)

        if header_length != len(new_header_bytes):
            # Fall back to an unpadded header before giving up.
            new_header_bytes = self.__create_header_bytes(new_header_map)
            if header_length != len(new_header_bytes):
                raise TypeError("header length mismatch, old: %d, new: %d" % (
                    header_length, len(new_header_bytes)
                ))

        self.header_bytes = new_header_bytes
        self.fp.write(new_header_bytes)
        self.fp.seek(0, 2)
        arr.tofile(self.fp)

    def __del__(self):
        # Best-effort close of the handle opened in __init().
        if self.fp is not None:
            self.fp.close()
import os.path
from struct import pack, unpack
from io import BytesIO
def header_tuple_dict(tuple_in):
return {
'shape': tuple_in[0],
'fortran_order': tuple_in[1],
'descr': np.lib.format.dtype_to_descr(tuple_in[2])
}
def has_fortran_order(arr):
return not arr.flags.c_contiguous and arr.flags.f_contiguous
def peek(fp, length):
pos = fp.tell()
tmp = fp.read(length)
fp.seek(pos)
return tmp
class NpyAppendArray:
def __init__(self, filename):
self.filename = filename
self.fp = None
self.__is_init = False
if os.path.isfile(filename):
self.__init()
def __init(self):
self.fp = open(self.filename, mode="rb+")
fp = self.fp
magic = np.lib.format.read_magic(fp)
self.is_version_1 = magic[0] == 1 and magic[1] == 0
self.is_version_2 = magic[0] == 2 and magic[1] == 0
if not self.is_version_1 and not self.is_version_2:
raise NotImplementedError(
"version (%d, %d) not implemented"%magic
)
self.header_length, = unpack("<H", peek(fp, 2)) if self.is_version_1 \
else unpack("<I", peek(fp, 4))
self.header = np.lib.format.read_array_header_1_0(fp) if \
self.is_version_1 else np.lib.format.read_array_header_2_0(fp)
if self.header[1] != False:
raise NotImplementedError("fortran_order not implemented")
fp.seek(0)
self.header_bytes = fp.read(self.header_length + (
10 if self.is_version_1 else 12
))
fp.seek(0, 2)
self.__is_init = True
def __create_header_bytes(self, header_map, spare_space=False):
io = BytesIO()
np.lib.format.write_array_header_2_0(io, header_map)
if spare_space:
io.getbuffer()[8:12] = pack("<I", int(
io.getbuffer().nbytes-12+64
))
io.getbuffer()[-1] = 32
io.write(b" "*64)
io.getbuffer()[-1] = 10
return io.getbuffer()
def append(self, arr):
if not arr.flags.c_contiguous:
raise NotImplementedError("ndarray needs to be c_contiguous")
if has_fortran_order(arr):
raise NotImplementedError("fortran_order not implemented")
arr_descr = np.lib.format.dtype_to_descr(arr.dtype)
if not self.__is_init:
with open(self.filename, "wb") as fp0:
fp0.write(self.__create_header_bytes({
'descr': arr_descr,
'fortran_order': False,
'shape': arr.shape
}, True))
arr.tofile(fp0)
self.__init()
return
descr = self.header[2]
if arr_descr != descr:
raise TypeError("incompatible ndarrays types %s and %s"%(
arr_descr, descr
))
shape = self.header[0]
if len(arr.shape) != len(shape):
raise TypeError("incompatible ndarrays shape lengths %s and %s"%(
len(arr.shape), len(shape)
))
for i, e in enumerate(shape):
if i > 0 and e != arr.shape[i]:
raise TypeError("ndarray shapes can only differ on zero axis")
new_shape = list(shape)
new_shape[0] += arr.shape[0]
new_shape = tuple(new_shape)
self.header = (new_shape, self.header[1], self.header[2])
self.fp.seek(0)
new_header_map = header_tuple_dict(self.header)
new_header_bytes = self.__create_header_bytes(new_header_map, True)
header_length = len(self.header_bytes)
if header_length != len(new_header_bytes):
new_header_bytes = self.__create_header_bytes(new_header_map)
if header_length != len(new_header_bytes):
raise TypeError("header length mismatch, old: %d, new: %d"%(
header_length, len(new_header_bytes)
))
self.header_bytes = new_header_bytes
self.fp.write(new_header_bytes)
self.fp.seek(0, 2)
arr.tofile(self.fp)
def __del__(self):
if self.fp is not None:
self.fp.close() | true | true |
f72d0fdb931b1dcc84a293aeef05fc7e649e1c49 | 23 | py | Python | prepack/__init__.py | CargoCodes/PreparePack | 3d1d3623c0c86ab02a92a567ed954fb37e18b8fa | [
"MIT"
] | null | null | null | prepack/__init__.py | CargoCodes/PreparePack | 3d1d3623c0c86ab02a92a567ed954fb37e18b8fa | [
"MIT"
] | null | null | null | prepack/__init__.py | CargoCodes/PreparePack | 3d1d3623c0c86ab02a92a567ed954fb37e18b8fa | [
"MIT"
] | null | null | null | from .prepack import *
| 11.5 | 22 | 0.73913 | from .prepack import *
| true | true |
f72d1029095787de77bafa202e5314c2ea6e90db | 262 | py | Python | examples/client.py | Scauting-Burgum/RemoteVar | bc3b1ffaace5defed1cd3a3e7042002717a3e44a | [
"MIT"
] | 1 | 2018-06-14T14:59:56.000Z | 2018-06-14T14:59:56.000Z | examples/client.py | Scauting-Burgum/ScautEvent-python | bc3b1ffaace5defed1cd3a3e7042002717a3e44a | [
"MIT"
] | null | null | null | examples/client.py | Scauting-Burgum/ScautEvent-python | bc3b1ffaace5defed1cd3a3e7042002717a3e44a | [
"MIT"
] | null | null | null | from ScautEvent.client import EventClient
from ScautEvent.common import Event
# Connect to the local event server and print every incoming "message"
# event to stdout.
client = EventClient("localhost", 5000)
client.listeners["message"] = print
client.start()
# Forever: read a line from stdin and push it to the server as a
# "message" event.
while True:
    message = input()
    event = Event("message", message)
    client.push(event)
| 18.714286 | 41 | 0.748092 | from ScautEvent.client import EventClient
from ScautEvent.common import Event
client = EventClient("localhost", 5000)
client.listeners["message"] = print
client.start()
while True:
message = input()
event = Event("message", message)
client.push(event)
| true | true |
f72d105dd8903c3509596490416a261c9efa1878 | 7,239 | py | Python | tests/gold_tests/tls/tls_tunnel_forward.test.py | masaori335/trafficserver | 58e7e8675c96a5a4eb958a442942892f6e2a0ef4 | [
"Apache-2.0"
] | 1 | 2019-10-28T04:36:50.000Z | 2019-10-28T04:36:50.000Z | tests/gold_tests/tls/tls_tunnel_forward.test.py | masaori335/trafficserver | 58e7e8675c96a5a4eb958a442942892f6e2a0ef4 | [
"Apache-2.0"
] | 1 | 2021-06-27T23:06:33.000Z | 2021-06-27T23:06:33.000Z | tests/gold_tests/tls/tls_tunnel_forward.test.py | masaori335/trafficserver | 58e7e8675c96a5a4eb958a442942892f6e2a0ef4 | [
"Apache-2.0"
] | null | null | null | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test tunneling and forwarding based on SNI
'''
# Define default ATS
ts = Test.MakeATSProcess("ts", select_ports=False)
server_foo = Test.MakeOriginServer("server_foo", ssl=True)
server_bar = Test.MakeOriginServer("server_bar", ssl=False)
server_random = Test.MakeOriginServer("server_random", ssl=False)
request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_random_header = {"headers": "GET / HTTP/1.1\r\nHost: random.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_foo_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "ok foo"}
response_bar_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "ok bar"}
response_random_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "ok random"}
server_foo.addResponse("sessionlog_foo.json", request_foo_header, response_foo_header)
server_bar.addResponse("sessionlog_bar.json", request_bar_header, response_bar_header)
server_random.addResponse("sessionlog_random.json", request_random_header, response_random_header)
# add ssl materials like key, certificates for the server
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.addSSLfile("ssl/signer.key")
ts.Variables.ssl_port = 4443
# Need no remap rules. Everything should be proccessed by ssl_server_name
# Make sure the TS server certs are different from the origin certs
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key'
)
# Case 1, global config policy=permissive properties=signature
# override for foo.com policy=enforced properties=all
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
# enable ssl port
'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port),
'proxy.config.http.connect_ports': '{0} {1} {2} {3}'.format(ts.Variables.ssl_port, server_foo.Variables.SSL_Port, server_bar.Variables.Port, server_random.Variables.Port),
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.CA.cert.filename': 'signer.pem',
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.url_remap.pristine_host_hdr': 1
})
# foo.com should not terminate. Just tunnel to server_foo
# bar.com should terminate. Forward its tcp stream to server_bar
ts.Disk.ssl_server_name_yaml.AddLines([
"- fqdn: 'foo.com'",
" tunnel_route: 'localhost:{0}'".format(server_foo.Variables.SSL_Port),
"- fqdn: 'bar.com'",
" forward_route: 'localhost:{0}'".format(server_bar.Variables.Port),
"- fqdn: ''", #default case
" forward_route: 'localhost:{0}'".format(server_random.Variables.Port),
])
# Test run 1: SNI "foo.com" matches a tunnel_route, so ATS must blind-
# tunnel the TLS stream to server_foo without terminating TLS itself.
tr = Test.AddTestRun("Tunnel-test")
tr.Processes.Default.Command = "curl -v --resolve 'foo.com:{0}:127.0.0.1' -k https://foo.com:{0}".format(ts.Variables.ssl_port)
tr.ReturnCode = 0
# Start all origin servers and ATS before the first curl request.
tr.Processes.Default.StartBefore(server_foo)
tr.Processes.Default.StartBefore(server_bar)
tr.Processes.Default.StartBefore(server_random)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Not Found on Accelerato", "Should not try to remap on Traffic Server")
# CN=foo.com is the ATS cert; seeing it would mean ATS terminated TLS.
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("CN=foo.com", "Should not TLS terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("ok foo", "Body is expected")
# Test run 2: SNI "bar.com" matches a forward_route, so ATS terminates
# TLS (client sees the ATS CN=foo.com cert) and forwards the decrypted
# TCP stream to the plain-HTTP server_bar.
tr2 = Test.AddTestRun("Forward-test")
tr2.Processes.Default.Command = "curl -v --http1.1 -H 'host:bar.com' --resolve 'bar.com:{0}:127.0.0.1' -k https://bar.com:{0}".format(ts.Variables.ssl_port)
tr2.ReturnCode = 0
tr2.StillRunningAfter = server_bar
tr2.StillRunningAfter = ts
tr2.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr2.Processes.Default.Streams.All += Testers.ExcludesExpression("Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr2.Processes.Default.Streams.All += Testers.ContainsExpression("CN=foo.com", "Should TLS terminate on Traffic Server")
tr2.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr2.Processes.Default.Streams.All += Testers.ContainsExpression("ok bar", "Body is expected")
# Test run 3: no SNI (curl connects by bare IP), so the empty-fqdn
# default rule applies — ATS terminates TLS and forwards to server_random.
tr3 = Test.AddTestRun("no-sni-forward-test")
tr3.Processes.Default.Command = "curl --http1.1 -v -k -H 'host:random.com' https://127.0.0.1:{0}".format(ts.Variables.ssl_port)
tr3.ReturnCode = 0
tr3.StillRunningAfter = server_random
tr3.StillRunningAfter = ts
tr3.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr3.Processes.Default.Streams.All += Testers.ExcludesExpression("Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr3.Processes.Default.Streams.All += Testers.ContainsExpression("CN=foo.com", "Should TLS terminate on Traffic Server")
tr3.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr3.Processes.Default.Streams.All += Testers.ContainsExpression("ok random", "Body is expected")
| 60.325 | 332 | 0.754524 |
import os
Test.Summary = '''
Test tunneling and forwarding based on SNI
'''
ts = Test.MakeATSProcess("ts", select_ports=False)
server_foo = Test.MakeOriginServer("server_foo", ssl=True)
server_bar = Test.MakeOriginServer("server_bar", ssl=False)
server_random = Test.MakeOriginServer("server_random", ssl=False)
request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_bar_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
request_random_header = {"headers": "GET / HTTP/1.1\r\nHost: random.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_foo_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "ok foo"}
response_bar_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "ok bar"}
response_random_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "ok random"}
server_foo.addResponse("sessionlog_foo.json", request_foo_header, response_foo_header)
server_bar.addResponse("sessionlog_bar.json", request_bar_header, response_bar_header)
server_random.addResponse("sessionlog_random.json", request_random_header, response_random_header)
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.addSSLfile("ssl/signer.key")
ts.Variables.ssl_port = 4443
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key'
)
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port),
'proxy.config.http.connect_ports': '{0} {1} {2} {3}'.format(ts.Variables.ssl_port, server_foo.Variables.SSL_Port, server_bar.Variables.Port, server_random.Variables.Port),
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.CA.cert.filename': 'signer.pem',
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.url_remap.pristine_host_hdr': 1
})
ts.Disk.ssl_server_name_yaml.AddLines([
"- fqdn: 'foo.com'",
" tunnel_route: 'localhost:{0}'".format(server_foo.Variables.SSL_Port),
"- fqdn: 'bar.com'",
" forward_route: 'localhost:{0}'".format(server_bar.Variables.Port),
"- fqdn: ''",
" forward_route: 'localhost:{0}'".format(server_random.Variables.Port),
])
tr = Test.AddTestRun("Tunnel-test")
tr.Processes.Default.Command = "curl -v --resolve 'foo.com:{0}:127.0.0.1' -k https://foo.com:{0}".format(ts.Variables.ssl_port)
tr.ReturnCode = 0
tr.Processes.Default.StartBefore(server_foo)
tr.Processes.Default.StartBefore(server_bar)
tr.Processes.Default.StartBefore(server_random)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.StillRunningAfter = ts
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ExcludesExpression("CN=foo.com", "Should not TLS terminate on Traffic Server")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr.Processes.Default.Streams.All += Testers.ContainsExpression("ok foo", "Body is expected")
tr2 = Test.AddTestRun("Forward-test")
tr2.Processes.Default.Command = "curl -v --http1.1 -H 'host:bar.com' --resolve 'bar.com:{0}:127.0.0.1' -k https://bar.com:{0}".format(ts.Variables.ssl_port)
tr2.ReturnCode = 0
tr2.StillRunningAfter = server_bar
tr2.StillRunningAfter = ts
tr2.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr2.Processes.Default.Streams.All += Testers.ExcludesExpression("Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr2.Processes.Default.Streams.All += Testers.ContainsExpression("CN=foo.com", "Should TLS terminate on Traffic Server")
tr2.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr2.Processes.Default.Streams.All += Testers.ContainsExpression("ok bar", "Body is expected")
tr3 = Test.AddTestRun("no-sni-forward-test")
tr3.Processes.Default.Command = "curl --http1.1 -v -k -H 'host:random.com' https://127.0.0.1:{0}".format(ts.Variables.ssl_port)
tr3.ReturnCode = 0
tr3.StillRunningAfter = server_random
tr3.StillRunningAfter = ts
tr3.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")
tr3.Processes.Default.Streams.All += Testers.ExcludesExpression("Not Found on Accelerato", "Should not try to remap on Traffic Server")
tr3.Processes.Default.Streams.All += Testers.ContainsExpression("CN=foo.com", "Should TLS terminate on Traffic Server")
tr3.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response")
tr3.Processes.Default.Streams.All += Testers.ContainsExpression("ok random", "Body is expected")
| true | true |
f72d1174e70c56ea9c0c9a435927929db9497bbd | 1,820 | py | Python | resources/WPy32/python-3.10.2/Lib/distutils/tests/test_bdist.py | eladkarako/yt-dlp_kit | 6365651111ef4d2f94335cf38bf4d9b0136d42d2 | [
"Unlicense"
] | 1 | 2022-03-26T15:43:50.000Z | 2022-03-26T15:43:50.000Z | resources/WPy32/python-3.10.2/Lib/distutils/tests/test_bdist.py | eladkarako/yt-dlp_kit | 6365651111ef4d2f94335cf38bf4d9b0136d42d2 | [
"Unlicense"
] | null | null | null | resources/WPy32/python-3.10.2/Lib/distutils/tests/test_bdist.py | eladkarako/yt-dlp_kit | 6365651111ef4d2f94335cf38bf4d9b0136d42d2 | [
"Unlicense"
] | 1 | 2022-03-28T19:28:45.000Z | 2022-03-28T19:28:45.000Z | """Tests for distutils.command.bdist."""
import os
import unittest
from test.support import run_unittest
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
from distutils.command.bdist import bdist
from distutils.tests import support
class BuildTestCase(support.TempdirManager,
                    unittest.TestCase):
    """Tests for the distutils ``bdist`` command."""

    def test_formats(self):
        """The format list is settable and the full set is as expected."""
        # let's create a command and make sure
        # we can set the format
        dist = self.create_dist()[1]
        cmd = bdist(dist)
        cmd.formats = ['msi']
        cmd.ensure_finalized()
        self.assertEqual(cmd.formats, ['msi'])

        # what formats does bdist offer?
        formats = ['bztar', 'gztar', 'msi', 'rpm', 'tar',
                   'xztar', 'zip', 'ztar']
        found = sorted(cmd.format_command)
        self.assertEqual(found, formats)

    def test_skip_build(self):
        """--skip-build must propagate from bdist to its subcommands."""
        # bug #10946: bdist --skip-build should trickle down to subcommands
        dist = self.create_dist()[1]
        cmd = bdist(dist)
        cmd.skip_build = 1
        cmd.ensure_finalized()
        dist.command_obj['bdist'] = cmd

        names = ['bdist_dumb'] # bdist_rpm does not support --skip-build
        if os.name == 'nt':
            # bdist_msi exists only on Windows.
            names.append('bdist_msi')

        for name in names:
            subcmd = cmd.get_finalized_command(name)
            if getattr(subcmd, '_unsupported', False):
                # command is not supported on this build
                continue
            self.assertTrue(subcmd.skip_build,
                            '%s should take --skip-build from bdist' % name)
def test_suite():
    """Collect all BuildTestCase tests for the distutils test runner."""
    return unittest.makeSuite(BuildTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| 31.929825 | 77 | 0.591209 | import os
import unittest
from test.support import run_unittest
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
from distutils.command.bdist import bdist
from distutils.tests import support
class BuildTestCase(support.TempdirManager,
unittest.TestCase):
def test_formats(self):
# we can set the format
dist = self.create_dist()[1]
cmd = bdist(dist)
cmd.formats = ['msi']
cmd.ensure_finalized()
self.assertEqual(cmd.formats, ['msi'])
# what formats does bdist offer?
formats = ['bztar', 'gztar', 'msi', 'rpm', 'tar',
'xztar', 'zip', 'ztar']
found = sorted(cmd.format_command)
self.assertEqual(found, formats)
def test_skip_build(self):
# bug #10946: bdist --skip-build should trickle down to subcommands
dist = self.create_dist()[1]
cmd = bdist(dist)
cmd.skip_build = 1
cmd.ensure_finalized()
dist.command_obj['bdist'] = cmd
names = ['bdist_dumb'] # bdist_rpm does not support --skip-build
if os.name == 'nt':
names.append('bdist_msi')
for name in names:
subcmd = cmd.get_finalized_command(name)
if getattr(subcmd, '_unsupported', False):
# command is not supported on this build
continue
self.assertTrue(subcmd.skip_build,
'%s should take --skip-build from bdist' % name)
def test_suite():
    # Collect every test_* method on BuildTestCase into a TestSuite.
    # NOTE(review): unittest.makeSuite is deprecated (removed in 3.13);
    # TestLoader.loadTestsFromTestCase is the modern spelling.
    return unittest.makeSuite(BuildTestCase)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    run_unittest(test_suite())
| true | true |
f72d1185c8f7b68defcc43981f991746536dc0b5 | 484 | py | Python | speedysvc/toolkit/py_ini/write/conv_to_str.py | mcyph/shmrpc | 4e0e972657f677a845eb6e7acbf788535c07117a | [
"Unlicense",
"MIT"
] | 4 | 2020-02-11T04:20:57.000Z | 2021-06-20T10:03:52.000Z | speedysvc/toolkit/py_ini/write/conv_to_str.py | mcyph/shmrpc | 4e0e972657f677a845eb6e7acbf788535c07117a | [
"Unlicense",
"MIT"
] | 1 | 2020-09-16T23:18:30.000Z | 2020-09-21T10:07:22.000Z | speedysvc/toolkit/py_ini/write/conv_to_str.py | mcyph/shmrpc | 4e0e972657f677a845eb6e7acbf788535c07117a | [
"Unlicense",
"MIT"
] | null | null | null | def conv_to_str(o):
if isinstance(o, str):
# Remove initial "u" chars before strings
# if no Unicode in them if possible
try:
o = str(o)
except:
o = str(o)
elif isinstance(o, (list, tuple)):
is_tuple = isinstance(o, tuple)
o = [conv_to_str(i) for i in o]
if is_tuple:
o = tuple(o)
elif isinstance(o, dict):
for k in o:
o[k] = conv_to_str(o[k])
return o
| 23.047619 | 49 | 0.5 | def conv_to_str(o):
if isinstance(o, str):
try:
o = str(o)
except:
o = str(o)
elif isinstance(o, (list, tuple)):
is_tuple = isinstance(o, tuple)
o = [conv_to_str(i) for i in o]
if is_tuple:
o = tuple(o)
elif isinstance(o, dict):
for k in o:
o[k] = conv_to_str(o[k])
return o
| true | true |
f72d11981b4d9f232355f2e60ad86e0a43b91b4f | 11,383 | py | Python | tests/v16/test_enums.py | Jonas628/discovere_ocpp | 6456e72a9d6725634725756e67fcfd5be007de79 | [
"MIT"
] | null | null | null | tests/v16/test_enums.py | Jonas628/discovere_ocpp | 6456e72a9d6725634725756e67fcfd5be007de79 | [
"MIT"
] | null | null | null | tests/v16/test_enums.py | Jonas628/discovere_ocpp | 6456e72a9d6725634725756e67fcfd5be007de79 | [
"MIT"
] | null | null | null | # flake8: noqa
from ocpp_d.v16.enums import *
def test_authorization_status():
    """AuthorizationStatus members must equal their OCPP 1.6 wire strings."""
    expected = [
        (AuthorizationStatus.accepted, "Accepted"),
        (AuthorizationStatus.blocked, "Blocked"),
        (AuthorizationStatus.expired, "Expired"),
        (AuthorizationStatus.invalid, "Invalid"),
        (AuthorizationStatus.concurrent_tx, "ConcurrentTx"),
    ]
    for member, wire_value in expected:
        assert member == wire_value
def test_availability_status():
assert AvailabilityStatus.accepted == "Accepted"
assert AvailabilityStatus.rejected == "Rejected"
assert AvailabilityStatus.scheduled == "Scheduled"
def test_availability_type():
assert AvailabilityType.inoperative == "Inoperative"
assert AvailabilityType.operative == "Operative"
def test_cancel_reservation_status():
assert CancelReservationStatus.accepted == "Accepted"
assert CancelReservationStatus.rejected == "Rejected"
def test_charge_point_error_code():
assert (ChargePointErrorCode.connector_lock_failure ==
"ConnectorLockFailure")
assert (ChargePointErrorCode.ev_communication_error ==
"EVCommunicationError")
assert ChargePointErrorCode.ground_failure == "GroundFailure"
assert (ChargePointErrorCode.high_temperature ==
"HighTemperature")
assert ChargePointErrorCode.internal_error == "InternalError"
assert (ChargePointErrorCode.local_list_conflict ==
"LocalListConflict")
assert ChargePointErrorCode.no_error == "NoError"
assert ChargePointErrorCode.other_error == "OtherError"
assert (ChargePointErrorCode.over_current_failure ==
"OverCurrentFailure")
assert ChargePointErrorCode.over_voltage == "OverVoltage"
assert (ChargePointErrorCode.power_meter_failure ==
"PowerMeterFailure")
assert (ChargePointErrorCode.power_switch_failure ==
"PowerSwitchFailure")
assert ChargePointErrorCode.reader_failure == "ReaderFailure"
assert ChargePointErrorCode.reset_failure == "ResetFailure"
assert ChargePointErrorCode.under_voltage == "UnderVoltage"
assert ChargePointErrorCode.weak_signal == "WeakSignal"
def test_charge_point_status():
assert ChargePointStatus.available == 'Available'
assert ChargePointStatus.preparing == 'Preparing'
assert ChargePointStatus.charging == 'Charging'
assert ChargePointStatus.suspended_evse == 'SuspendedEVSE'
assert ChargePointStatus.suspended_ev == 'SuspendedEV'
assert ChargePointStatus.finishing == 'Finishing'
assert ChargePointStatus.reserved == 'Reserved'
assert ChargePointStatus.unavailable == 'Unavailable'
assert ChargePointStatus.faulted == 'Faulted'
def test_charging_profile_kind_type():
assert ChargingProfileKindType.absolute == 'Absolute'
assert ChargingProfileKindType.recurring == 'Recurring'
assert ChargingProfileKindType.relative == 'Relative'
def test_charging_profile_purpose_type():
assert (ChargingProfilePurposeType.charge_point_max_profile ==
'ChargePointMaxProfile')
assert (ChargingProfilePurposeType.tx_default_profile ==
'TxDefaultProfile')
assert ChargingProfilePurposeType.tx_profile == 'TxProfile'
def test_charging_profile_status():
assert ChargingProfileStatus.accepted == "Accepted"
assert ChargingProfileStatus.rejected == "Rejected"
assert ChargingProfileStatus.not_supported == "NotSupported"
def test_charging_rate_unit():
assert ChargingRateUnitType.watts == "W"
assert ChargingRateUnitType.amps == "A"
def test_clear_cache_status():
assert ClearCacheStatus.accepted == "Accepted"
assert ClearCacheStatus.rejected == "Rejected"
def test_clear_charging_profile_status():
assert ClearChargingProfileStatus.accepted == "Accepted"
assert ClearChargingProfileStatus.unknown == "Unknown"
def test_configuration_status():
assert ConfigurationStatus.accepted == "Accepted"
assert ConfigurationStatus.rejected == "Rejected"
assert ConfigurationStatus.reboot_required == "RebootRequired"
assert ConfigurationStatus.not_supported == "NotSupported"
def test_data_transfer_status():
assert DataTransferStatus.accepted == "Accepted"
assert DataTransferStatus.rejected == "Rejected"
assert (DataTransferStatus.unknown_message_id ==
"UnknownMessageId")
assert DataTransferStatus.unknown_vendor_id == "UnknownVendorId"
def test_diagnostics_status():
assert DiagnosticsStatus.idle == "Idle"
assert DiagnosticsStatus.uploaded == "Uploaded"
assert DiagnosticsStatus.upload_failed == "UploadFailed"
assert DiagnosticsStatus.uploading == "Uploading"
def test_firmware_status():
assert FirmwareStatus.downloaded == "Downloaded"
assert FirmwareStatus.download_failed == "DownloadFailed"
assert FirmwareStatus.downloading == "Downloading"
assert FirmwareStatus.idle == "Idle"
assert (FirmwareStatus.installation_failed ==
"InstallationFailed")
assert FirmwareStatus.installing == "Installing"
assert FirmwareStatus.installed == "Installed"
def test_get_composite_schedule_status():
assert GetCompositeScheduleStatus.accepted == "Accepted"
assert GetCompositeScheduleStatus.rejected == "Rejected"
def test_location():
assert Location.inlet == "Inlet"
assert Location.outlet == "Outlet"
assert Location.body == "Body"
assert Location.cable == "Cable"
assert Location.ev == "EV"
def test_measurand():
assert (Measurand.energy_active_export_register ==
"Energy.Active.Export.Register")
assert (Measurand.energy_active_import_register ==
"Energy.Active.Import.Register")
assert (Measurand.energy_reactive_export_register ==
"Energy.Reactive.Export.Register")
assert (Measurand.energy_reactive_import_register ==
"Energy.Reactive.Import.Register")
assert (Measurand.energy_active_export_interval ==
"Energy.Active.Export.Interval")
assert (Measurand.energy_active_import_interval ==
"Energy.Active.Import.Interval")
assert (Measurand.energy_reactive_export_interval ==
"Energy.Reactive.Export.Interval")
assert (Measurand.energy_reactive_import_interval ==
"Energy.Reactive.Import.Interval")
assert Measurand.frequency == "Frequency"
assert Measurand.power_active_export == "Power.Active.Export"
assert Measurand.power_active_import == "Power.Active.Import"
assert Measurand.power_factor == "Power.Factor"
assert Measurand.power_offered == "Power.Offered"
assert (Measurand.power_reactive_export ==
"Power.Reactive.Export")
assert (Measurand.power_reactive_import ==
"Power.Reactive.Import")
assert Measurand.current_export == "Current.Export"
assert Measurand.current_import == "Current.Import"
assert Measurand.current_offered == "Current.Offered"
assert Measurand.rpm == "RPM"
assert Measurand.soc == "SoC"
assert Measurand.voltage == "Voltage"
assert Measurand.temperature == "Temperature"
def test_message_trigger():
assert MessageTrigger.boot_notification == "BootNotification"
assert (MessageTrigger.diagnostics_status_notification ==
"DiagnosticsStatusNotification")
assert (MessageTrigger.firmware_status_notification ==
"FirmwareStatusNotification")
assert MessageTrigger.heartbeat == "Heartbeat"
assert MessageTrigger.meter_values == "MeterValues"
assert (MessageTrigger.status_notification ==
"StatusNotification")
def test_phase():
    """Phase members must equal their OCPP 1.6 wire strings."""
    expected = [
        (Phase.l1, "L1"), (Phase.l2, "L2"), (Phase.l3, "L3"),
        (Phase.n, "N"),
        (Phase.l1_n, "L1-N"), (Phase.l2_n, "L2-N"), (Phase.l3_n, "L3-N"),
        (Phase.l1_l2, "L1-L2"), (Phase.l2_l3, "L2-L3"), (Phase.l3_l1, "L3-L1"),
    ]
    for member, wire_value in expected:
        assert member == wire_value
def test_reading_context():
assert (ReadingContext.interruption_begin ==
"Interruption.Begin")
assert ReadingContext.interruption_end == "Interruption.End"
assert ReadingContext.other == "Other"
assert ReadingContext.sample_clock == "Sample.Clock"
assert ReadingContext.sample_periodic == "Sample.Periodic"
assert ReadingContext.transaction_begin == "Transaction.Begin"
assert ReadingContext.transaction_end == "Transaction.End"
assert ReadingContext.trigger == "Trigger"
def test_reason():
assert Reason.emergency_stop == "EmergencyStop"
assert Reason.ev_disconnected == "EVDisconnected"
assert Reason.hard_reset == "HardReset"
assert Reason.local == "Local"
assert Reason.other == "Other"
assert Reason.power_loss == "PowerLoss"
assert Reason.reboot == "Reboot"
assert Reason.remote == "Remote"
assert Reason.soft_reset == "SoftReset"
assert Reason.unlock_command == "UnlockCommand"
assert Reason.de_authorized == "DeAuthorized"
def test_recurrency_kind():
assert RecurrencyKind.daily == 'Daily'
assert RecurrencyKind.weekly == 'Weekly'
def test_registration_status():
assert RegistrationStatus.accepted == "Accepted"
assert RegistrationStatus.pending == "Pending"
assert RegistrationStatus.rejected == "Rejected"
def test_remote_start_stop_status():
assert RemoteStartStopStatus.accepted == "Accepted"
assert RemoteStartStopStatus.rejected == "Rejected"
def test_reservation_status():
assert ReservationStatus.accepted == "Accepted"
assert ReservationStatus.faulted == "Faulted"
assert ReservationStatus.occupied == "Occupied"
assert ReservationStatus.rejected == "Rejected"
assert ReservationStatus.unavailable == "Unavailable"
def test_reset_status():
assert ResetStatus.accepted == "Accepted"
assert ResetStatus.rejected == "Rejected"
def test_reset_type():
assert ResetType.hard == "Hard"
assert ResetType.soft == "Soft"
def test_trigger_message_status():
assert TriggerMessageStatus.accepted == "Accepted"
assert TriggerMessageStatus.rejected == "Rejected"
assert TriggerMessageStatus.not_implemented == "NotImplemented"
def test_unit_of_measure():
assert UnitOfMeasure.wh == "Wh"
assert UnitOfMeasure.kwh == "kWh"
assert UnitOfMeasure.varh == "varh"
assert UnitOfMeasure.kvarh == "kvarh"
assert UnitOfMeasure.w == "W"
assert UnitOfMeasure.kw == "kW"
assert UnitOfMeasure.va == "VA"
assert UnitOfMeasure.kva == "kVA"
assert UnitOfMeasure.var == "var"
assert UnitOfMeasure.kvar == "kvar"
assert UnitOfMeasure.a == "A"
assert UnitOfMeasure.v == "V"
assert UnitOfMeasure.celsius == "Celsius"
assert UnitOfMeasure.fahrenheit == "Fahrenheit"
assert UnitOfMeasure.k == "K"
assert UnitOfMeasure.percent == "Percent"
assert UnitOfMeasure.hertz == "Hertz"
def test_unlock_status():
assert UnlockStatus.unlocked == "Unlocked"
assert UnlockStatus.unlock_failed == "UnlockFailed"
assert UnlockStatus.not_supported == "NotSupported"
def test_update_status():
assert UpdateStatus.accepted == "Accepted"
assert UpdateStatus.failed == "Failed"
assert UpdateStatus.not_supported == "NotSupported"
assert UpdateStatus.version_mismatch == "VersionMismatch"
def test_update_type():
assert UpdateType.differential == "Differential"
assert UpdateType.full == "Full"
def test_value_format():
assert ValueFormat.raw == "Raw"
assert ValueFormat.signed_data == "SignedData"
| 36.136508 | 68 | 0.734516 |
from ocpp_d.v16.enums import *
def test_authorization_status():
assert AuthorizationStatus.accepted == "Accepted"
assert AuthorizationStatus.blocked == "Blocked"
assert AuthorizationStatus.expired == "Expired"
assert AuthorizationStatus.invalid == "Invalid"
assert AuthorizationStatus.concurrent_tx == "ConcurrentTx"
def test_availability_status():
assert AvailabilityStatus.accepted == "Accepted"
assert AvailabilityStatus.rejected == "Rejected"
assert AvailabilityStatus.scheduled == "Scheduled"
def test_availability_type():
assert AvailabilityType.inoperative == "Inoperative"
assert AvailabilityType.operative == "Operative"
def test_cancel_reservation_status():
assert CancelReservationStatus.accepted == "Accepted"
assert CancelReservationStatus.rejected == "Rejected"
def test_charge_point_error_code():
assert (ChargePointErrorCode.connector_lock_failure ==
"ConnectorLockFailure")
assert (ChargePointErrorCode.ev_communication_error ==
"EVCommunicationError")
assert ChargePointErrorCode.ground_failure == "GroundFailure"
assert (ChargePointErrorCode.high_temperature ==
"HighTemperature")
assert ChargePointErrorCode.internal_error == "InternalError"
assert (ChargePointErrorCode.local_list_conflict ==
"LocalListConflict")
assert ChargePointErrorCode.no_error == "NoError"
assert ChargePointErrorCode.other_error == "OtherError"
assert (ChargePointErrorCode.over_current_failure ==
"OverCurrentFailure")
assert ChargePointErrorCode.over_voltage == "OverVoltage"
assert (ChargePointErrorCode.power_meter_failure ==
"PowerMeterFailure")
assert (ChargePointErrorCode.power_switch_failure ==
"PowerSwitchFailure")
assert ChargePointErrorCode.reader_failure == "ReaderFailure"
assert ChargePointErrorCode.reset_failure == "ResetFailure"
assert ChargePointErrorCode.under_voltage == "UnderVoltage"
assert ChargePointErrorCode.weak_signal == "WeakSignal"
def test_charge_point_status():
assert ChargePointStatus.available == 'Available'
assert ChargePointStatus.preparing == 'Preparing'
assert ChargePointStatus.charging == 'Charging'
assert ChargePointStatus.suspended_evse == 'SuspendedEVSE'
assert ChargePointStatus.suspended_ev == 'SuspendedEV'
assert ChargePointStatus.finishing == 'Finishing'
assert ChargePointStatus.reserved == 'Reserved'
assert ChargePointStatus.unavailable == 'Unavailable'
assert ChargePointStatus.faulted == 'Faulted'
def test_charging_profile_kind_type():
assert ChargingProfileKindType.absolute == 'Absolute'
assert ChargingProfileKindType.recurring == 'Recurring'
assert ChargingProfileKindType.relative == 'Relative'
def test_charging_profile_purpose_type():
assert (ChargingProfilePurposeType.charge_point_max_profile ==
'ChargePointMaxProfile')
assert (ChargingProfilePurposeType.tx_default_profile ==
'TxDefaultProfile')
assert ChargingProfilePurposeType.tx_profile == 'TxProfile'
def test_charging_profile_status():
assert ChargingProfileStatus.accepted == "Accepted"
assert ChargingProfileStatus.rejected == "Rejected"
assert ChargingProfileStatus.not_supported == "NotSupported"
def test_charging_rate_unit():
assert ChargingRateUnitType.watts == "W"
assert ChargingRateUnitType.amps == "A"
def test_clear_cache_status():
assert ClearCacheStatus.accepted == "Accepted"
assert ClearCacheStatus.rejected == "Rejected"
def test_clear_charging_profile_status():
assert ClearChargingProfileStatus.accepted == "Accepted"
assert ClearChargingProfileStatus.unknown == "Unknown"
def test_configuration_status():
assert ConfigurationStatus.accepted == "Accepted"
assert ConfigurationStatus.rejected == "Rejected"
assert ConfigurationStatus.reboot_required == "RebootRequired"
assert ConfigurationStatus.not_supported == "NotSupported"
def test_data_transfer_status():
assert DataTransferStatus.accepted == "Accepted"
assert DataTransferStatus.rejected == "Rejected"
assert (DataTransferStatus.unknown_message_id ==
"UnknownMessageId")
assert DataTransferStatus.unknown_vendor_id == "UnknownVendorId"
def test_diagnostics_status():
assert DiagnosticsStatus.idle == "Idle"
assert DiagnosticsStatus.uploaded == "Uploaded"
assert DiagnosticsStatus.upload_failed == "UploadFailed"
assert DiagnosticsStatus.uploading == "Uploading"
def test_firmware_status():
assert FirmwareStatus.downloaded == "Downloaded"
assert FirmwareStatus.download_failed == "DownloadFailed"
assert FirmwareStatus.downloading == "Downloading"
assert FirmwareStatus.idle == "Idle"
assert (FirmwareStatus.installation_failed ==
"InstallationFailed")
assert FirmwareStatus.installing == "Installing"
assert FirmwareStatus.installed == "Installed"
def test_get_composite_schedule_status():
assert GetCompositeScheduleStatus.accepted == "Accepted"
assert GetCompositeScheduleStatus.rejected == "Rejected"
def test_location():
assert Location.inlet == "Inlet"
assert Location.outlet == "Outlet"
assert Location.body == "Body"
assert Location.cable == "Cable"
assert Location.ev == "EV"
def test_measurand():
assert (Measurand.energy_active_export_register ==
"Energy.Active.Export.Register")
assert (Measurand.energy_active_import_register ==
"Energy.Active.Import.Register")
assert (Measurand.energy_reactive_export_register ==
"Energy.Reactive.Export.Register")
assert (Measurand.energy_reactive_import_register ==
"Energy.Reactive.Import.Register")
assert (Measurand.energy_active_export_interval ==
"Energy.Active.Export.Interval")
assert (Measurand.energy_active_import_interval ==
"Energy.Active.Import.Interval")
assert (Measurand.energy_reactive_export_interval ==
"Energy.Reactive.Export.Interval")
assert (Measurand.energy_reactive_import_interval ==
"Energy.Reactive.Import.Interval")
assert Measurand.frequency == "Frequency"
assert Measurand.power_active_export == "Power.Active.Export"
assert Measurand.power_active_import == "Power.Active.Import"
assert Measurand.power_factor == "Power.Factor"
assert Measurand.power_offered == "Power.Offered"
assert (Measurand.power_reactive_export ==
"Power.Reactive.Export")
assert (Measurand.power_reactive_import ==
"Power.Reactive.Import")
assert Measurand.current_export == "Current.Export"
assert Measurand.current_import == "Current.Import"
assert Measurand.current_offered == "Current.Offered"
assert Measurand.rpm == "RPM"
assert Measurand.soc == "SoC"
assert Measurand.voltage == "Voltage"
assert Measurand.temperature == "Temperature"
def test_message_trigger():
assert MessageTrigger.boot_notification == "BootNotification"
assert (MessageTrigger.diagnostics_status_notification ==
"DiagnosticsStatusNotification")
assert (MessageTrigger.firmware_status_notification ==
"FirmwareStatusNotification")
assert MessageTrigger.heartbeat == "Heartbeat"
assert MessageTrigger.meter_values == "MeterValues"
assert (MessageTrigger.status_notification ==
"StatusNotification")
def test_phase():
assert Phase.l1 == "L1"
assert Phase.l2 == "L2"
assert Phase.l3 == "L3"
assert Phase.n == "N"
assert Phase.l1_n == "L1-N"
assert Phase.l2_n == "L2-N"
assert Phase.l3_n == "L3-N"
assert Phase.l1_l2 == "L1-L2"
assert Phase.l2_l3 == "L2-L3"
assert Phase.l3_l1 == "L3-L1"
def test_reading_context():
assert (ReadingContext.interruption_begin ==
"Interruption.Begin")
assert ReadingContext.interruption_end == "Interruption.End"
assert ReadingContext.other == "Other"
assert ReadingContext.sample_clock == "Sample.Clock"
assert ReadingContext.sample_periodic == "Sample.Periodic"
assert ReadingContext.transaction_begin == "Transaction.Begin"
assert ReadingContext.transaction_end == "Transaction.End"
assert ReadingContext.trigger == "Trigger"
def test_reason():
assert Reason.emergency_stop == "EmergencyStop"
assert Reason.ev_disconnected == "EVDisconnected"
assert Reason.hard_reset == "HardReset"
assert Reason.local == "Local"
assert Reason.other == "Other"
assert Reason.power_loss == "PowerLoss"
assert Reason.reboot == "Reboot"
assert Reason.remote == "Remote"
assert Reason.soft_reset == "SoftReset"
assert Reason.unlock_command == "UnlockCommand"
assert Reason.de_authorized == "DeAuthorized"
def test_recurrency_kind():
assert RecurrencyKind.daily == 'Daily'
assert RecurrencyKind.weekly == 'Weekly'
def test_registration_status():
assert RegistrationStatus.accepted == "Accepted"
assert RegistrationStatus.pending == "Pending"
assert RegistrationStatus.rejected == "Rejected"
def test_remote_start_stop_status():
assert RemoteStartStopStatus.accepted == "Accepted"
assert RemoteStartStopStatus.rejected == "Rejected"
def test_reservation_status():
assert ReservationStatus.accepted == "Accepted"
assert ReservationStatus.faulted == "Faulted"
assert ReservationStatus.occupied == "Occupied"
assert ReservationStatus.rejected == "Rejected"
assert ReservationStatus.unavailable == "Unavailable"
def test_reset_status():
assert ResetStatus.accepted == "Accepted"
assert ResetStatus.rejected == "Rejected"
def test_reset_type():
assert ResetType.hard == "Hard"
assert ResetType.soft == "Soft"
def test_trigger_message_status():
assert TriggerMessageStatus.accepted == "Accepted"
assert TriggerMessageStatus.rejected == "Rejected"
assert TriggerMessageStatus.not_implemented == "NotImplemented"
def test_unit_of_measure():
assert UnitOfMeasure.wh == "Wh"
assert UnitOfMeasure.kwh == "kWh"
assert UnitOfMeasure.varh == "varh"
assert UnitOfMeasure.kvarh == "kvarh"
assert UnitOfMeasure.w == "W"
assert UnitOfMeasure.kw == "kW"
assert UnitOfMeasure.va == "VA"
assert UnitOfMeasure.kva == "kVA"
assert UnitOfMeasure.var == "var"
assert UnitOfMeasure.kvar == "kvar"
assert UnitOfMeasure.a == "A"
assert UnitOfMeasure.v == "V"
assert UnitOfMeasure.celsius == "Celsius"
assert UnitOfMeasure.fahrenheit == "Fahrenheit"
assert UnitOfMeasure.k == "K"
assert UnitOfMeasure.percent == "Percent"
assert UnitOfMeasure.hertz == "Hertz"
def test_unlock_status():
assert UnlockStatus.unlocked == "Unlocked"
assert UnlockStatus.unlock_failed == "UnlockFailed"
assert UnlockStatus.not_supported == "NotSupported"
def test_update_status():
assert UpdateStatus.accepted == "Accepted"
assert UpdateStatus.failed == "Failed"
assert UpdateStatus.not_supported == "NotSupported"
assert UpdateStatus.version_mismatch == "VersionMismatch"
def test_update_type():
assert UpdateType.differential == "Differential"
assert UpdateType.full == "Full"
def test_value_format():
assert ValueFormat.raw == "Raw"
assert ValueFormat.signed_data == "SignedData"
| true | true |
f72d122ad8ca0a487720f9630a8a2086444e3a23 | 384 | py | Python | 1stRound/Easy/819 Most Common Word/RegexCounter1.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | 2 | 2020-04-24T18:36:52.000Z | 2020-04-25T00:15:57.000Z | 1stRound/Easy/819 Most Common Word/RegexCounter1.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | null | null | null | 1stRound/Easy/819 Most Common Word/RegexCounter1.py | ericchen12377/Leetcode-Algorithm-Python | eb58cd4f01d9b8006b7d1a725fc48910aad7f192 | [
"MIT"
] | null | null | null | import collections
import re
class Solution(object):
def mostCommonWord(self, paragraph, banned):
"""
:type paragraph: str
:type banned: List[str]
:rtype: str
"""
paragraph = re.findall(r"\w+", paragraph.lower())
count = collections.Counter(x for x in paragraph if x not in banned)
return count.most_common(1)[0][0]
| 29.538462 | 76 | 0.606771 | import collections
import re
class Solution(object):
def mostCommonWord(self, paragraph, banned):
paragraph = re.findall(r"\w+", paragraph.lower())
count = collections.Counter(x for x in paragraph if x not in banned)
return count.most_common(1)[0][0]
| true | true |
f72d12744056fae45f212a2dfb4d1368b26b9baf | 15,546 | py | Python | test/tools.py | jni/asv | f1ec1c157d52c77a799853062dac3468fab3e2ab | [
"BSD-3-Clause"
] | null | null | null | test/tools.py | jni/asv | f1ec1c157d52c77a799853062dac3468fab3e2ab | [
"BSD-3-Clause"
] | 3 | 2018-07-26T17:56:30.000Z | 2018-07-27T20:23:27.000Z | test/tools.py | jni/asv | f1ec1c157d52c77a799853062dac3468fab3e2ab | [
"BSD-3-Clause"
] | 3 | 2018-07-25T22:53:31.000Z | 2018-09-16T06:14:43.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
"""
This file contains utilities to generate test repositories.
"""
import datetime
import io
import os
import threading
import time
import six
import tempfile
import textwrap
import sys
from os.path import abspath, join, dirname, relpath, isdir
from contextlib import contextmanager
from hashlib import sha256
from six.moves import SimpleHTTPServer
import pytest
try:
import hglib
except ImportError as exc:
hglib = None
from asv import util
from asv import commands
from asv import config
from asv.commands.preview import create_httpd
from asv.repo import get_repo
from asv.results import Results
# Two Python versions for testing
PYTHON_VER1 = "{0[0]}.{0[1]}".format(sys.version_info)
if sys.version_info < (3,):
PYTHON_VER2 = "3.6"
else:
PYTHON_VER2 = "2.7"
# Installable library versions to use in tests
SIX_VERSION = "1.10"
COLORAMA_VERSIONS = ["0.3.7", "0.3.9"]
try:
import selenium
from selenium.common.exceptions import TimeoutException
HAVE_WEBDRIVER = True
except ImportError:
HAVE_WEBDRIVER = False
WAIT_TIME = 20.0
def run_asv(*argv):
    """Parse *argv* as asv command-line arguments and run that command."""
    argparser, _ = commands.make_argparser()
    parsed = argparser.parse_args(argv)
    # Each subparser stores its handler in the ``func`` attribute.
    return parsed.func(parsed)
def run_asv_with_conf(conf, *argv, **kwargs):
    """Run an asv command against an explicitly supplied Config instance."""
    assert isinstance(conf, config.Config)
    argparser, _ = commands.make_argparser()
    parsed = argparser.parse_args(argv)
    # ``func`` is a bound method; the attribute holding its owner is
    # spelled differently on Python 2 vs. Python 3.
    self_attr = '__self__' if sys.version_info[0] >= 3 else 'im_self'
    command_cls = getattr(parsed.func, self_attr)
    return command_cls.run_from_conf_args(conf, parsed, **kwargs)
# These classes are defined here, rather than using asv/plugins/git.py
# and asv/plugins/mercurial.py since here we need to perform write
# operations to the repository, and the others should be read-only for
# safety.
class Git(object):
    """Minimal read/write git wrapper for generating test repositories.

    Unlike asv's repo plugins (which must stay read-only for safety),
    this helper performs write operations: commits, tags, branch
    checkouts and merges.
    """
    def __init__(self, path):
        self.path = abspath(path)
        self._git = util.which('git')
        # Monotonic fake clock: each commit gets a timestamp 1 s after
        # the previous one, so ordering is deterministic in tests.
        self._fake_date = datetime.datetime.now()
    def run_git(self, args, chdir=True, **kwargs):
        """Run ``git`` with *args*, by default inside the repository."""
        if chdir:
            cwd = self.path
        else:
            cwd = None
        kwargs['cwd'] = cwd
        return util.check_output(
            [self._git] + args, **kwargs)
    def init(self):
        """Initialize the repository and configure a robot committer."""
        self.run_git(['init'])
        self.run_git(['config', 'user.email', 'robot@asv'])
        self.run_git(['config', 'user.name', 'Robotic Swallow'])
    def commit(self, message, date=None):
        """Commit staged changes; without *date*, advance the fake clock."""
        if date is None:
            self._fake_date += datetime.timedelta(seconds=1)
            date = self._fake_date
        self.run_git(['commit', '--date', date.isoformat(),
                      '-m', message])
    def tag(self, number):
        """Create an annotated tag named ``tag<number>``."""
        self.run_git(['tag', '-a', '-m', 'Tag {0}'.format(number),
                      'tag{0}'.format(number)])
    def add(self, filename):
        # Paths are staged relative to the repository root.
        self.run_git(['add', relpath(filename, self.path)])
    def checkout(self, branch_name, start_commit=None):
        """Switch to *branch_name*; create it at *start_commit* if given."""
        args = ["checkout"]
        if start_commit is not None:
            args.extend(["-b", branch_name, start_commit])
        else:
            args.append(branch_name)
        self.run_git(args)
    def merge(self, branch_name, commit_message=None):
        """Merge *branch_name*, resolving conflicts in its favor ("theirs")."""
        self.run_git(["merge", "--no-ff", "--no-commit", "-X", "theirs", branch_name])
        if commit_message is None:
            commit_message = "Merge {0}".format(branch_name)
        self.commit(commit_message)
    def get_hash(self, name):
        """Return the full commit hash for revision *name*."""
        return self.run_git(['rev-parse', name]).strip()
    def get_branch_hashes(self, branch=None):
        """Return commit hashes reachable from *branch* (default "master")."""
        if branch is None:
            branch = "master"
        return [x.strip() for x in self.run_git(['rev-list', branch]).splitlines()
                if x.strip()]
    def get_commit_message(self, commit_hash):
        """Return the subject line of *commit_hash*."""
        return self.run_git(["log", "-n", "1", "--format=%s", commit_hash]).strip()
_hg_config = """
[ui]
username = Robotic Swallow <robot@asv>
"""
class Hg(object):
    """Minimal read/write Mercurial wrapper (via hglib) for test repos."""
    # Encoding used for all command and changeset text passed to hglib,
    # which works with bytes throughout.
    encoding = 'utf-8'
    def __init__(self, path):
        # Monotonic fake clock: each commit gets a timestamp 1 s after
        # the previous one, so ordering is deterministic in tests.
        self._fake_date = datetime.datetime.now()
        self.path = abspath(path)
    def init(self):
        """Create the repository, write the test hgrc and open a client."""
        hglib.init(self.path)
        with io.open(join(self.path, '.hg', 'hgrc'), 'w', encoding="utf-8") as fd:
            fd.write(_hg_config)
        self._repo = hglib.open(self.path.encode(sys.getfilesystemencoding()),
                                encoding=self.encoding)
    def commit(self, message, date=None):
        """Commit pending changes; without *date*, advance the fake clock."""
        if date is None:
            self._fake_date += datetime.timedelta(seconds=1)
            date = self._fake_date
        # Mercurial expects "<unix timestamp> <tz offset>".
        date = "{0} 0".format(util.datetime_to_timestamp(date))
        self._repo.commit(message.encode(self.encoding),
                          date=date.encode(self.encoding))
    def tag(self, number):
        """Create tag ``tag<number>`` with its own faked date."""
        self._fake_date += datetime.timedelta(seconds=1)
        date = "{0} 0".format(util.datetime_to_timestamp(self._fake_date))
        self._repo.tag(
            ['tag{0}'.format(number).encode(self.encoding)],
            message="Tag {0}".format(number).encode(self.encoding),
            date=date.encode(self.encoding))
    def add(self, filename):
        """Schedule *filename* for addition to the repository."""
        self._repo.add([filename.encode(sys.getfilesystemencoding())])
    def checkout(self, branch_name, start_commit=None):
        """Update to *branch_name*; with *start_commit*, branch off from it."""
        if start_commit is not None:
            self._repo.update(start_commit.encode(self.encoding))
            self._repo.branch(branch_name.encode(self.encoding))
        else:
            self._repo.update(branch_name.encode(self.encoding))
    def merge(self, branch_name, commit_message=None):
        """Merge *branch_name*, resolving conflicts in its favor, and commit."""
        self._repo.merge(branch_name.encode(self.encoding),
                         tool=b"internal:other")
        if commit_message is None:
            commit_message = "Merge {0}".format(branch_name)
        self.commit(commit_message)
    def get_hash(self, name):
        """Return the changeset hash for revision *name*, or None if absent."""
        log = self._repo.log(name.encode(self.encoding), limit=1)
        if log:
            # log entries are (rev, node, tags, branch, author, desc, ...).
            return log[0][1].decode(self.encoding)
        return None
    def get_branch_hashes(self, branch=None):
        """Return ancestor hashes of *branch* (default "default"), newest first."""
        if branch is None:
            branch = "default"
        log = self._repo.log('sort(ancestors({0}), -rev)'.format(branch).encode(self.encoding))
        return [entry[1].decode(self.encoding) for entry in log]
    def get_commit_message(self, commit_hash):
        """Return the description (commit message) of *commit_hash*."""
        return self._repo.log(commit_hash.encode(self.encoding))[0].desc.decode(self.encoding)
def copy_template(src, dst, dvcs, values):
    """Copy the template tree *src* into *dst*, substituting *values*.

    Text files are run through ``str.format(**values)``; files that fail
    UTF-8 decoding are copied verbatim.  Every copied file is staged in
    *dvcs* (a Git or Hg wrapper); directories are created as needed.
    """
    for root, dirs, files in os.walk(src):
        for dir in dirs:
            # Mirror the directory structure under dst.
            src_path = join(root, dir)
            dst_path = join(dst, relpath(src_path, src))
            if not isdir(dst_path):
                os.makedirs(dst_path)
        for file in files:
            src_path = join(root, file)
            dst_path = join(dst, relpath(src_path, src))
            try:
                with io.open(src_path, 'r', encoding='utf-8') as fd:
                    content = fd.read()
            except UnicodeDecodeError:
                # File is some sort of binary file... just copy it
                # directly with no template substitution
                with io.open(src_path, 'rb') as fd:
                    content = fd.read()
                with io.open(dst_path, 'wb') as fd:
                    fd.write(content)
            else:
                # Text file: apply the {placeholder} template substitution.
                content = content.format(**values)
                with io.open(dst_path, 'w', encoding='utf-8') as fd:
                    fd.write(content)
            dvcs.add(dst_path)
def generate_test_repo(tmpdir, values=(0,), dvcs_type='git',
                       extra_branches=(), subdir=''):
    """
    Generate a test repository

    Parameters
    ----------
    tmpdir
        Repository directory
    values : sequence
        Values to substitute in the template; one commit and tag are
        created per value.  (The default is now an immutable tuple
        instead of the former mutable-list default ``[0]``; the value is
        only iterated, so callers passing lists are unaffected.)
    dvcs_type : {'git', 'hg'}
        What dvcs to use
    extra_branches : list of (start_commit, branch_name, values)
        Additional branches to generate in the repository.
        For branch start commits, use relative references, e.g.,
        the format 'master~10' or 'default~10' works both for Hg
        and Git.
    subdir
        A relative subdirectory inside the repository to copy the
        test project into.

    Returns
    -------
    dvcs : Git or Hg
    """
    if dvcs_type == 'git':
        dvcs_cls = Git
    elif dvcs_type == 'hg':
        dvcs_cls = Hg
    else:
        raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
    template_path = join(dirname(__file__), 'test_repo_template')
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    # Unique directory per call so repeated calls don't collide.
    dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
    dvcs = dvcs_cls(dvcs_path)
    dvcs.init()
    project_path = os.path.join(dvcs_path, subdir)
    if not os.path.exists(project_path):
        os.makedirs(project_path)
    for i, value in enumerate(values):
        mapping = {
            'version': i,
            'dummy_value': value
        }
        copy_template(template_path, project_path, dvcs, mapping)
        dvcs.commit("Revision {0}".format(i))
        dvcs.tag(i)
    if extra_branches:
        # Renamed from ``values`` to avoid shadowing the parameter above.
        for start_commit, branch_name, branch_values in extra_branches:
            dvcs.checkout(branch_name, start_commit)
            for i, value in enumerate(branch_values):
                mapping = {
                    'version': "{0}".format(i),
                    'dummy_value': value
                }
                copy_template(template_path, project_path, dvcs, mapping)
                dvcs.commit("Revision {0}.{1}".format(branch_name, i))
    return dvcs
def generate_repo_from_ops(tmpdir, dvcs_type, operations):
    """
    Create a test repository by replaying a list of dvcs operations.

    Each operation is a tuple: ``("commit", dummy_value, *commit_args)``,
    ``("checkout", *checkout_args)`` or ``("merge", *merge_args)``.

    Returns the Git or Hg wrapper for the new repository.
    """
    vcs_classes = {'git': Git, 'hg': Hg}
    if dvcs_type not in vcs_classes:
        raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
    dvcs_cls = vcs_classes[dvcs_type]

    template_path = join(dirname(__file__), 'test_repo_template')
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    repo_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
    dvcs = dvcs_cls(repo_path)
    dvcs.init()

    version = 0
    for op in operations:
        op_name, op_args = op[0], op[1:]
        if op_name == "commit":
            copy_template(template_path, repo_path, dvcs, {
                "version": version,
                "dummy_value": op_args[0],
            })
            version += 1
            dvcs.commit("Revision {0}".format(version), *op_args[1:])
        elif op_name == "checkout":
            dvcs.checkout(*op_args)
        elif op_name == "merge":
            dvcs.merge(*op_args)
        else:
            raise ValueError("Unknown dvcs operation {0}".format(op))
    return dvcs
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    """
    Generate a fake asv results directory for an existing repository.

    Parameters
    ----------
    tmpdir
        Base directory; ``results`` and ``html`` subdirectories are
        created inside it.
    dvcs : Git or Hg
        Repository whose commits the results are attached to.
    values : dict
        Maps commit name -> benchmark value for a single ``time_func``
        benchmark.  A dict value marks a parameterized benchmark and
        must contain a "params" key.
    branches : list, optional
        Branch list for the generated config; defaults to ``[None]``.

    Returns
    -------
    conf : config.Config
        Config pointing at the generated results/html directories.
    """
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    # Results for a single fake machine named "tarzan".
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)
    if branches is None:
        branches = [None]
    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)
    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
        'version': 1,
    })
    timestamp = datetime.datetime.utcnow()
    # Random benchmark version so regenerated dirs never match stale data.
    benchmark_version = sha256(os.urandom(16)).hexdigest()
    params = None
    param_names = None
    for commit, value in values.items():
        if isinstance(value, dict):
            # Parameterized benchmark.
            # NOTE(review): only the last "params" seen is written to
            # benchmarks.json -- presumably all dict values share it;
            # confirm against callers.
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        value = {
            'result': [value],
            'params': [],
            'started_at': timestamp,
            'ended_at': timestamp,
            'stats': None,
            'samples': None,
            'number': None,
        }
        result.add_result("time_func", value, benchmark_version)
        result.save(result_dir)
    if params:
        param_names = ["param{}".format(k) for k in range(len(params))]
    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": param_names or [],
            "version": benchmark_version,
        }
    }, api_version=1)
    return conf
@pytest.fixture(scope="session")
def browser(request, pytestconfig):
    """
    Fixture for Selenium WebDriver browser interface.

    The ``--webdriver`` option names either one of the helpers defined
    below (``FirefoxHeadless``, ``ChromeHeadless``), any name exported
    by ``selenium.webdriver``, or a Python code snippet used as the
    body of a ``create_driver()`` function.
    """
    driver_str = pytestconfig.getoption('webdriver')

    if driver_str == "None":
        pytest.skip("No webdriver selected for tests (use --webdriver).")

    # Evaluate the options
    def FirefoxHeadless():
        from selenium.webdriver.firefox.options import Options
        options = Options()
        options.add_argument("-headless")
        return selenium.webdriver.Firefox(firefox_options=options)

    def ChromeHeadless():
        options = selenium.webdriver.ChromeOptions()
        options.add_argument('headless')
        return selenium.webdriver.Chrome(chrome_options=options)

    # Build a namespace in which --webdriver is resolved; six.exec_ is
    # used so the same code runs on Python 2 and 3.
    ns = {}
    six.exec_("import selenium.webdriver", ns)
    six.exec_("from selenium.webdriver import *", ns)
    ns['FirefoxHeadless'] = FirefoxHeadless
    ns['ChromeHeadless'] = ChromeHeadless

    create_driver = ns.get(driver_str, None)
    if create_driver is None:
        # Not a known name: treat the option value as the body of a
        # create_driver() function and compile it.
        src = "def create_driver():\n"
        src += textwrap.indent(driver_str, "    ")
        six.exec_(src, ns)
        create_driver = ns['create_driver']

    # Create the browser
    browser = create_driver()

    # Set timeouts
    browser.set_page_load_timeout(WAIT_TIME)
    browser.set_script_timeout(WAIT_TIME)

    # Clean up on fixture finalization
    def fin():
        browser.quit()
    request.addfinalizer(fin)

    # Set default time to wait for AJAX requests to complete
    browser.implicitly_wait(WAIT_TIME)

    return browser
@contextmanager
def preview(base_path):
    """
    Context manager for ASV preview web server. Gives the base URL to use.

    Parameters
    ----------
    base_path : str
        Path to serve files from
    """
    class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def translate_path(self, path):
            # Don't serve from cwd, but from a different directory
            path = SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, path)
            path = os.path.join(base_path, os.path.relpath(path, os.getcwd()))
            return util.long_path(path)

    httpd, base_url = create_httpd(Handler)

    def run():
        try:
            httpd.serve_forever()
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt in the server thread.  Log unexpected
            # errors instead of dying silently.
            import traceback
            traceback.print_exc()
            return

    thread = threading.Thread(target=run)
    thread.daemon = True
    thread.start()
    try:
        yield base_url
    finally:
        # Stop must be run in a separate thread, because
        # httpd.shutdown blocks until serve_forever returns. We don't
        # want to block here --- it appears in some environments
        # problems shutting down the server may arise.
        stopper = threading.Thread(target=httpd.shutdown)
        stopper.daemon = True
        stopper.start()
        stopper.join(5.0)
def get_with_retry(browser, url):
    """Load *url* in *browser*, retrying a couple of times on timeout."""
    attempts_left = 2
    while attempts_left > 0:
        try:
            return browser.get(url)
        except TimeoutException:
            time.sleep(2)
        attempts_left -= 1
    # Final attempt: let a timeout propagate to the caller.
    return browser.get(url)
| 29.724665 | 95 | 0.607487 |
from __future__ import absolute_import, division, unicode_literals, print_function
import datetime
import io
import os
import threading
import time
import six
import tempfile
import textwrap
import sys
from os.path import abspath, join, dirname, relpath, isdir
from contextlib import contextmanager
from hashlib import sha256
from six.moves import SimpleHTTPServer
import pytest
try:
import hglib
except ImportError as exc:
hglib = None
from asv import util
from asv import commands
from asv import config
from asv.commands.preview import create_httpd
from asv.repo import get_repo
from asv.results import Results
PYTHON_VER1 = "{0[0]}.{0[1]}".format(sys.version_info)
if sys.version_info < (3,):
PYTHON_VER2 = "3.6"
else:
PYTHON_VER2 = "2.7"
SIX_VERSION = "1.10"
COLORAMA_VERSIONS = ["0.3.7", "0.3.9"]
try:
import selenium
from selenium.common.exceptions import TimeoutException
HAVE_WEBDRIVER = True
except ImportError:
HAVE_WEBDRIVER = False
WAIT_TIME = 20.0
def run_asv(*argv):
    """Run an asv command as if invoked from the command line."""
    argparser, _subparsers = commands.make_argparser()
    parsed = argparser.parse_args(argv)
    return parsed.func(parsed)
def run_asv_with_conf(conf, *argv, **kwargs):
    """Run an asv command against an explicit ``config.Config`` instance."""
    assert isinstance(conf, config.Config)

    argparser, _subparsers = commands.make_argparser()
    parsed = argparser.parse_args(argv)

    # The bound command class: Python 3 exposes it as __self__,
    # Python 2 as im_self.
    if sys.version_info[0] >= 3:
        command_cls = parsed.func.__self__
    else:
        command_cls = parsed.func.im_self
    return command_cls.run_from_conf_args(conf, parsed, **kwargs)
class Git(object):
    """Thin wrapper around the ``git`` command line for building test repos."""
    def __init__(self, path):
        self.path = abspath(path)
        self._git = util.which('git')
        # Monotonically increasing fake commit date, so commit ordering is
        # deterministic even when commits happen within the same second.
        self._fake_date = datetime.datetime.now()
    def run_git(self, args, chdir=True, **kwargs):
        """Run git with *args*; by default with the repo as working dir."""
        if chdir:
            cwd = self.path
        else:
            cwd = None
        kwargs['cwd'] = cwd
        return util.check_output(
            [self._git] + args, **kwargs)
    def init(self):
        """Initialize the repository with a fixed author identity."""
        self.run_git(['init'])
        self.run_git(['config', 'user.email', 'robot@asv'])
        self.run_git(['config', 'user.name', 'Robotic Swallow'])
    def commit(self, message, date=None):
        """Commit staged changes; fabricate an increasing date if none given."""
        if date is None:
            self._fake_date += datetime.timedelta(seconds=1)
            date = self._fake_date
        self.run_git(['commit', '--date', date.isoformat(),
                      '-m', message])
    def tag(self, number):
        """Create annotated tag ``tag<number>`` at HEAD."""
        self.run_git(['tag', '-a', '-m', 'Tag {0}'.format(number),
                      'tag{0}'.format(number)])
    def add(self, filename):
        """Stage *filename* (given as an absolute path inside the repo)."""
        self.run_git(['add', relpath(filename, self.path)])
    def checkout(self, branch_name, start_commit=None):
        """Check out *branch_name*, creating it at *start_commit* if given."""
        args = ["checkout"]
        if start_commit is not None:
            args.extend(["-b", branch_name, start_commit])
        else:
            args.append(branch_name)
        self.run_git(args)
    def merge(self, branch_name, commit_message=None):
        """Merge *branch_name* (no fast-forward, 'theirs' on conflicts)."""
        self.run_git(["merge", "--no-ff", "--no-commit", "-X", "theirs", branch_name])
        if commit_message is None:
            commit_message = "Merge {0}".format(branch_name)
        self.commit(commit_message)
    def get_hash(self, name):
        """Return the commit hash that *name* resolves to."""
        return self.run_git(['rev-parse', name]).strip()
    def get_branch_hashes(self, branch=None):
        """Return the commit hashes reachable from *branch* (default master)."""
        if branch is None:
            branch = "master"
        return [x.strip() for x in self.run_git(['rev-list', branch]).splitlines()
                if x.strip()]
    def get_commit_message(self, commit_hash):
        """Return the subject line of the given commit."""
        return self.run_git(["log", "-n", "1", "--format=%s", commit_hash]).strip()
# Minimal Mercurial user configuration written into each test
# repository's hgrc so commits have a deterministic author.
_hg_config = """
[ui]
username = Robotic Swallow <robot@asv>
"""
class Hg(object):
    """Thin wrapper around ``hglib`` for building Mercurial test repos."""
    # Encoding used for all strings passed to/from the hglib API.
    encoding = 'utf-8'
    def __init__(self, path):
        # Fake commit date, incremented per commit for deterministic ordering.
        self._fake_date = datetime.datetime.now()
        self.path = abspath(path)
    def init(self):
        """Initialize the repository and write the fixed hgrc identity."""
        hglib.init(self.path)
        with io.open(join(self.path, '.hg', 'hgrc'), 'w', encoding="utf-8") as fd:
            fd.write(_hg_config)
        self._repo = hglib.open(self.path.encode(sys.getfilesystemencoding()),
                                encoding=self.encoding)
    def commit(self, message, date=None):
        """Commit pending changes; fabricate an increasing date if none given."""
        if date is None:
            self._fake_date += datetime.timedelta(seconds=1)
            date = self._fake_date
        # Mercurial date format: "<unix timestamp> <tz offset>".
        date = "{0} 0".format(util.datetime_to_timestamp(date))
        self._repo.commit(message.encode(self.encoding),
                          date=date.encode(self.encoding))
    def tag(self, number):
        """Create tag ``tag<number>`` with its own fabricated date."""
        self._fake_date += datetime.timedelta(seconds=1)
        date = "{0} 0".format(util.datetime_to_timestamp(self._fake_date))
        self._repo.tag(
            ['tag{0}'.format(number).encode(self.encoding)],
            message="Tag {0}".format(number).encode(self.encoding),
            date=date.encode(self.encoding))
    def add(self, filename):
        """Schedule *filename* for addition to the repository."""
        self._repo.add([filename.encode(sys.getfilesystemencoding())])
    def checkout(self, branch_name, start_commit=None):
        """Update to *branch_name*, creating it at *start_commit* if given."""
        if start_commit is not None:
            self._repo.update(start_commit.encode(self.encoding))
            self._repo.branch(branch_name.encode(self.encoding))
        else:
            self._repo.update(branch_name.encode(self.encoding))
    def merge(self, branch_name, commit_message=None):
        """Merge *branch_name*, resolving conflicts in favor of the other side."""
        self._repo.merge(branch_name.encode(self.encoding),
                         tool=b"internal:other")
        if commit_message is None:
            commit_message = "Merge {0}".format(branch_name)
        self.commit(commit_message)
    def get_hash(self, name):
        """Return the hash *name* resolves to, or None if unknown."""
        log = self._repo.log(name.encode(self.encoding), limit=1)
        if log:
            return log[0][1].decode(self.encoding)
        return None
    def get_branch_hashes(self, branch=None):
        """Return ancestor hashes of *branch* (default 'default'), newest first."""
        if branch is None:
            branch = "default"
        log = self._repo.log('sort(ancestors({0}), -rev)'.format(branch).encode(self.encoding))
        return [entry[1].decode(self.encoding) for entry in log]
    def get_commit_message(self, commit_hash):
        """Return the description (message) of the given commit."""
        return self._repo.log(commit_hash.encode(self.encoding))[0].desc.decode(self.encoding)
def copy_template(src, dst, dvcs, values):
    """
    Copy the template tree *src* into *dst*, substituting *values*.

    UTF-8 text files are treated as templates and formatted with
    ``content.format(**values)``; files that fail UTF-8 decoding are
    copied verbatim.  Every copied file is staged via ``dvcs.add``.
    """
    # `dirname_` / `filename` renamed from `dir` / `file`, which
    # shadowed builtins.
    for root, dirs, files in os.walk(src):
        for dirname_ in dirs:
            src_path = join(root, dirname_)
            dst_path = join(dst, relpath(src_path, src))
            if not isdir(dst_path):
                os.makedirs(dst_path)
        for filename in files:
            src_path = join(root, filename)
            dst_path = join(dst, relpath(src_path, src))
            try:
                with io.open(src_path, 'r', encoding='utf-8') as fd:
                    content = fd.read()
            except UnicodeDecodeError:
                # File is some sort of binary file... just copy it
                # directly with no template substitution.
                with io.open(src_path, 'rb') as fd:
                    content = fd.read()
                with io.open(dst_path, 'wb') as fd:
                    fd.write(content)
            else:
                content = content.format(**values)
                with io.open(dst_path, 'w', encoding='utf-8') as fd:
                    fd.write(content)
            dvcs.add(dst_path)
def generate_test_repo(tmpdir, values=(0,), dvcs_type='git',
                       extra_branches=(), subdir=''):
    """
    Generate a test repository under *tmpdir*.

    One commit (and tag) is created per entry of *values*; each
    ``(start_commit, branch_name, branch_values)`` in *extra_branches*
    adds a branch with its own commits.  Returns the Git or Hg wrapper.
    """
    # Default changed from the mutable `[0]` to an equivalent tuple:
    # `values` is only iterated, so behavior is unchanged, but a shared
    # mutable default is an accident waiting to happen.
    if dvcs_type == 'git':
        dvcs_cls = Git
    elif dvcs_type == 'hg':
        dvcs_cls = Hg
    else:
        raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
    template_path = join(dirname(__file__), 'test_repo_template')
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
    dvcs = dvcs_cls(dvcs_path)
    dvcs.init()
    project_path = os.path.join(dvcs_path, subdir)
    if not os.path.exists(project_path):
        os.makedirs(project_path)
    for i, value in enumerate(values):
        mapping = {
            'version': i,
            'dummy_value': value
        }
        copy_template(template_path, project_path, dvcs, mapping)
        dvcs.commit("Revision {0}".format(i))
        dvcs.tag(i)
    if extra_branches:
        # `branch_values` renamed from `values` to avoid shadowing the
        # parameter of the same name.
        for start_commit, branch_name, branch_values in extra_branches:
            dvcs.checkout(branch_name, start_commit)
            for i, value in enumerate(branch_values):
                mapping = {
                    'version': "{0}".format(i),
                    'dummy_value': value
                }
                copy_template(template_path, project_path, dvcs, mapping)
                dvcs.commit("Revision {0}.{1}".format(branch_name, i))
    return dvcs
def generate_repo_from_ops(tmpdir, dvcs_type, operations):
    """
    Create a test repository by replaying *operations*.

    Each operation is a tuple: ``("commit", dummy_value, *commit_args)``,
    ``("checkout", *checkout_args)`` or ``("merge", *merge_args)``.
    Returns the Git or Hg wrapper for the new repository.
    """
    if dvcs_type == 'git':
        dvcs_cls = Git
    elif dvcs_type == 'hg':
        dvcs_cls = Hg
    else:
        raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
    template_path = join(dirname(__file__), 'test_repo_template')
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
    dvcs = dvcs_cls(dvcs_path)
    dvcs.init()
    version = 0
    for op in operations:
        if op[0] == "commit":
            copy_template(template_path, dvcs_path, dvcs, {
                "version": version,
                "dummy_value": op[1],
            })
            version += 1
            # Note: commit messages are numbered starting at 1 here.
            dvcs.commit("Revision {0}".format(version), *op[2:])
        elif op[0] == "checkout":
            dvcs.checkout(*op[1:])
        elif op[0] == "merge":
            dvcs.merge(*op[1:])
        else:
            raise ValueError("Unknown dvcs operation {0}".format(op))
    return dvcs
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    """
    Generate a fake asv results directory for an existing repository.

    Parameters
    ----------
    tmpdir
        Base directory; ``results`` and ``html`` subdirectories are
        created inside it.
    dvcs : Git or Hg
        Repository whose commits the results are attached to.
    values : dict
        Maps commit name -> benchmark value for a single ``time_func``
        benchmark.  A dict value marks a parameterized benchmark and
        must contain a "params" key.
    branches : list, optional
        Branch list for the generated config; defaults to ``[None]``.

    Returns
    -------
    conf : config.Config
        Config pointing at the generated results/html directories.
    """
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    # Results for a single fake machine named "tarzan".
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)
    if branches is None:
        branches = [None]
    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)
    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
        'version': 1,
    })
    timestamp = datetime.datetime.utcnow()
    # Random benchmark version so regenerated dirs never match stale data.
    benchmark_version = sha256(os.urandom(16)).hexdigest()
    params = None
    param_names = None
    for commit, value in values.items():
        if isinstance(value, dict):
            # Parameterized benchmark.
            # NOTE(review): only the last "params" seen is written to
            # benchmarks.json -- presumably all dict values share it;
            # confirm against callers.
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        value = {
            'result': [value],
            'params': [],
            'started_at': timestamp,
            'ended_at': timestamp,
            'stats': None,
            'samples': None,
            'number': None,
        }
        result.add_result("time_func", value, benchmark_version)
        result.save(result_dir)
    if params:
        param_names = ["param{}".format(k) for k in range(len(params))]
    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": param_names or [],
            "version": benchmark_version,
        }
    }, api_version=1)
    return conf
@pytest.fixture(scope="session")
def browser(request, pytestconfig):
    """
    Fixture for Selenium WebDriver browser interface.

    The ``--webdriver`` option names either one of the helpers defined
    below (``FirefoxHeadless``, ``ChromeHeadless``), any name exported
    by ``selenium.webdriver``, or a Python code snippet used as the
    body of a ``create_driver()`` function.
    """
    driver_str = pytestconfig.getoption('webdriver')
    if driver_str == "None":
        pytest.skip("No webdriver selected for tests (use --webdriver).")
    def FirefoxHeadless():
        from selenium.webdriver.firefox.options import Options
        options = Options()
        options.add_argument("-headless")
        return selenium.webdriver.Firefox(firefox_options=options)
    def ChromeHeadless():
        options = selenium.webdriver.ChromeOptions()
        options.add_argument('headless')
        return selenium.webdriver.Chrome(chrome_options=options)
    # Build a namespace in which --webdriver is resolved; six.exec_ is
    # used so the same code runs on Python 2 and 3.
    ns = {}
    six.exec_("import selenium.webdriver", ns)
    six.exec_("from selenium.webdriver import *", ns)
    ns['FirefoxHeadless'] = FirefoxHeadless
    ns['ChromeHeadless'] = ChromeHeadless
    create_driver = ns.get(driver_str, None)
    if create_driver is None:
        # Not a known name: treat the option value as the body of a
        # create_driver() function and compile it.
        src = "def create_driver():\n"
        src += textwrap.indent(driver_str, "    ")
        six.exec_(src, ns)
        create_driver = ns['create_driver']
    browser = create_driver()
    # Bound the time spent on page loads and injected scripts.
    browser.set_page_load_timeout(WAIT_TIME)
    browser.set_script_timeout(WAIT_TIME)
    # Quit the browser when the test session ends.
    def fin():
        browser.quit()
    request.addfinalizer(fin)
    # Default time to wait for AJAX requests to complete.
    browser.implicitly_wait(WAIT_TIME)
    return browser
@contextmanager
def preview(base_path):
    """
    Context manager for ASV preview web server. Gives the base URL to use.

    Parameters
    ----------
    base_path : str
        Path to serve files from
    """
    class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def translate_path(self, path):
            # Don't serve from cwd, but from a different directory
            path = SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, path)
            path = os.path.join(base_path, os.path.relpath(path, os.getcwd()))
            return util.long_path(path)

    httpd, base_url = create_httpd(Handler)

    def run():
        try:
            httpd.serve_forever()
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt in the server thread.  Log unexpected
            # errors instead of dying silently.
            import traceback
            traceback.print_exc()
            return

    thread = threading.Thread(target=run)
    thread.daemon = True
    thread.start()
    try:
        yield base_url
    finally:
        # Stop must be run in a separate thread, because
        # httpd.shutdown blocks until serve_forever returns. We don't
        # want to block here --- it appears in some environments
        # problems shutting down the server may arise.
        stopper = threading.Thread(target=httpd.shutdown)
        stopper.daemon = True
        stopper.start()
        stopper.join(5.0)
def get_with_retry(browser, url):
    """Load *url* in *browser*, retrying a couple of times on timeout."""
    attempts_left = 2
    while attempts_left > 0:
        try:
            return browser.get(url)
        except TimeoutException:
            time.sleep(2)
        attempts_left -= 1
    # Final attempt: let a timeout propagate to the caller.
    return browser.get(url)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.