hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c896321225ea20ed199b7cf712095a999192d6de | 9,950 | py | Python | bypy/const.py | dbpython/bypy | da6e1e0fca09a560e60dc93dbfa63c9119ce38b6 | [
"MIT"
] | 1 | 2018-04-11T07:08:47.000Z | 2018-04-11T07:08:47.000Z | bypy/const.py | limited1010/bypy | da6e1e0fca09a560e60dc93dbfa63c9119ce38b6 | [
"MIT"
] | null | null | null | bypy/const.py | limited1010/bypy | da6e1e0fca09a560e60dc93dbfa63c9119ce38b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# PYTHON_ARGCOMPLETE_OK
# from __future__ imports must occur at the beginning of the file
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import sys
import os
# https://packaging.python.org/single_source_version/
# Package metadata, kept in one place so setup.py and the CLI can share it
# (see the single-source-version link above).
__title__ = 'bypy'
__version__ = '1.6.3'
__author__ = 'Hou Tianze'
__license__ = 'MIT'
__desc__ = 'Python client for Baidu Yun (Personal Cloud Storage) 百度云/百度网盘 Python 客户端'
__url__ = 'https://github.com/houtianze/bypy'
### return (error) codes
# they are put at the top because:
# 1. they have zero dependencies
# 2. can be referred in any abort later, e.g. return error on import failures
ENoError = 0 # plain old OK, fine, no error.
EIncorrectPythonVersion = 1
#EApiNotConfigured = 10 # Deprecated: ApiKey, SecretKey and AppPcsPath not properly configured
EArgument = 10 # invalid program command argument
EAbort = 20 # aborted
EException = 30 # unhandled exception occurred
EParameter = 40 # invalid parameter passed to ByPy
EInvalidJson = 50
EHashMismatch = 60 # MD5 hashes of the local file and remote file don't match each other
EFileWrite = 70
EFileTooBig = 80 # file too big to upload
EFailToCreateLocalDir = 90
EFailToCreateLocalFile = 100
EFailToDeleteDir = 110
EFailToDeleteFile = 120
EFileNotFound = 130
EMaxRetry = 140
ERequestFailed = 150 # request failed
ECacheNotLoaded = 160
EMigrationFailed = 170
EDownloadCerts = 180
EUserRejected = 190 # user's decision
EUpdateNeeded = 200
ESkipped = 210
EFatal = -1 # No way to continue
# internal errors (Baidu PCS error codes passed through verbatim)
IEMD5NotFound = 31079 # File md5 not found, you should use upload API to upload the whole file.
IESuperfileCreationFailed = 31081 # superfile create failed (HTTP 404)
# Undocumented, see #308 , https://paste.ubuntu.com/23672323/
IEBlockMissInSuperFile2 = 31363 # block miss in superfile2 (HTTP 403)
IETaskNotFound = 36016 # Task was not found
IEFileAlreadyExists = 31061 # {"error_code":31061,"error_msg":"file already exists","request_id":2939656146461714799}
# TODO: Should have use an enum or some sort of data structure for this,
# but now changing this is too time consuming and error-prone
# Human-readable explanation for every code above, used when reporting
# failures to the user. (Several message typos fixed in this revision.)
ErrorExplanations = {
	ENoError: "Everything went fine.",
	EIncorrectPythonVersion: "Incorrect Python version",
	EArgument: "Invalid program argument passed in",
	EAbort: "Abort due to unrecoverable errors",
	EException: "Unhandled exception occurred",
	EParameter: "Some or all the parameters passed to the function are invalid",
	EInvalidJson: "Invalid JSON received",
	EHashMismatch: "MD5 hashes of the local file and remote file don't match each other",
	EFileWrite: "Error writing file",
	EFileTooBig: "File too big to upload",
	EFailToCreateLocalDir: "Unable to create some directory(ies)",
	EFailToCreateLocalFile: "Unable to create some local file(s)",
	EFailToDeleteDir: "Unable to delete some directory(ies)",
	EFailToDeleteFile: "Unable to delete some file(s)",
	EFileNotFound: "File not found",
	EMaxRetry: "Maximum retries reached",
	ERequestFailed: "Request failed",
	ECacheNotLoaded: "Failed to load file caches",
	EMigrationFailed: "Failed to migrate from the old cache format",
	EDownloadCerts: "Failed to download certificates", # no longer in use
	EUserRejected: "User chose not to proceed",
	EUpdateNeeded: "Need to update bypy",
	ESkipped: "Some files/directories are skipped",
	EFatal: "Fatal error, unable to continue",
	IEMD5NotFound: "File md5 not found, you should use upload API to upload the whole file.",
	IESuperfileCreationFailed: "superfile create failed (HTTP 404)",
	# Undocumented, see #308 , https://paste.ubuntu.com/23672323/
	IEBlockMissInSuperFile2: "Block miss in superfile2 (HTTP 403)",
	IETaskNotFound: "Task was not found",
	IEFileAlreadyExists: "File already exists"
}
# External downloader support: currently only aria2 is recognized.
DownloaderAria2 = 'aria2'
Downloaders = [DownloaderAria2]
# Default command-line arguments passed to each external downloader.
DownloaderDefaultArgs = {
	DownloaderAria2 : "-c -k10M -x4 -s4 --file-allocation=none"
}
# Environment variable that overrides downloader arguments.
DownloaderArgsEnvKey = 'DOWNLOADER_ARGUMENTS'
# '@' prefix marker -- presumably indicates the argument value names a file
# containing the real arguments; confirm at the use site.
DownloaderArgsIsFilePrefix = '@'
# pip executable matching the running major Python version (pip2 / pip3).
PipBinaryName = 'pip' + str(sys.version_info[0])
PipInstallCommand = PipBinaryName + ' install requests'
PipUpgradeCommand = PipBinaryName + ' install -U requests'
#### Definitions that are real world constants
# Binary (1024-based) size multipliers.
OneK = 1024
OneM = OneK * OneK
OneG = OneM * OneK
OneT = OneG * OneK
OneP = OneT * OneK
OneE = OneP * OneK
OneZ = OneE * OneK
OneY = OneZ * OneK
# Prefix names indexed by power of 1024 (index 1 -> OneK, index 2 -> OneM, ...)
SIPrefixNames = [ '', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y' ]
# Multipliers keyed by prefix letter, for parsing human-entered sizes.
# NOTE(review): this table uses upper-case 'K' while SIPrefixNames uses
# lower-case 'k' -- callers presumably upper-case input first; confirm
# before unifying the case.
SIPrefixTimes = {
	'K' : OneK,
	'M' : OneM,
	'G' : OneG,
	'T' : OneT,
	'P' : OneP, # was missing: every other defined prefix had an entry
	'E' : OneE,
	'Z' : OneZ,
	'Y' : OneY }
# before this, you don't know me, i don't know you - Eason
TenYearInSeconds = 60 * 60 * 24 * 366 * 10
# For Python 3 only, threading.TIMEOUT_MAX is 9223372036854.0 on all *nix systems,
# but it's a little over 49 days for Windows, if we give a value larger than that,
# Python 3 on Windows will throw towel, so we cringe.
FortyNineDaysInSeconds = 60 * 60 * 24 * 49
#### Baidu PCS constants
# ==== NOTE ====
# I use server auth, because it's the only possible method to protect the SecretKey.
# If you want to perform local authorization using 'Device' method instead, you just need:
# - Paste your own ApiKey and SecretKey. (A non-None or non-empty SecretKey means using local auth
# - Change the AppPcsPath to your own App's directory at Baidu PCS
# Then you are good to go
ApiKey = 'q8WE4EpCsau1oS0MplgMKNBn' # replace with your own ApiKey if you use your own appid
SecretKey = '' # replace with your own SecretKey if you use your own appid
# NOTE: no trailing '/'
AppPcsPath = '/apps/bypy' # change this to the App's directory you specified when creating the app
AppPcsPathLen = len(AppPcsPath)
## Baidu PCS URLs etc.
# OAuth endpoints are built from the OpenAPI base URL and version below.
OpenApiUrl = "https://openapi.baidu.com"
OpenApiVersion = "2.0"
OAuthUrl = OpenApiUrl + "/oauth/" + OpenApiVersion
ServerAuthUrl = OAuthUrl + "/authorize"
DeviceAuthUrl = OAuthUrl + "/device/code"
TokenUrl = OAuthUrl + "/token"
# REST endpoints; the 'c.' and 'd.' hosts are separate upload/download entry points.
PcsDomain = 'pcs.baidu.com'
RestApiPath = '/rest/2.0/pcs/'
PcsUrl = 'https://' + PcsDomain + RestApiPath
CPcsUrl = 'https://c.pcs.baidu.com/rest/2.0/pcs/'
DPcsUrl = 'https://d.pcs.baidu.com/rest/2.0/pcs/'
## Baidu PCS constants
# Server-imposed limits (sizes in bytes, counts in entries).
MinRapidUploadFileSize = 256 * OneK
MaxSliceSize = 2 * OneG
MaxSlicePieces = 1024
MaxListEntries = 1000 # https://github.com/houtianze/bypy/issues/285
### Auth servers
# OAuth helper servers performing the server-side part of the auth flow
# (see the NOTE above: server auth keeps the SecretKey off the client).
GaeUrl = 'https://bypyoauth.appspot.com'
#OpenShiftUrl = 'https://bypy-tianze.rhcloud.com'
OpenShiftUrl = 'https://bypyoauth-route-bypy.a3c1.starter-us-west-1.openshiftapps.com'
HerokuUrl = 'https://bypyoauth.herokuapp.com'
Heroku1Url = 'https://bypyoauth1.herokuapp.com'
GaeRedirectUrl = GaeUrl + '/auth'
GaeRefreshUrl = GaeUrl + '/refresh'
OpenShiftRedirectUrl = OpenShiftUrl + '/auth'
OpenShiftRefreshUrl = OpenShiftUrl + '/refresh'
HerokuRedirectUrl = HerokuUrl + '/auth'
HerokuRefreshUrl = HerokuUrl + '/refresh'
Heroku1RedirectUrl = Heroku1Url + '/auth'
Heroku1RefreshUrl = Heroku1Url + '/refresh'
# The messages below indicate servers are tried in list order, each used as
# a fallback when the previous one fails.
AuthServerList = [
	# url, retry?, message
	(OpenShiftRedirectUrl, False, "Authorizing/refreshing with the OpenShift server ..."),
	(HerokuRedirectUrl, False, "OpenShift server failed, authorizing/refreshing with the Heroku server ..."),
	(Heroku1RedirectUrl, False, "Heroku server failed, authorizing/refreshing with the Heroku1 server ..."),
	(GaeRedirectUrl, False, "Heroku1 server failed. Last resort: authorizing/refreshing with the GAE server ..."),
]
RefreshServerList = [
	# url, retry?, message
	(OpenShiftRefreshUrl, False, "Authorizing/refreshing with the OpenShift server ..."),
	(HerokuRefreshUrl, False, "OpenShift server failed, authorizing/refreshing with the Heroku server ..."),
	(Heroku1RefreshUrl, False, "Heroku server failed, authorizing/refreshing with the Heroku1 server ..."),
	(GaeRefreshUrl, False, "Heroku1 server failed. Last resort: authorizing/refreshing with the GAE server ..."),
]
### public static properties
HelpMarker = "Usage:"
### ByPy config constants
## directories, for setting, cache, etc
HomeDir = os.path.expanduser('~')
# os.path.join() may not handle unicode well
ConfigDir = HomeDir + os.sep + '.bypy'
# File names / full paths for the various state files kept under ConfigDir.
TokenFileName = 'bypy.json'
TokenFilePath = ConfigDir + os.sep + TokenFileName
SettingFileName = 'bypy.setting.json'
SettingFilePath = ConfigDir + os.sep + SettingFileName
HashCacheFileName = 'bypy.hashcache.json'
HashCachePath = ConfigDir + os.sep + HashCacheFileName
PickleFileName = 'bypy.pickle'
PicklePath = ConfigDir + os.sep + PickleFileName
# ProgressPath saves the MD5s of uploaded slices, for upload resuming
# format:
# {
# abspath: [slice_size, [slice1md5, slice2md5, ...]],
# }
#
ProgressFileName = 'bypy.parts.json'
ProgressPath = ConfigDir + os.sep + ProgressFileName
ByPyCertsFileName = 'bypy.cacerts.pem'
OldByPyCertsPath = ConfigDir + os.sep + ByPyCertsFileName
# Old setting locations, should be moved to ~/.bypy to be clean
OldTokenFilePath = HomeDir + os.sep + '.bypy.json'
OldPicklePath = HomeDir + os.sep + '.bypy.pickle'
# Remote scratch directory inside the app's PCS path.
RemoteTempDir = AppPcsPath + '/.bypytemp'
# Keys used inside the settings JSON file.
SettingKey_OverwriteRemoteTempDir = 'overwriteRemoteTempDir'
SettingKey_LastUpdateCheckTime = 'lastUpdateCheck'
## default config values
PrintFlushPeriodInSec = 5.0
# TODO: Does the following User-Agent emulation help?
UserAgent = None # According to xslidian, User-Agent affects download.
#UserAgent = 'Mozilla/5.0'
#UserAgent = "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)"
#UserAgent = 'netdisk;5.2.7.2;PC;PC-Windows;6.2.9200;WindowsBaiduYunGuanJia'
DefaultSliceInMB = 20
DefaultSliceSize = 20 * OneM
DefaultDlChunkSize = 20 * OneM
RetryDelayInSec = 10
CacheSavePeriodInSec = 10 * 60.0
DefaultTimeOutInSeconds=300
# share retries
ShareRapidUploadRetries = 3
DefaultResumeDlRevertCount = 1
DefaultProcessCount = 1
## program switches
CleanOptionShort = '-c'
CleanOptionLong = '--clean'
DisableSslCheckOption = '--disable-ssl-check'
CaCertsOption = '--cacerts'
MultiprocessOption = '--processes'
# vim: tabstop=4 noexpandtab shiftwidth=4 softtabstop=4 ff=unix fileencoding=utf-8
| 39.484127 | 117 | 0.752261 |
fc2bc42ef26b386da5b7f0d4f544a0872c192958 | 430 | py | Python | wsgi/iportalen_django/iportalen/wsgi.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 4 | 2016-09-21T17:06:01.000Z | 2018-02-06T16:36:44.000Z | wsgi/iportalen_django/iportalen/wsgi.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 149 | 2016-03-07T23:50:47.000Z | 2022-03-11T23:16:33.000Z | wsgi/iportalen_django/iportalen/wsgi.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 1 | 2016-03-07T23:02:06.000Z | 2016-03-07T23:02:06.000Z | """
WSGI config for iportalen project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
# Point Django at the project's settings module unless the environment
# already specifies one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "iportalen.settings")
# Cling (from dj-static) wraps the WSGI app -- presumably so static files are
# served through the application as well; confirm against deployment docs.
application = Cling(get_wsgi_application())
| 23.888889 | 78 | 0.790698 |
0acfa5ff604b21eec420212126080f003543ebd0 | 6,414 | py | Python | release_chef_11-14/source/conf.py | jblaine/chef-docs | dc540f7bbc2d3eedb05a74f34b1caf25f1a5d7d3 | [
"CC-BY-3.0"
] | null | null | null | release_chef_11-14/source/conf.py | jblaine/chef-docs | dc540f7bbc2d3eedb05a74f34b1caf25f1a5d7d3 | [
"CC-BY-3.0"
] | 1 | 2021-06-27T17:03:16.000Z | 2021-06-27T17:03:16.000Z | release_chef_11-14/source/conf.py | jblaine/chef-docs | dc540f7bbc2d3eedb05a74f34b1caf25f1a5d7d3 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Chef documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 22 13:50:49 2012.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates', '../../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Chef 11.14 Documentation'
# NOTE: 'copyright' is the Sphinx setting name; shadowing the builtin here is
# intentional and conventional for conf.py files.
copyright = u'This work is licensed under a Creative Commons Attribution 3.0 Unported License.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.0.1'
# The full version, including alpha/beta/rc tags.
# release = '0.0.1-1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'
# highlight_language = 'ruby'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# A string of reStructuredText that will be included at the beginning of every source file that is read.
rst_prolog = """
.. include:: ../../swaps/swap_descriptions.txt
.. include:: ../../swaps/swap_names.txt
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'chefv2'
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../../_themes/']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "chef-client 11.14 Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = "../../images/chef_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = "chef.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# This setting is being used by Chef to override a version # stamp inserted
# at every page bottom, using a string. For example: chef-client 11.6.x. Or:
# Enterprise Chef Server 11.0.x. And yeah, this is just a hack, but it's the
# hack that achieved the desired behavior. Plus, there's 0% chance that we'll
# ever want to insert a datetime stamp in the docs.
html_last_updated_fmt = 'chef-client, version 11.14'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# Every page gets only the 'localtoc.html' sidebar template.
html_sidebars = {
    '**': ['localtoc.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
    'search': 'chef_search.html',
}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = False
# This is set to "False" because we don't want to show the default copyright, but
# do want to show the custom string defined by the "copyright" general setting (above).
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'chef-client 11.14 Documentation'
| 34.483871 | 104 | 0.730745 |
c1a886aafa71757d5efccb3953d2c30a96a14a34 | 5,175 | py | Python | webdriver/tests/add_cookie/add.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 8 | 2019-04-09T21:13:05.000Z | 2021-11-23T17:25:18.000Z | webdriver/tests/add_cookie/add.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 7 | 2021-03-19T15:21:44.000Z | 2022-03-12T00:50:35.000Z | webdriver/tests/add_cookie/add.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 11 | 2019-04-12T01:20:16.000Z | 2021-11-23T17:25:02.000Z | from datetime import datetime, timedelta
from webdriver.transport import Response
from tests.support.asserts import assert_error, assert_success
from tests.support.helpers import clear_all_cookies
def add_cookie(session, cookie):
    """Issue an Add Cookie command, wrapping *cookie* in the request body."""
    endpoint = "session/{session_id}/cookie".format(**vars(session))
    payload = {"cookie": cookie}
    return session.transport.send("POST", endpoint, payload)
def test_null_parameter_value(session, http):
    """A literal null request body must yield an "invalid argument" error."""
    endpoint = "/session/{session_id}/cookie".format(**vars(session))
    with http.post(endpoint, None) as raw_response:
        response = Response.from_http(raw_response)
        assert_error(response, "invalid argument")
def test_null_response_value(session, url):
    """A successful Add Cookie command carries a null data value."""
    session.url = url("/common/blank.html")
    clear_all_cookies(session)

    response = add_cookie(session, {
        "name": "hello",
        "value": "world",
    })
    assert assert_success(response) is None
def test_no_browsing_context(session, closed_window):
    """Add Cookie fails with "no such window" once the window is closed."""
    cookie = {"name": "hello", "value": "world"}
    response = add_cookie(session, cookie)
    assert_error(response, "no such window")
def test_add_domain_cookie(session, url, server_config):
    """A cookie scoped to the test host is stored and can be read back."""
    host = server_config["browser_host"]
    session.url = url("/common/blank.html")
    clear_all_cookies(session)

    result = add_cookie(session, {
        "name": "hello",
        "value": "world",
        "domain": host,
        "path": "/",
        "httpOnly": False,
        "secure": False
    })
    assert_success(result)

    cookie = session.cookies("hello")
    # Each field must be present and be a string.
    for field in ("domain", "name", "value"):
        assert field in cookie
        assert isinstance(cookie[field], basestring)
    assert cookie["name"] == "hello"
    assert cookie["value"] == "world"
    # Implementations may normalize the domain with a leading dot.
    assert cookie["domain"] in (host, ".%s" % host)
def test_add_cookie_for_ip(session, url, server_config, configuration):
    """Cookies can be set against a bare IP-address domain."""
    session.url = "http://127.0.0.1:%s/common/blank.html" % (server_config["ports"]["http"][0])
    clear_all_cookies(session)

    result = add_cookie(session, {
        "name": "hello",
        "value": "world",
        "domain": "127.0.0.1",
        "path": "/",
        "httpOnly": False,
        "secure": False
    })
    assert_success(result)

    cookie = session.cookies("hello")
    # Each field must be present and be a string.
    for field in ("name", "value", "domain"):
        assert field in cookie
        assert isinstance(cookie[field], basestring)
    assert cookie["name"] == "hello"
    assert cookie["value"] == "world"
    assert cookie["domain"] == "127.0.0.1"
def test_add_non_session_cookie(session, url):
    """A cookie given an expiry is persisted with that expiry intact."""
    epoch = datetime.utcfromtimestamp(0)
    a_day_from_now = int(
        (datetime.utcnow() + timedelta(days=1) - epoch).total_seconds())

    session.url = url("/common/blank.html")
    clear_all_cookies(session)

    result = add_cookie(session, {
        "name": "hello",
        "value": "world",
        "expiry": a_day_from_now
    })
    assert_success(result)

    cookie = session.cookies("hello")
    assert "name" in cookie
    assert isinstance(cookie["name"], basestring)
    assert "value" in cookie
    assert isinstance(cookie["value"], basestring)
    assert "expiry" in cookie
    assert isinstance(cookie["expiry"], int)
    assert cookie["name"] == "hello"
    assert cookie["value"] == "world"
    assert cookie["expiry"] == a_day_from_now
def test_add_session_cookie(session, url):
    """A cookie without an expiry is stored as a session cookie."""
    session.url = url("/common/blank.html")
    clear_all_cookies(session)

    result = add_cookie(session, {
        "name": "hello",
        "value": "world"
    })
    assert_success(result)

    cookie = session.cookies("hello")
    for field in ("name", "value"):
        assert field in cookie
        assert isinstance(cookie[field], basestring)
    if "expiry" in cookie:
        # Session cookies either omit the expiry field or report it as null.
        assert cookie.get("expiry") is None
    assert cookie["name"] == "hello"
    assert cookie["value"] == "world"
def test_add_session_cookie_with_leading_dot_character_in_domain(session, url, server_config):
    """A leading dot in the cookie domain is accepted by the endpoint."""
    host = server_config["browser_host"]
    session.url = url("/common/blank.html")
    clear_all_cookies(session)

    result = add_cookie(session, {
        "name": "hello",
        "value": "world",
        "domain": ".%s" % host
    })
    assert_success(result)

    cookie = session.cookies("hello")
    for field in ("name", "value", "domain"):
        assert field in cookie
        assert isinstance(cookie[field], basestring)
    assert cookie["name"] == "hello"
    assert cookie["value"] == "world"
    # The stored domain may or may not keep the leading dot.
    assert cookie["domain"] in (host, ".%s" % host)
| 28.59116 | 95 | 0.651401 |
d4766432d997ebac98215125cbbb46fb41d36506 | 2,116 | py | Python | uw_sws/tests/test_course.py | uw-it-aca/uw-restclients-sws | 21c034d75e141223d74c200317f402f9628b6d52 | [
"Apache-2.0"
] | 1 | 2018-06-12T20:32:29.000Z | 2018-06-12T20:32:29.000Z | uw_sws/tests/test_course.py | uw-it-aca/uw-restclients-sws | 21c034d75e141223d74c200317f402f9628b6d52 | [
"Apache-2.0"
] | 156 | 2017-03-02T17:11:34.000Z | 2022-01-03T21:36:23.000Z | uw_sws/tests/test_course.py | uw-it-aca/uw-restclients-sws | 21c034d75e141223d74c200317f402f9628b6d52 | [
"Apache-2.0"
] | 2 | 2017-03-15T22:10:46.000Z | 2018-07-05T20:54:59.000Z | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_sws.util import fdao_sws_override
from uw_pws.util import fdao_pws_override
from uw_sws.course import validate_course_label, get_course_by_label
from uw_sws.exceptions import InvalidCourseID
@fdao_pws_override
@fdao_sws_override
class SWSTestCourseData(TestCase):
    """Exercises course-label validation and course lookup by label."""

    def test_validate(self):
        # A label is ordered year,quarter,curriculum,number; anything else
        # must raise InvalidCourseID.
        self.assertRaises(
            InvalidCourseID, validate_course_label, "winter,2019,142,CSE")
        # A well-formed label validates without raising.
        validate_course_label("2019,winter,CSE,142")

    def test_get_by_label(self):
        course = get_course_by_label("2013,spring,CSE,142")
        description = ("Basic programming-in-the-small "
                       "abilities and concepts including"
                       " procedural programming (methods"
                       ", parameters, return, values),"
                       " basic control structures "
                       "(sequence, if/else, for loop,"
                       " while loop), file processing,"
                       " arrays, and an introduction to"
                       " defining objects. Intended for"
                       " students without prior "
                       "programming experience. "
                       "Offered: AWSpS.")
        expected = {"curriculum_abbr": "CSE",
                    "course_number": "142",
                    "course_title": "COMPUTER PRGRMNG I",
                    "course_title_long": "Computer Programming I",
                    "course_campus": "Seattle",
                    "course_description": description}
        self.assertEqual(course.json_data(), expected)
        # Unknown curriculum yields None; a curriculum with a space works.
        self.assertIsNone(get_course_by_label("2013,spring,FOO,123"))
        self.assertIsNotNone(get_course_by_label("2013,spring,ATMO S,142"))
| 47.022222 | 79 | 0.535917 |
96eb9923cc4831842da1f364585a5acb6af5bf00 | 113,380 | py | Python | salt/utils/cloud.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | null | null | null | salt/utils/cloud.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2019-09-06T13:57:28.000Z | 2019-09-06T13:57:28.000Z | salt/utils/cloud.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2020-09-30T16:09:48.000Z | 2020-09-30T16:09:48.000Z | # -*- coding: utf-8 -*-
'''
Utility functions for salt.cloud
'''
# Import python libs
from __future__ import absolute_import
import errno
import os
import sys
import stat
import codecs
import shutil
import hashlib
import socket
import tempfile
import time
import subprocess
import multiprocessing
import logging
import pipes
import msgpack
import traceback
import copy
import re
import uuid
# Let's import pwd and catch the ImportError. We'll raise it if this is not
# Windows
try:
import pwd
except ImportError:
if not sys.platform.lower().startswith('win'):
# We can't use salt.utils.is_windows() from the import a little down
# because that will cause issues under windows at install time.
raise
try:
import salt.utils.smb
HAS_SMB = True
except ImportError:
HAS_SMB = False
try:
import winrm
from winrm.exceptions import WinRMTransportError
HAS_WINRM = True
except ImportError:
HAS_WINRM = False
# Import salt libs
import salt.crypt
import salt.client
import salt.config
import salt.utils
import salt.utils.event
from salt.utils import vt
from salt.utils.nb_popen import NonBlockingPopen
from salt.utils.yamldumper import SafeOrderedDumper
from salt.utils.validate.path import is_writeable
# Import salt cloud libs
import salt.cloud
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudSystemExit,
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure,
SaltCloudPasswordError
)
# Import third party libs
import salt.ext.six as six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin,W0611
from jinja2 import Template
import yaml
try:
import getpass
HAS_GETPASS = True
except ImportError:
HAS_GETPASS = False
# Numeric node-state codes mapped to their symbolic names.
NSTATES = {
    0: 'running',
    1: 'rebooting',
    2: 'terminated',
    3: 'pending',
}
# Matches an SSH password prompt at the end of output, e.g. "Password:" or
# "password for user:" (note the historical misspelling "PROMP" in the names).
SSH_PASSWORD_PROMP_RE = re.compile(r'(?:.*)[Pp]assword(?: for .*)?:\ *$', re.M)
# Same idea, but specifically for prompts issued by sudo.
SSH_PASSWORD_PROMP_SUDO_RE = \
    re.compile(r'(?:.*sudo)(?:.*)[Pp]assword(?: for .*)?:', re.M)
# Get logging started
log = logging.getLogger(__name__)
def __render_script(path, vm_=None, opts=None, minion=''):
    '''
    Render the deploy script at ``path`` as a Jinja template and return the
    result as a string. Falls back to the raw file contents when templating
    raises AttributeError (renderer not found).
    '''
    log.info('Rendering deploy script: {0}'.format(path))
    try:
        with salt.utils.fopen(path, 'r') as fp_:
            contents = fp_.read()
        rendered = Template(contents).render(opts=opts, vm=vm_, minion=minion)
        return str(rendered)
    except AttributeError:
        # Specified renderer was not found; ship the script verbatim.
        with salt.utils.fopen(path, 'r') as fp_:
            return fp_.read()
def os_script(os_, vm_=None, opts=None, minion=''):
    '''
    Return the script as a string for the specific os.

    The script is located by checking, in order:
      1. ``os_`` as an absolute path (with or without a ``.sh`` extension)
      2. each directory in ``opts['deploy_scripts_search_path']``, trying
         both ``os_`` and ``os_ + '.sh'``
    An empty string is returned when no script can be found.
    '''
    if minion:
        minion = salt_config_to_yaml(minion)

    if os.path.isabs(os_):
        # The user provided an absolute path to the deploy script. If the
        # bare path does not exist but the same path with a '.sh' extension
        # does, use the latter. (Previously this fallback was dead code: it
        # tested ``os.path.isabs(os_ + '.sh')``, which is only reached when
        # ``os_`` is relative and therefore could never be True.)
        with_ext = '{0}.sh'.format(os_)
        if not os.path.isfile(os_) and os.path.isfile(with_ext):
            return __render_script(with_ext, vm_, opts, minion)
        return __render_script(os_, vm_, opts, minion)

    for search_path in opts['deploy_scripts_search_path']:
        candidate = os.path.join(search_path, os_)
        if os.path.isfile(candidate):
            return __render_script(candidate, vm_, opts, minion)

        candidate = os.path.join(search_path, '{0}.sh'.format(os_))
        if os.path.isfile(candidate):
            return __render_script(candidate, vm_, opts, minion)

    # No deploy script was found, return an empty string
    return ''
def gen_keys(keysize=2048):
    '''
    Generate Salt minion keys and return them as PEM file strings.

    keysize -- requested key size; silently raised to the 2048 minimum.
    Returns a ``(priv, pub)`` tuple of PEM strings.
    '''
    # Mandate that keys are at least 2048 in size
    if keysize < 2048:
        keysize = 2048
    tdir = tempfile.mkdtemp()
    try:
        salt.crypt.gen_keys(tdir, 'minion', keysize)
        priv_path = os.path.join(tdir, 'minion.pem')
        pub_path = os.path.join(tdir, 'minion.pub')
        with salt.utils.fopen(priv_path) as fp_:
            priv = fp_.read()
        with salt.utils.fopen(pub_path) as fp_:
            pub = fp_.read()
    finally:
        # Always remove the scratch directory -- previously an exception in
        # key generation or reading leaked the directory (and key material).
        shutil.rmtree(tdir)
    return priv, pub
def accept_key(pki_dir, pub, id_):
    '''
    Write ``pub`` into the accepted keys directory under ``id_``. When the
    master config was available we have a pki_dir; the standard key
    directories are created on demand, and a matching pending copy in
    ``minions_pre`` is removed once the key has been accepted.
    '''
    for subdir in 'minions', 'minions_pre', 'minions_rejected':
        dirpath = os.path.join(pki_dir, subdir)
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)

    accepted = os.path.join(pki_dir, 'minions', id_)
    with salt.utils.fopen(accepted, 'w+') as fp_:
        fp_.write(pub)

    pending = os.path.join(pki_dir, 'minions_pre', id_)
    if os.path.isfile(pending):
        # Only drop the pending key if it matches the accepted one.
        with salt.utils.fopen(pending) as fp_:
            if fp_.read() == pub:
                os.remove(pending)
def remove_key(pki_dir, id_):
    '''
    Delete the accepted key for ``id_`` from the pki directory, if present.
    '''
    path = os.path.join(pki_dir, 'minions', id_)
    if not os.path.isfile(path):
        return
    os.remove(path)
    log.debug('Deleted \'{0}\''.format(path))
def rename_key(pki_dir, id_, new_id):
    '''
    Rename an accepted minion key, e.g. after the instance itself has been
    renamed. A missing source key is a no-op.
    '''
    src = os.path.join(pki_dir, 'minions', id_)
    if not os.path.isfile(src):
        return
    os.rename(src, os.path.join(pki_dir, 'minions', new_id))
def minion_config(opts, vm_):
    '''
    Build the minion configuration dictionary for the provided options
    and VM.
    '''
    # Start from a small set of sane defaults rather than a copy of the
    # full default minion opts; some of those defaults are Null.
    minion = {
        'master': 'salt',
        'log_level': 'info',
        'hash_type': 'sha256',
        'id': vm_['name'],
    }

    master_finger = salt.config.get_cloud_config_value('master_finger', vm_, opts)
    if master_finger is not None:
        minion['master_finger'] = master_finger

    # Merge any user-defined minion settings. Precedence:
    #   1. VM config  2. Profile config  3. Global configuration
    user_minion = salt.config.get_cloud_config_value(
        'minion', vm_, opts, default={}, search_global=True
    )
    minion.update(user_minion)

    make_master = salt.config.get_cloud_config_value('make_master', vm_, opts)
    if 'master' not in minion and make_master is not True:
        raise SaltCloudConfigError(
            'A master setting was not defined in the minion\'s configuration.'
        )

    # Merge any user-defined grains, with the same precedence as above
    user_grains = salt.config.get_cloud_config_value(
        'grains', vm_, opts, default={}, search_global=True
    )
    minion.setdefault('grains', {}).update(user_grains)
    return minion
def master_config(opts, vm_):
    '''
    Build the master configuration dictionary for the provided options
    and VM.
    '''
    # Begin with a copy of the salt master default options, then replace
    # the Null-ish defaults with reasonable values.
    master = copy.deepcopy(salt.config.DEFAULT_MASTER_OPTS)
    master['log_level'] = 'info'
    master['log_level_logfile'] = 'info'
    master['hash_type'] = 'sha256'

    # Merge any user-defined master settings. Precedence:
    #   1. VM config  2. Profile config  3. Global configuration
    user_master = salt.config.get_cloud_config_value(
        'master', vm_, opts, default={}, search_global=True
    )
    master.update(user_master)
    return master
def salt_config_to_yaml(configuration, line_break='\n'):
    '''
    Serialize a salt configuration dictionary (master or minion) to a
    YAML string, preserving key order via SafeOrderedDumper.
    '''
    return yaml.dump(
        configuration,
        default_flow_style=False,
        line_break=line_break,
        Dumper=SafeOrderedDumper,
    )
def bootstrap(vm_, opts):
    '''
    This is the primary entry point for logging into any system (POSIX or
    Windows) to install Salt. It will make the decision on its own as to which
    deploy function to call.

    vm_
        The VM dictionary (name, ssh_host, keys, provider config, ...).
    opts
        The salt-cloud master/CLI options dictionary.

    Returns a result dict: either ``{'Error': {...}}`` on failure, or a dict
    carrying ``deployed`` and the (sanitized) ``deploy_kwargs`` used.
    '''
    # Deployment may be driven by a deploy script, an inline script, or both
    deploy_config = salt.config.get_cloud_config_value(
        'deploy',
        vm_, opts, default=False)
    inline_script_config = salt.config.get_cloud_config_value(
        'inline_script',
        vm_, opts, default=None)
    if deploy_config is False and inline_script_config is None:
        return {
            'Error': {
                'No Deploy': '\'deploy\' is not enabled. Not deploying.'
            }
        }
    # 'key_filename' wins over the legacy 'ssh_keyfile' setting
    key_filename = salt.config.get_cloud_config_value(
        'key_filename', vm_, opts, search_global=False,
        default=salt.config.get_cloud_config_value(
            'ssh_keyfile', vm_, opts, search_global=False, default=None
        )
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_keyfile \'{0}\' does not exist'.format(
                key_filename
            )
        )
    # A running ssh-agent (with a live socket) counts as a usable credential
    has_ssh_agent = False
    if (opts.get('ssh_agent', False) and
            'SSH_AUTH_SOCK' in os.environ and
            stat.S_ISSOCK(os.stat(os.environ['SSH_AUTH_SOCK']).st_mode)):
        has_ssh_agent = True
    # Without a key file, a password, a Windows password, or an ssh-agent
    # there is no way to authenticate to the new VM
    if (key_filename is None and
            salt.config.get_cloud_config_value(
                'password', vm_, opts, default=None
            ) is None and
            salt.config.get_cloud_config_value(
                'win_password', vm_, opts, default=None
            ) is None and
            has_ssh_agent is False):
        raise SaltCloudSystemExit(
            'Cannot deploy Salt in a VM if the \'key_filename\' setting '
            'is not set and there is no password set for the VM. '
            'Check the provider docs for \'change_password\' option if it '
            'is supported by your provider.'
        )
    ret = {}

    # Render the minion config and the OS deploy script for this VM
    minion_conf = salt.utils.cloud.minion_config(opts, vm_)
    deploy_script_code = os_script(
        salt.config.get_cloud_config_value(
            'os', vm_, opts, default='bootstrap-salt'
        ),
        vm_, opts, minion_conf
    )

    ssh_username = salt.config.get_cloud_config_value(
        'ssh_username', vm_, opts, default='root'
    )

    if 'file_transport' not in opts:
        opts['file_transport'] = vm_.get('file_transport', 'sftp')

    # If we haven't generated any keys yet, do so now.
    if 'pub_key' not in vm_ and 'priv_key' not in vm_:
        log.debug('Generating keys for \'{0[name]}\''.format(vm_))

        vm_['priv_key'], vm_['pub_key'] = gen_keys(
            salt.config.get_cloud_config_value(
                'keysize',
                vm_,
                opts
            )
        )

        # Pre-accept the minion key on this master; 'append_domain'
        # extends the key id to the minion's FQDN
        key_id = vm_.get('name')
        if 'append_domain' in vm_:
            key_id = '.'.join([key_id, vm_['append_domain']])

        accept_key(
            opts['pki_dir'], vm_['pub_key'], key_id
        )

    if 'os' not in vm_:
        vm_['os'] = salt.config.get_cloud_config_value(
            'script',
            vm_,
            opts
        )

    # NOTE: deploy_kwargs is also used to pass inline_script variable content
    # to run_inline_script function
    deploy_kwargs = {
        'opts': opts,
        'host': vm_['ssh_host'],
        'port': salt.config.get_cloud_config_value(
            'ssh_port', vm_, opts, default=22
        ),
        'salt_host': vm_.get('salt_host', vm_['ssh_host']),
        'username': ssh_username,
        'script': deploy_script_code,
        'inline_script': inline_script_config,
        'name': vm_['name'],
        'has_ssh_agent': has_ssh_agent,
        'tmp_dir': salt.config.get_cloud_config_value(
            'tmp_dir', vm_, opts, default='/tmp/.saltcloud'
        ),
        'deploy_command': salt.config.get_cloud_config_value(
            'deploy_command', vm_, opts,
            default='/tmp/.saltcloud/deploy.sh',
        ),
        'start_action': opts['start_action'],
        'parallel': opts['parallel'],
        'sock_dir': opts['sock_dir'],
        'conf_file': opts['conf_file'],
        'minion_pem': vm_['priv_key'],
        'minion_pub': vm_['pub_key'],
        'master_sign_pub_file': salt.config.get_cloud_config_value(
            'master_sign_pub_file', vm_, opts, default=None),
        'keep_tmp': opts['keep_tmp'],
        # Default to sudo for non-root logins
        'sudo': salt.config.get_cloud_config_value(
            'sudo', vm_, opts, default=(ssh_username != 'root')
        ),
        'sudo_password': salt.config.get_cloud_config_value(
            'sudo_password', vm_, opts, default=None
        ),
        'tty': salt.config.get_cloud_config_value(
            'tty', vm_, opts, default=True
        ),
        'password': salt.config.get_cloud_config_value(
            'password', vm_, opts, search_global=False
        ),
        'key_filename': key_filename,
        'script_args': salt.config.get_cloud_config_value(
            'script_args', vm_, opts
        ),
        'script_env': salt.config.get_cloud_config_value(
            'script_env', vm_, opts
        ),
        'minion_conf': minion_conf,
        'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
        'display_ssh_output': salt.config.get_cloud_config_value(
            'display_ssh_output', vm_, opts, default=True
        ),
        'known_hosts_file': salt.config.get_cloud_config_value(
            'known_hosts_file', vm_, opts, default='/dev/null'
        ),
        'file_map': salt.config.get_cloud_config_value(
            'file_map', vm_, opts, default=None
        ),
        'maxtries': salt.config.get_cloud_config_value(
            'wait_for_passwd_maxtries', vm_, opts, default=15
        ),
    }

    # NOTE: this is a reference, not a copy; later additions to
    # deploy_kwargs are visible to the inline-script call as well
    inline_script_kwargs = deploy_kwargs

    # forward any info about possible ssh gateway to deploy script
    # as some providers need also a 'gateway' configuration
    if 'gateway' in vm_:
        deploy_kwargs.update({'gateway': vm_['gateway']})

    # Deploy salt-master files, if necessary
    if salt.config.get_cloud_config_value('make_master', vm_, opts) is True:
        deploy_kwargs['make_master'] = True
        deploy_kwargs['master_pub'] = vm_['master_pub']
        deploy_kwargs['master_pem'] = vm_['master_pem']
        master_conf = salt.utils.cloud.master_config(opts, vm_)
        deploy_kwargs['master_conf'] = master_conf

        if master_conf.get('syndic_master', None):
            deploy_kwargs['make_syndic'] = True

    deploy_kwargs['make_minion'] = salt.config.get_cloud_config_value(
        'make_minion', vm_, opts, default=True
    )

    # A win_installer setting switches the whole deployment to the
    # Windows (SMB/winexe/WinRM) path
    win_installer = salt.config.get_cloud_config_value(
        'win_installer', vm_, opts
    )
    if win_installer:
        deploy_kwargs['port'] = salt.config.get_cloud_config_value(
            'smb_port', vm_, opts, default=445
        )
        deploy_kwargs['win_installer'] = win_installer
        minion = salt.utils.cloud.minion_config(opts, vm_)
        deploy_kwargs['master'] = minion['master']
        deploy_kwargs['username'] = salt.config.get_cloud_config_value(
            'win_username', vm_, opts, default='Administrator'
        )
        win_pass = salt.config.get_cloud_config_value(
            'win_password', vm_, opts, default=''
        )
        if win_pass:
            deploy_kwargs['password'] = win_pass
        deploy_kwargs['use_winrm'] = salt.config.get_cloud_config_value(
            'use_winrm', vm_, opts, default=False
        )
        deploy_kwargs['winrm_port'] = salt.config.get_cloud_config_value(
            'winrm_port', vm_, opts, default=5986
        )

    # Store what was used to the deploy the VM
    # (with secrets stripped before it is fired on the event bus)
    event_kwargs = copy.deepcopy(deploy_kwargs)
    del event_kwargs['opts']
    del event_kwargs['minion_pem']
    del event_kwargs['minion_pub']
    del event_kwargs['sudo_password']
    if 'password' in event_kwargs:
        del event_kwargs['password']
    ret['deploy_kwargs'] = event_kwargs

    fire_event(
        'event',
        'executing deploy script',
        'salt/cloud/{0}/deploying'.format(vm_['name']),
        args={'kwargs': event_kwargs},
        sock_dir=opts.get(
            'sock_dir',
            os.path.join(__opts__['sock_dir'], 'master')),
        transport=opts.get('transport', 'zeromq')
    )

    if inline_script_config and deploy_config is False:
        # Inline-script-only deployment: run it and report 'deployed' False
        inline_script_deployed = run_inline_script(**inline_script_kwargs)
        if inline_script_deployed is not False:
            log.info('Inline script(s) ha(s|ve) run on {0}'.format(vm_['name']))
        ret['deployed'] = False
        return ret
    else:
        if win_installer:
            deployed = deploy_windows(**deploy_kwargs)
        else:
            deployed = deploy_script(**deploy_kwargs)

        if inline_script_config:
            inline_script_deployed = run_inline_script(**inline_script_kwargs)
            if inline_script_deployed is not False:
                log.info('Inline script(s) ha(s|ve) run on {0}'.format(vm_['name']))

        if deployed is not False:
            ret['deployed'] = True
            if deployed is not True:
                # A dict return carries extra deployment data to merge in
                ret.update(deployed)
            log.info('Salt installed on {0}'.format(vm_['name']))
            return ret

    log.error('Failed to start Salt on host {0}'.format(vm_['name']))
    return {
        'Error': {
            'Not Deployed': 'Failed to start Salt on host {0}'.format(
                vm_['name']
            )
        }
    }
def ssh_usernames(vm_, opts, default_users=None):
    '''
    Return the list of ssh usernames to attempt, in order: the configured
    name(s) first, then built-in fallbacks, then the configured name(s)
    once more.
    '''
    if default_users is None:
        default_users = ['root']

    configured = salt.config.get_cloud_config_value(
        'ssh_username', vm_, opts
    )
    if not isinstance(configured, list):
        configured = [configured]
    # Drop None's and empty names
    configured = [name for name in configured if name]

    candidates = list(configured)
    # Append common fallback usernames that are not already present
    for name in default_users:
        if name not in candidates:
            candidates.append(name)
    # Re-append the user-provided names at the end: enough time might need
    # to pass before the remote service is available for logins and the
    # proper username might have passed its iteration already (this was
    # detected on a CentOS 5.7 EC2 image).
    candidates.extend(configured)
    return candidates
def wait_for_fun(fun, timeout=900, **kwargs):
    '''
    Repeatedly call ``fun(**kwargs)`` until it returns a non-boolean
    result (which is returned), or ``timeout`` seconds have elapsed
    (which returns False).
    '''
    began = time.time()
    log.debug('Attempting function {0}'.format(fun))
    attempt = 0
    while True:
        attempt += 1
        try:
            result = fun(**kwargs)
        except Exception as exc:
            log.debug('Caught exception in wait_for_fun: {0}'.format(exc))
            time.sleep(1)
            log.debug(
                'Retrying function {0} on (try {1})'.format(
                    fun, attempt
                )
            )
        else:
            # A boolean result means "not done yet"; anything else is final
            if not isinstance(result, bool):
                return result
        if time.time() - began > timeout:
            log.error('Function timed out: {0}'.format(timeout))
            return False
def wait_for_port(host, port=22, timeout=900, gateway=None):
    '''
    Wait until a connection to the specified port can be made on a specified
    host. This is usually port 22 (for SSH), but in the case of Windows
    installations, it might be port 445 (for winexe). It may also be an
    alternate port for SSH, depending on the base image.

    If ``gateway`` is given (a dict with at least ``ssh_gateway`` and
    ``ssh_gateway_user``), the gateway's port is tested first with a raw
    socket, and then the target host:port is probed *through* the gateway
    by running netcat over ssh on the gateway box.

    Returns True once the port is reachable, False on timeout. Raises
    SaltCloudExecutionFailure if the gateway is reachable but login to it
    keeps failing.
    '''
    start = time.time()
    # Assign test ports because if a gateway is defined
    # we first want to test the gateway before the host.
    test_ssh_host = host
    test_ssh_port = port

    if gateway:
        # The gateway may be specified as 'host' or 'host:port'; an
        # explicit ssh_gateway_port setting wins over both
        ssh_gateway = gateway['ssh_gateway']
        ssh_gateway_port = 22
        if ':' in ssh_gateway:
            ssh_gateway, ssh_gateway_port = ssh_gateway.split(':')
        if 'ssh_gateway_port' in gateway:
            ssh_gateway_port = gateway['ssh_gateway_port']
        test_ssh_host = ssh_gateway
        test_ssh_port = ssh_gateway_port
        log.debug(
            'Attempting connection to host {0} on port {1} '
            'via gateway {2} on port {3}'.format(
                host, port, ssh_gateway, ssh_gateway_port
            )
        )
    else:
        log.debug(
            'Attempting connection to host {0} on port {1}'.format(
                host, port
            )
        )
    trycount = 0
    while True:
        trycount += 1
        try:
            # Pick the address family: inet_pton only succeeds for a
            # literal IPv6 address, otherwise fall back to IPv4
            if socket.inet_pton(socket.AF_INET6, host):
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        except socket.error:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.settimeout(5)
            sock.connect((test_ssh_host, int(test_ssh_port)))
            # Stop any remaining reads/writes on the socket
            sock.shutdown(socket.SHUT_RDWR)
            # Close it!
            sock.close()
            break
        except socket.error as exc:
            log.debug('Caught exception in wait_for_port: {0}'.format(exc))
            time.sleep(1)
            if time.time() - start > timeout:
                log.error('Port connection timed out: {0}'.format(timeout))
                return False
            if not gateway:
                log.debug(
                    'Retrying connection to host {0} on port {1} '
                    '(try {2})'.format(
                        test_ssh_host, test_ssh_port, trycount
                    )
                )
            else:
                log.debug(
                    'Retrying connection to Gateway {0} on port {1} '
                    '(try {2})'.format(
                        test_ssh_host, test_ssh_port, trycount
                    )
                )
    if not gateway:
        return True
    # Let the user know that his gateway is good!
    log.debug(
        'Gateway {0} on port {1} is reachable.'.format(
            test_ssh_host, test_ssh_port
        )
    )

    # Now we need to test the host via the gateway.
    # We will use netcat on the gateway to test the port
    ssh_args = []
    ssh_args.extend([
        # Don't add new hosts to the host key database
        '-oStrictHostKeyChecking=no',
        # Set hosts key database path to /dev/null, i.e., non-existing
        '-oUserKnownHostsFile=/dev/null',
        # Don't re-use the SSH connection. Less failures.
        '-oControlPath=none'
    ])
    # There should never be both a password and an ssh key passed in, so
    if 'ssh_gateway_key' in gateway:
        ssh_args.extend([
            # tell SSH to skip password authentication
            '-oPasswordAuthentication=no',
            '-oChallengeResponseAuthentication=no',
            # Make sure public key authentication is enabled
            '-oPubkeyAuthentication=yes',
            # do only use the provided identity file
            '-oIdentitiesOnly=yes',
            # No Keyboard interaction!
            '-oKbdInteractiveAuthentication=no',
            # Also, specify the location of the key file
            '-i {0}'.format(gateway['ssh_gateway_key'])
        ])
    # Netcat command testing remote port
    command = 'nc -z -w5 -q0 {0} {1}'.format(host, port)
    # SSH command
    # pcmd runs a trivial 'date' to prove we can log in to the gateway;
    # cmd runs the actual netcat probe against the target host:port
    pcmd = 'ssh {0} {1}@{2} -p {3} {4}'.format(
        ' '.join(ssh_args), gateway['ssh_gateway_user'], ssh_gateway,
        ssh_gateway_port, pipes.quote('date')
    )
    cmd = 'ssh {0} {1}@{2} -p {3} {4}'.format(
        ' '.join(ssh_args), gateway['ssh_gateway_user'], ssh_gateway,
        ssh_gateway_port, pipes.quote(command)
    )
    log.debug('SSH command: \'{0}\''.format(cmd))

    kwargs = {'display_ssh_output': False,
              'password': gateway.get('ssh_gateway_password', None)}
    trycount = 0
    usable_gateway = False
    gateway_retries = 5
    while True:
        trycount += 1
        # test gateway usage
        if not usable_gateway:
            pstatus = _exec_ssh_cmd(pcmd, allow_failure=True, **kwargs)
            if pstatus == 0:
                usable_gateway = True
            else:
                # Only a handful of login attempts are allowed before we
                # consider the gateway credentials broken
                gateway_retries -= 1
                log.error(
                    'Gateway usage seems to be broken, '
                    'password error ? Tries left: {0}'.format(gateway_retries))
                if not gateway_retries:
                    raise SaltCloudExecutionFailure(
                        'SSH gateway is reachable but we can not login')
        # then try to reach out the target
        if usable_gateway:
            status = _exec_ssh_cmd(cmd, allow_failure=True, **kwargs)
            # Get the exit code of the SSH command.
            # If 0 then the port is open.
            if status == 0:
                return True
        time.sleep(1)
        if time.time() - start > timeout:
            log.error('Port connection timed out: {0}'.format(timeout))
            return False
        log.debug(
            'Retrying connection to host {0} on port {1} '
            'via gateway {2} on port {3}. (try {4})'.format(
                host, port, ssh_gateway, ssh_gateway_port,
                trycount
            )
        )
def wait_for_winexesvc(host, port, username, password, timeout=900):
    '''
    Poll until a winexe connection to ``host`` can be established.
    Returns True on success, False once ``timeout`` seconds elapse.
    '''
    start = time.time()
    log.debug(
        'Attempting winexe connection to host {0} on port {1}'.format(
            host,
            port
        )
    )
    creds = "-U '{0}%{1}' //{2}".format(
        username,
        password,
        host
    )
    # Same credential string with the password redacted, for logging only
    logging_creds = "-U '{0}%XXX-REDACTED-XXX' //{1}".format(
        username,
        host
    )

    attempt = 0
    while True:
        attempt += 1
        try:
            # Shell out to winexe and query the winexesvc service state
            status = win_cmd(
                'winexe {0} "sc query winexesvc"'.format(creds),
                logging_command=logging_creds
            )
            if status == 0:
                log.debug('winexe connected...')
                return True
            log.debug('Return code was {0}'.format(status))
        except socket.error as exc:
            log.debug('Caught exception in wait_for_winexesvc: {0}'.format(exc))
        time.sleep(1)
        if time.time() - start > timeout:
            log.error('winexe connection timed out: {0}'.format(timeout))
            return False
        log.debug(
            'Retrying winexe connection to host {0} on port {1} '
            '(try {2})'.format(
                host,
                port,
                attempt
            )
        )
def wait_for_winrm(host, port, username, password, timeout=900):
    '''
    Poll until a WinRM session to ``host`` can be established and return
    the connected session. Returns None if the transport keeps failing
    past ``timeout`` seconds.
    '''
    start = time.time()
    log.debug(
        'Attempting WinRM connection to host {0} on port {1}'.format(
            host, port
        )
    )
    attempt = 0
    while True:
        attempt += 1
        try:
            session = winrm.Session(
                host, auth=(username, password), transport='ssl'
            )
            if hasattr(session.protocol, 'set_timeout'):
                session.protocol.set_timeout(15)
            log.trace('WinRM endpoint url: {0}'.format(session.url))
            probe = session.run_cmd('sc query winrm')
            if probe.status_code == 0:
                log.debug('WinRM session connected...')
                return session
            log.debug('Return code was {0}'.format(probe.status_code))
            time.sleep(1)
        except WinRMTransportError as exc:
            log.debug('Caught exception in wait_for_winrm: {0}'.format(exc))
            # NOTE: the timeout is only evaluated when the transport
            # itself fails; a reachable endpoint that keeps returning a
            # non-zero status is retried indefinitely (original behavior).
            if time.time() - start > timeout:
                log.error('WinRM connection timed out: {0}'.format(timeout))
                return None
            log.debug(
                'Retrying WinRM connection to host {0} on port {1} '
                '(try {2})'.format(
                    host, port, attempt
                )
            )
            time.sleep(1)
def validate_windows_cred(host,
                          username='Administrator',
                          password=None,
                          retries=10,
                          retry_delay=1):
    '''
    Check if the windows credentials are valid by running ``hostname`` on
    the target via winexe.

    host
        The Windows host to authenticate against.
    username / password
        Credentials to validate.
    retries
        Number of winexe attempts before giving up.
    retry_delay
        Seconds to sleep between failed attempts.

    Returns True if any attempt exits with status 0, otherwise False.
    '''
    cmd = "winexe -U '{0}%{1}' //{2} \"hostname\"".format(
        username,
        password,
        host
    )
    # Password-redacted variant of the command, used only for logging
    logging_cmd = "winexe -U '{0}%XXX-REDACTED-XXX' //{1} \"hostname\"".format(
        username,
        host
    )

    # BUGFIX: default to failure so a non-positive ``retries`` value does
    # not leave ret_code undefined (NameError) when referenced below.
    ret_code = 1
    for i in xrange(retries):
        ret_code = win_cmd(
            cmd,
            logging_command=logging_cmd
        )
        if ret_code == 0:
            break
        time.sleep(retry_delay)
    return ret_code == 0
def wait_for_passwd(host, port=22, ssh_timeout=15, username='root',
                    password=None, key_filename=None, maxtries=15,
                    trysleep=1, display_ssh_output=True, gateway=None,
                    known_hosts_file='/dev/null', hard_timeout=None):
    '''
    Wait until ssh connection can be accessed via password or ssh key

    Authentication is probed by running ``date`` on the remote host via
    root_cmd; a zero exit status means the credentials work. Up to
    ``maxtries`` attempts are made, sleeping ``trysleep`` seconds between
    failures. Returns True on success, False on exhaustion.

    Raises SaltCloudConfigError when ``key_filename`` is set but missing;
    SaltCloudPasswordError from the underlying command is re-raised.
    '''
    trycount = 0
    while trycount < maxtries:
        connectfail = False
        try:
            # Build the keyword set for root_cmd for this attempt
            kwargs = {'hostname': host,
                      'port': port,
                      'username': username,
                      'password_retries': maxtries,
                      'timeout': ssh_timeout,
                      'display_ssh_output': display_ssh_output,
                      'known_hosts_file': known_hosts_file,
                      'ssh_timeout': ssh_timeout,
                      'hard_timeout': hard_timeout}
            if gateway:
                kwargs['ssh_gateway'] = gateway['ssh_gateway']
                kwargs['ssh_gateway_key'] = gateway['ssh_gateway_key']
                kwargs['ssh_gateway_user'] = gateway['ssh_gateway_user']

            # Prefer key authentication over password when both are given
            if key_filename:
                if not os.path.isfile(key_filename):
                    raise SaltCloudConfigError(
                        'The defined key_filename \'{0}\' does not exist'.format(
                            key_filename
                        )
                    )
                kwargs['key_filename'] = key_filename
                log.debug('Using {0} as the key_filename'.format(key_filename))
            elif password:
                kwargs['password'] = password
                log.debug('Using password authentication')

            trycount += 1
            log.debug(
                'Attempting to authenticate as {0} (try {1} of {2})'.format(
                    username, trycount, maxtries
                )
            )

            # A harmless remote command doubles as the auth probe
            status = root_cmd('date', tty=False, sudo=False, **kwargs)
            if status != 0:
                connectfail = True
                if trycount < maxtries:
                    time.sleep(trysleep)
                    continue

                log.error(
                    'Authentication failed: status code {0}'.format(
                        status
                    )
                )
                return False
            if connectfail is False:
                return True
            return False
        except SaltCloudPasswordError:
            # Bad password is fatal; let the caller handle it
            raise
        except Exception:
            # Any other failure (e.g. connection refused): retry until
            # the attempt budget is spent
            if trycount >= maxtries:
                return False
            time.sleep(trysleep)
def deploy_windows(host,
                   port=445,
                   timeout=900,
                   username='Administrator',
                   password=None,
                   name=None,
                   sock_dir=None,
                   conf_file=None,
                   start_action=None,
                   parallel=False,
                   minion_pub=None,
                   minion_pem=None,
                   minion_conf=None,
                   keep_tmp=False,
                   script_args=None,
                   script_env=None,
                   port_timeout=15,
                   preseed_minion_keys=None,
                   win_installer=None,
                   master=None,
                   tmp_dir='C:\\salttmp',
                   opts=None,
                   master_sign_pub_file=None,
                   use_winrm=False,
                   winrm_port=5986,
                   **kwargs):
    '''
    Copy the install files to a remote Windows box, and execute them

    Files are transferred over SMB; commands run over WinRM when
    ``use_winrm`` is True (and the winrm module is importable), otherwise
    over winexe. Returns True on success, False on any failure.
    '''
    if not isinstance(opts, dict):
        opts = {}

    if use_winrm and not HAS_WINRM:
        log.error('WinRM requested but module winrm could not be imported')
        return False

    starttime = time.mktime(time.localtime())
    log.debug('Deploying {0} at {1} (Windows)'.format(host, starttime))
    log.trace('HAS_WINRM: {0}, use_winrm: {1}'.format(HAS_WINRM, use_winrm))

    # First make sure the SMB port itself answers
    port_available = wait_for_port(host=host, port=port, timeout=port_timeout * 60)

    if not port_available:
        return False

    # Then wait for the remote command service (WinRM or winexesvc)
    service_available = False
    winrm_session = None

    if HAS_WINRM and use_winrm:
        winrm_session = wait_for_winrm(host=host, port=winrm_port,
                                       username=username, password=password,
                                       timeout=port_timeout * 60)
        if winrm_session is not None:
            service_available = True
    else:
        service_available = wait_for_winexesvc(host=host, port=port,
                                               username=username, password=password,
                                               timeout=port_timeout * 60)

    if port_available and service_available:
        log.debug('SMB port {0} on {1} is available'.format(port, host))
        log.debug(
            'Logging into {0}:{1} as {2}'.format(
                host, port, username
            )
        )
        # NOTE(review): newtimeout is computed but never used below —
        # presumably a leftover; confirm before removing.
        newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
        smb_conn = salt.utils.smb.get_conn(host, username, password)
        if smb_conn is False:
            log.error('Please install impacket to enable SMB functionality')
            return False

        creds = "-U '{0}%{1}' //{2}".format(
            username,
            password,
            host
        )
        # Password-redacted credentials, used only for logging
        logging_creds = "-U '{0}%XXX-REDACTED-XXX' //{1}".format(
            username,
            host
        )

        salt.utils.smb.mkdirs('salttemp', conn=smb_conn)
        salt.utils.smb.mkdirs('salt/conf/pki/minion', conn=smb_conn)

        # minion_pub, minion_pem
        # NOTE(review): this rebinding of kwargs appears unused afterwards
        kwargs = {'hostname': host,
                  'creds': creds}

        if minion_pub:
            salt.utils.smb.put_str(minion_pub, 'salt\\conf\\pki\\minion\\minion.pub', conn=smb_conn)

        if minion_pem:
            salt.utils.smb.put_str(minion_pem, 'salt\\conf\\pki\\minion\\minion.pem', conn=smb_conn)

        if master_sign_pub_file:
            # Read master-sign.pub file
            log.debug("Copying master_sign.pub file from {0} to minion".format(master_sign_pub_file))
            try:
                with salt.utils.fopen(master_sign_pub_file, 'rb') as master_sign_fh:
                    smb_conn.putFile('C$', 'salt\\conf\\pki\\minion\\master_sign.pub', master_sign_fh.read)
            except Exception as e:
                # Best-effort copy: a failure is logged but does not abort
                log.debug("Exception copying master_sign.pub file {0} to minion".format(master_sign_pub_file))

        # Copy over win_installer
        # win_installer refers to a file such as:
        # /root/Salt-Minion-0.17.0-win32-Setup.exe
        # ..which exists on the same machine as salt-cloud
        comps = win_installer.split('/')
        local_path = '/'.join(comps[:-1])
        installer = comps[-1]
        with salt.utils.fopen(win_installer, 'rb') as inst_fh:
            smb_conn.putFile('C$', 'salttemp/{0}'.format(installer), inst_fh.read)

        if use_winrm:
            # Run the installer silently via WinRM
            winrm_cmd(winrm_session, 'c:\\salttemp\\{0}'.format(installer), ['/S', '/master={0}'.format(master),
                                                                            '/minion-name={0}'.format(name)]
            )
        else:
            # Shell out to winexe to execute win_installer
            # We don't actually need to set the master and the minion here since
            # the minion config file will be set next via impacket
            cmd = 'winexe {0} "c:\\salttemp\\{1} /S /master={2} /minion-name={3}"'.format(
                creds,
                installer,
                master,
                name
            )
            logging_cmd = 'winexe {0} "c:\\salttemp\\{1} /S /master={2} /minion-name={3}"'.format(
                logging_creds,
                installer,
                master,
                name
            )
            win_cmd(cmd, logging_command=logging_cmd)

        # Copy over minion_conf
        if minion_conf:
            if not isinstance(minion_conf, dict):
                # Let's not just fail regarding this change, specially
                # since we can handle it
                raise DeprecationWarning(
                    '`salt.utils.cloud.deploy_windows` now only accepts '
                    'dictionaries for its `minion_conf` parameter. '
                    'Loading YAML...'
                )
            # Grains are written to a separate file from the minion config
            minion_grains = minion_conf.pop('grains', {})
            if minion_grains:
                salt.utils.smb.put_str(
                    salt_config_to_yaml(minion_grains, line_break='\r\n'),
                    'salt\\conf\\grains',
                    conn=smb_conn
                )
            # Add special windows minion configuration
            # that must be in the minion config file
            windows_minion_conf = {
                'ipc_mode': 'tcp',
                'root_dir': 'c:\\salt',
                'pki_dir': '/conf/pki/minion',
                'multiprocessing': False,
            }
            minion_conf = dict(minion_conf, **windows_minion_conf)
            salt.utils.smb.put_str(
                salt_config_to_yaml(minion_conf, line_break='\r\n'),
                'salt\\conf\\minion',
                conn=smb_conn
            )
        # Delete C:\salttmp\ and installer file
        # Unless keep_tmp is True
        if not keep_tmp:
            smb_conn.deleteFile('C$', 'salttemp/{0}'.format(installer))
            smb_conn.deleteDirectory('C$', 'salttemp')
        # Shell out to winexe to ensure salt-minion service started
        if use_winrm:
            winrm_cmd(winrm_session, 'sc', ['stop', 'salt-minion'])
            time.sleep(5)
            winrm_cmd(winrm_session, 'sc', ['start', 'salt-minion'])
        else:
            stop_cmd = 'winexe {0} "sc stop salt-minion"'.format(
                creds
            )
            logging_stop_cmd = 'winexe {0} "sc stop salt-minion"'.format(
                logging_creds
            )
            win_cmd(stop_cmd, logging_command=logging_stop_cmd)

            time.sleep(5)

            start_cmd = 'winexe {0} "sc start salt-minion"'.format(creds)
            logging_start_cmd = 'winexe {0} "sc start salt-minion"'.format(
                logging_creds
            )
            win_cmd(start_cmd, logging_command=logging_start_cmd)

        # Fire deploy action
        fire_event(
            'event',
            '{0} has been deployed at {1}'.format(name, host),
            'salt/cloud/{0}/deploy_windows'.format(name),
            args={'name': name},
            sock_dir=opts.get(
                'sock_dir',
                os.path.join(__opts__['sock_dir'], 'master')),
            transport=opts.get('transport', 'zeromq')
        )

        return True
    return False
def deploy_script(host,
port=22,
timeout=900,
username='root',
password=None,
key_filename=None,
script=None,
name=None,
sock_dir=None,
provider=None,
conf_file=None,
start_action=None,
make_master=False,
master_pub=None,
master_pem=None,
master_conf=None,
minion_pub=None,
minion_pem=None,
minion_conf=None,
keep_tmp=False,
script_args=None,
script_env=None,
ssh_timeout=15,
maxtries=15,
make_syndic=False,
make_minion=True,
display_ssh_output=True,
preseed_minion_keys=None,
parallel=False,
sudo_password=None,
sudo=False,
tty=None,
deploy_command='/tmp/.saltcloud/deploy.sh',
opts=None,
tmp_dir='/tmp/.saltcloud',
file_map=None,
master_sign_pub_file=None,
**kwargs):
'''
Copy a deploy script to a remote server, execute it, and remove it
'''
if not isinstance(opts, dict):
opts = {}
tmp_dir = '{0}-{1}'.format(tmp_dir.rstrip('/'), uuid.uuid4())
deploy_command = os.path.join(tmp_dir, 'deploy.sh')
if key_filename is not None and not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined key_filename \'{0}\' does not exist'.format(
key_filename
)
)
gateway = None
if 'gateway' in kwargs:
gateway = kwargs['gateway']
starttime = time.mktime(time.localtime())
log.debug('Deploying {0} at {1}'.format(host, starttime))
known_hosts_file = kwargs.get('known_hosts_file', '/dev/null')
hard_timeout = opts.get('hard_timeout', None)
if wait_for_port(host=host, port=port, gateway=gateway):
log.debug('SSH port {0} on {1} is available'.format(port, host))
if wait_for_passwd(host, port=port, username=username,
password=password, key_filename=key_filename,
ssh_timeout=ssh_timeout,
display_ssh_output=display_ssh_output,
gateway=gateway, known_hosts_file=known_hosts_file,
maxtries=maxtries, hard_timeout=hard_timeout):
log.debug(
'Logging into {0}:{1} as {2}'.format(
host, port, username
)
)
ssh_kwargs = {
'hostname': host,
'port': port,
'username': username,
'timeout': ssh_timeout,
'display_ssh_output': display_ssh_output,
'sudo_password': sudo_password,
'sftp': opts.get('use_sftp', False)
}
if gateway:
ssh_kwargs['ssh_gateway'] = gateway['ssh_gateway']
ssh_kwargs['ssh_gateway_key'] = gateway['ssh_gateway_key']
ssh_kwargs['ssh_gateway_user'] = gateway['ssh_gateway_user']
if key_filename:
log.debug('Using {0} as the key_filename'.format(key_filename))
ssh_kwargs['key_filename'] = key_filename
elif password and kwargs.get('has_ssh_agent', False) is False:
ssh_kwargs['password'] = password
if root_cmd('test -e \'{0}\''.format(tmp_dir), tty, sudo,
allow_failure=True, **ssh_kwargs):
ret = root_cmd(('sh -c "( mkdir -p -m 700 \'{0}\' )"').format(tmp_dir),
tty, sudo, **ssh_kwargs)
if ret:
raise SaltCloudSystemExit(
'Can\'t create temporary '
'directory in {0} !'.format(tmp_dir)
)
if sudo:
comps = tmp_dir.lstrip('/').rstrip('/').split('/')
if len(comps) > 0:
if len(comps) > 1 or comps[0] != 'tmp':
ret = root_cmd(
'chown {0} "{1}"'.format(username, tmp_dir),
tty, sudo, **ssh_kwargs
)
if ret:
raise SaltCloudSystemExit(
'Cant set {0} ownership on {1}'.format(
username, tmp_dir))
if not isinstance(file_map, dict):
file_map = {}
# Copy an arbitrary group of files to the target system
remote_dirs = []
file_map_success = []
file_map_fail = []
for map_item in file_map:
local_file = map_item
remote_file = file_map[map_item]
if not os.path.exists(map_item):
log.error(
'The local file "{0}" does not exist, and will not be '
'copied to "{1}" on the target system'.format(
local_file, remote_file
)
)
file_map_fail.append({local_file: remote_file})
continue
if os.path.isdir(local_file):
dir_name = os.path.basename(local_file)
remote_dir = os.path.join(os.path.dirname(remote_file),
dir_name)
else:
remote_dir = os.path.dirname(remote_file)
if remote_dir not in remote_dirs:
root_cmd('mkdir -p \'{0}\''.format(remote_dir), tty, sudo, **ssh_kwargs)
if ssh_kwargs['username'] != 'root':
root_cmd(
'chown {0} \'{1}\''.format(
ssh_kwargs['username'], remote_dir
),
tty, sudo, **ssh_kwargs
)
remote_dirs.append(remote_dir)
ssh_file(
opts, remote_file, kwargs=ssh_kwargs, local_file=local_file
)
file_map_success.append({local_file: remote_file})
# Minion configuration
if minion_pem:
ssh_file(opts, '{0}/minion.pem'.format(tmp_dir), minion_pem, ssh_kwargs)
ret = root_cmd('chmod 600 \'{0}/minion.pem\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
if ret:
raise SaltCloudSystemExit(
'Can\'t set perms on {0}/minion.pem'.format(tmp_dir))
if minion_pub:
ssh_file(opts, '{0}/minion.pub'.format(tmp_dir), minion_pub, ssh_kwargs)
if master_sign_pub_file:
ssh_file(opts, '{0}/master_sign.pub'.format(tmp_dir), kwargs=ssh_kwargs, local_file=master_sign_pub_file)
if minion_conf:
if not isinstance(minion_conf, dict):
# Let's not just fail regarding this change, specially
# since we can handle it
raise DeprecationWarning(
'`salt.utils.cloud.deploy_script now only accepts '
'dictionaries for it\'s `minion_conf` parameter. '
'Loading YAML...'
)
minion_grains = minion_conf.pop('grains', {})
if minion_grains:
ssh_file(
opts,
'{0}/grains'.format(tmp_dir),
salt_config_to_yaml(minion_grains),
ssh_kwargs
)
ssh_file(
opts,
'{0}/minion'.format(tmp_dir),
salt_config_to_yaml(minion_conf),
ssh_kwargs
)
# Master configuration
if master_pem:
ssh_file(opts, '{0}/master.pem'.format(tmp_dir), master_pem, ssh_kwargs)
ret = root_cmd('chmod 600 \'{0}/master.pem\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
if ret:
raise SaltCloudSystemExit(
'Cant set perms on {0}/master.pem'.format(tmp_dir))
if master_pub:
ssh_file(opts, '{0}/master.pub'.format(tmp_dir), master_pub, ssh_kwargs)
if master_conf:
if not isinstance(master_conf, dict):
# Let's not just fail regarding this change, specially
# since we can handle it
raise DeprecationWarning(
'`salt.utils.cloud.deploy_script now only accepts '
'dictionaries for it\'s `master_conf` parameter. '
'Loading from YAML ...'
)
ssh_file(
opts,
'{0}/master'.format(tmp_dir),
salt_config_to_yaml(master_conf),
ssh_kwargs
)
# XXX: We need to make these paths configurable
preseed_minion_keys_tempdir = '{0}/preseed-minion-keys'.format(
tmp_dir)
if preseed_minion_keys is not None:
# Create remote temp dir
ret = root_cmd(
'mkdir \'{0}\''.format(preseed_minion_keys_tempdir),
tty, sudo, **ssh_kwargs
)
if ret:
raise SaltCloudSystemExit(
'Cant create {0}'.format(preseed_minion_keys_tempdir))
ret = root_cmd(
'chmod 700 \'{0}\''.format(preseed_minion_keys_tempdir),
tty, sudo, **ssh_kwargs
)
if ret:
raise SaltCloudSystemExit(
'Can\'t set perms on {0}'.format(
preseed_minion_keys_tempdir))
if ssh_kwargs['username'] != 'root':
root_cmd(
'chown {0} \'{1}\''.format(
ssh_kwargs['username'], preseed_minion_keys_tempdir
),
tty, sudo, **ssh_kwargs
)
# Copy pre-seed minion keys
for minion_id, minion_key in six.iteritems(preseed_minion_keys):
rpath = os.path.join(
preseed_minion_keys_tempdir, minion_id
)
ssh_file(opts, rpath, minion_key, ssh_kwargs)
if ssh_kwargs['username'] != 'root':
root_cmd(
'chown -R root \'{0}\''.format(
preseed_minion_keys_tempdir
),
tty, sudo, **ssh_kwargs
)
if ret:
raise SaltCloudSystemExit(
'Can\'t set ownership for {0}'.format(
preseed_minion_keys_tempdir))
# The actual deploy script
if script:
# got strange escaping issues with sudoer, going onto a
# subshell fixes that
ssh_file(opts, '{0}/deploy.sh'.format(tmp_dir), script, ssh_kwargs)
ret = root_cmd(
('sh -c "( chmod +x \'{0}/deploy.sh\' )";'
'exit $?').format(tmp_dir),
tty, sudo, **ssh_kwargs)
if ret:
raise SaltCloudSystemExit(
'Can\'t set perms on {0}/deploy.sh'.format(tmp_dir))
newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
queue = None
process = None
# Consider this code experimental. It causes Salt Cloud to wait
# for the minion to check in, and then fire a startup event.
# Disabled if parallel because it doesn't work!
if start_action and not parallel:
queue = multiprocessing.Queue()
process = multiprocessing.Process(
target=check_auth, kwargs=dict(
name=name, sock_dir=sock_dir,
timeout=newtimeout, queue=queue
)
)
log.debug('Starting new process to wait for salt-minion')
process.start()
# Run the deploy script
if script:
if 'bootstrap-salt' in script:
deploy_command += ' -c \'{0}\''.format(tmp_dir)
if make_syndic is True:
deploy_command += ' -S'
if make_master is True:
deploy_command += ' -M'
if make_minion is False:
deploy_command += ' -N'
if keep_tmp is True:
deploy_command += ' -K'
if preseed_minion_keys is not None:
deploy_command += ' -k \'{0}\''.format(
preseed_minion_keys_tempdir
)
if script_args:
deploy_command += ' {0}'.format(script_args)
if script_env:
if not isinstance(script_env, dict):
raise SaltCloudSystemExit(
'The \'script_env\' configuration setting NEEDS '
'to be a dictionary not a {0}'.format(
type(script_env)
)
)
environ_script_contents = ['#!/bin/sh']
for key, value in six.iteritems(script_env):
environ_script_contents.append(
'setenv {0} \'{1}\' >/dev/null 2>&1 || '
'export {0}=\'{1}\''.format(key, value)
)
environ_script_contents.append(deploy_command)
# Upload our environ setter wrapper
ssh_file(
opts,
'{0}/environ-deploy-wrapper.sh'.format(tmp_dir),
'\n'.join(environ_script_contents),
ssh_kwargs
)
root_cmd(
'chmod +x \'{0}/environ-deploy-wrapper.sh\''.format(tmp_dir),
tty, sudo, **ssh_kwargs
)
# The deploy command is now our wrapper
deploy_command = '\'{0}/environ-deploy-wrapper.sh\''.format(
tmp_dir,
)
if root_cmd(deploy_command, tty, sudo, **ssh_kwargs) != 0:
raise SaltCloudSystemExit(
'Executing the command \'{0}\' failed'.format(
deploy_command
)
)
log.debug('Executed command \'{0}\''.format(deploy_command))
# Remove the deploy script
if not keep_tmp:
root_cmd('rm -f \'{0}/deploy.sh\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/deploy.sh'.format(tmp_dir))
if script_env:
root_cmd(
'rm -f \'{0}/environ-deploy-wrapper.sh\''.format(
tmp_dir
),
tty, sudo, **ssh_kwargs
)
log.debug(
'Removed {0}/environ-deploy-wrapper.sh'.format(
tmp_dir
)
)
if keep_tmp:
log.debug(
'Not removing deployment files from {0}/'.format(tmp_dir)
)
else:
# Remove minion configuration
if minion_pub:
root_cmd('rm -f \'{0}/minion.pub\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/minion.pub'.format(tmp_dir))
if minion_pem:
root_cmd('rm -f \'{0}/minion.pem\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/minion.pem'.format(tmp_dir))
if minion_conf:
root_cmd('rm -f \'{0}/grains\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/grains'.format(tmp_dir))
root_cmd('rm -f \'{0}/minion\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/minion'.format(tmp_dir))
if master_sign_pub_file:
root_cmd('rm -f {0}/master_sign.pub'.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/master_sign.pub'.format(tmp_dir))
# Remove master configuration
if master_pub:
root_cmd('rm -f \'{0}/master.pub\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/master.pub'.format(tmp_dir))
if master_pem:
root_cmd('rm -f \'{0}/master.pem\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/master.pem'.format(tmp_dir))
if master_conf:
root_cmd('rm -f \'{0}/master\''.format(tmp_dir),
tty, sudo, **ssh_kwargs)
log.debug('Removed {0}/master'.format(tmp_dir))
# Remove pre-seed keys directory
if preseed_minion_keys is not None:
root_cmd(
'rm -rf \'{0}\''.format(
preseed_minion_keys_tempdir
), tty, sudo, **ssh_kwargs
)
log.debug(
'Removed {0}'.format(preseed_minion_keys_tempdir)
)
if start_action and not parallel:
queuereturn = queue.get()
process.join()
if queuereturn and start_action:
# client = salt.client.LocalClient(conf_file)
# output = client.cmd_iter(
# host, 'state.highstate', timeout=timeout
# )
# for line in output:
# print(line)
log.info(
'Executing {0} on the salt-minion'.format(
start_action
)
)
root_cmd(
'salt-call {0}'.format(start_action),
tty, sudo, **ssh_kwargs
)
log.info(
'Finished executing {0} on the salt-minion'.format(
start_action
)
)
# Fire deploy action
fire_event(
'event',
'{0} has been deployed at {1}'.format(name, host),
'salt/cloud/{0}/deploy_script'.format(name),
args={
'name': name,
'host': host
},
sock_dir=opts.get(
'sock_dir',
os.path.join(__opts__['sock_dir'], 'master')),
transport=opts.get('transport', 'zeromq')
)
if file_map_fail or file_map_success:
return {
'File Upload Success': file_map_success,
'File Upload Failure': file_map_fail,
}
return True
return False
def run_inline_script(host,
                      name=None,
                      port=22,
                      timeout=900,
                      username='root',
                      key_filename=None,
                      inline_script=None,
                      ssh_timeout=15,
                      display_ssh_output=True,
                      parallel=False,
                      sudo_password=None,
                      sudo=False,
                      password=None,
                      tty=None,
                      opts=None,
                      tmp_dir='/tmp/.saltcloud-inline_script',
                      **kwargs):
    '''
    Run the inline script commands, one by one

    Waits for the SSH port on ``host`` to open and for authentication to
    succeed, then executes each command from ``inline_script`` over SSH via
    ``root_cmd``, each wrapped in a subshell.

    :param host: hostname or IP address of the target machine
    :param inline_script: iterable of shell command strings to execute
    :param opts: Salt Cloud options dict (read for the ``use_sftp`` setting)
    :param tmp_dir: remote path whose existence gates script execution
    :**kwargs: catch all other things we may get but don't actually need/use
    '''
    gateway = None
    if 'gateway' in kwargs:
        gateway = kwargs['gateway']
    starttime = time.mktime(time.localtime())
    log.debug('Deploying {0} at {1}'.format(host, starttime))
    known_hosts_file = kwargs.get('known_hosts_file', '/dev/null')
    if wait_for_port(host=host, port=port, gateway=gateway):
        log.debug('SSH port {0} on {1} is available'.format(port, host))
        newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
        if wait_for_passwd(host, port=port, username=username,
                           password=password, key_filename=key_filename,
                           ssh_timeout=ssh_timeout,
                           display_ssh_output=display_ssh_output,
                           gateway=gateway, known_hosts_file=known_hosts_file):
            log.debug(
                'Logging into {0}:{1} as {2}'.format(
                    host, port, username
                )
            )
            # NOTE(review): newtimeout is recomputed here but never used
            # afterwards -- presumably leftover from deploy_script; confirm.
            newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
            # Connection settings forwarded to every root_cmd() call below
            ssh_kwargs = {
                'hostname': host,
                'port': port,
                'username': username,
                'timeout': ssh_timeout,
                'display_ssh_output': display_ssh_output,
                'sudo_password': sudo_password,
                'sftp': opts.get('use_sftp', False)
            }
            if gateway:
                ssh_kwargs['ssh_gateway'] = gateway['ssh_gateway']
                ssh_kwargs['ssh_gateway_key'] = gateway['ssh_gateway_key']
                ssh_kwargs['ssh_gateway_user'] = gateway['ssh_gateway_user']
            if key_filename:
                log.debug('Using {0} as the key_filename'.format(key_filename))
                ssh_kwargs['key_filename'] = key_filename
            elif password and 'has_ssh_agent' in kwargs and kwargs['has_ssh_agent'] is False:
                ssh_kwargs['password'] = password
            # TODO: write some tests ???
            # TODO: check edge cases (e.g. ssh gateways, salt deploy disabled, etc.)
            # Only proceed when the remote tmp_dir exists (allow_failure makes
            # a missing dir a no-op rather than an exception)
            if root_cmd('test -e \\"{0}\\"'.format(tmp_dir), tty, sudo,
                        allow_failure=True, **ssh_kwargs) and inline_script:
                log.debug('Found inline script to execute.')
                for cmd_line in inline_script:
                    log.info("Executing inline command: " + str(cmd_line))
                    # Run inside a subshell; avoids strange sudo escaping issues
                    ret = root_cmd('sh -c "( {0} )"'.format(cmd_line),
                                   tty, sudo, allow_failure=True, **ssh_kwargs)
                    if ret:
                        log.info("[" + str(cmd_line) + "] Output: " + str(ret))
    # TODO: ensure we send the correct return value
    return True
def fire_event(key, msg, tag, args=None, sock_dir=None, transport='zeromq'):
    '''
    Fire a deploy-related event on the Salt master event bus.

    :param key: key under which ``msg`` is stored when the event system
        requires a dict payload (0.17.x+ salt)
    :param msg: the event message/payload
    :param tag: the event tag to fire
    :param args: optional dict the message is merged into for dict payloads
    :param sock_dir: master event socket directory; omitting it is deprecated
    :param transport: event transport to use (default ``zeromq``)
    '''
    if sock_dir is None:
        salt.utils.warn_until(
            'Oxygen',
            # BUG FIX: the two string fragments previously concatenated
            # without a space, rendering as "`sock_dir`parameter"
            '`salt.utils.cloud.fire_event` requires that the `sock_dir` '
            'parameter be passed in when calling the function.'
        )
        sock_dir = __opts__['sock_dir']
    event = salt.utils.event.get_event(
        'master',
        sock_dir,
        transport,
        listen=False)
    try:
        event.fire_event(msg, tag)
    except ValueError:
        # We're using at least a 0.17.x version of salt
        if isinstance(args, dict):
            args[key] = msg
        else:
            args = {key: msg}
        event.fire_event(args, tag)
    # https://github.com/zeromq/pyzmq/issues/173#issuecomment-4037083
    # Assertion failed: get_load () == 0 (poller_base.cpp:32)
    time.sleep(0.025)
def _exec_ssh_cmd(cmd, error_msg=None, allow_failure=False, **kwargs):
    '''
    Execute an ssh/scp/sftp command line inside a pseudo-terminal, answering
    password and sudo prompts from ``kwargs`` as they appear.

    :param cmd: the full command line to run
    :param error_msg: message used on authentication failure; may contain
        ``{0}``/``{1}``/``{2}`` placeholders for cmd/err/traceback
    :param allow_failure: when True, a non-zero exit status is returned
        instead of raising SaltCloudSystemExit
    :return: the process exit status, or 1 if a terminal exception occurred
    '''
    if error_msg is None:
        error_msg = 'A wrong password has been issued while establishing ssh session.'
    password_retries = kwargs.get('password_retries', 3)
    proc = None  # guard: keeps the finally clause safe if vt.Terminal() raises
    try:
        stdout, stderr = None, None
        proc = vt.Terminal(
            cmd,
            shell=True,
            log_stdout=True,
            log_stderr=True,
            stream_stdout=kwargs.get('display_ssh_output', True),
            stream_stderr=kwargs.get('display_ssh_output', True)
        )
        sent_password = 0
        while proc.has_unread_data:
            stdout, stderr = proc.recv()
            if stdout and SSH_PASSWORD_PROMP_RE.search(stdout):
                # if authenticating with an SSH key and 'sudo' is found
                # in the password prompt
                if ('key_filename' in kwargs and kwargs['key_filename']
                    and SSH_PASSWORD_PROMP_SUDO_RE.search(stdout)
                ):
                    # BUG FIX: was kwargs['sudo_password', None], which indexes
                    # the dict with the tuple ('sudo_password', None) and
                    # always raises KeyError; use .get() instead
                    proc.sendline(kwargs.get('sudo_password', None))
                # elif authenticating via password and haven't exhausted our
                # password_retires
                elif (
                    kwargs.get('password', None)
                    and (sent_password < password_retries)
                ):
                    sent_password += 1
                    proc.sendline(kwargs['password'])
                # else raise an error as we are not authenticating properly
                # * not authenticating with an SSH key
                # * not authenticating with a Password
                else:
                    raise SaltCloudPasswordError(error_msg)
            # 0.0125 is really too fast on some systems
            time.sleep(0.5)
        if proc.exitstatus != 0 and allow_failure is False:
            raise SaltCloudSystemExit(
                'Command \'{0}\' failed. Exit code: {1}'.format(
                    cmd, proc.exitstatus
                )
            )
        return proc.exitstatus
    except vt.TerminalException as err:
        trace = traceback.format_exc()
        log.error(error_msg.format(cmd, err, trace))
    finally:
        if proc is not None:
            proc.close(terminate=True, kill=True)
    # Signal an error
    return 1
def scp_file(dest_path, contents=None, kwargs=None, local_file=None):
    '''
    Use scp or sftp to copy a file to a server

    Tries ``scp`` first, then falls back to ``sftp`` and finally ``rsync``.

    :param dest_path: destination path on the remote host
    :param contents: raw file contents (str or bytes), uploaded via temp file
    :param kwargs: connection settings (hostname, username, port,
        key_filename, ssh gateway settings, ...)
    :param local_file: path to an existing local file or directory to upload
        (takes precedence over ``contents``)
    :return: exit status of the upload command
    '''
    if kwargs is None:
        # Parity with sftp_file(): tolerate a missing kwargs dict
        kwargs = {}
    file_to_upload = None
    try:
        if contents is not None:
            tmpfd = None  # guard so the finally clause is safe if mkstemp fails
            try:
                tmpfd, file_to_upload = tempfile.mkstemp()
                # BUG FIX: os.write() requires bytes on Python 3; encode str
                # contents the same way sftp_file() does
                if isinstance(contents, str):
                    os.write(tmpfd, contents.encode(__salt_system_encoding__))
                else:
                    os.write(tmpfd, contents)
            finally:
                if tmpfd is not None:
                    try:
                        os.close(tmpfd)
                    except OSError as exc:
                        if exc.errno != errno.EBADF:
                            raise exc
        log.debug('Uploading {0} to {1}'.format(dest_path, kwargs['hostname']))
        ssh_args = [
            # Don't add new hosts to the host key database
            '-oStrictHostKeyChecking=no',
            # Set hosts key database path to /dev/null, i.e., non-existing
            '-oUserKnownHostsFile=/dev/null',
            # Don't re-use the SSH connection. Less failures.
            '-oControlPath=none'
        ]
        if local_file is not None:
            file_to_upload = local_file
            if os.path.isdir(local_file):
                # Recursive copy for directories
                ssh_args.append('-r')
        if 'key_filename' in kwargs:
            # There should never be both a password and an ssh key passed in, so
            ssh_args.extend([
                # tell SSH to skip password authentication
                '-oPasswordAuthentication=no',
                '-oChallengeResponseAuthentication=no',
                # Make sure public key authentication is enabled
                '-oPubkeyAuthentication=yes',
                # do only use the provided identity file
                '-oIdentitiesOnly=yes',
                # No Keyboard interaction!
                '-oKbdInteractiveAuthentication=no',
                # Also, specify the location of the key file
                '-i {0}'.format(kwargs['key_filename'])
            ])
        if 'port' in kwargs:
            ssh_args.append('-oPort={0}'.format(kwargs['port']))
        if 'ssh_gateway' in kwargs:
            ssh_gateway = kwargs['ssh_gateway']
            ssh_gateway_port = 22
            ssh_gateway_key = ''
            ssh_gateway_user = 'root'
            if ':' in ssh_gateway:
                ssh_gateway, ssh_gateway_port = ssh_gateway.split(':')
            if 'ssh_gateway_port' in kwargs:
                ssh_gateway_port = kwargs['ssh_gateway_port']
            if 'ssh_gateway_key' in kwargs:
                ssh_gateway_key = '-i {0}'.format(kwargs['ssh_gateway_key'])
            if 'ssh_gateway_user' in kwargs:
                ssh_gateway_user = kwargs['ssh_gateway_user']
            ssh_args.append(
                # Setup ProxyCommand
                '-oProxyCommand="ssh {0} {1} {2} {3} {4}@{5} -p {6} nc -q0 %h %p"'.format(
                    # Don't add new hosts to the host key database
                    '-oStrictHostKeyChecking=no',
                    # Set hosts key database path to /dev/null, i.e., non-existing
                    '-oUserKnownHostsFile=/dev/null',
                    # Don't re-use the SSH connection. Less failures.
                    '-oControlPath=none',
                    ssh_gateway_key,
                    ssh_gateway_user,
                    ssh_gateway,
                    ssh_gateway_port
                )
            )
        # Bracket IPv6 literals for scp/sftp syntax
        try:
            if socket.inet_pton(socket.AF_INET6, kwargs['hostname']):
                ipaddr = '[{0}]'.format(kwargs['hostname'])
            else:
                ipaddr = kwargs['hostname']
        except socket.error:
            ipaddr = kwargs['hostname']
        if file_to_upload is None:
            log.warning(
                'No source file to upload. Please make sure that either file '
                'contents or the path to a local file are provided.'
            )
        cmd = (
            'scp {0} {1} {2[username]}@{4}:{3} || '
            'echo "put {1} {3}" | sftp {0} {2[username]}@{4} || '
            'rsync -avz -e "ssh {0}" {1} {2[username]}@{2[hostname]}:{3}'.format(
                ' '.join(ssh_args), file_to_upload, kwargs, dest_path, ipaddr
            )
        )
        log.debug('SCP command: \'{0}\''.format(cmd))
        retcode = _exec_ssh_cmd(cmd,
                                error_msg='Failed to upload file \'{0}\': {1}\n{2}',
                                password_retries=3,
                                **kwargs)
    finally:
        if contents is not None and file_to_upload is not None:
            try:
                os.remove(file_to_upload)
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    raise exc
    return retcode
def ssh_file(opts, dest_path, contents=None, kwargs=None, local_file=None):
    '''
    Copies a file to the remote SSH target using either sftp or scp, as
    configured via the ``file_transport`` option (sftp is the default).
    '''
    transport = opts.get('file_transport', 'sftp')
    uploader = sftp_file if transport == 'sftp' else scp_file
    return uploader(dest_path, contents, kwargs, local_file)
def sftp_file(dest_path, contents=None, kwargs=None, local_file=None):
    '''
    Use sftp to upload a file to a server

    :param dest_path: destination path on the remote host
    :param contents: raw file contents (str or bytes), uploaded via temp file
    :param kwargs: connection settings (hostname, username, port,
        key_filename, ssh gateway settings, ...)
    :param local_file: path to an existing local file or directory to upload
        (takes precedence over ``contents``)
    :return: exit status of the sftp command
    '''
    put_args = []
    if kwargs is None:
        kwargs = {}
    file_to_upload = None
    try:
        if contents is not None:
            try:
                tmpfd, file_to_upload = tempfile.mkstemp()
                if isinstance(contents, str):
                    # os.write() needs bytes; encode text with the system encoding
                    os.write(tmpfd, contents.encode(__salt_system_encoding__))
                else:
                    os.write(tmpfd, contents)
            finally:
                try:
                    os.close(tmpfd)
                except OSError as exc:
                    if exc.errno != errno.EBADF:
                        raise exc
        if local_file is not None:
            file_to_upload = local_file
            if os.path.isdir(local_file):
                # Upload directories recursively
                put_args = ['-r']
        log.debug('Uploading {0} to {1} (sftp)'.format(dest_path, kwargs.get('hostname')))
        ssh_args = [
            # Don't add new hosts to the host key database
            '-oStrictHostKeyChecking=no',
            # Set hosts key database path to /dev/null, i.e., non-existing
            '-oUserKnownHostsFile=/dev/null',
            # Don't re-use the SSH connection. Less failures.
            '-oControlPath=none'
        ]
        if 'key_filename' in kwargs:
            # There should never be both a password and an ssh key passed in, so
            ssh_args.extend([
                # tell SSH to skip password authentication
                '-oPasswordAuthentication=no',
                '-oChallengeResponseAuthentication=no',
                # Make sure public key authentication is enabled
                '-oPubkeyAuthentication=yes',
                # do only use the provided identity file
                '-oIdentitiesOnly=yes',
                # No Keyboard interaction!
                '-oKbdInteractiveAuthentication=no',
                # Also, specify the location of the key file
                '-oIdentityFile={0}'.format(kwargs['key_filename'])
            ])
        if 'port' in kwargs:
            ssh_args.append('-oPort={0}'.format(kwargs['port']))
        if 'ssh_gateway' in kwargs:
            ssh_gateway = kwargs['ssh_gateway']
            ssh_gateway_port = 22
            ssh_gateway_key = ''
            ssh_gateway_user = 'root'
            if ':' in ssh_gateway:
                ssh_gateway, ssh_gateway_port = ssh_gateway.split(':')
            if 'ssh_gateway_port' in kwargs:
                ssh_gateway_port = kwargs['ssh_gateway_port']
            if 'ssh_gateway_key' in kwargs:
                ssh_gateway_key = '-i {0}'.format(kwargs['ssh_gateway_key'])
            if 'ssh_gateway_user' in kwargs:
                ssh_gateway_user = kwargs['ssh_gateway_user']
            ssh_args.append(
                # Setup ProxyCommand
                '-oProxyCommand="ssh {0} {1} {2} {3} {4}@{5} -p {6} nc -q0 %h %p"'.format(
                    # Don't add new hosts to the host key database
                    '-oStrictHostKeyChecking=no',
                    # Set hosts key database path to /dev/null, i.e., non-existing
                    '-oUserKnownHostsFile=/dev/null',
                    # Don't re-use the SSH connection. Less failures.
                    '-oControlPath=none',
                    ssh_gateway_key,
                    ssh_gateway_user,
                    ssh_gateway,
                    ssh_gateway_port
                )
            )
        # Bracket IPv6 literals for sftp target syntax
        try:
            if socket.inet_pton(socket.AF_INET6, kwargs['hostname']):
                ipaddr = '[{0}]'.format(kwargs['hostname'])
            else:
                ipaddr = kwargs['hostname']
        except socket.error:
            ipaddr = kwargs['hostname']
        if file_to_upload is None:
            log.warning(
                'No source file to upload. Please make sure that either file '
                'contents or the path to a local file are provided.'
            )
        cmd = 'echo "put {0} {1} {2}" | sftp {3} {4[username]}@{5}'.format(
            ' '.join(put_args), file_to_upload, dest_path, ' '.join(ssh_args), kwargs, ipaddr
        )
        log.debug('SFTP command: \'{0}\''.format(cmd))
        retcode = _exec_ssh_cmd(cmd,
                                error_msg='Failed to upload file \'{0}\': {1}\n{2}',
                                password_retries=3,
                                **kwargs)
    finally:
        # Remove the temp file we created; never touch a caller's local_file
        if contents is not None:
            try:
                os.remove(file_to_upload)
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    raise exc
    return retcode
def win_cmd(command, **kwargs):
    '''
    Wrapper for commands to be run against Windows boxes

    Executes ``command`` through NonBlockingPopen, optionally streaming its
    output, and returns the process return code (or 1 on failure to execute).

    :param command: the full command line to execute (run through a shell)
    :param logging_command: optional sanitized version of the command used
        only for log output (e.g. with credentials redacted)
    :param display_ssh_output: stream the command's stdout/stderr (default True)
    '''
    logging_command = kwargs.get('logging_command', None)
    try:
        proc = NonBlockingPopen(
            command,
            shell=True,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stream_stds=kwargs.get('display_ssh_output', True),
            logging_command=logging_command,
        )
        # Log the sanitized form when one was provided, otherwise the raw command
        if logging_command is None:
            log.debug(
                'Executing command(PID %s): \'%s\'',
                proc.pid, command
            )
        else:
            log.debug(
                'Executing command(PID %s): \'%s\'',
                proc.pid, logging_command
            )
        proc.poll_and_read_until_finish()
        proc.communicate()
        return proc.returncode
    except Exception as err:
        log.error(
            'Failed to execute command \'{0}\': {1}\n'.format(
                logging_command,
                err
            ),
            exc_info=True
        )
    # Signal an error
    return 1
def winrm_cmd(session, command, flags, **kwargs):
    '''
    Wrapper for commands to be run against Windows boxes using WinRM.

    :param session: an established WinRM session object
    :param command: the command to execute
    :param flags: flags/arguments passed along with the command
    :return: the status code reported by the WinRM command
    '''
    log.debug('Executing WinRM command: {0} {1}'.format(command, flags))
    response = session.run_cmd(command, flags)
    return response.status_code
def root_cmd(command, tty, sudo, allow_failure=False, **kwargs):
    '''
    Wrapper for commands to be run as root

    Builds a full ``ssh`` command line (host-key options, identity file,
    optional gateway ProxyCommand, optional ``sudo`` prefix) and executes
    ``command`` on the remote host via ``_exec_ssh_cmd``.

    :param command: the remote command to execute
    :param tty: when truthy, force pseudo-tty allocation (``-t -t``)
    :param sudo: run the command through sudo
    :param allow_failure: passed to ``_exec_ssh_cmd``; when True a non-zero
        exit code is returned instead of raising
    :return: the remote command's exit status
    '''
    logging_command = command
    sudo_password = kwargs.get('sudo_password', None)
    if sudo:
        if sudo_password is None:
            command = 'sudo {0}'.format(command)
            logging_command = command
        else:
            # Keep the sudo password out of anything that gets logged
            logging_command = 'sudo -S "XXX-REDACTED-XXX" {0}'.format(command)
            command = 'sudo -S {0}'.format(command)
        log.debug('Using sudo to run command {0}'.format(logging_command))
    ssh_args = []
    if tty:
        # Use double `-t` on the `ssh` command, it's necessary when `sudo` has
        # `requiretty` enforced.
        ssh_args.extend(['-t', '-t'])
    known_hosts_file = kwargs.get('known_hosts_file', '/dev/null')
    host_key_checking = 'no'
    if known_hosts_file != '/dev/null':
        # A real known_hosts file was supplied, so verify host keys against it
        host_key_checking = 'yes'
    ssh_args.extend([
        # Don't add new hosts to the host key database
        '-oStrictHostKeyChecking={0}'.format(host_key_checking),
        # Set hosts key database path to /dev/null, i.e., non-existing
        '-oUserKnownHostsFile={0}'.format(known_hosts_file),
        # Don't re-use the SSH connection. Less failures.
        '-oControlPath=none'
    ])
    if 'key_filename' in kwargs:
        # There should never be both a password and an ssh key passed in, so
        ssh_args.extend([
            # tell SSH to skip password authentication
            '-oPasswordAuthentication=no',
            '-oChallengeResponseAuthentication=no',
            # Make sure public key authentication is enabled
            '-oPubkeyAuthentication=yes',
            # do only use the provided identity file
            '-oIdentitiesOnly=yes',
            # No Keyboard interaction!
            '-oKbdInteractiveAuthentication=no',
            # Also, specify the location of the key file
            '-i {0}'.format(kwargs['key_filename'])
        ])
    if 'ssh_timeout' in kwargs:
        ssh_args.extend(['-oConnectTimeout={0}'.format(kwargs['ssh_timeout'])])
    if 'ssh_gateway' in kwargs:
        ssh_gateway = kwargs['ssh_gateway']
        ssh_gateway_port = 22
        ssh_gateway_key = ''
        ssh_gateway_user = 'root'
        if ':' in ssh_gateway:
            ssh_gateway, ssh_gateway_port = ssh_gateway.split(':')
        if 'ssh_gateway_port' in kwargs:
            ssh_gateway_port = kwargs['ssh_gateway_port']
        if 'ssh_gateway_key' in kwargs:
            ssh_gateway_key = '-i {0}'.format(kwargs['ssh_gateway_key'])
        if 'ssh_gateway_user' in kwargs:
            ssh_gateway_user = kwargs['ssh_gateway_user']
        ssh_args.extend([
            # Setup ProxyCommand
            '-oProxyCommand="ssh {0} {1} {2} {3} {4}@{5} -p {6} nc -q0 %h %p"'.format(
                # Don't add new hosts to the host key database
                '-oStrictHostKeyChecking=no',
                # Set hosts key database path to /dev/null, i.e., non-existing
                '-oUserKnownHostsFile=/dev/null',
                # Don't re-use the SSH connection. Less failures.
                '-oControlPath=none',
                ssh_gateway_key,
                ssh_gateway_user,
                ssh_gateway,
                ssh_gateway_port
            )
        ])
        log.info(
            'Using SSH gateway {0}@{1}:{2}'.format(
                ssh_gateway_user, ssh_gateway, ssh_gateway_port
            )
        )
    if 'port' in kwargs:
        ssh_args.extend(['-p {0}'.format(kwargs['port'])])
    cmd = 'ssh {0} {1[username]}@{1[hostname]} '.format(
        ' '.join(ssh_args),
        kwargs
    )
    logging_command = cmd + logging_command
    # Quote the remote command so the local shell passes it through intact
    cmd = cmd + pipes.quote(command)
    hard_timeout = kwargs.get('hard_timeout')
    if hard_timeout is not None:
        # Wrap in coreutils `timeout` to enforce a hard wall-clock limit
        logging_command = 'timeout {0} {1}'.format(hard_timeout, logging_command)
        cmd = 'timeout {0} {1}'.format(hard_timeout, cmd)
    log.debug('SSH command: \'{0}\''.format(logging_command))
    retcode = _exec_ssh_cmd(cmd, allow_failure=allow_failure, **kwargs)
    return retcode
def check_auth(name, sock_dir=None, queue=None, timeout=300):
    '''
    This function is called from a multiprocess instance, to wait for a minion
    to become available to receive salt commands

    :param name: the minion id to wait for
    :param sock_dir: master event socket directory
    :param queue: multiprocessing queue the minion id is pushed onto once the
        minion's start event is seen
    :param timeout: maximum number of seconds to wait (default 300)
    '''
    event = salt.utils.event.SaltEvent('master', sock_dir, listen=True)
    starttime = time.mktime(time.localtime())
    newtimeout = timeout
    log.debug(
        'In check_auth, waiting for {0} to become available'.format(
            name
        )
    )
    while newtimeout > 0:
        newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
        ret = event.get_event(full=True)
        if ret is None:
            continue
        # A 'minion_start' event tagged with our minion id means the minion
        # is up and connected to the master
        if ret['tag'] == 'minion_start' and ret['data']['id'] == name:
            queue.put(name)
            # Zero the countdown so the loop exits
            newtimeout = 0
            log.debug('Minion {0} is ready to receive commands'.format(name))
def ip_to_int(ip):
    '''
    Converts an IP address to an integer
    '''
    value = 0
    for part in ip.split('.'):
        value = (value << 8) + int(part)
    return value
def is_public_ip(ip):
    '''
    Determines whether an IP address falls within one of the private IP ranges

    Returns False for RFC 1918 IPv4 ranges and IPv6 link-local addresses,
    True otherwise.
    '''
    if ':' in ip:
        # ipv6
        if ip.startswith('fe80:'):
            # ipv6 link local
            return False
        return True
    addr = ip_to_int(ip)
    # BUG FIX: the previous strict (>/<) comparisons excluded the first and
    # last address of each range (e.g. 10.0.0.0 itself counted as public);
    # use inclusive bounds. Comment also corrected from /24 to /8.
    if 167772160 <= addr <= 184549375:
        # 10.0.0.0/8
        return False
    elif 3232235520 <= addr <= 3232301055:
        # 192.168.0.0/16
        return False
    elif 2886729728 <= addr <= 2887778303:
        # 172.16.0.0/12
        return False
    return True
def check_name(name, safe_chars):
    '''
    Check whether the specified name contains invalid characters

    ``safe_chars`` is interpreted as the body of a regex character class;
    a SaltCloudException is raised if ``name`` contains anything outside it.
    '''
    if re.search('[^{0}]'.format(safe_chars), name):
        raise SaltCloudException(
            '{0} contains characters not supported by this cloud provider. '
            'Valid characters are: {1}'.format(
                name, safe_chars
            )
        )
def remove_sshkey(host, known_hosts=None):
    '''
    Remove a host from the known_hosts file

    :param host: hostname/IP whose key should be removed
    :param known_hosts: path to the known_hosts file; when None it is derived
        from ``$HOME`` or the current user's password database entry
    '''
    if known_hosts is None:
        if 'HOME' in os.environ:
            known_hosts = '{0}/.ssh/known_hosts'.format(os.environ['HOME'])
        else:
            try:
                # BUG FIX: the struct_passwd attribute is pw_dir, not pwd_dir;
                # the old spelling raised AttributeError (silently swallowed)
                known_hosts = '{0}/.ssh/known_hosts'.format(
                    pwd.getpwuid(os.getuid()).pw_dir
                )
            except Exception:
                # Best effort only; fall through with known_hosts unset
                pass
    if known_hosts is not None:
        log.debug(
            'Removing ssh key for {0} from known hosts file {1}'.format(
                host, known_hosts
            )
        )
    else:
        log.debug(
            'Removing ssh key for {0} from known hosts file'.format(host)
        )
    # NOTE(review): the resolved known_hosts path is only logged; ssh-keygen
    # is invoked without -f and so edits the default file -- confirm intent.
    cmd = 'ssh-keygen -R {0}'.format(host)
    subprocess.call(cmd, shell=True)
def wait_for_ip(update_callback,
                update_args=None,
                update_kwargs=None,
                timeout=5 * 60,
                interval=5,
                interval_multiplier=1,
                max_failures=10):
    '''
    Helper function that waits for an IP address for a specific maximum amount
    of time.

    :param update_callback: callback function which queries the cloud provider
                            for the VM ip address. It must return None if the
                            required data, IP included, is not available yet.
    :param update_args: Arguments to pass to update_callback
    :param update_kwargs: Keyword arguments to pass to update_callback
    :param timeout: The maximum amount of time(in seconds) to wait for the IP
                    address.
    :param interval: The looping interval, i.e., the amount of time to sleep
                     before the next iteration.
    :param interval_multiplier: Increase the interval by this multiplier after
                                each request; helps with throttling
    :param max_failures: If update_callback returns ``False`` it's considered
                         query failure. This value is the amount of failures
                         accepted before giving up.
    :returns: The update_callback returned data
    :raises: SaltCloudExecutionTimeout
    '''
    call_args = () if update_args is None else update_args
    call_kwargs = {} if update_kwargs is None else update_kwargs
    duration = timeout
    remaining = timeout
    while True:
        log.debug(
            'Waiting for VM IP. Giving up in 00:{0:02d}:{1:02d}.'.format(
                int(remaining // 60),
                int(remaining % 60)
            )
        )
        data = update_callback(*call_args, **call_kwargs)
        if data is False:
            # An explicit False from the callback counts as a query failure
            log.debug(
                '\'update_callback\' has returned \'False\', which is '
                'considered a failure. Remaining Failures: {0}.'.format(
                    max_failures
                )
            )
            max_failures -= 1
            if max_failures <= 0:
                raise SaltCloudExecutionFailure(
                    'Too many failures occurred while waiting for '
                    'the IP address.'
                )
        elif data is not None:
            # Got the requested data; we're done
            return data
        if remaining < 0:
            raise SaltCloudExecutionTimeout(
                'Unable to get IP for 00:{0:02d}:{1:02d}.'.format(
                    int(duration // 60),
                    int(duration % 60)
                )
            )
        time.sleep(interval)
        remaining -= interval
        if interval_multiplier > 1:
            # Back off between polls, but never sleep past the deadline
            interval *= interval_multiplier
            if interval > remaining:
                interval = remaining + 1
            log.info('Interval multiplier in effect; interval is '
                     'now {0}s.'.format(interval))
def list_nodes_select(nodes, selection, call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_select function must be called '
            'with -f or --function.'
        )
    if 'error' in nodes:
        raise SaltCloudSystemExit(
            'An error occurred while listing nodes: {0}'.format(
                nodes['error']['Errors']['Error']['Message']
            )
        )
    ret = {}
    for node, data in nodes.items():
        # Keep only the fields named in the selection
        ret[node] = {
            key: data[key] for key in data if str(key) in selection
        }
    return ret
def lock_file(filename, interval=.5, timeout=15):
    '''
    Lock a file; if it is already locked, then wait for it to become available
    before locking it.

    Note that these locks are only recognized by Salt Cloud, and not other
    programs or platforms.

    :param filename: path of the file to lock; a ``<filename>.lock`` marker
        file is created next to it
    :param interval: seconds to sleep between polls of the lock file
    :param timeout: give up after this many seconds
    :return: ``False`` if the lock could not be obtained; ``None`` on success
    '''
    log.trace('Attempting to obtain lock for {0}'.format(filename))
    lock = filename + '.lock'
    start = time.time()
    while True:
        if os.path.exists(lock):
            if time.time() - start >= timeout:
                log.warning('Unable to obtain lock for {0}'.format(filename))
                # NOTE(review): failure returns False while success returns
                # None -- both falsy, so callers cannot distinguish the two
                # via truthiness; confirm before relying on the return value.
                return False
            time.sleep(interval)
        else:
            break
    # Touch the lock file to claim the lock
    with salt.utils.fopen(lock, 'a'):
        pass
def unlock_file(filename):
    '''
    Unlock a locked file

    Note that these locks are only recognized by Salt Cloud, and not other
    programs or platforms.
    '''
    log.trace('Removing lock for {0}'.format(filename))
    lock_path = filename + '.lock'
    try:
        os.remove(lock_path)
    except OSError as exc:
        # Missing lock file is not fatal; just note it
        log.trace('Unable to remove lock for {0}: {1}'.format(filename, exc))
def cachedir_index_add(minion_id, profile, driver, provider, base=None):
    '''
    Add an entry to the cachedir index. This generally only needs to happen when
    a new instance is created. This entry should contain:

    .. code-block:: yaml

        - minion_id
        - profile used to create the instance
        - provider and driver name

    The intent of this function is to speed up lookups for the cloud roster for
    salt-ssh. However, other code that makes use of profile information can also
    make use of this function.
    '''
    base = init_cachedir(base)
    index_file = os.path.join(base, 'index.p')
    # Serialize concurrent index updates with a lock file
    lock_file(index_file)
    if os.path.exists(index_file):
        # NOTE(review): the index is msgpack data but the file is opened in
        # text mode ('r'/'w'); on Python 3 this likely needs binary mode --
        # confirm against the rest of the codebase.
        with salt.utils.fopen(index_file, 'r') as fh_:
            index = msgpack.load(fh_)
    else:
        index = {}
    # Only the provider alias (before any ':driver' suffix) is stored
    prov_comps = provider.split(':')
    index.update({
        minion_id: {
            'id': minion_id,
            'profile': profile,
            'driver': driver,
            'provider': prov_comps[0],
        }
    })
    with salt.utils.fopen(index_file, 'w') as fh_:
        msgpack.dump(index, fh_)
    unlock_file(index_file)
def cachedir_index_del(minion_id, base=None):
    '''
    Delete an entry from the cachedir index. This generally only needs to happen
    when an instance is deleted.

    :param minion_id: the minion whose index entry should be removed
    :param base: optional cachedir base; defaults via init_cachedir()
    '''
    base = init_cachedir(base)
    index_file = os.path.join(base, 'index.p')
    # Serialize concurrent index updates with a lock file
    lock_file(index_file)
    if os.path.exists(index_file):
        # NOTE(review): text-mode open of msgpack data; on Python 3 this
        # likely needs binary mode -- confirm.
        with salt.utils.fopen(index_file, 'r') as fh_:
            index = msgpack.load(fh_)
    else:
        # No index yet, nothing to delete
        # NOTE(review): this early return skips unlock_file(), leaving the
        # lock file behind -- confirm whether that is intentional.
        return
    if minion_id in index:
        del index[minion_id]
        with salt.utils.fopen(index_file, 'w') as fh_:
            msgpack.dump(index, fh_)
    unlock_file(index_file)
def init_cachedir(base=None):
    '''
    Initialize the cachedir needed for Salt Cloud to keep track of minions

    Ensures the base directory plus its ``requested`` and ``active``
    subdirectories exist, then returns the base path.
    '''
    if base is None:
        base = __opts__['cachedir']
    for path in (base,
                 os.path.join(base, 'requested'),
                 os.path.join(base, 'active')):
        if not os.path.exists(path):
            os.makedirs(path)
    os.chmod(base, 0o755)
    return base
# FIXME: This function seems used nowhere. Dead code?
def request_minion_cachedir(
        minion_id,
        opts=None,
        fingerprint='',
        pubkey=None,
        provider=None,
        base=None,
):
    '''
    Creates an entry in the requested/ cachedir. This means that Salt Cloud has
    made a request to a cloud provider to create an instance, but it has not
    yet verified that the instance properly exists.

    If the fingerprint is unknown, a raw pubkey can be passed in, and a
    fingerprint will be calculated. If both are empty, then the fingerprint
    will be set to None.
    '''
    if base is None:
        base = __opts__['cachedir']
    if not fingerprint and pubkey is not None:
        # Derive the fingerprint from the raw public key; hash type comes
        # from opts when available, otherwise sha256
        fingerprint = salt.utils.pem_finger(key=pubkey, sum_type=(opts and opts.get('hash_type') or 'sha256'))
    init_cachedir(base)
    data = {
        'minion_id': minion_id,
        'fingerprint': fingerprint,
        'provider': provider,
    }
    fname = '{0}.p'.format(minion_id)
    path = os.path.join(base, 'requested', fname)
    # NOTE(review): msgpack data written in text mode ('w'); on Python 3 this
    # likely needs 'wb' -- confirm.
    with salt.utils.fopen(path, 'w') as fh_:
        msgpack.dump(data, fh_)
def change_minion_cachedir(
        minion_id,
        cachedir,
        data=None,
        base=None,
):
    '''
    Changes the info inside a minion's cachedir entry. The type of cachedir
    must be specified (i.e., 'requested' or 'active'). A dict is also passed in
    which contains the data to be changed.

    Example:

        change_minion_cachedir(
            'myminion',
            'requested',
            {'fingerprint': '26:5c:8c:de:be:fe:89:c0:02:ed:27:65:0e:bb:be:60'},
        )

    :return: ``False`` when ``data`` is not a dict; otherwise ``None``
    '''
    if not isinstance(data, dict):
        return False
    if base is None:
        base = __opts__['cachedir']
    fname = '{0}.p'.format(minion_id)
    path = os.path.join(base, cachedir, fname)
    # Read-modify-write of the msgpack cache entry
    # NOTE(review): text-mode open of msgpack data; on Python 3 this likely
    # needs binary mode -- confirm.
    with salt.utils.fopen(path, 'r') as fh_:
        cache_data = msgpack.load(fh_)
    cache_data.update(data)
    with salt.utils.fopen(path, 'w') as fh_:
        msgpack.dump(cache_data, fh_)
def activate_minion_cachedir(minion_id, base=None):
    '''
    Moves a minion from the requested/ cachedir into the active/ cachedir. This
    means that Salt Cloud has verified that a requested instance properly
    exists, and should be expected to exist from here on out.
    '''
    if base is None:
        base = __opts__['cachedir']
    fname = '{0}.p'.format(minion_id)
    shutil.move(
        os.path.join(base, 'requested', fname),
        os.path.join(base, 'active')
    )
def delete_minion_cachedir(minion_id, provider, opts, base=None):
    '''
    Deletes a minion's entry from the cloud cachedir. It will search through
    all cachedirs to find the minion's cache file.
    Needs `update_cachedir` set to True.

    :param minion_id: the minion whose cache entries should be removed
    :param provider: the provider alias the minion was created under
    :param opts: dict merged into ``__opts__`` before the feature check
    :param base: optional cachedir base; defaults to ``__opts__['cachedir']``
    '''
    if isinstance(opts, dict):
        __opts__.update(opts)
    # Cachedir bookkeeping is opt-in; bail out when it is disabled
    if __opts__.get('update_cachedir', False) is False:
        return
    if base is None:
        base = __opts__['cachedir']
    # The driver name is the (single) key of the provider's config entry
    driver = next(six.iterkeys(__opts__['providers'][provider]))
    fname = '{0}.p'.format(minion_id)
    # The entry may live in either lifecycle directory; remove from both
    for cachedir in 'requested', 'active':
        path = os.path.join(base, cachedir, driver, provider, fname)
        log.debug('path: {0}'.format(path))
        if os.path.exists(path):
            os.remove(path)
def list_cache_nodes_full(opts=None, provider=None, base=None):
    '''
    Return a list of minion data from the cloud cache, rather from the cloud
    providers themselves. This is the cloud cache version of list_nodes_full().

    Returns None (no-op) when ``update_cachedir`` is disabled; otherwise a
    nested dict keyed as ``{driver: {provider: {minion_id: data}}}``.
    '''
    if opts is None:
        opts = __opts__
    if opts.get('update_cachedir', False) is False:
        return

    if base is None:
        base = os.path.join(opts['cachedir'], 'active')

    minions = {}
    # First, get a list of all drivers in use
    for driver in os.listdir(base):
        minions[driver] = {}
        prov_dir = os.path.join(base, driver)
        # Then, get a list of all providers per driver
        for prov in os.listdir(prov_dir):
            # If a specific provider is requested, filter out everyone else
            if provider and provider != prov:
                continue
            minions[driver][prov] = {}
            min_dir = os.path.join(prov_dir, prov)
            # Get a list of all nodes per provider
            for fname in os.listdir(min_dir):
                # Finally, get a list of full minion data
                fpath = os.path.join(min_dir, fname)
                minion_id = fname[:-2]  # strip '.p' from end of msgpack filename
                with salt.utils.fopen(fpath, 'r') as fh_:
                    minions[driver][prov][minion_id] = msgpack.load(fh_)
    return minions
def cache_nodes_ip(opts, base=None):
    '''
    Retrieve a list of all nodes from Salt Cloud cache, and any associated IP
    addresses. Returns a dict.
    '''
    if base is None:
        base = opts['cachedir']

    minions = list_cache_nodes_full(opts, base=base)
    # Fix: the cached node data was previously computed and then silently
    # discarded (the function returned None), contradicting the docstring.
    # Return the nested {driver: {provider: {minion_id: data}}} dict, or None
    # when 'update_cachedir' is disabled.
    return minions
def update_bootstrap(config, url=None):
    '''
    Update the salt-bootstrap script

    url can be one of:

        - The URL to fetch the bootstrap script from
        - The absolute path to the bootstrap
        - The content of the bootstrap script

    Returns an ``{'error': ...}`` dict on download failures, otherwise
    ``{'Success': {'Files updated': [...]}}``.
    '''
    default_url = config.get('bootstrap_script_url',
                             'https://bootstrap.saltstack.com')
    if not url:
        url = default_url
    if not url:
        raise ValueError('Cant get any source to update')
    # Source #1: an HTTP(S) (or other scheme) URL to download from.
    if url.startswith('http') or '://' in url:
        log.debug('Updating the bootstrap-salt.sh script to latest stable')
        try:
            import requests
        except ImportError:
            return {'error': (
                'Updating the bootstrap-salt.sh script requires the '
                'Python requests library to be installed'
            )}
        req = requests.get(url)
        if req.status_code != 200:
            return {'error': (
                'Failed to download the latest stable version of the '
                'bootstrap-salt.sh script from {0}. HTTP error: '
                '{1}'.format(
                    url, req.status_code
                )
            )}
        script_content = req.text
        if url == default_url:
            script_name = 'bootstrap-salt.sh'
        else:
            script_name = os.path.basename(url)
    # Source #2: a local file path.
    elif os.path.exists(url):
        with salt.utils.fopen(url) as fic:
            script_content = fic.read()
        script_name = os.path.basename(url)
    # in last case, assuming we got a script content
    else:
        script_content = url
        script_name = '{0}.sh'.format(
            hashlib.sha1(script_content).hexdigest()
        )

    if not script_content:
        raise ValueError('No content in bootstrap script !')

    # Get the path to the built-in deploy scripts directory
    builtin_deploy_dir = os.path.join(
        os.path.dirname(__file__),
        'deploy'
    )

    # Compute the search path from the current loaded opts conf_file
    # value
    deploy_d_from_conf_file = os.path.join(
        os.path.dirname(config['conf_file']),
        'cloud.deploy.d'
    )

    # Compute the search path using the install time defined
    # syspaths.CONF_DIR
    deploy_d_from_syspaths = os.path.join(
        config['config_dir'],
        'cloud.deploy.d'
    )

    # Get a copy of any defined search paths, flagging them not to
    # create parent
    deploy_scripts_search_paths = []
    for entry in config.get('deploy_scripts_search_path', []):
        if entry.startswith(builtin_deploy_dir):
            # We won't write the updated script to the built-in deploy
            # directory
            continue
        if entry in (deploy_d_from_conf_file, deploy_d_from_syspaths):
            # Allow parent directories to be made
            deploy_scripts_search_paths.append((entry, True))
        else:
            deploy_scripts_search_paths.append((entry, False))
    # In case the user is not using defaults and the computed
    # 'cloud.deploy.d' from conf_file and syspaths is not included, add
    # them
    # NOTE(review): the membership tests below compare a plain path string
    # against a list of (path, flag) tuples, so they always append; the
    # duplicate-entry guard in the loop further down compensates — confirm.
    if deploy_d_from_conf_file not in deploy_scripts_search_paths:
        deploy_scripts_search_paths.append(
            (deploy_d_from_conf_file, True)
        )
    if deploy_d_from_syspaths not in deploy_scripts_search_paths:
        deploy_scripts_search_paths.append(
            (deploy_d_from_syspaths, True)
        )

    finished = []
    finished_full = []
    for entry, makedirs in deploy_scripts_search_paths:
        # This handles duplicate entries, which are likely to appear
        if entry in finished:
            continue
        else:
            finished.append(entry)

        if makedirs and not os.path.isdir(entry):
            try:
                os.makedirs(entry)
            except (OSError, IOError) as err:
                # Creation failure is logged and the path skipped; 'err' is
                # intentionally not included in the message.
                log.info(
                    'Failed to create directory \'{0}\''.format(entry)
                )
                continue

        if not is_writeable(entry):
            log.debug(
                'The \'{0}\' is not writeable. Continuing...'.format(
                    entry
                )
            )
            continue

        deploy_path = os.path.join(entry, script_name)
        try:
            finished_full.append(deploy_path)
            with salt.utils.fopen(deploy_path, 'w') as fp_:
                fp_.write(script_content)
        except (OSError, IOError) as err:
            log.debug(
                'Failed to write the updated script: {0}'.format(err)
            )
            continue

    return {'Success': {'Files updated': finished_full}}
def cache_node_list(nodes, provider, opts):
    '''
    If configured to do so, update the cloud cachedir with the current list of
    nodes. Also fires configured events pertaining to the node list.

    .. versionadded:: 2014.7.0
    '''
    if not opts.get('update_cachedir'):
        return

    driver = next(six.iterkeys(opts['providers'][provider]))
    prov_dir = os.path.join(init_cachedir(), 'active', driver, provider)
    if not os.path.exists(prov_dir):
        os.makedirs(prov_dir)

    # Fire events for any cached nodes that disappeared from the new list.
    missing_node_cache(prov_dir, nodes, provider, opts)

    for name, node_data in six.iteritems(nodes):
        diff_node_cache(prov_dir, name, node_data, opts)
        cache_file = os.path.join(prov_dir, '{0}.p'.format(name))
        with salt.utils.fopen(cache_file, 'w') as fh_:
            msgpack.dump(node_data, fh_)
def cache_node(node, provider, opts):
    '''
    Cache node individually

    .. versionadded:: 2014.7.0
    '''
    if isinstance(opts, dict):
        __opts__.update(opts)

    if not __opts__.get('update_cachedir'):
        return

    base = os.path.join(__opts__['cachedir'], 'active')
    if not os.path.exists(base):
        init_cachedir()

    # Providers are passed as '<provider>:<driver>'.
    prov_name, driver = provider.split(':')
    prov_dir = os.path.join(base, driver, prov_name)
    if not os.path.exists(prov_dir):
        os.makedirs(prov_dir)

    cache_file = os.path.join(prov_dir, '{0}.p'.format(node['name']))
    with salt.utils.fopen(cache_file, 'w') as fh_:
        msgpack.dump(node, fh_)
def missing_node_cache(prov_dir, node_list, provider, opts):
    '''
    Check list of nodes to see if any nodes which were previously known about
    in the cache have been removed from the node list.

    This function will only run if configured to do so in the main Salt Cloud
    configuration file (normally /etc/salt/cloud).

    .. code-block:: yaml

        diff_cache_events: True

    .. versionadded:: 2014.7.0
    '''
    cached_nodes = []
    for node in os.listdir(prov_dir):
        # Cache files are named '<minion_id>.p'; strip the extension.
        cached_nodes.append(os.path.splitext(node)[0])

    for node in cached_nodes:
        if node not in node_list:
            # Node vanished from the provider: drop it from the cache and,
            # if configured, announce the removal on the event bus.
            delete_minion_cachedir(node, provider, opts)
            if 'diff_cache_events' in opts and opts['diff_cache_events']:
                fire_event(
                    'event',
                    'cached node missing from provider',
                    'salt/cloud/{0}/cache_node_missing'.format(node),
                    args={'missing node': node},
                    sock_dir=opts.get(
                        'sock_dir',
                        os.path.join(__opts__['sock_dir'], 'master')),
                    transport=opts.get('transport', 'zeromq')
                )
def diff_node_cache(prov_dir, node, new_data, opts):
    '''
    Check new node data against current cache. If data differ, fire an event
    which consists of the new node data.

    This function will only run if configured to do so in the main Salt Cloud
    configuration file (normally /etc/salt/cloud).

    .. code-block:: yaml

        diff_cache_events: True

    .. versionadded:: 2014.7.0
    '''
    if 'diff_cache_events' not in opts or not opts['diff_cache_events']:
        return

    if node is None:
        return
    path = '{0}.p'.format(os.path.join(prov_dir, node))

    if not os.path.exists(path):
        # No cached entry yet: treat the node as newly discovered.
        event_data = _strip_cache_events(new_data, opts)

        fire_event(
            'event',
            'new node found',
            'salt/cloud/{0}/cache_node_new'.format(node),
            args={'new_data': event_data},
            sock_dir=opts.get(
                'sock_dir',
                os.path.join(__opts__['sock_dir'], 'master')),
            transport=opts.get('transport', 'zeromq')
        )
        return

    with salt.utils.fopen(path, 'r') as fh_:
        try:
            cache_data = msgpack.load(fh_)
        except ValueError:
            # Corrupt cache entry: fall back to an empty dict so the diff
            # below treats everything as changed.
            log.warning('Cache for {0} was corrupt: Deleting'.format(node))
            cache_data = {}

    # Perform a simple diff between the old and the new data, and if it differs,
    # return both dicts.
    # TODO: Return an actual diff
    # NOTE(review): cmp() is a Python 2 builtin (removed in Python 3); this
    # module targets Python 2 — confirm before porting.
    diff = cmp(new_data, cache_data)
    if diff != 0:
        fire_event(
            'event',
            'node data differs',
            'salt/cloud/{0}/cache_node_diff'.format(node),
            args={
                'new_data': _strip_cache_events(new_data, opts),
                'cache_data': _strip_cache_events(cache_data, opts),
            },
            sock_dir=opts.get(
                'sock_dir',
                os.path.join(__opts__['sock_dir'], 'master')),
            transport=opts.get('transport', 'zeromq')
        )
def _strip_cache_events(data, opts):
'''
Strip out user-configured sensitive event data. The fields to be stripped
are configured in the main Salt Cloud configuration file, usually
``/etc/salt/cloud``.
.. code-block: yaml
cache_event_strip_fields:
- password
- priv_key
.. versionadded:: 2014.7.0
'''
event_data = copy.deepcopy(data)
strip_fields = opts.get('cache_event_strip_fields', [])
for field in strip_fields:
if field in event_data:
del event_data[field]
return event_data
def _salt_cloud_force_ascii(exc):
'''
Helper method to try its best to convert any Unicode text into ASCII
without stack tracing since salt internally does not handle Unicode strings
This method is not supposed to be used directly. Once
`py:module: salt.utils.cloud` is imported this method register's with
python's codecs module for proper automatic conversion in case of encoding
errors.
'''
if not isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
raise TypeError('Can\'t handle {0}'.format(exc))
unicode_trans = {
# Convert non-breaking space to space
u'\xa0': u' ',
# Convert en dash to dash
u'\u2013': u'-',
}
if exc.object[exc.start:exc.end] in unicode_trans:
return unicode_trans[exc.object[exc.start:exc.end]], exc.end
# There's nothing else we can do, raise the exception
raise exc
codecs.register_error('salt-cloud-force-ascii', _salt_cloud_force_ascii)
def retrieve_password_from_keyring(credential_id, username):
    '''
    Retrieve particular user's password for a specified credential set from system keyring.
    '''
    try:
        import keyring  # pylint: disable=import-error
    except ImportError:
        # keyring is an optional dependency; fail soft with False.
        log.error('USE_KEYRING configured as a password, but no keyring module is installed')
        return False
    return keyring.get_password(credential_id, username)
def _save_password_in_keyring(credential_id, username, password):
    '''
    Saves provider password in system keyring
    '''
    try:
        import keyring  # pylint: disable=import-error
    except ImportError:
        # keyring is an optional dependency; fail soft with False.
        log.error('Tried to store password in keyring, but no keyring module is installed')
        return False
    return keyring.set_password(credential_id, username, password)
def store_password_in_keyring(credential_id, username, password=None):
    '''
    Interactively prompts user for a password and stores it in system keyring

    Returns False when the keyring module is unavailable; raises RuntimeError
    when no usable password was entered at the prompt.
    '''
    try:
        # pylint: disable=import-error
        import keyring
        import keyring.errors
        # pylint: enable=import-error
        if password is None:
            # No password supplied: prompt the operator interactively.
            prompt = 'Please enter password for {0}: '.format(credential_id)
            try:
                password = getpass.getpass(prompt)
            except EOFError:
                password = None

            if not password:
                # WE should raise something else here to be able to use this
                # as/from an API
                raise RuntimeError('Invalid password provided.')

        try:
            _save_password_in_keyring(credential_id, username, password)
        except keyring.errors.PasswordSetError as exc:
            log.debug('Problem saving password in the keyring: {0}'.format(exc))
    except ImportError:
        log.error('Tried to store password in keyring, but no keyring module is installed')
        return False
def _unwrap_dict(dictionary, index_string):
'''
Accepts index in form of a string
Returns final value
Example: dictionary = {'a': {'b': {'c': 'foobar'}}}
index_string = 'a,b,c'
returns 'foobar'
'''
index = index_string.split(',')
for k in index:
dictionary = dictionary[k]
return dictionary
def run_func_until_ret_arg(fun, kwargs, fun_call=None,
                           argument_being_watched=None, required_argument_response=None):
    '''
    Waits until the function retrieves some required argument.
    NOTE: Tested with ec2 describe_volumes and describe_snapshots only.

    Polls ``fun`` every 5 seconds until the watched field (a comma-separated
    key path, see _unwrap_dict) equals ``required_argument_response``.
    Always returns True once the condition is met.
    '''
    status = None
    while status != required_argument_response:
        f_result = fun(kwargs, call=fun_call)
        r_set = {}
        # Flatten the response: merge the first dict of every list entry.
        for d in f_result:
            if isinstance(d, list):
                d0 = d[0]
                if isinstance(d0, dict):
                    for k, v in six.iteritems(d0):
                        r_set[k] = v
        status = _unwrap_dict(r_set, argument_being_watched)
        log.debug('Function: {0}, Watched arg: {1}, Response: {2}'.format(str(fun).split(' ')[1],
                                                                          argument_being_watched,
                                                                          status))
        time.sleep(5)

    return True
def get_salt_interface(vm_, opts):
    '''
    Return the salt_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    def _lookup(name, default):
        return salt.config.get_cloud_config_value(
            name, vm_, opts, default=default, search_global=False
        )

    salt_host = _lookup('salt_interface', False)
    if salt_host is False:
        # No explicit salt_interface configured; fall back to ssh_interface.
        salt_host = _lookup('ssh_interface', 'public_ips')
    return salt_host
def check_key_path_and_mode(provider, key_path):
    '''
    Checks that the key_path exists and the key_mode is either 0400 or 0600.
    Returns True or False.

    .. versionadded:: 2016.3.0

    provider
        The provider name that the key_path to check belongs to.

    key_path
        The key_path to ensure that it exists and to check adequate permissions
        against.
    '''
    if not os.path.exists(key_path):
        log.error(
            'The key file \'{0}\' used in the \'{1}\' provider configuration '
            'does not exist.\n'.format(
                key_path,
                provider
            )
        )
        return False

    # Fix: compare the numeric permission bits directly. The previous
    # str(oct(...)) comparison against ('0400', '0600') only matched the
    # Python 2 octal repr; Python 3's oct() yields '0o400', so every valid
    # key was rejected. Numeric comparison works on both interpreters.
    key_mode = stat.S_IMODE(os.stat(key_path).st_mode)
    if key_mode not in (0o400, 0o600):
        log.error(
            'The key file \'{0}\' used in the \'{1}\' provider configuration '
            'needs to be set to mode 0400 or 0600.\n'.format(
                key_path,
                provider
            )
        )
        return False

    return True
| 35.331879 | 121 | 0.544629 |
223a455e0cabadac5fb9cd0778d76f9a06578e07 | 5,969 | py | Python | mava/utils/environments/flatland_utils.py | 1998x-stack/Mava | 2c8f7f59f235340886e92045b6730cf5a542a496 | [
"Apache-2.0"
] | 1 | 2022-03-19T12:51:16.000Z | 2022-03-19T12:51:16.000Z | mava/utils/environments/flatland_utils.py | 1998x-stack/Mava | 2c8f7f59f235340886e92045b6730cf5a542a496 | [
"Apache-2.0"
] | null | null | null | mava/utils/environments/flatland_utils.py | 1998x-stack/Mava | 2c8f7f59f235340886e92045b6730cf5a542a496 | [
"Apache-2.0"
] | null | null | null | # python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for making Flatland environment."""
from typing import Optional
from mava.wrappers.env_preprocess_wrappers import (
ConcatAgentIdToObservation,
ConcatPrevActionToObservation,
)
from mava.wrappers.flatland import FlatlandEnvWrapper
def check_flatland_import() -> bool:
    """Consistent way to check if flatland has been installed.

    Returns:
        whether flatland exists or not.
    """
    try:
        from flatland.envs.line_generators import sparse_line_generator
    except ModuleNotFoundError:
        return False

    # Delete unused var
    del sparse_line_generator
    return True
_found_flatland = check_flatland_import()
if _found_flatland:
from flatland.envs.line_generators import sparse_line_generator
from flatland.envs.malfunction_generators import (
MalfunctionParameters,
ParamMalfunctionGen,
)
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
    def _create_rail_env_with_tree_obs(
        n_agents: int = 5,
        x_dim: int = 30,
        y_dim: int = 30,
        n_cities: int = 2,
        max_rails_between_cities: int = 2,
        max_rails_in_city: int = 3,
        seed: Optional[int] = 0,
        malfunction_rate: float = 1 / 200,
        malfunction_min_duration: int = 20,
        malfunction_max_duration: int = 50,
        observation_max_path_depth: int = 30,
        observation_tree_depth: int = 2,
    ) -> RailEnv:
        """Create a Flatland RailEnv with TreeObservation.

        Args:
            n_agents: Number of trains. Defaults to 5.
            x_dim: Width of map. Defaults to 30.
            y_dim: Height of map. Defaults to 30.
            n_cities: Number of cities. Defaults to 2.
            max_rails_between_cities: Max rails between cities. Defaults to 2.
            max_rails_in_city: Max rails in cities. Defaults to 3.
            seed: Random seed. Defaults to 0.
            malfunction_rate: Malfunction rate. Defaults to 1/200.
            malfunction_min_duration: Min malfunction duration. Defaults to 20.
            malfunction_max_duration: Max malfunction duration. Defaults to 50.
            observation_max_path_depth: Shortest path predictor depth. Defaults to 30.
            observation_tree_depth: TreeObs depth. Defaults to 2.

        Returns:
            RailEnv: A Flatland RailEnv.
        """
        # Break agents from time to time
        malfunction_parameters = MalfunctionParameters(
            malfunction_rate=malfunction_rate,
            min_duration=malfunction_min_duration,
            max_duration=malfunction_max_duration,
        )

        # Observation builder
        predictor = ShortestPathPredictorForRailEnv(observation_max_path_depth)
        tree_observation = TreeObsForRailEnv(
            max_depth=observation_tree_depth, predictor=predictor
        )

        rail_env = RailEnv(
            width=x_dim,
            height=y_dim,
            rail_generator=sparse_rail_generator(
                max_num_cities=n_cities,
                grid_mode=False,
                max_rails_between_cities=max_rails_between_cities,
                # RailEnv counts rail *pairs*; halve the rail count.
                max_rail_pairs_in_city=max_rails_in_city // 2,
            ),
            line_generator=sparse_line_generator(),
            number_of_agents=n_agents,
            malfunction_generator=ParamMalfunctionGen(malfunction_parameters),
            obs_builder_object=tree_observation,
            random_seed=seed,
        )

        return rail_env
    def make_environment(
        n_agents: int = 10,
        x_dim: int = 30,
        y_dim: int = 30,
        n_cities: int = 2,
        max_rails_between_cities: int = 2,
        max_rails_in_city: int = 3,
        seed: int = 0,
        malfunction_rate: float = 1 / 200,
        malfunction_min_duration: int = 20,
        malfunction_max_duration: int = 50,
        observation_max_path_depth: int = 30,
        observation_tree_depth: int = 2,
        concat_prev_actions: bool = True,
        concat_agent_id: bool = False,
        evaluation: bool = False,
        random_seed: Optional[int] = None,
    ) -> FlatlandEnvWrapper:
        """Loads a flatand environment and wraps it using the flatland wrapper

        Optionally concatenates each agent's previous action and/or its agent
        id onto its observation via the preprocess wrappers.
        """
        # NOTE(review): the `seed` parameter is not used below; only
        # `random_seed` reaches the rail generator — confirm intent.
        del evaluation  # since it has same behaviour for both train and eval

        env = _create_rail_env_with_tree_obs(
            n_agents=n_agents,
            x_dim=x_dim,
            y_dim=y_dim,
            n_cities=n_cities,
            max_rails_between_cities=max_rails_between_cities,
            max_rails_in_city=max_rails_in_city,
            seed=random_seed,
            malfunction_rate=malfunction_rate,
            malfunction_min_duration=malfunction_min_duration,
            malfunction_max_duration=malfunction_max_duration,
            observation_max_path_depth=observation_max_path_depth,
            observation_tree_depth=observation_tree_depth,
        )

        env = FlatlandEnvWrapper(env)

        if concat_prev_actions:
            env = ConcatPrevActionToObservation(env)

        if concat_agent_id:
            env = ConcatAgentIdToObservation(env)

        return env
| 35.742515 | 86 | 0.669291 |
8ac543f1d263cb6a6139e6c4eb4cd615d96d3aed | 577 | py | Python | rex_gym/__init__.py | osigaud/rex-gym | cd2a1a333fba7d7e7ee3bf2165979dfda750ddda | [
"Apache-2.0"
] | 1 | 2021-09-11T10:10:28.000Z | 2021-09-11T10:10:28.000Z | rex_gym/__init__.py | osigaud/rex-gym | cd2a1a333fba7d7e7ee3bf2165979dfda750ddda | [
"Apache-2.0"
] | null | null | null | rex_gym/__init__.py | osigaud/rex-gym | cd2a1a333fba7d7e7ee3bf2165979dfda750ddda | [
"Apache-2.0"
] | 1 | 2021-09-11T10:10:32.000Z | 2021-09-11T10:10:32.000Z | from gym.envs.registration import register
# Register each Rex locomotion task with Gym so instances can be created via
# gym.make("<id>"); every task caps episodes at 400 steps.
register(
    id="RexGallop-v0",
    entry_point="rex_gym.envs:RexReactiveEnv",
    max_episode_steps=400,
)
register(
    id="RexPoses-v0",
    entry_point="rex_gym.envs:RexPosesEnv",
    max_episode_steps=400,
)
register(
    id="RexStandup-v0",
    entry_point="rex_gym.envs:RexStandupEnv",
    max_episode_steps=400,
)
register(
    id="RexTurn-v0",
    entry_point="rex_gym.envs:RexTurnEnv",
    max_episode_steps=400,
)
register(
    id="RexWalk-v0",
    entry_point="rex_gym.envs:RexWalkEnv",
    max_episode_steps=400,
)
de07228dd81e4021eb65d1a02db7aeb847cfb004 | 10,379 | py | Python | models/cycle_gan_model.py | ShangxuanWu/CycleGAN-Face-off | 2bbc737cbaa6fa8a1e636db2afc3653e3ecb7341 | [
"BSD-3-Clause"
] | 18 | 2017-12-18T21:53:36.000Z | 2021-09-07T21:53:59.000Z | models/cycle_gan_model.py | ShangxuanWu/CycleGAN-Face-off | 2bbc737cbaa6fa8a1e636db2afc3653e3ecb7341 | [
"BSD-3-Clause"
] | null | null | null | models/cycle_gan_model.py | ShangxuanWu/CycleGAN-Face-off | 2bbc737cbaa6fa8a1e636db2afc3653e3ecb7341 | [
"BSD-3-Clause"
] | 5 | 2018-02-01T06:51:45.000Z | 2020-04-23T12:06:25.000Z | import numpy as np
import torch
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
import util.util as util
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import sys, pdb
import ssim
class CycleGANModel(BaseModel):
    """CycleGAN with optional SSIM cycle losses (legacy-PyTorch codebase).

    The naming convention differs from the paper:
    code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X).
    """

    def name(self):
        # Identifier used by the framework for logging / checkpoint paths.
        return 'CycleGANModel'

    def initialize(self, opt):
        """Build networks, losses, pools and optimizers from the options."""
        BaseModel.initialize(self, opt)

        nb = opt.batchSize
        size = opt.fineSize
        # Pre-allocated input tensors, refilled in place by set_input().
        self.input_A = self.Tensor(nb, opt.input_nc, size, size)
        self.input_B = self.Tensor(nb, opt.output_nc, size, size)

        # load/define networks
        # The naming conversion is different from those used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc,
                                        opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,
                                        opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids)

        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
                                            opt.which_model_netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
                                            opt.which_model_netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)
        if not self.isTrain or opt.continue_train:
            # Resume from a saved checkpoint epoch.
            which_epoch = opt.which_epoch
            self.load_network(self.netG_A, 'G_A', which_epoch)
            self.load_network(self.netG_B, 'G_B', which_epoch)
            if self.isTrain:
                self.load_network(self.netD_A, 'D_A', which_epoch)
                self.load_network(self.netD_B, 'D_B', which_epoch)

        if self.isTrain:
            self.old_lr = opt.lr
            # Image pools feed the discriminators a history of generated images.
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            if opt.with_ssim:
                self.criterionSSIM = ssim.SSIM()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = []
            self.schedulers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D_A)
            self.optimizers.append(self.optimizer_D_B)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, opt))

        print('---------- Networks initialized -------------')
        networks.print_network(self.netG_A)
        networks.print_network(self.netG_B)
        if self.isTrain:
            networks.print_network(self.netD_A)
            networks.print_network(self.netD_B)
        print('-----------------------------------------------')

    def set_input(self, input):
        """Copy one data-loader batch into the pre-allocated input tensors."""
        AtoB = self.opt.which_direction == 'AtoB'
        input_A = input['A' if AtoB else 'B']
        input_B = input['B' if AtoB else 'A']
        self.input_A.resize_(input_A.size()).copy_(input_A)
        self.input_B.resize_(input_B.size()).copy_(input_B)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        # Wrap inputs for autograd; the network passes happen in backward_G
        # and backward_D_* during optimize_parameters().
        self.real_A = Variable(self.input_A)
        self.real_B = Variable(self.input_B)

    def test(self):
        """Run both translation directions without gradient tracking."""
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG_A.forward(self.real_A)
        self.rec_A = self.netG_B.forward(self.fake_B)

        self.real_B = Variable(self.input_B, volatile=True)
        self.fake_A = self.netG_B.forward(self.real_B)
        self.rec_B = self.netG_A.forward(self.fake_A)

    # get image paths
    def get_image_paths(self):
        return self.image_paths

    def backward_D_basic(self, netD, real, fake):
        """Shared discriminator update: real -> True, fake (detached) -> False."""
        # Real
        pred_real = netD.forward(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake
        pred_fake = netD.forward(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        # backward
        loss_D.backward()
        return loss_D

    def backward_D_A(self):
        # Query the pool so D_A also trains against past generator outputs.
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

    def backward_G(self):
        """Compute and backpropagate all generator losses."""
        lambda_idt = self.opt.identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed.
            self.idt_A = self.netG_A.forward(self.real_B)
            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed.
            self.idt_B = self.netG_B.forward(self.real_A)
            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0

        # GAN loss
        # D_A(G_A(A))
        self.fake_B = self.netG_A.forward(self.real_A)
        pred_fake = self.netD_A.forward(self.fake_B)
        self.loss_G_A = self.criterionGAN(pred_fake, True)
        # D_B(G_B(B))
        self.fake_A = self.netG_B.forward(self.real_B)
        pred_fake = self.netD_B.forward(self.fake_A)
        self.loss_G_B = self.criterionGAN(pred_fake, True)
        # Forward cycle loss
        self.rec_A = self.netG_B.forward(self.fake_B)
        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
        # Forward SSIM loss (negated: higher SSIM = better reconstruction)
        if self.opt.with_ssim:
            self.loss_ssim_A = -self.criterionSSIM(self.real_A, self.rec_A)
        # Backward cycle loss
        self.rec_B = self.netG_A.forward(self.fake_A)
        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
        # Backward SSIM loss
        if self.opt.with_ssim:
            self.loss_ssim_B = -self.criterionSSIM(self.real_B, self.rec_B)
            # combined loss
            self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B + self.loss_ssim_A + self.loss_ssim_B
        else:
            self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
        self.loss_G.backward()

    def optimize_parameters(self):
        """One training step: update G_A+G_B jointly, then D_A, then D_B."""
        # forward
        self.forward()
        # G_A and G_B
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
        # D_A
        self.optimizer_D_A.zero_grad()
        self.backward_D_A()
        self.optimizer_D_A.step()
        # D_B
        self.optimizer_D_B.zero_grad()
        self.backward_D_B()
        self.optimizer_D_B.step()

    def get_current_errors(self):
        """Return the current scalar losses as an OrderedDict for logging."""
        D_A = self.loss_D_A.data[0]
        G_A = self.loss_G_A.data[0]
        Cyc_A = self.loss_cycle_A.data[0]
        if self.opt.with_ssim:
            # NOTE(review): 'or 0' maps a falsy (zero) loss value to 0.
            SSIM_A = -self.loss_ssim_A.data[0] or 0
        D_B = self.loss_D_B.data[0]
        G_B = self.loss_G_B.data[0]
        Cyc_B = self.loss_cycle_B.data[0]
        if self.opt.with_ssim:
            SSIM_B = -self.loss_ssim_B.data[0] or 0
        # we didn't use identity loss with SSIM loss at the same time
        if self.opt.identity > 0.0:
            idt_A = self.loss_idt_A.data[0]
            idt_B = self.loss_idt_B.data[0]
            return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Cyc_A', Cyc_A), ('idt_A', idt_A),
                                ('D_B', D_B), ('G_B', G_B), ('Cyc_B', Cyc_B), ('idt_B', idt_B)])
        else:
            if self.opt.with_ssim:
                return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Cyc_A', Cyc_A), ('SSIM_A', SSIM_A),
                                    ('D_B', D_B), ('G_B', G_B), ('Cyc_B', Cyc_B), ('SSIM_B', SSIM_B)])
            else:
                return OrderedDict([('D_A', D_A), ('G_A', G_A), ('Cyc_A', Cyc_A),
                                    ('D_B', D_B), ('G_B', G_B), ('Cyc_B', Cyc_B)])

    def get_current_visuals(self):
        """Return the current images (as numpy arrays) for visualization."""
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        rec_A = util.tensor2im(self.rec_A.data)
        real_B = util.tensor2im(self.real_B.data)
        fake_A = util.tensor2im(self.fake_A.data)
        rec_B = util.tensor2im(self.rec_B.data)
        if self.opt.isTrain and self.opt.identity > 0.0:
            idt_A = util.tensor2im(self.idt_A.data)
            idt_B = util.tensor2im(self.idt_B.data)
            return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A), ('idt_B', idt_B),
                                ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B), ('idt_A', idt_A)])
        else:
            return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('rec_A', rec_A),
                                ('real_B', real_B), ('fake_A', fake_A), ('rec_B', rec_B)])

    def save(self, label):
        """Checkpoint all four networks under the given label."""
        self.save_network(self.netG_A, 'G_A', label, self.gpu_ids)
        self.save_network(self.netD_A, 'D_A', label, self.gpu_ids)
        self.save_network(self.netG_B, 'G_B', label, self.gpu_ids)
        self.save_network(self.netD_B, 'D_B', label, self.gpu_ids)
| 44.354701 | 169 | 0.596878 |
181ee0ea246039bcfaafb67cff8de7c001c891b7 | 383 | py | Python | example/asgi.py | PNoryk/django-filter-grouped | 39a096a65121aefe989700896cd348ebcd7216c0 | [
"BSD-3-Clause"
] | 3 | 2021-10-14T05:59:32.000Z | 2022-01-20T09:32:13.000Z | example/asgi.py | PNoryk/django-filter-groups | 39a096a65121aefe989700896cd348ebcd7216c0 | [
"BSD-3-Clause"
] | null | null | null | example/asgi.py | PNoryk/django-filter-groups | 39a096a65121aefe989700896cd348ebcd7216c0 | [
"BSD-3-Clause"
] | null | null | null | """
ASGI config for example project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = get_asgi_application()
| 22.529412 | 78 | 0.78329 |
19ff43687a4d4ff0d3784ee8866aae8432ab4ff4 | 7,566 | py | Python | host/greatfet/commands/greatfet_firmware.py | colinoflynn/greatfet | ee81a2101b21f69052240698ebd4da83f6126f10 | [
"BSD-3-Clause"
] | null | null | null | host/greatfet/commands/greatfet_firmware.py | colinoflynn/greatfet | ee81a2101b21f69052240698ebd4da83f6126f10 | [
"BSD-3-Clause"
] | null | null | null | host/greatfet/commands/greatfet_firmware.py | colinoflynn/greatfet | ee81a2101b21f69052240698ebd4da83f6126f10 | [
"BSD-3-Clause"
] | 1 | 2021-05-19T17:20:41.000Z | 2021-05-19T17:20:41.000Z | #!/usr/bin/env python
#
# This file is part of GreatFET
#
"""
Utility for flashing firmware on GreatFET boards.
"""
from __future__ import print_function
import os
import sys
import errno
import subprocess
import usb
from greatfet import GreatFET
from greatfet.errors import DeviceNotFoundError
from greatfet.utils import log_silent, GreatFETArgumentParser
# The serial number expected from the DFU flash stub.
DFU_STUB_NAME = 'flash_stub.dfu'
DFU_STUB_PATHS = [ '~/.local/share/greatfet', '~/.local/share/GreatFET' ]
# Vendor VID/PID if the device is in DFU.
NXP_DFU_VID = 0x1fc9
NXP_DFU_PID = 0x000c
# Maximum length to allow, for now.
MAX_FLASH_LENGTH = 0x100000
def spi_flash_read(device, filename, address, length, log_function=log_silent):
    """Reads the data from the device's SPI flash to a file. """

    def _report(bytes_read, bytes_total):
        # The carriage return rewrites the progress line in place.
        log_function("Read {} bytes of {}.".format(bytes_read, bytes_total), end='\r')

    # Pull the requested region off the board and dump it to disk.
    with open(filename, 'wb') as target:
        flash_data = device.onboard_flash.read(address, length,
                                               progress_callback=_report)
        flash_data.tofile(target)
    log_function('')
def spi_flash_write(device, filename, address, log_function=log_silent):
    """Writes the data from a given file to the SPI flash."""

    def _report(bytes_written, bytes_total):
        # The carriage return rewrites the progress line in place.
        log_function("Written {} bytes of {}.".format(bytes_written, bytes_total), end='\r')

    # Read the local file and program it onto the board's SPI flash.
    with open(filename, 'rb') as source:
        flash_data = source.read()
        device.onboard_flash.write(flash_data, address,
                                   erase_first=True,
                                   progress_callback=_report)
    log_function('')
def find_dfu_stub(args):
""" Finds the DFU stub. """
# FIXME: This should be cleaned up to search paths that make sense given
# where and how we might install GreatFET.
# If we have an explicit DFU stub location, use it.
if args.dfu_stub:
path = os.path.expanduser(args.dfu_stub)
if os.path.isfile(path):
return path
# Otherwise, search each of the paths around.
for path in DFU_STUB_PATHS:
filename = os.path.expanduser(os.path.join(path, DFU_STUB_NAME))
print(filename)
if os.path.isfile(filename):
return filename
# If we weren't able to find it, give up, for now.
# TODO: eventually ship this with the GreatFET distribution and/or
# download it on demand?
return None
def load_dfu_stub(args):
""" Loads a DFU programming stub onto a GreatFET in DFU mode. """
# First: check to make sure we _have_ a DFU'able device.
dev = usb.core.find(idVendor=NXP_DFU_VID, idProduct=NXP_DFU_PID)
if not dev:
raise DeviceNotFoundError
del dev
# If we have a DFU'able device, find the DFU stub and load it.
stub_path = find_dfu_stub(args)
if stub_path is None:
raise ValueError("Could not find the DFU stub!")
#
# FIXME: This isn't a good way to do things. It's being stubbed in
# for now, but it'd be better to talk DFU from python directly.
#
rc = subprocess.call(['dfu-util', '--device', format(NXP_DFU_VID, 'x'), format(NXP_DFU_PID, 'x'), '--alt', '0', '--download', stub_path])
if rc:
raise IOError("Error using DFU-util!")
def find_greatfet(args):
""" Finds a GreatFET matching the relevant arguments."""
# If we're prorgamming via DFU mode, look for a device that sports the DFU stub.
# Note that we only support a single DFU-mode device for now.
if args.dfu:
return GreatFET(serial_number=DFU_STUB_SERIAL)
# If we have an index argument, grab _all_ greatFETs and select by index.
elif args.index:
# Find _all_ GreatFETs...
devices = GreatFET(find_all=True)
# ... and then select the one with the provided index.
if len(devices) <= args.index:
raise DeviceNotFoundError
return devices[args.index]
# If we have a serial number, look only for a single device. Theoretically,
# we should never have more than one GreatFET with the same serial number.
# Technically, this is violable, but libusb doesn't properly handle searching
# by serial number if there are multiple devices with the same one, so we
# enforce this.
else:
return GreatFET(serial_number=args.serial)
def main():
# Set up a simple argument parser.
parser = GreatFETArgumentParser(dfu=True,
description="Utility for flashing firmware on GreatFET boards")
parser.add_argument('-a', '--address', metavar='<n>', type=int,
help="starting address (default: 0)", default=0)
parser.add_argument('-l', '--length', metavar='<n>', type=int,
help="number of bytes to read (default: {})".format(MAX_FLASH_LENGTH),
default=MAX_FLASH_LENGTH)
parser.add_argument('-r', '--read', dest='read', metavar='<filename>', type=str,
help="Read data into file", default='')
parser.add_argument('-w', '--write', dest='write', metavar='<filename>', type=str,
help="Write data from file", default='')
parser.add_argument('-R', '--reset', dest='reset', action='store_true',
help="Reset GreatFET after performing other operations.")
args = parser.parse_args()
# Validate our options.
# If we don't have an option, print our usage.
if not any((args.read, args.write, args.reset,)):
parser.print_help()
sys.exit(0)
# Determine whether we're going to log to the stdout, or not at all.
log_function = parser.get_log_function()
# If we're supposed to install firmware via a DFU stub, install it first.
if args.dfu:
try:
load_dfu_stub(args)
except DeviceNotFoundError:
print("Couldn't find a GreatFET-compatible board in DFU mode!", file=sys.stderr)
sys.exit(errno.ENODEV)
# Create our GreatFET connection.
log_function("Trying to find a GreatFET device...")
device = parser.find_specified_device()
log_function("{} found. (Serial number: {})".format(device.board_name(), device.serial_number()))
# Ensure that the device supports an onboard SPI flash.
try:
device.onboard_flash
except AttributeError:
print("The attached GreatFET ({}) doesn't appear to have an SPI flash to program!".format(device.board_name()), file=sys.stderr)
sys.exit(errno.ENOSYS)
# If we have a write command, write first, to match the behavior of hackrf_spiflash.
if args.write:
log_function("Writing data to SPI flash...")
spi_flash_write(device, args.write, args.address, log_function)
log_function("Write complete!")
if not (args.reset or args.dfu):
log_function("Reset not specified; new firmware will not start until next reset.")
# Handle any read commands.
if args.read:
log_function("Reading data from SPI flash...")
spi_flash_read(device, args.read, args.address, args.length, log_function)
log_function("Read complete!")
# Finally, reset the target
if args.reset or args.dfu:
log_function("Resetting GreatFET...")
device.reset(reconnect=False)
log_function("Reset complete!")
if __name__ == '__main__':
main()
| 35.521127 | 141 | 0.655432 |
c1c2f89d55f46c049bbfae0f5766bbcfe5990e93 | 2,752 | py | Python | facecruncher/src/watcher.py | siavash9000/famousfaces | 672c2fe6c6c8406622614a34f1def2c6b08e7a3c | [
"MIT"
] | null | null | null | facecruncher/src/watcher.py | siavash9000/famousfaces | 672c2fe6c6c8406622614a34f1def2c6b08e7a3c | [
"MIT"
] | 12 | 2020-07-18T01:17:51.000Z | 2022-02-18T07:55:47.000Z | facecruncher/src/watcher.py | siavash9000/famousfaces | 672c2fe6c6c8406622614a34f1def2c6b08e7a3c | [
"MIT"
] | null | null | null | import os
import json
from embedd_face import FaceEmbedder
from celebrity_nn import CelebrityTree
import psycopg2
import logging
import pika
import base64
submissions_folder = '/submissions/'
results_folder = '/results/'
def sorted_dir(folder):
def getmtime(name):
path = os.path.join(folder, name)
return os.path.getmtime(path)
files = os.listdir(folder)
filtered = []
for f in files:
if f.endswith(".jpg") or f.endswith(".jpeg"):
filtered.append(f)
return sorted(filtered, key=getmtime, reverse=False)
def process_submissions(faceEmbedder, celebrityTree):
files = sorted_dir(submissions_folder)
for file in files:
file_path = os.path.join(submissions_folder, file)
embedding = faceEmbedder.embedd_face(file_path)
result = celebrityTree.face_analysis(embedding)
result_filename = file.split('.')[0] + '.json'
with open(os.path.join(results_folder, result_filename), 'w') as fp:
json.dump(result, fp)
os.remove(file_path)
class Cruncher(object):
def __init__(self, faceEmbedder, celebTree):
self.faceEmbedder = faceEmbedder
self.celebTree = celebTree
def process_message(self, ch, method, properties, body):
body_json = json.loads(body)
image = body_json['base64_image']
image = base64.b64decode(image)
embedding = faceEmbedder.embedd_face(image)
result = self.celebTree.face_analysis(embedding)
connection = psycopg2.connect(dbname='postgres',
user='postgres',
host='postgresql',
password='postgres',
port=5432)
connection.autocommit = True
cursor = connection.cursor()
sql = "INSERT INTO face_analysis.results (uuid, nearest_faces) VALUES(%s, %s)"
cursor.execute(sql, (body_json['image_id'], json.dumps(result)))
cursor.close()
connection.close()
ch.basic_ack(delivery_tag=method.delivery_tag)
if __name__ == "__main__":
logging.warning("starting watcher process")
faceEmbedder = FaceEmbedder()
celebTree = CelebrityTree()
cruncher = Cruncher(faceEmbedder=faceEmbedder, celebTree=celebTree)
creds = pika.PlainCredentials("rabbit", "rabbit")
connection = pika.BlockingConnection(pika.ConnectionParameters(host='rabbitmq', credentials=creds))
channel = connection.channel()
channel.queue_declare(queue='facecrunch_queue', durable=True)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='facecrunch_queue', on_message_callback=cruncher.process_message)
channel.start_consuming()
| 35.74026 | 103 | 0.662427 |
d988704201cfb281528b3f80bc61e8ca1db30fa8 | 943 | py | Python | todolistApp/urls.py | markawad/todolistApp | 1cde6c2752ad27701c5c7368ce9009ce10aacd67 | [
"MIT"
] | null | null | null | todolistApp/urls.py | markawad/todolistApp | 1cde6c2752ad27701c5c7368ce9009ce10aacd67 | [
"MIT"
] | 2 | 2020-03-27T23:27:43.000Z | 2021-06-10T22:42:00.000Z | todolistApp/urls.py | markawad/todolistApp | 1cde6c2752ad27701c5c7368ce9009ce10aacd67 | [
"MIT"
] | null | null | null | """todolistApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from lists import views as list_views
from lists import urls as list_urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', list_views.home_page, name='home'),
url(r'^lists/', include(list_urls)),
]
| 36.269231 | 79 | 0.707317 |
0dffeae9bb2098af1530d1ecb0c261b74d8e68e0 | 288 | py | Python | unittest/unittest_notequal.py | dineshkumar2509/learning-python | e8af11ff0b396da4c3f2cfe21d14131bae4b2adb | [
"MIT"
] | 86 | 2015-06-13T16:53:55.000Z | 2022-03-24T20:56:42.000Z | unittest/unittest_notequal.py | pei-zheng-yi/learning-python | 55e350dfe44cf04f7d4408e76e72d2f467bd42ce | [
"MIT"
] | 9 | 2015-05-27T07:52:44.000Z | 2022-03-29T21:52:40.000Z | unittest/unittest_notequal.py | pei-zheng-yi/learning-python | 55e350dfe44cf04f7d4408e76e72d2f467bd42ce | [
"MIT"
] | 124 | 2015-12-10T01:17:18.000Z | 2021-11-08T04:03:38.000Z | #!/usr/bin/env python
"""Test for inequality
"""
import unittest
class InequalityTest(unittest.TestCase):
def testEqual(self):
self.failIfEqual(1, 3 - 2)
def testNotEqual(self):
self.failUnlessEqual(2, 3 - 2)
if __name__ == '__main__':
unittest.main()
| 14.4 | 40 | 0.645833 |
548207d3f401189045895440d1795599a167d782 | 1,334 | py | Python | build/lib/accessible_output2/platform_utils/libloader.py | zywek123/accessible_output2 | 825688d5ee6b4680ca9b685d914f9e289f8bde7e | [
"MIT"
] | 5 | 2017-05-06T18:23:26.000Z | 2021-11-29T05:20:10.000Z | build/lib/accessible_output2/platform_utils/libloader.py | zywek123/accessible_output2 | 825688d5ee6b4680ca9b685d914f9e289f8bde7e | [
"MIT"
] | 2 | 2019-11-26T16:56:09.000Z | 2020-11-04T15:08:14.000Z | build/lib/accessible_output2/platform_utils/libloader.py | zywek123/accessible_output2 | 825688d5ee6b4680ca9b685d914f9e289f8bde7e | [
"MIT"
] | 4 | 2019-09-19T11:48:11.000Z | 2021-03-25T18:34:45.000Z | import ctypes
import collections
import platform
import os
TYPES = {
'Linux': {
'loader': ctypes.CDLL,
'functype': ctypes.CFUNCTYPE,
'prefix': 'lib',
'extension': '.so'
},
'Darwin': {
'loader': ctypes.CDLL,
'functype': ctypes.CFUNCTYPE,
'prefix': 'lib',
'extension': '.dylib'
},
}
if platform.system() == 'Windows':
TYPES['Windows'] = {
'loader': ctypes.WinDLL,
'functype': ctypes.WINFUNCTYPE,
'prefix': "",
'extension': '.dll'
}
class LibraryLoadError(Exception): pass
def load_library(library, x86_path='.', x64_path='.', *args, **kwargs):
lib = find_library_path(library, x86_path=x86_path, x64_path=x64_path)
loaded = _do_load(lib, *args, **kwargs)
if loaded is not None:
return loaded
raise LibraryLoadError('unable to load %r. Provided library path: %r' % (library, path))
def _do_load(file, *args, **kwargs):
loader = TYPES[platform.system()]['loader']
return loader(file, *args, **kwargs)
def find_library_path(libname, x86_path='.', x64_path='.'):
libname = '%s%s' % (TYPES[platform.system()]['prefix'], libname)
if platform.machine() == 'x86_64':
path = os.path.join(x64_path, libname)
else:
path = os.path.join(x86_path, libname)
ext = TYPES[platform.system()]['extension']
return '%s%s' % (path, ext)
def get_functype():
return TYPES[platform.system()]['functype']
| 25.653846 | 89 | 0.667916 |
231c9e891a207654dfe0cd5cc16faa1cf46cc87a | 1,449 | py | Python | py-polars/polars/utils.py | muyixi315/polars | 27173e78d471f4fa96d175d38a4f010076fb4c1f | [
"MIT"
] | null | null | null | py-polars/polars/utils.py | muyixi315/polars | 27173e78d471f4fa96d175d38a4f010076fb4c1f | [
"MIT"
] | null | null | null | py-polars/polars/utils.py | muyixi315/polars | 27173e78d471f4fa96d175d38a4f010076fb4c1f | [
"MIT"
] | null | null | null | import pyarrow as pa
def coerce_arrow(array: "pa.Array") -> "pa.Array":
if array.type == pa.timestamp("s"):
array = pa.compute.cast(
pa.compute.multiply(pa.compute.cast(array, pa.int64()), 1000),
pa.date64(),
)
elif array.type == pa.timestamp("ms"):
array = pa.compute.cast(pa.compute.cast(array, pa.int64()), pa.date64())
elif array.type == pa.timestamp("us"):
array = pa.compute.cast(
pa.compute.divide(pa.compute.cast(array, pa.int64()), 1000),
pa.date64(),
)
elif array.type == pa.timestamp("ns"):
array = pa.compute.cast(
pa.compute.divide(pa.compute.cast(array, pa.int64()), 1000000),
pa.date64(),
)
# note: Decimal256 could not be cast to float
elif isinstance(array.type, pa.Decimal128Type):
array = pa.compute.cast(array, pa.float64())
# simplest solution is to cast to (large)-string arrays
# this is copy and expensive
elif isinstance(array, pa.DictionaryArray):
if array.dictionary.type == pa.string():
array = pa.compute.cast(pa.compute.cast(array, pa.utf8()), pa.large_utf8())
else:
raise ValueError(
"polars does not support dictionary encoded types other than strings"
)
if hasattr(array, "num_chunks") and array.num_chunks > 1:
array = array.combine_chunks()
return array
| 37.153846 | 87 | 0.595583 |
169d38da3732fa1ba03b78a0503ada6724ed78ba | 1,196 | py | Python | tech_project/lib/python2.7/site-packages/phonenumbers/data/region_PG.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | [
"MIT"
] | 27 | 2019-11-18T05:06:01.000Z | 2021-02-28T19:38:09.000Z | tech_project/lib/python2.7/site-packages/phonenumbers/data/region_PG.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | [
"MIT"
] | 21 | 2020-12-29T21:29:31.000Z | 2022-03-12T00:53:57.000Z | tech_project/lib/python2.7/site-packages/phonenumbers/data/region_PG.py | priyamshah112/Project-Descripton-Blog | 8e01016c6be79776c4f5ca75563fa3daa839e39e | [
"MIT"
] | 6 | 2020-01-09T21:55:38.000Z | 2021-09-17T01:22:48.000Z | """Auto-generated file, do not edit by hand. PG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_PG = PhoneMetadata(id='PG', country_code=675, international_prefix='00|140[1-3]',
general_desc=PhoneNumberDesc(national_number_pattern='(?:180|[78]\\d{3})\\d{4}|(?:[2-589]\\d|64)\\d{5}', possible_length=(7, 8)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:64[1-9]|7730|85[02-46-9])\\d{4}|(?:3[0-2]|4[257]|5[34]|77[0-24]|9[78])\\d{5}', example_number='3123456', possible_length=(7, 8)),
mobile=PhoneNumberDesc(national_number_pattern='775\\d{5}|(?:7[0-689]|81)\\d{6}', example_number='70123456', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='180\\d{4}', example_number='1801234', possible_length=(7,)),
voip=PhoneNumberDesc(national_number_pattern='2(?:0[0-47]|7[568])\\d{4}', example_number='2751234', possible_length=(7,)),
preferred_international_prefix='00',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['18|[2-69]|85']),
NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[78]'])])
| 92 | 188 | 0.688963 |
90c5a58fdcca0404703c979f3284ee5c24ff6fbf | 4,178 | py | Python | reconcile/test/test_ocm_additional_routers.py | bwplotka/qontract-reconcile | 08fa2472edba72bb6aa8d6264579506f0bf2a830 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_ocm_additional_routers.py | bwplotka/qontract-reconcile | 08fa2472edba72bb6aa8d6264579506f0bf2a830 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_ocm_additional_routers.py | bwplotka/qontract-reconcile | 08fa2472edba72bb6aa8d6264579506f0bf2a830 | [
"Apache-2.0"
] | null | null | null | from unittest.mock import patch, call
from unittest import TestCase
from reconcile.utils.ocm import OCMMap
import reconcile.queries as queries
import reconcile.ocm_additional_routers as integ
from .fixtures import Fixtures
fxt = Fixtures('ocm_additional_routers')
class TestOCMAdditionalRouters(TestCase):
# integration test
@patch.object(queries, 'get_clusters')
def test_integ_fail(self, get_clusters):
fixture = fxt.get_anymarkup('state.yml')
clusters = fixture['gql_response']
for c in clusters:
c.pop('additionalRouters')
get_clusters.return_value = clusters
with self.assertRaises(SystemExit):
integ.run(False)
@patch.object(queries, 'get_app_interface_settings')
@patch.object(queries, 'get_clusters')
@patch.object(OCMMap, 'init_ocm_client')
@patch.object(OCMMap, 'get')
def test_integ(self, get, init_ocm_client, get_clusters,
get_app_interface_settings):
fixture = fxt.get_anymarkup('state.yml')
get_clusters.return_value = fixture['gql_response']
ocm = get.return_value
ocm.get_additional_routers.side_effect = \
lambda x: fixture['ocm_api'][x]
integ.run(False)
ocm_act = fixture['ocm_act']
router_create = ocm_act['create']
expected = []
for c in router_create.keys():
expected.append(call(c, router_create[c]))
calls = ocm.create_additional_router.call_args_list
self.assertEqual(calls, expected)
router_delete = ocm_act['delete']
expected = []
for c in router_delete.keys():
expected.append(call(c, router_delete[c]))
calls = ocm.delete_additional_router.call_args_list
self.assertEqual(calls, expected)
# unit test
@patch.object(queries, 'get_app_interface_settings')
@patch.object(OCMMap, 'init_ocm_client')
@patch.object(OCMMap, 'get')
def test_current_state(self, get, init_ocm_client,
get_app_interface_settings):
fixture = fxt.get_anymarkup('state.yml')
ocm_api = fixture['ocm_api']
clusters = []
for c in ocm_api.keys():
clusters.append({'name': c})
ocm = get.return_value
ocm.get_additional_routers.side_effect = \
lambda x: fixture['ocm_api'][x]
_, current_state = integ.fetch_current_state(clusters)
expected = fixture['current_state']
self.assertEqual(current_state, expected)
def test_desired_state(self):
fixture = fxt.get_anymarkup('state.yml')
gql_response = fixture['gql_response']
desired_state = integ.fetch_desired_state(gql_response)
expected = fixture['desired_state']
self.assertEqual(desired_state, expected)
def test_diffs(self):
fixture = fxt.get_anymarkup('state.yml')
current_state = fixture['current_state']
desired_state = fixture['desired_state']
diffs = integ.calculate_diff(current_state, desired_state)
expected = fixture['diffs']
self.assertEqual(diffs, expected)
@patch.object(OCMMap, 'init_ocm_client')
@patch.object(OCMMap, 'get')
def test_act(self, get, init_ocm_client):
fixture = fxt.get_anymarkup('state.yml')
ocm_api = fixture['ocm_api']
clusters = []
for c in ocm_api.keys():
clusters.append({'name': c})
ocm = get.return_value
ocm_map = OCMMap(fixture['gql_response'])
diffs = fixture['diffs']
integ.act(False, diffs, ocm_map)
ocm_act = fixture['ocm_act']
router_create = ocm_act['create']
expected = []
for c in router_create.keys():
expected.append(call(c, router_create[c]))
calls = ocm.create_additional_router.call_args_list
self.assertEqual(calls, expected)
router_delete = ocm_act['delete']
expected = []
for c in router_delete.keys():
expected.append(call(c, router_delete[c]))
calls = ocm.delete_additional_router.call_args_list
self.assertEqual(calls, expected)
| 32.387597 | 66 | 0.648157 |
3da1b0d23c856d32a545a56f860179ca8be90137 | 1,591 | py | Python | test/vanilla/Expected/AcceptanceTests/BodyBoolean/bodyboolean/_auto_rest_bool_test_service.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/BodyBoolean/bodyboolean/_auto_rest_bool_test_service.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/BodyBoolean/bodyboolean/_auto_rest_bool_test_service.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import AutoRestBoolTestServiceConfiguration
from .operations import BoolModelOperations
from . import models
class AutoRestBoolTestService(SDKClient):
"""Test Infrastructure for AutoRest
:ivar config: Configuration for client.
:vartype config: AutoRestBoolTestServiceConfiguration
:ivar bool_model: BoolModel operations
:vartype bool_model: bodyboolean.operations.BoolModelOperations
:param str base_url: Service URL
"""
def __init__(
self, base_url=None):
self.config = AutoRestBoolTestServiceConfiguration(base_url)
super(AutoRestBoolTestService, self).__init__(None, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '1.0.0'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.bool_model = BoolModelOperations(
self._client, self.config, self._serialize, self._deserialize)
| 35.355556 | 89 | 0.675047 |
1dde70ebb4b1b64d42b245f6e13b905a3abdb7dc | 41,711 | py | Python | pandas/tests/groupby/transform/test_transform.py | ChrisAlbertsen/pandas | da0156a302f3f7c25bfee0b410bb23865b76f3dd | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/groupby/transform/test_transform.py | ChrisAlbertsen/pandas | da0156a302f3f7c25bfee0b410bb23865b76f3dd | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/groupby/transform/test_transform.py | ChrisAlbertsen/pandas | da0156a302f3f7c25bfee0b410bb23865b76f3dd | [
"BSD-3-Clause"
] | null | null | null | """ test with the .transform """
from io import StringIO
import numpy as np
import pytest
from pandas.core.dtypes.common import (
ensure_platform_int,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.groupby.base import maybe_normalize_deprecated_kernels
from pandas.core.groupby.generic import (
DataFrameGroupBy,
SeriesGroupBy,
)
def assert_fp_equal(a, b):
    # Elementwise floating-point equality with a tight absolute tolerance.
    abs_diff = np.abs(a - b)
    assert (abs_diff < 1e-12).all()
def test_transform():
    """Assorted smoke/regression tests for basic groupby ``transform``."""
    data = Series(np.arange(9) // 3, index=np.arange(9))

    # shuffle so the transform has to align results by label, not position
    index = np.arange(9)
    np.random.shuffle(index)
    data = data.reindex(index)

    grouped = data.groupby(lambda x: x // 3)

    transformed = grouped.transform(lambda x: x * x.sum())
    # label 7 is in group {6, 7, 8} whose values are all 2: 2 * 6 == 12
    assert transformed[7] == 12

    # GH 8046
    # make sure that we preserve the input order
    df = DataFrame(
        np.arange(6, dtype="int64").reshape(3, 2), columns=["a", "b"], index=[0, 2, 1]
    )
    key = [0, 0, 1]
    expected = (
        df.sort_index()
        .groupby(key)
        .transform(lambda x: x - x.mean())
        .groupby(key)
        .mean()
    )
    result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(key).mean()
    tm.assert_frame_equal(result, expected)

    def demean(arr):
        return arr - arr.mean()

    people = DataFrame(
        np.random.randn(5, 5),
        columns=["a", "b", "c", "d", "e"],
        index=["Joe", "Steve", "Wes", "Jim", "Travis"],
    )
    key = ["one", "two", "one", "two", "one"]
    # transform and apply must agree after re-aggregating per group
    result = people.groupby(key).transform(demean).groupby(key).mean()
    expected = people.groupby(key).apply(demean).groupby(key).mean()
    tm.assert_frame_equal(result, expected)

    # GH 8430
    # grouping with a time Grouper must not raise inside transform
    df = tm.makeTimeDataFrame()
    g = df.groupby(pd.Grouper(freq="M"))
    g.transform(lambda x: x - 1)

    # GH 9700
    # the grouping column is excluded from the transformed result
    df = DataFrame({"a": range(5, 10), "b": range(5)})
    result = df.groupby("a").transform(max)
    expected = DataFrame({"b": range(5)})
    tm.assert_frame_equal(result, expected)
def test_transform_fast():
    """Exercise the fast (broadcast) paths of groupby-transform."""

    df = DataFrame({"id": np.arange(100000) / 3, "val": np.random.randn(100000)})

    grp = df.groupby("id")["val"]

    # build the expected broadcast by repeating each group's mean over the
    # group's size; positions line up because "id" is already sorted here
    values = np.repeat(grp.mean().values, ensure_platform_int(grp.count().values))
    expected = Series(values, index=df.index, name="val")

    # ufunc and string alias should produce identical results
    result = grp.transform(np.mean)
    tm.assert_series_equal(result, expected)

    result = grp.transform("mean")
    tm.assert_series_equal(result, expected)

    # GH 12737
    # mixed dtypes (float, int, datetime) through transform("first")
    df = DataFrame(
        {
            "grouping": [0, 1, 1, 3],
            "f": [1.1, 2.1, 3.1, 4.5],
            "d": date_range("2014-1-1", "2014-1-4"),
            "i": [1, 2, 3, 4],
        },
        columns=["grouping", "f", "i", "d"],
    )
    result = df.groupby("grouping").transform("first")

    dates = [
        Timestamp("2014-1-1"),
        Timestamp("2014-1-2"),
        Timestamp("2014-1-2"),
        Timestamp("2014-1-4"),
    ]
    expected = DataFrame(
        {"f": [1.1, 2.1, 2.1, 4.5], "d": dates, "i": [1, 2, 2, 4]},
        columns=["f", "i", "d"],
    )
    tm.assert_frame_equal(result, expected)

    # selection: same result when transforming only a column subset
    result = df.groupby("grouping")[["f", "i"]].transform("first")
    expected = expected[["f", "i"]]
    tm.assert_frame_equal(result, expected)

    # dup columns: duplicate column labels must survive the transform
    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["g", "a", "a"])
    result = df.groupby("g").transform("first")
    expected = df.drop("g", axis=1)
    tm.assert_frame_equal(result, expected)
def test_transform_broadcast(tsframe, ts):
    """Transform results must broadcast back onto the original index/columns."""
    grouped = ts.groupby(lambda x: x.month)
    result = grouped.transform(np.mean)

    # the index is preserved and each position carries its group's mean
    tm.assert_index_equal(result.index, ts.index)
    for _, gp in grouped:
        assert_fp_equal(result.reindex(gp.index), gp.mean())

    # same check frame-wise, verified column by column
    grouped = tsframe.groupby(lambda x: x.month)
    result = grouped.transform(np.mean)
    tm.assert_index_equal(result.index, tsframe.index)
    for _, gp in grouped:
        agged = gp.mean()
        res = result.reindex(gp.index)
        for col in tsframe:
            assert_fp_equal(res[col], agged[col])

    # group columns
    # grouping along axis=1: per-row means of each column group broadcast back
    grouped = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
    result = grouped.transform(np.mean)
    tm.assert_index_equal(result.index, tsframe.index)
    tm.assert_index_equal(result.columns, tsframe.columns)
    for _, gp in grouped:
        agged = gp.mean(1)
        res = result.reindex(columns=gp.columns)
        for idx in gp.index:
            assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis_1(request, transformation_func):
    """Transforming along axis=1 should equal transpose -> transform ->
    transpose back, for every transformation kernel (GH 36308)."""
    # GH 36308

    # TODO(2.0) Remove after pad/backfill deprecation enforced
    transformation_func = maybe_normalize_deprecated_kernels(transformation_func)
    warn = None
    if transformation_func == "tshift":
        warn = FutureWarning

        request.node.add_marker(pytest.mark.xfail(reason="tshift is deprecated"))
    # fillna needs a method argument; all other kernels take no extra args
    args = ("ffill",) if transformation_func == "fillna" else ()

    df = DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}, index=["x", "y"])
    with tm.assert_produces_warning(warn):
        result = df.groupby([0, 0, 1], axis=1).transform(transformation_func, *args)
        expected = df.T.groupby([0, 0, 1]).transform(transformation_func, *args).T

    if transformation_func in ["diff", "shift"]:
        # Result contains nans, so transpose coerces to float
        expected["b"] = expected["b"].astype("int64")

    # cumcount returns Series; the rest are DataFrame
    tm.assert_equal(result, expected)
def test_transform_axis_ts(tsframe):
    """Axes must be set correctly for axis=0/1 transforms, including with a
    non-monotonic index (GH 12713)."""

    # make sure that we are setting the axes
    # correctly when on axis=0 or 1
    # in the presence of a non-monotonic indexer
    # GH12713

    base = tsframe.iloc[0:5]
    r = len(base.index)
    c = len(base.columns)
    tso = DataFrame(
        np.random.randn(r, c), index=base.index, columns=base.columns, dtype="float64"
    )
    # monotonic
    ts = tso
    grouped = ts.groupby(lambda x: x.weekday())
    # demeaning via transform must equal demeaning via apply
    result = ts - grouped.transform("mean")
    expected = grouped.apply(lambda x: x - x.mean())
    tm.assert_frame_equal(result, expected)

    # same demeaning performed along axis=1 on the transpose
    ts = ts.T
    grouped = ts.groupby(lambda x: x.weekday(), axis=1)
    result = ts - grouped.transform("mean")
    expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
    tm.assert_frame_equal(result, expected)

    # non-monotonic
    ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
    grouped = ts.groupby(lambda x: x.weekday())
    result = ts - grouped.transform("mean")
    expected = grouped.apply(lambda x: x - x.mean())
    tm.assert_frame_equal(result, expected)

    ts = ts.T
    grouped = ts.groupby(lambda x: x.weekday(), axis=1)
    result = ts - grouped.transform("mean")
    expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
    tm.assert_frame_equal(result, expected)
def test_transform_dtype():
    # GH 9807: the dtype coming out of transform reflects the aggregation
    # result (mean of ints -> float), not the input dtype.
    frame = DataFrame([[1, 3], [2, 3]])
    transformed = frame.groupby(1).transform("mean")
    tm.assert_frame_equal(transformed, DataFrame([[1.5], [1.5]]))
def test_transform_bug():
    # GH 5712: grouping on a datetime column must not break a rank
    # transform on the selected value column.
    frame = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})
    ranked = frame.groupby("A")["B"].transform(lambda grp: grp.rank(ascending=False))
    expected = Series([5.0, 4.0, 3.0, 2.0, 1.0], name="B")
    tm.assert_series_equal(ranked, expected)
def test_transform_numeric_to_boolean():
    # GH 16875: a transform yielding booleans should behave identically for
    # float-valued and int-valued input columns.
    expected = Series([True, True], name="A")
    for a_values in ([1.1, 2.2], [1, 2]):
        frame = DataFrame({"A": a_values, "B": [1, 2]})
        result = frame.groupby("B").A.transform(lambda x: True)
        tm.assert_series_equal(result, expected)
def test_transform_datetime_to_timedelta():
    # GH 15429: date arithmetic inside a transform must come back as
    # timedelta64 rather than being coerced to something else.
    df = DataFrame({"A": Timestamp("20130101"), "B": np.arange(5)})
    zero_delta = Timestamp("20130101") - Timestamp("20130101")
    expected = Series([zero_delta] * 5, name="A")

    # date math that keeps the datetime dtype inside the transform ...
    anchor = df["A"][0]
    shifted = df.groupby("A")["A"].transform(
        lambda grp: grp.max() - grp.min() + anchor
    )
    tm.assert_series_equal(shifted - anchor, expected)

    # ... and date math whose transform result is already a timedelta
    delta = df.groupby("A")["A"].transform(lambda grp: grp.max() - grp.min())
    tm.assert_series_equal(delta, expected)
def test_transform_datetime_to_numeric():
    """Transforms built from ``.dt`` accessor results must yield numeric
    output (GH 10972)."""
    # GH 10972
    # convert dt to float
    df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")})
    result = df.groupby("a").b.transform(
        lambda x: x.dt.dayofweek - x.dt.dayofweek.mean()
    )

    # centering on the (float) mean produces float output
    expected = Series([-0.5, 0.5], name="b")
    tm.assert_series_equal(result, expected)

    # convert dt to int
    df = DataFrame({"a": 1, "b": date_range("2015-01-01", periods=2, freq="D")})
    result = df.groupby("a").b.transform(
        lambda x: x.dt.dayofweek - x.dt.dayofweek.min()
    )

    # NOTE(review): expected uses the default int64 dtype; ``dayofweek``'s
    # integer width is version-dependent in later pandas -- confirm if ported
    expected = Series([0, 1], name="b")
    tm.assert_series_equal(result, expected)
def test_transform_casting():
    # GH 13046: a datetime ``diff`` inside transform must produce
    # timedelta64, both via SeriesGroupBy and via DataFrameGroupBy.
    data = """
    idx A ID3 DATETIME
    0 B-028 b76cd912ff "2014-10-08 13:43:27"
    1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
    2 B-076 1a682034f8 "2014-10-08 14:29:01"
    3 B-023 b76cd912ff "2014-10-08 18:39:34"
    4 B-023 f88g8d7sds "2014-10-08 18:40:18"
    5 B-033 b76cd912ff "2014-10-08 18:44:30"
    6 B-032 b76cd912ff "2014-10-08 18:46:00"
    7 B-037 b76cd912ff "2014-10-08 18:52:15"
    8 B-046 db959faf02 "2014-10-08 18:59:59"
    9 B-053 b76cd912ff "2014-10-08 19:17:48"
    10 B-065 b76cd912ff "2014-10-08 19:21:38"
    """
    frame = pd.read_csv(
        StringIO(data), sep=r"\s+", index_col=[0], parse_dates=["DATETIME"]
    )

    # SeriesGroupBy path
    ser_result = frame.groupby("ID3")["DATETIME"].transform(lambda grp: grp.diff())
    assert is_timedelta64_dtype(ser_result.dtype)

    # DataFrameGroupBy path
    frame_result = (
        frame[["ID3", "DATETIME"]].groupby("ID3").transform(lambda grp: grp.diff())
    )
    assert is_timedelta64_dtype(frame_result.DATETIME.dtype)
def test_transform_multiple(ts):
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(tsframe):
    # groupby.fillna dispatch matches an explicit transform of the same fill.
    sparse = tsframe[::5].reindex(tsframe.index)
    grouped = sparse.groupby(lambda ts: ts.month)
    dispatched = grouped.fillna(method="pad")
    expected = sparse.groupby(lambda ts: ts.month).transform(
        lambda x: x.fillna(method="pad")
    )
    tm.assert_frame_equal(dispatched, expected)
def test_transform_transformation_func(request, transformation_func):
    """Each named transformation kernel matches applying the equivalent
    method group-by-group (GH#30918)."""
    # GH 30918
    df = DataFrame(
        {
            "A": ["foo", "foo", "foo", "foo", "bar", "bar", "baz"],
            "B": [1, 2, np.nan, 3, 3, np.nan, 4],
        },
        index=date_range("2020-01-01", "2020-01-07"),
    )
    # TODO(2.0) Remove after pad/backfill deprecation enforced
    transformation_func = maybe_normalize_deprecated_kernels(transformation_func)
    if transformation_func == "cumcount":
        # cumcount has no Series/DataFrame method; emulate it per group
        test_op = lambda x: x.transform("cumcount")
        mock_op = lambda x: Series(range(len(x)), x.index)
    elif transformation_func == "fillna":
        test_op = lambda x: x.transform("fillna", value=0)
        mock_op = lambda x: x.fillna(value=0)
    elif transformation_func == "tshift":
        msg = (
            "Current behavior of groupby.tshift is inconsistent with other "
            "transformations. See GH34452 for more details"
        )
        request.node.add_marker(pytest.mark.xfail(reason=msg))
    else:
        test_op = lambda x: x.transform(transformation_func)
        mock_op = lambda x: getattr(x, transformation_func)()
    result = test_op(df.groupby("A"))
    # manually split into the three groups and apply the method directly
    groups = [df[["B"]].iloc[:4], df[["B"]].iloc[4:6], df[["B"]].iloc[6:]]
    expected = concat([mock_op(g) for g in groups])
    if transformation_func == "cumcount":
        # cumcount yields a Series even for a frame input
        tm.assert_series_equal(result, expected)
    else:
        tm.assert_frame_equal(result, expected)
def test_transform_select_columns(df):
    # Selecting columns before transform matches transforming the selection.
    def take_mean(x):
        return x.mean()

    result = df.groupby("A")[["C", "D"]].transform(take_mean)
    expected = df[["C", "D"]].groupby(df["A"]).transform(take_mean)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("duplicates", [True, False])
def test_transform_exclude_nuisance(df, duplicates):
    """Nuisance (non-numeric) columns are dropped with a warning (GH#41427)."""
    # case that goes through _transform_item_by_item
    if duplicates:
        # make sure we work with duplicate columns GH#41427
        df.columns = ["A", "C", "C", "D"]
    # this also tests orderings in transform between
    # series/frame to make sure it's consistent
    expected = {}
    grouped = df.groupby("A")
    gbc = grouped["C"]
    # with duplicated "C" one of the two columns is non-numeric -> warning
    warn = FutureWarning if duplicates else None
    with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
        expected["C"] = gbc.transform(np.mean)
    if duplicates:
        # squeeze 1-column DataFrame down to Series
        expected["C"] = expected["C"]["C"]
        assert isinstance(gbc.obj, DataFrame)
        assert isinstance(gbc, DataFrameGroupBy)
    else:
        assert isinstance(gbc, SeriesGroupBy)
        assert isinstance(gbc.obj, Series)
    expected["D"] = grouped["D"].transform(np.mean)
    expected = DataFrame(expected)
    with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"):
        result = df.groupby("A").transform(np.mean)
    tm.assert_frame_equal(result, expected)
def test_transform_function_aliases(df):
    # String aliases must match their numpy counterparts on both paths.
    with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"):
        by_alias = df.groupby("A").transform("mean")
    by_func = df.groupby("A").transform(np.mean)
    tm.assert_frame_equal(by_alias, by_func)

    by_alias = df.groupby("A")["C"].transform("mean")
    by_func = df.groupby("A")["C"].transform(np.mean)
    tm.assert_series_equal(by_alias, by_func)
def test_series_fast_transform_date():
    # GH 13191: rows whose group key is NaN come back as NaT.
    frame = DataFrame(
        {"grouping": [np.nan, 1, 1, 3], "d": date_range("2014-1-1", "2014-1-4")}
    )
    result = frame.groupby("grouping")["d"].transform("first")
    expected = Series(
        [pd.NaT, Timestamp("2014-1-2"), Timestamp("2014-1-2"), Timestamp("2014-1-4")],
        name="d",
    )
    tm.assert_series_equal(result, expected)
def test_transform_length():
    # GH 9697: output length always equals input length, regardless of
    # frame/series path and builtin/np reducers.
    frame = DataFrame({"col1": [1, 1, 2, 2], "col2": [1, 2, 3, np.nan]})
    expected = Series([3.0] * 4)

    def nsum(x):
        return np.nansum(x)

    for reducer in (sum, nsum):
        tm.assert_series_equal(
            frame.groupby("col1").transform(reducer)["col2"],
            expected,
            check_names=False,
        )
        tm.assert_series_equal(
            frame.groupby("col1")["col2"].transform(reducer),
            expected,
            check_names=False,
        )
def test_transform_coercion():
    # 14457
    # when we are transforming be sure to not coerce
    # via assignment
    df = DataFrame({"A": ["a", "a"], "B": [0, 1]})
    g = df.groupby("A")
    expected = g.transform(np.mean)
    # np.mean without an axis warns that it will soon reduce to a scalar
    msg = "will return a scalar mean"
    with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False):
        result = g.transform(lambda x: np.mean(x))
    tm.assert_frame_equal(result, expected)
    # an explicit axis=0 keeps the column-wise mean and is warning-free
    with tm.assert_produces_warning(None):
        result2 = g.transform(lambda x: np.mean(x, axis=0))
    tm.assert_frame_equal(result2, expected)
def test_groupby_transform_with_int():
    """Item-by-item transform upcasts int results to float where needed (GH#3740)."""
    # GH 3740, make sure that we might upcast on item-by-item transform
    # floats
    df = DataFrame(
        {
            "A": [1, 1, 1, 2, 2, 2],
            "B": Series(1, dtype="float64"),
            "C": Series([1, 2, 3, 1, 2, 3], dtype="float64"),
            "D": "foo",
        }
    )
    with np.errstate(all="ignore"):
        with tm.assert_produces_warning(
            FutureWarning, match="Dropping invalid columns"
        ):
            result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std())
    # constant column "B" has zero std -> NaN everywhere
    expected = DataFrame(
        {"B": np.nan, "C": Series([-1, 0, 1, -1, 0, 1], dtype="float64")}
    )
    tm.assert_frame_equal(result, expected)
    # int case
    df = DataFrame(
        {
            "A": [1, 1, 1, 2, 2, 2],
            "B": 1,
            "C": [1, 2, 3, 1, 2, 3],
            "D": "foo",
        }
    )
    with np.errstate(all="ignore"):
        with tm.assert_produces_warning(
            FutureWarning, match="Dropping invalid columns"
        ):
            result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std())
    expected = DataFrame({"B": np.nan, "C": [-1.0, 0.0, 1.0, -1.0, 0.0, 1.0]})
    tm.assert_frame_equal(result, expected)
    # int that needs float conversion
    s = Series([2, 3, 4, 10, 5, -1])
    df = DataFrame({"A": [1, 1, 1, 2, 2, 2], "B": 1, "C": s, "D": "foo"})
    with np.errstate(all="ignore"):
        with tm.assert_produces_warning(
            FutureWarning, match="Dropping invalid columns"
        ):
            result = df.groupby("A").transform(lambda x: (x - x.mean()) / x.std())
    # build the expectation group by group
    s1 = s.iloc[0:3]
    s1 = (s1 - s1.mean()) / s1.std()
    s2 = s.iloc[3:6]
    s2 = (s2 - s2.mean()) / s2.std()
    expected = DataFrame({"B": np.nan, "C": concat([s1, s2])})
    tm.assert_frame_equal(result, expected)
    # int doesn't get downcasted
    with tm.assert_produces_warning(FutureWarning, match="Dropping invalid columns"):
        result = df.groupby("A").transform(lambda x: x * 2 / 2)
    expected = DataFrame({"B": 1.0, "C": [2.0, 3.0, 4.0, 10.0, 5.0, -1.0]})
    tm.assert_frame_equal(result, expected)
def test_groupby_transform_with_nan_group():
    # GH 9941: a NaN group key yields NaN in the transformed output.
    keys = [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]
    frame = DataFrame({"a": range(10), "b": keys})
    result = frame.groupby(frame.b)["a"].transform(max)
    expected = Series(
        [1.0, 1.0, 2.0, 3.0, np.nan, 6.0, 6.0, 9.0, 9.0, 9.0], name="a"
    )
    tm.assert_series_equal(result, expected)
def test_transform_mixed_type():
    """groupby.apply with a mutating function keeps dtypes and equals the
    per-group manual application."""
    index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]])
    df = DataFrame(
        {
            "d": [1.0, 1.0, 1.0, 2.0, 2.0, 2.0],
            "c": np.tile(["a", "b", "c"], 2),
            "v": np.arange(1.0, 7.0),
        },
        index=index,
    )

    def f(group):
        # mutates the group in place, then returns only its first row
        group["g"] = group["d"] * 2
        return group[:1]

    grouped = df.groupby("c")
    result = grouped.apply(f)
    assert result["d"].dtype == np.float64
    # this is by definition a mutating operation!
    with pd.option_context("mode.chained_assignment", None):
        for key, group in grouped:
            res = f(group)
            tm.assert_frame_equal(res, result.loc[key])
@pytest.mark.parametrize(
    "op, args, targop",
    [
        ("cumprod", (), lambda x: x.cumprod()),
        ("cumsum", (), lambda x: x.cumsum()),
        ("shift", (-1,), lambda x: x.shift(-1)),
        ("shift", (1,), lambda x: x.shift()),
    ],
)
def test_cython_transform_series(op, args, targop):
    # GH 4095: cython transforms agree with the generic transform path,
    # with and without missing values.
    clean = Series(np.random.randn(1000))
    with_nans = clean.copy()
    with_nans.iloc[2:10] = np.nan
    labels = np.random.randint(0, 50, size=1000).astype(float)
    for data in (clean, with_nans):
        grouped = data.groupby(labels)
        expected = grouped.transform(targop)
        tm.assert_series_equal(expected, data.groupby(labels).transform(op, *args))
        tm.assert_series_equal(expected, getattr(data.groupby(labels), op)(*args))
@pytest.mark.parametrize("op", ["cumprod", "cumsum"])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize(
    "input, exp",
    [
        # When everything is NaN
        ({"key": ["b"] * 10, "value": np.nan}, Series([np.nan] * 10, name="value")),
        # When there is a single NaN
        (
            {"key": ["b"] * 10 + ["a"] * 2, "value": [3] * 3 + [np.nan] + [3] * 8},
            {
                ("cumprod", False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
                ("cumprod", True): [
                    3.0,
                    9.0,
                    27.0,
                    np.nan,
                    81.0,
                    243.0,
                    729.0,
                    2187.0,
                    6561.0,
                    19683.0,
                    3.0,
                    9.0,
                ],
                ("cumsum", False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
                ("cumsum", True): [
                    3.0,
                    6.0,
                    9.0,
                    np.nan,
                    12.0,
                    15.0,
                    18.0,
                    21.0,
                    24.0,
                    27.0,
                    3.0,
                    6.0,
                ],
            },
        ),
    ],
)
def test_groupby_cum_skipna(op, skipna, input, exp):
    """skipna=False propagates a NaN through the rest of its group;
    skipna=True leaves only the NaN position itself missing."""
    df = DataFrame(input)
    result = df.groupby("key")["value"].transform(op, skipna=skipna)
    if isinstance(exp, dict):
        # expectations differ per (op, skipna) combination
        expected = exp[(op, skipna)]
    else:
        expected = exp
    expected = Series(expected, name="value")
    tm.assert_series_equal(expected, result)
@pytest.mark.slow
@pytest.mark.parametrize(
    "op, args, targop",
    [
        ("cumprod", (), lambda x: x.cumprod()),
        ("cumsum", (), lambda x: x.cumsum()),
        ("shift", (-1,), lambda x: x.shift(-1)),
        ("shift", (1,), lambda x: x.shift()),
    ],
)
def test_cython_transform_frame(op, args, targop):
    """Cython frame transforms match group-wise ``apply`` across dtypes,
    groupers, and single/multi indexes (GH#4095)."""
    s = Series(np.random.randn(1000))
    s_missing = s.copy()
    s_missing.iloc[2:10] = np.nan
    labels = np.random.randint(0, 50, size=1000).astype(float)
    strings = list("qwertyuiopasdfghjklz")
    strings_missing = strings[:]
    strings_missing[5] = np.nan
    # one column of every dtype family we care about
    df = DataFrame(
        {
            "float": s,
            "float_missing": s_missing,
            "int": [1, 1, 1, 1, 2] * 200,
            "datetime": date_range("1990-1-1", periods=1000),
            "timedelta": pd.timedelta_range(1, freq="s", periods=1000),
            "string": strings * 50,
            "string_missing": strings_missing * 50,
        },
        columns=[
            "float",
            "float_missing",
            "int",
            "datetime",
            "timedelta",
            "string",
            "string_missing",
        ],
    )
    df["cat"] = df["string"].astype("category")
    df2 = df.copy()
    df2.index = MultiIndex.from_product([range(100), range(10)])
    # DataFrame - Single and MultiIndex,
    # group by values, index level, columns
    for df in [df, df2]:
        for gb_target in [
            {"by": labels},
            {"level": 0},
            {"by": "string"},
        ]:  # {"by": 'string_missing'}]:
            # {"by": ['int','string']}]:
            gb = df.groupby(**gb_target)
            # allowlisted methods set the selection before applying
            # bit a of hack to make sure the cythonized shift
            # is equivalent to pre 0.17.1 behavior
            if op == "shift":
                gb._set_group_selection()
            if op != "shift" and "int" not in gb_target:
                # numeric apply fastpath promotes dtype so have
                # to apply separately and concat
                i = gb[["int"]].apply(targop)
                f = gb[["float", "float_missing"]].apply(targop)
                expected = concat([f, i], axis=1)
            else:
                expected = gb.apply(targop)
            expected = expected.sort_index(axis=1)
            tm.assert_frame_equal(expected, gb.transform(op, *args).sort_index(axis=1))
            tm.assert_frame_equal(expected, getattr(gb, op)(*args).sort_index(axis=1))
    # individual columns
    for c in df:
        if (
            c not in ["float", "int", "float_missing"]
            and op != "shift"
            and not (c == "timedelta" and op == "cumsum")
        ):
            # non-numeric columns must raise for cumulative ops
            msg = "|".join(
                [
                    "does not support .* operations",
                    ".* is not supported for object dtype",
                    "is not implemented for this dtype",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                gb[c].transform(op)
            with pytest.raises(TypeError, match=msg):
                getattr(gb[c], op)()
        else:
            expected = gb[c].apply(targop)
            expected.name = c
            tm.assert_series_equal(expected, gb[c].transform(op, *args))
            tm.assert_series_equal(expected, getattr(gb[c], op)(*args))
def test_transform_with_non_scalar_group():
    # GH 10165
    # transform along axis=1 must yield one scalar per group, so a row-wise
    # normalization that returns a frame raises
    cols = MultiIndex.from_tuples(
        [
            ("syn", "A"),
            ("mis", "A"),
            ("non", "A"),
            ("syn", "C"),
            ("mis", "C"),
            ("non", "C"),
            ("syn", "T"),
            ("mis", "T"),
            ("non", "T"),
            ("syn", "G"),
            ("mis", "G"),
            ("non", "G"),
        ]
    )
    df = DataFrame(
        np.random.randint(1, 10, (4, 12)), columns=cols, index=["A", "C", "G", "T"]
    )
    msg = "transform must return a scalar value for each group.*"
    with pytest.raises(ValueError, match=msg):
        df.groupby(axis=1, level=1).transform(lambda z: z.div(z.sum(axis=1), axis=0))
@pytest.mark.parametrize(
    "cols,expected",
    [
        ("a", Series([1, 1, 1], name="a")),
        (
            ["a", "c"],
            DataFrame({"a": [1, 1, 1], "c": [1, 1, 1]}),
        ),
    ],
)
@pytest.mark.parametrize("agg_func", ["count", "rank", "size"])
def test_transform_numeric_ret(cols, expected, agg_func):
    # GH#19200 / GH#27469: count/rank/size transforms on datetime columns
    # return numeric results.
    frame = DataFrame(
        {"a": date_range("2018-01-01", periods=3), "b": range(3), "c": range(7, 10)}
    )
    result = frame.groupby("b")[cols].transform(agg_func)
    if agg_func == "rank":
        # rank always yields floats
        expected = expected.astype("float")
    if agg_func == "size" and cols == ["a", "c"]:
        # transform("size") collapses to an unnamed Series
        expected = expected["a"].rename(None)
    tm.assert_equal(result, expected)
def test_transform_ffill():
    # GH 24211: ffill via transform works on both the frame and series paths.
    frame = DataFrame(
        [["a", 0.0], ["a", float("nan")], ["b", 1.0], ["b", float("nan")]],
        columns=["key", "values"],
    )
    filled = [0.0, 0.0, 1.0, 1.0]
    result = frame.groupby("key").transform("ffill")
    tm.assert_frame_equal(result, DataFrame({"values": filled}))
    result = frame.groupby("key")["values"].transform("ffill")
    tm.assert_series_equal(result, Series(filled, name="values"))
@pytest.mark.parametrize("mix_groupings", [True, False])
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("val1,val2", [("foo", "bar"), (1, 2), (1.0, 2.0)])
@pytest.mark.parametrize(
    "fill_method,limit,exp_vals",
    [
        (
            "ffill",
            None,
            [np.nan, np.nan, "val1", "val1", "val1", "val2", "val2", "val2"],
        ),
        ("ffill", 1, [np.nan, np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan]),
        (
            "bfill",
            None,
            ["val1", "val1", "val1", "val2", "val2", "val2", np.nan, np.nan],
        ),
        ("bfill", 1, [np.nan, "val1", "val1", np.nan, "val2", "val2", np.nan, np.nan]),
    ],
)
def test_group_fill_methods(
    mix_groupings, as_series, val1, val2, fill_method, limit, exp_vals
):
    """ffill/bfill with ``limit`` fill only within each group."""
    vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
    _exp_vals = list(exp_vals)
    # Overwrite placeholder values
    for index, exp_val in enumerate(_exp_vals):
        if exp_val == "val1":
            _exp_vals[index] = val1
        elif exp_val == "val2":
            _exp_vals[index] = val2
    # Need to modify values and expectations depending on the
    # Series / DataFrame that we ultimately want to generate
    if mix_groupings:  # ['a', 'b', 'a, 'b', ...]
        keys = ["a", "b"] * len(vals)

        def interweave(list_obj):
            # duplicate each element in place: [x, y] -> [x, x, y, y]
            temp = []
            for x in list_obj:
                temp.extend([x, x])
            return temp

        _exp_vals = interweave(_exp_vals)
        vals = interweave(vals)
    else:  # ['a', 'a', 'a', ... 'b', 'b', 'b']
        keys = ["a"] * len(vals) + ["b"] * len(vals)
        _exp_vals = _exp_vals * 2
        vals = vals * 2
    df = DataFrame({"key": keys, "val": vals})
    if as_series:
        result = getattr(df.groupby("key")["val"], fill_method)(limit=limit)
        exp = Series(_exp_vals, name="val")
        tm.assert_series_equal(result, exp)
    else:
        result = getattr(df.groupby("key"), fill_method)(limit=limit)
        exp = DataFrame({"val": _exp_vals})
        tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("fill_method", ["ffill", "bfill"])
def test_pad_stable_sorting(fill_method):
    # GH 21207: filling within a single group must not reorder rows.
    vals = [np.nan] * 10 + [1] * 10
    if fill_method == "bfill":
        vals = vals[::-1]
    frame = DataFrame({"x": [0] * 20, "y": vals})
    result = getattr(frame.groupby("x"), fill_method)()
    expected = frame.drop("x", axis=1)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize(
    "freq",
    [
        None,
        pytest.param(
            "D",
            marks=pytest.mark.xfail(
                reason="GH#23918 before method uses freq in vectorized approach"
            ),
        ),
    ],
)
@pytest.mark.parametrize("periods", [1, -1])
@pytest.mark.parametrize("fill_method", ["ffill", "bfill", None])
@pytest.mark.parametrize("limit", [None, 1])
def test_pct_change(test_series, freq, periods, fill_method, limit):
    """Group-wise pct_change equals the filled values divided by their
    group-wise shift minus one."""
    # GH 21200, 21621, 30463
    vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]
    keys = ["a", "b"]
    key_v = np.repeat(keys, len(vals))
    df = DataFrame({"key": key_v, "vals": vals * 2})
    # pre-fill exactly like pct_change would, then compare with shift math
    df_g = df
    if fill_method is not None:
        df_g = getattr(df.groupby("key"), fill_method)(limit=limit)
    grp = df_g.groupby(df.key)
    expected = grp["vals"].obj / grp["vals"].shift(periods) - 1
    if test_series:
        result = df.groupby("key")["vals"].pct_change(
            periods=periods, fill_method=fill_method, limit=limit, freq=freq
        )
        tm.assert_series_equal(result, expected)
    else:
        result = df.groupby("key").pct_change(
            periods=periods, fill_method=fill_method, limit=limit, freq=freq
        )
        tm.assert_frame_equal(result, expected.to_frame("vals"))
@pytest.mark.parametrize(
    "func, expected_status",
    [
        ("ffill", ["shrt", "shrt", "lng", np.nan, "shrt", "ntrl", "ntrl"]),
        ("bfill", ["shrt", "lng", "lng", "shrt", "shrt", "ntrl", np.nan]),
    ],
)
def test_ffill_bfill_non_unique_multilevel(func, expected_status):
    # GH 19437: fill within each symbol even when the MultiIndex has
    # duplicate entries.
    date = pd.to_datetime(
        ["2018-01-01"] * 4 + ["2018-01-02", "2018-01-01", "2018-01-02"]
    )
    symbol = ["MSFT"] * 3 + ["AAPL"] * 2 + ["TSLA"] * 2
    status = ["shrt", np.nan, "lng", np.nan, "shrt", "ntrl", np.nan]
    frame = DataFrame({"date": date, "symbol": symbol, "status": status})
    frame = frame.set_index(["date", "symbol"])
    result = getattr(frame.groupby("symbol")["status"], func)()
    index = MultiIndex.from_tuples(
        tuples=list(zip(date, symbol)), names=["date", "symbol"]
    )
    expected = Series(expected_status, index=index, name="status")
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", [np.any, np.all])
def test_any_all_np_func(func):
    # GH 20653: np.any/np.all dispatch to the groupby any/all reductions,
    # and the NaN-keyed row stays NaN.
    frame = DataFrame(
        [["foo", True], [np.nan, True], ["foo", True]], columns=["key", "val"]
    )
    result = frame.groupby("key")["val"].transform(func)
    expected = Series([True, np.nan, True], name="val")
    tm.assert_series_equal(result, expected)
def test_groupby_transform_rename():
    """Transform ignores column renames done by the user function.

    https://github.com/pandas-dev/pandas/issues/23461
    """

    def demean_rename(x):
        result = x - x.mean()
        if isinstance(x, Series):
            return result
        # Use a real f-string: the previous plain "{c}_demeaned" literal mapped
        # every column to the same name, which would collide for multi-column
        # frames instead of producing "<col>_demeaned" per column.
        result = result.rename(columns={c: f"{c}_demeaned" for c in result.columns})
        return result

    df = DataFrame({"group": list("ababa"), "value": [1, 1, 1, 2, 2]})
    expected = DataFrame({"value": [-1.0 / 3, -0.5, -1.0 / 3, 0.5, 2.0 / 3]})
    # frame path: result keeps the original column names despite the rename
    result = df.groupby("group").transform(demean_rename)
    tm.assert_frame_equal(result, expected)
    # series path
    result_single = df.groupby("group").value.transform(demean_rename)
    tm.assert_series_equal(result_single, expected["value"])
@pytest.mark.parametrize("func", [min, max, np.min, np.max, "first", "last"])
def test_groupby_transform_timezone_column(func):
    # GH 24198: tz-aware datetimes survive a groupby transform intact.
    now = pd.to_datetime("now", utc=True).tz_convert("Asia/Singapore")
    frame = DataFrame({"end_time": [now], "id": [1]})
    frame["max_end_time"] = frame.groupby("id").end_time.transform(func)
    expected = DataFrame([[now, 1, now]], columns=["end_time", "id", "max_end_time"])
    tm.assert_frame_equal(frame, expected)
@pytest.mark.parametrize(
    "func, values",
    [
        ("idxmin", ["1/1/2011"] * 2 + ["1/3/2011"] * 7 + ["1/10/2011"]),
        ("idxmax", ["1/2/2011"] * 2 + ["1/9/2011"] * 7 + ["1/10/2011"]),
    ],
)
def test_groupby_transform_with_datetimes(func, values):
    # GH 15306: idxmin/idxmax transforms broadcast the located index values.
    trading_days = date_range("1/1/2011", periods=10, freq="D")
    stocks = DataFrame({"price": np.arange(10.0)}, index=trading_days)
    stocks["week_id"] = trading_days.isocalendar().week
    by_week = stocks.groupby(stocks["week_id"])
    result = by_week["price"].transform(func)
    expected = Series(pd.to_datetime(values), index=trading_days, name="price")
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["cumsum", "cumprod", "cummin", "cummax"])
def test_transform_absent_categories(func):
    # GH 16771: cython transforms cope with more categories than rows.
    frame = DataFrame({"x": Categorical([1], range(2)), "y": [1]})
    result = getattr(frame.y.groupby(frame.x), func)()
    tm.assert_series_equal(result, frame.y)
@pytest.mark.parametrize("func", ["ffill", "bfill", "shift"])
@pytest.mark.parametrize("key, val", [("level", 0), ("by", Series([0]))])
def test_ffill_not_in_axis(func, key, val):
    # GH 21521: grouping by an index level or external Series keeps every
    # original column in the fill/shift result.
    frame = DataFrame([[np.nan]])
    result = getattr(frame.groupby(**{key: val}), func)()
    tm.assert_frame_equal(result, frame)
def test_transform_invalid_name_raises():
    # GH#27486: unknown names and non-transform methods are rejected.
    frame = DataFrame({"a": [0, 1, 1, 2]})
    grouped = frame.groupby(["a", "b", "b", "c"])
    with pytest.raises(ValueError, match="not a valid function name"):
        grouped.transform("some_arbitrary_name")
    # "aggregate" exists on the object but is not a valid transform name
    assert hasattr(grouped, "aggregate")
    with pytest.raises(ValueError, match="not a valid function name"):
        grouped.transform("aggregate")
    # Test SeriesGroupBy
    grouped_series = frame["a"].groupby(["a", "b", "b", "c"])
    with pytest.raises(ValueError, match="not a valid function name"):
        grouped_series.transform("some_arbitrary_name")
@pytest.mark.parametrize(
    "obj",
    [
        DataFrame(
            {"a": [0, 0, 0, 1, 1, 1], "b": range(6)},
            index=["A", "B", "C", "D", "E", "F"],
        ),
        Series([0, 0, 0, 1, 1, 1], index=["A", "B", "C", "D", "E", "F"]),
    ],
)
def test_transform_agg_by_name(request, reduction_func, obj):
    """Reduction names passed to transform broadcast over the input index."""
    func = reduction_func
    g = obj.groupby(np.repeat([0, 1], 3))
    if func == "ngroup":  # GH#27468
        request.node.add_marker(
            pytest.mark.xfail(reason="TODO: g.transform('ngroup') doesn't work")
        )
    if func == "corrwith" and isinstance(obj, Series):  # GH#32293
        request.node.add_marker(
            pytest.mark.xfail(reason="TODO: implement SeriesGroupBy.corrwith")
        )
    # some reductions need positional arguments to be callable at all
    args = {"nth": [0], "quantile": [0.5], "corrwith": [obj]}.get(func, [])
    result = g.transform(func, *args)
    # this is the *definition* of a transformation
    tm.assert_index_equal(result.index, obj.index)
    if func != "size" and obj.ndim == 2:
        # size returns a Series, unlike other transforms
        tm.assert_index_equal(result.columns, obj.columns)
    # verify that values were broadcasted across each group
    assert len(set(DataFrame(result).iloc[-3:, -1])) == 1
def test_transform_lambda_with_datetimetz():
    # GH 27496: per-group tz-localization using the group name as the tz.
    naive_times = [
        Timestamp("2010-07-15 03:14:45"),
        Timestamp("2010-11-19 18:47:06"),
    ]
    frame = DataFrame({"time": naive_times, "timezone": ["Etc/GMT+4", "US/Eastern"]})
    result = frame.groupby(["timezone"])["time"].transform(
        lambda x: x.dt.tz_localize(x.name)
    )
    expected = Series(
        [
            Timestamp("2010-07-15 03:14:45", tz="Etc/GMT+4"),
            Timestamp("2010-11-19 18:47:06", tz="US/Eastern"),
        ],
        name="time",
    )
    tm.assert_series_equal(result, expected)
def test_transform_fastpath_raises():
    # GH#29631 case where fastpath defined in groupby.generic _choose_path
    # raises, but slow_path does not
    df = DataFrame({"A": [1, 1, 2, 2], "B": [1, -1, 1, 2]})
    gb = df.groupby("A")

    def func(grp):
        # we want a function such that func(frame) fails but func.apply(frame)
        # works
        if grp.ndim == 2:
            # Ensure that fast_path fails
            raise NotImplementedError("Don't cross the streams")
        return grp * 2

    # Check that the fastpath raises, see _transform_general
    obj = gb._obj_with_exclusions
    gen = gb.grouper.get_iterator(obj, axis=gb.axis)
    fast_path, slow_path = gb._define_paths(func)
    _, group = next(gen)
    with pytest.raises(NotImplementedError, match="Don't cross the streams"):
        fast_path(group)
    # transform falls back to the slow path and still succeeds
    result = gb.transform(func)
    expected = DataFrame([2, -2, 2, 4], columns=["B"])
    tm.assert_frame_equal(result, expected)
def test_transform_lambda_indexing():
    # GH 7883
    # broadcasting the last row of each level-"A" group across the group
    # must keep the sorted MultiIndex intact
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "flux", "foo", "flux"],
            "B": ["one", "one", "two", "three", "two", "six", "five", "three"],
            "C": range(8),
            "D": range(8),
            "E": range(8),
        }
    )
    df = df.set_index(["A", "B"])
    df = df.sort_index()
    result = df.groupby(level="A").transform(lambda x: x.iloc[-1])
    expected = DataFrame(
        {
            "C": [3, 3, 7, 7, 4, 4, 4, 4],
            "D": [3, 3, 7, 7, 4, 4, 4, 4],
            "E": [3, 3, 7, 7, 4, 4, 4, 4],
        },
        index=MultiIndex.from_tuples(
            [
                ("bar", "one"),
                ("bar", "three"),
                ("flux", "six"),
                ("flux", "three"),
                ("foo", "five"),
                ("foo", "one"),
                ("foo", "two"),
                ("foo", "two"),
            ],
            names=["A", "B"],
        ),
    )
    tm.assert_frame_equal(result, expected)
def test_categorical_and_not_categorical_key(observed):
# Checks that groupby-transform, when grouping by both a categorical
# and a non-categorical key, doesn't try to expand the output to include
# non-observed categories but instead matches the input shape.
# GH 32494
df_with_categorical = DataFrame(
{
"A": Categorical(["a", "b", "a"], categories=["a", "b", "c"]),
"B": [1, 2, 3],
"C": ["a", "b", "a"],
}
)
df_without_categorical = DataFrame(
{"A": ["a", "b", "a"], "B": [1, 2, 3], "C": ["a", "b", "a"]}
)
# DataFrame case
result = df_with_categorical.groupby(["A", "C"], observed=observed).transform("sum")
expected = df_without_categorical.groupby(["A", "C"]).transform("sum")
tm.assert_frame_equal(result, expected)
expected_explicit = DataFrame({"B": [4, 2, 4]})
tm.assert_frame_equal(result, expected_explicit)
# Series case
result = df_with_categorical.groupby(["A", "C"], observed=observed)["B"].transform(
"sum"
)
expected = df_without_categorical.groupby(["A", "C"])["B"].transform("sum")
tm.assert_series_equal(result, expected)
expected_explicit = Series([4, 2, 4], name="B")
tm.assert_series_equal(result, expected_explicit)
def test_string_rank_grouping():
    # GH 19354: rank works when a column is consumed as the group key.
    frame = DataFrame({"A": [1, 1, 2], "B": [1, 2, 3]})
    result = frame.groupby("A").transform("rank")
    tm.assert_frame_equal(result, DataFrame({"B": [1.0, 2.0, 1.0]}))
def test_transform_cumcount():
    # GH 27472: transform("cumcount") matches the direct cumcount call.
    frame = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)})
    grouped = frame.groupby(np.repeat([0, 1], 3))
    expected = Series([0, 1, 2, 0, 1, 2])
    tm.assert_series_equal(grouped.cumcount(), expected)
    tm.assert_series_equal(grouped.transform("cumcount"), expected)
def test_null_group_lambda_self():
    # GH 17093
    # the NaN-keyed row is dropped, so the identity transform returns only
    # the row that belongs to a real group
    df = DataFrame({"A": [1, np.nan], "B": [1, 1]})
    result = df.groupby("A").transform(lambda x: x)
    expected = DataFrame([1], columns=["B"])
    tm.assert_frame_equal(result, expected)
| 32.334109 | 88 | 0.566493 |
b785d40a114d084516e15a18afeaf89b950c9d50 | 516 | py | Python | repeater.py | Mem2019/Zulip-CTF-Bot | 1e5f8bba97a9370f2a1fc12abf78e60e99bc9f18 | [
"MIT"
] | null | null | null | repeater.py | Mem2019/Zulip-CTF-Bot | 1e5f8bba97a9370f2a1fc12abf78e60e99bc9f18 | [
"MIT"
] | null | null | null | repeater.py | Mem2019/Zulip-CTF-Bot | 1e5f8bba97a9370f2a1fc12abf78e60e99bc9f18 | [
"MIT"
] | null | null | null | # 人类的本质是复读机,所以此repeater.py乃是最强人工智能
class Repeater:
def __init__(self, send_message, threshold):
self.records = {}
self.send_message = send_message
assert threshold > 1
self.threshold = threshold
def update(self, stream, subject, content):
r = self.records.get((stream, subject))
if r and r[0] == content:
self.records[(stream, subject)] = (content, r[1] + 1)
if r[1] + 1 == self.threshold:
self.send_message(stream, subject, content)
else:
self.records[(stream, subject)] = (content, 1)
| 32.25 | 56 | 0.689922 |
c2c22e6295ed3238a95321ff44e0194179423f6f | 32,020 | py | Python | sympy/utilities/tests/test_iterables.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 1 | 2020-09-09T20:40:17.000Z | 2020-09-09T20:40:17.000Z | sympy/utilities/tests/test_iterables.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 14 | 2018-02-08T10:11:03.000Z | 2019-04-16T10:32:46.000Z | sympy/utilities/tests/test_iterables.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 1 | 2022-02-04T13:50:29.000Z | 2022-02-04T13:50:29.000Z | from textwrap import dedent
from itertools import islice, product
from sympy.core.basic import Basic
from sympy.core.numbers import Integer
from sympy.core.sorting import ordered
from sympy.core.symbol import (Dummy, symbols)
from sympy.functions.combinatorial.factorials import factorial
from sympy.matrices.dense import Matrix
from sympy.combinatorics import RGS_enum, RGS_unrank, Permutation
from sympy.utilities.iterables import (
_partition, _set_partitions, binary_partitions, bracelets, capture,
cartes, common_prefix, common_suffix, connected_components, dict_merge,
filter_symbols, flatten, generate_bell, generate_derangements,
generate_involutions, generate_oriented_forest, group, has_dups, ibin,
iproduct, kbins, minlex, multiset, multiset_combinations,
multiset_partitions, multiset_permutations, necklaces, numbered_symbols,
partitions, permutations, postfixes,
prefixes, reshape, rotate_left, rotate_right, runs, sift,
strongly_connected_components, subsets, take, topological_sort, unflatten,
uniq, variations, ordered_partitions, rotations, is_palindromic, iterable,
NotIterable, multiset_derangements)
from sympy.utilities.enumerative import (
factoring_visitor, multiset_partitions_taocp )
from sympy.core.singleton import S
from sympy.testing.pytest import raises
w, x, y, z = symbols('w,x,y,z')
def test_is_palindromic():
    # whole-string checks, including the trivial empty/single-char cases
    for s in ('', 'x', 'xx', 'xyx'):
        assert is_palindromic(s)
    for s in ('xy', 'xyzx'):
        assert not is_palindromic(s)
    # the i/j arguments restrict the check to the slice s[i:j]
    assert is_palindromic('xxyzzyx', 1)
    assert not is_palindromic('xxyzzyx', 2)
    assert is_palindromic('xxyzzyx', 2, -1)
    assert is_palindromic('xxyzzyx', 2, 6)
    assert is_palindromic('xxyzyx', 1)
    assert not is_palindromic('xxyzyx', 2)
    assert is_palindromic('xxyzyx', 2, 2 + 3)
def test_flatten():
    assert flatten((1, (1,))) == [1, 1]
    assert flatten((x, (x,))) == [x, x]

    ls = [[(-2, -1), (1, 2)], [(0, 0)]]
    # each additional level unwraps one more layer, saturating at fully flat
    by_level = {
        0: ls,
        1: [(-2, -1), (1, 2), (0, 0)],
        2: [-2, -1, 1, 2, 0, 0],
        3: [-2, -1, 1, 2, 0, 0],
    }
    for levels, expected in by_level.items():
        assert flatten(ls, levels=levels) == expected
    raises(ValueError, lambda: flatten(ls, levels=-1))

    class MyOp(Basic):
        pass

    # cls controls which Basic subclasses get expanded into their args
    assert flatten([MyOp(x, y), z]) == [MyOp(x, y), z]
    assert flatten([MyOp(x, y), z], cls=MyOp) == [x, y, z]
    assert flatten({1, 11, 2}) == list({1, 11, 2})
def test_iproduct():
    """Cartesian product that also works over infinite iterables."""
    # finite inputs behave like itertools.product
    assert list(iproduct()) == [()]
    assert list(iproduct([])) == []
    assert list(iproduct([1,2,3])) == [(1,),(2,),(3,)]
    assert sorted(iproduct([1, 2], [3, 4, 5])) == [
        (1,3),(1,4),(1,5),(2,3),(2,4),(2,5)]
    assert sorted(iproduct([0,1],[0,1],[0,1])) == [
        (0,0,0),(0,0,1),(0,1,0),(0,1,1),(1,0,0),(1,0,1),(1,1,0),(1,1,1)]
    # infinite inputs still yield an iterable covering every tuple
    assert iterable(iproduct(S.Integers)) is True
    assert iterable(iproduct(S.Integers, S.Integers)) is True
    assert (3,) in iproduct(S.Integers)
    assert (4, 5) in iproduct(S.Integers, S.Integers)
    assert (1, 2, 3) in iproduct(S.Integers, S.Integers, S.Integers)
    triples = set(islice(iproduct(S.Integers, S.Integers, S.Integers), 1000))
    for n1, n2, n3 in triples:
        assert isinstance(n1, Integer)
        assert isinstance(n2, Integer)
        assert isinstance(n3, Integer)
    # every tuple from a finite sub-cube is eventually produced
    for t in set(product(*([range(-2, 3)]*3))):
        assert t in iproduct(S.Integers, S.Integers, S.Integers)
def test_group():
    # each triple is (input, grouped runs, run-length encoded runs)
    cases = [
        ([], [], []),
        ([1], [[1]], [(1, 1)]),
        ([1, 1], [[1, 1]], [(1, 2)]),
        ([1, 1, 1], [[1, 1, 1]], [(1, 3)]),
        ([1, 2, 1], [[1], [2], [1]], [(1, 1), (2, 1), (1, 1)]),
        (
            [1, 1, 2, 2, 2, 1, 3, 3],
            [[1, 1], [2, 2, 2], [1], [3, 3]],
            [(1, 2), (2, 3), (1, 1), (3, 2)],
        ),
    ]
    for seq, runs, encoded in cases:
        assert group(seq) == runs
        assert group(seq, multiple=False) == encoded
def test_subsets():
    """k-subsets with and without repetition, and all sizes when k is omitted."""
    # combinations
    assert list(subsets([1, 2, 3], 0)) == [()]
    assert list(subsets([1, 2, 3], 1)) == [(1,), (2,), (3,)]
    assert list(subsets([1, 2, 3], 2)) == [(1, 2), (1, 3), (2, 3)]
    assert list(subsets([1, 2, 3], 3)) == [(1, 2, 3)]
    l = list(range(4))
    # combinations with repetition (multichoose)
    assert list(subsets(l, 0, repetition=True)) == [()]
    assert list(subsets(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
    assert list(subsets(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
                                                    (0, 3), (1, 1), (1, 2),
                                                    (1, 3), (2, 2), (2, 3),
                                                    (3, 3)]
    assert list(subsets(l, 3, repetition=True)) == [(0, 0, 0), (0, 0, 1),
                                                    (0, 0, 2), (0, 0, 3),
                                                    (0, 1, 1), (0, 1, 2),
                                                    (0, 1, 3), (0, 2, 2),
                                                    (0, 2, 3), (0, 3, 3),
                                                    (1, 1, 1), (1, 1, 2),
                                                    (1, 1, 3), (1, 2, 2),
                                                    (1, 2, 3), (1, 3, 3),
                                                    (2, 2, 2), (2, 2, 3),
                                                    (2, 3, 3), (3, 3, 3)]
    assert len(list(subsets(l, 4, repetition=True))) == 35
    # k larger than the population is empty without repetition
    assert list(subsets(l[:2], 3, repetition=False)) == []
    assert list(subsets(l[:2], 3, repetition=True)) == [(0, 0, 0),
                                                        (0, 0, 1),
                                                        (0, 1, 1),
                                                        (1, 1, 1)]
    # omitting k yields every size from 0..len
    assert list(subsets([1, 2], repetition=True)) == \
        [(), (1,), (2,), (1, 1), (1, 2), (2, 2)]
    assert list(subsets([1, 2], repetition=False)) == \
        [(), (1,), (2,), (1, 2)]
    assert list(subsets([1, 2, 3], 2)) == \
        [(1, 2), (1, 3), (2, 3)]
    assert list(subsets([1, 2, 3], 2, repetition=True)) == \
        [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
def test_variations():
    """variations() enumerates k-permutations; with repetition=True it is
    the k-fold cartesian product (n**k tuples)."""
    # permutations
    l = list(range(4))
    assert list(variations(l, 0, repetition=False)) == [()]
    assert list(variations(l, 1, repetition=False)) == [(0,), (1,), (2,), (3,)]
    assert list(variations(l, 2, repetition=False)) == [(0, 1), (0, 2), (0, 3), (1, 0), (1, 2), (1, 3), (2, 0), (2, 1), (2, 3), (3, 0), (3, 1), (3, 2)]
    assert list(variations(l, 3, repetition=False)) == [(0, 1, 2), (0, 1, 3), (0, 2, 1), (0, 2, 3), (0, 3, 1), (0, 3, 2), (1, 0, 2), (1, 0, 3), (1, 2, 0), (1, 2, 3), (1, 3, 0), (1, 3, 2), (2, 0, 1), (2, 0, 3), (2, 1, 0), (2, 1, 3), (2, 3, 0), (2, 3, 1), (3, 0, 1), (3, 0, 2), (3, 1, 0), (3, 1, 2), (3, 2, 0), (3, 2, 1)]
    assert list(variations(l, 0, repetition=True)) == [()]
    assert list(variations(l, 1, repetition=True)) == [(0,), (1,), (2,), (3,)]
    assert list(variations(l, 2, repetition=True)) == [(0, 0), (0, 1), (0, 2),
                                                       (0, 3), (1, 0), (1, 1),
                                                       (1, 2), (1, 3), (2, 0),
                                                       (2, 1), (2, 2), (2, 3),
                                                       (3, 0), (3, 1), (3, 2),
                                                       (3, 3)]
    # 4**3 and 4**4 tuples respectively
    assert len(list(variations(l, 3, repetition=True))) == 64
    assert len(list(variations(l, 4, repetition=True))) == 256
    # k > n gives nothing without repetition
    assert list(variations(l[:2], 3, repetition=False)) == []
    assert list(variations(l[:2], 3, repetition=True)) == [
        (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
        (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)
    ]
def test_cartes():
    """cartes() is the cartesian product (itertools.product-compatible,
    including the repeat keyword and the empty-input case)."""
    assert list(cartes([1, 2], [3, 4, 5])) == \
        [(1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5)]
    # product of zero iterables is the single empty tuple
    assert list(cartes()) == [()]
    assert list(cartes('a')) == [('a',)]
    assert list(cartes('a', repeat=2)) == [('a', 'a')]
    assert list(cartes(list(range(2)))) == [(0,), (1,)]
def test_filter_symbols():
    """filter_symbols() drops the excluded symbols from a symbol stream."""
    s = numbered_symbols()
    filtered = filter_symbols(s, symbols("x0 x2 x3"))
    # x0, x2 and x3 are skipped, so the first three yielded are x1, x4, x5
    assert take(filtered, 3) == list(symbols("x1 x4 x5"))
def test_numbered_symbols():
    """numbered_symbols() honors the cls, start and exclude options."""
    s = numbered_symbols(cls=Dummy)
    assert isinstance(next(s), Dummy)
    # C1 is excluded, so the stream starts at C2
    assert next(numbered_symbols('C', start=1, exclude=[symbols('C1')])) == \
        symbols('C2')
def test_sift():
    """sift() groups items by a key function; binary=True returns a
    (True-list, False-list) pair and requires a boolean-valued key."""
    assert sift(list(range(5)), lambda _: _ % 2) == {1: [1, 3], 0: [0, 2, 4]}
    assert sift([x, y], lambda _: _.has(x)) == {False: [y], True: [x]}
    assert sift([S.One], lambda _: _.has(x)) == {False: [1]}
    assert sift([0, 1, 2, 3], lambda x: x % 2, binary=True) == (
        [1, 3], [0, 2])
    assert sift([0, 1, 2, 3], lambda x: x % 3 == 1, binary=True) == (
        [1], [0, 2, 3])
    # non-binary key with binary=True is an error
    raises(ValueError, lambda:
        sift([0, 1, 2, 3], lambda x: x % 3, binary=True))
def test_take():
    """take() consumes n items; the generator keeps its position between
    successive calls."""
    X = numbered_symbols()
    assert take(X, 5) == list(symbols('x0:5'))
    # second take continues where the first left off
    assert take(X, 5) == list(symbols('x5:10'))
    assert take([1, 2, 3, 4, 5], 5) == [1, 2, 3, 4, 5]
def test_dict_merge():
    """dict_merge() merges dicts; later arguments win on key collisions."""
    assert dict_merge({}, {1: x, y: z}) == {1: x, y: z}
    assert dict_merge({1: x, y: z}, {}) == {1: x, y: z}
    assert dict_merge({2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
    assert dict_merge({1: x, y: z}, {2: z}) == {1: x, 2: z, y: z}
    assert dict_merge({1: y, 2: z}, {1: x, y: z}) == {1: x, 2: z, y: z}
    assert dict_merge({1: x, y: z}, {1: y, 2: z}) == {1: y, 2: z, y: z}
def test_prefixes():
    """prefixes() yields every leading sublist, shortest first."""
    assert list(prefixes([])) == []
    assert list(prefixes([1])) == [[1]]
    assert list(prefixes([1, 2])) == [[1], [1, 2]]
    seq = [1, 2, 3, 4, 5]
    # each prefix is seq truncated at length 1..len(seq)
    assert list(prefixes(seq)) == [seq[:k] for k in range(1, 6)]
def test_postfixes():
    """postfixes() yields every trailing sublist, shortest first."""
    assert list(postfixes([])) == []
    assert list(postfixes([1])) == [[1]]
    assert list(postfixes([1, 2])) == [[2], [1, 2]]
    assert list(postfixes([1, 2, 3, 4, 5])) == \
        [[5], [4, 5], [3, 4, 5], [2, 3, 4, 5], [1, 2, 3, 4, 5]]
def test_topological_sort():
    """topological_sort() orders a DAG's vertices; key breaks ties and a
    cycle raises ValueError."""
    V = [2, 3, 5, 7, 8, 9, 10, 11]
    E = [(7, 11), (7, 8), (5, 11),
         (3, 8), (3, 10), (11, 2),
         (11, 9), (11, 10), (8, 9)]
    assert topological_sort((V, E)) == [3, 5, 7, 8, 11, 2, 9, 10]
    assert topological_sort((V, E), key=lambda v: -v) == \
        [7, 5, 11, 3, 10, 8, 9, 2]
    # adding (10, 7) creates a cycle 7 -> 11 -> 10 -> 7
    raises(ValueError, lambda: topological_sort((V, E + [(10, 7)])))
def test_strongly_connected_components():
    """strongly_connected_components() returns SCCs of a directed graph
    in reverse topological order of the condensation."""
    assert strongly_connected_components(([], [])) == []
    assert strongly_connected_components(([1, 2, 3], [])) == [[1], [2], [3]]
    V = [1, 2, 3]
    E = [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1)]
    assert strongly_connected_components((V, E)) == [[1, 2, 3]]
    V = [1, 2, 3, 4]
    E = [(1, 2), (2, 3), (3, 2), (3, 4)]
    assert strongly_connected_components((V, E)) == [[4], [2, 3], [1]]
    V = [1, 2, 3, 4]
    E = [(1, 2), (2, 1), (3, 4), (4, 3)]
    assert strongly_connected_components((V, E)) == [[1, 2], [3, 4]]
def test_connected_components():
    """connected_components() treats edges as undirected and returns the
    weakly connected components."""
    assert connected_components(([], [])) == []
    assert connected_components(([1, 2, 3], [])) == [[1], [2], [3]]
    V = [1, 2, 3]
    E = [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1)]
    assert connected_components((V, E)) == [[1, 2, 3]]
    V = [1, 2, 3, 4]
    E = [(1, 2), (2, 3), (3, 2), (3, 4)]
    assert connected_components((V, E)) == [[1, 2, 3, 4]]
    V = [1, 2, 3, 4]
    E = [(1, 2), (3, 4)]
    assert connected_components((V, E)) == [[1, 2], [3, 4]]
def test_rotate():
    """rotate_left/rotate_right rotate a list by n positions and return a
    NEW list (the input is never mutated)."""
    A = [0, 1, 2, 3, 4]
    assert rotate_left(A, 2) == [2, 3, 4, 0, 1]
    assert rotate_right(A, 1) == [4, 0, 1, 2, 3]
    A = []
    B = rotate_right(A, 1)
    assert B == []
    # mutating the result must not touch the original
    B.append(1)
    assert A == []
    B = rotate_left(A, 1)
    assert B == []
    B.append(1)
    assert A == []
def test_multiset_partitions():
    """multiset_partitions() enumerates partitions of a (multi)set into
    blocks, optionally of a fixed number of parts; an integer argument n
    means partitions of range(n)."""
    A = [0, 1, 2, 3, 4]
    assert list(multiset_partitions(A, 5)) == [[[0], [1], [2], [3], [4]]]
    assert len(list(multiset_partitions(A, 4))) == 10
    assert len(list(multiset_partitions(A, 3))) == 25
    assert list(multiset_partitions([1, 1, 1, 2, 2], 2)) == [
        [[1, 1, 1, 2], [2]], [[1, 1, 1], [2, 2]], [[1, 1, 2, 2], [1]],
        [[1, 1, 2], [1, 2]], [[1, 1], [1, 2, 2]]]
    assert list(multiset_partitions([1, 1, 2, 2], 2)) == [
        [[1, 1, 2], [2]], [[1, 1], [2, 2]], [[1, 2, 2], [1]],
        [[1, 2], [1, 2]]]
    assert list(multiset_partitions([1, 2, 3, 4], 2)) == [
        [[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
        [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
        [[1], [2, 3, 4]]]
    assert list(multiset_partitions([1, 2, 2], 2)) == [
        [[1, 2], [2]], [[1], [2, 2]]]
    # integer n is shorthand for partitioning range(n)
    assert list(multiset_partitions(3)) == [
        [[0, 1, 2]], [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]],
        [[0], [1], [2]]]
    assert list(multiset_partitions(3, 2)) == [
        [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]]]
    assert list(multiset_partitions([1] * 3, 2)) == [[[1], [1, 1]]]
    assert list(multiset_partitions([1] * 3)) == [
        [[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
    # input order does not matter: results match the sorted input
    a = [3, 2, 1]
    assert list(multiset_partitions(a)) == \
        list(multiset_partitions(sorted(a)))
    assert list(multiset_partitions(a, 5)) == []
    assert list(multiset_partitions(a, 1)) == [[[1, 2, 3]]]
    assert list(multiset_partitions(a + [4], 5)) == []
    assert list(multiset_partitions(a + [4], 1)) == [[[1, 2, 3, 4]]]
    assert list(multiset_partitions(2, 5)) == []
    assert list(multiset_partitions(2, 1)) == [[[0, 1]]]
    assert list(multiset_partitions('a')) == [[['a']]]
    assert list(multiset_partitions('a', 2)) == []
    assert list(multiset_partitions('ab')) == [[['a', 'b']], [['a'], ['b']]]
    assert list(multiset_partitions('ab', 1)) == [[['a', 'b']]]
    # NOTE: a repeated-element string in one part is kept as the string itself
    assert list(multiset_partitions('aaa', 1)) == [['aaa']]
    assert list(multiset_partitions([1, 1], 1)) == [[[1, 1]]]
    ans = [('mpsyy',), ('mpsy', 'y'), ('mps', 'yy'), ('mps', 'y', 'y'),
           ('mpyy', 's'), ('mpy', 'sy'), ('mpy', 's', 'y'), ('mp', 'syy'),
           ('mp', 'sy', 'y'), ('mp', 's', 'yy'), ('mp', 's', 'y', 'y'),
           ('msyy', 'p'), ('msy', 'py'), ('msy', 'p', 'y'), ('ms', 'pyy'),
           ('ms', 'py', 'y'), ('ms', 'p', 'yy'), ('ms', 'p', 'y', 'y'),
           ('myy', 'ps'), ('myy', 'p', 's'), ('my', 'psy'), ('my', 'ps', 'y'),
           ('my', 'py', 's'), ('my', 'p', 'sy'), ('my', 'p', 's', 'y'),
           ('m', 'psyy'), ('m', 'psy', 'y'), ('m', 'ps', 'yy'),
           ('m', 'ps', 'y', 'y'), ('m', 'pyy', 's'), ('m', 'py', 'sy'),
           ('m', 'py', 's', 'y'), ('m', 'p', 'syy'),
           ('m', 'p', 'sy', 'y'), ('m', 'p', 's', 'yy'),
           ('m', 'p', 's', 'y', 'y')]
    assert list(tuple("".join(part) for part in p)
                for p in multiset_partitions('sympy')) == ans
    # factoring_visitor turns multiplicity partitions into factorizations
    factorings = [[24], [8, 3], [12, 2], [4, 6], [4, 2, 3],
                  [6, 2, 2], [2, 2, 2, 3]]
    assert list(factoring_visitor(p, [2,3]) for
                p in multiset_partitions_taocp([3, 1])) == factorings
def test_multiset_combinations():
    """multiset_combinations() enumerates k-combinations of a multiset,
    accepting either a sequence or a multiplicity dict; negative
    multiplicities raise ValueError."""
    ans = ['iii', 'iim', 'iip', 'iis', 'imp', 'ims', 'ipp', 'ips',
           'iss', 'mpp', 'mps', 'mss', 'pps', 'pss', 'sss']
    assert [''.join(i) for i in
            list(multiset_combinations('mississippi', 3))] == ans
    # the multiplicity-dict form must give the same answer
    M = multiset('mississippi')
    assert [''.join(i) for i in
            list(multiset_combinations(M, 3))] == ans
    assert [''.join(i) for i in multiset_combinations(M, 30)] == []
    assert list(multiset_combinations([[1], [2, 3]], 2)) == [[[1], [2, 3]]]
    assert len(list(multiset_combinations('a', 3))) == 0
    assert len(list(multiset_combinations('a', 0))) == 1
    assert list(multiset_combinations('abc', 1)) == [['a'], ['b'], ['c']]
    raises(ValueError, lambda: list(multiset_combinations({0: 3, 1: -1}, 2)))
def test_multiset_permutations():
    """multiset_permutations() enumerates distinct permutations of a
    multiset (optionally of a given size) in sorted order; the captured
    output below pins the exact enumeration order for [0,0,1,0,1]."""
    ans = ['abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
           'byba', 'yabb', 'ybab', 'ybba']
    assert [''.join(i) for i in multiset_permutations('baby')] == ans
    assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans
    assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
    assert list(multiset_permutations([0, 2, 1], 2)) == [
        [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
    assert len(list(multiset_permutations('a', 0))) == 1
    assert len(list(multiset_permutations('a', 3))) == 0
    # empty inputs: one empty permutation for size None/0, none otherwise
    for nul in ([], {}, ''):
        assert list(multiset_permutations(nul)) == [[]]
        assert list(multiset_permutations(nul, 0)) == [[]]
        # impossible requests give no result
        assert list(multiset_permutations(nul, 1)) == []
        assert list(multiset_permutations(nul, -1)) == []
    def test():
        for i in range(1, 7):
            print(i)
            for p in multiset_permutations([0, 0, 1, 0, 1], i):
                print(p)
    assert capture(lambda: test()) == dedent('''\
    1
    [0]
    [1]
    2
    [0, 0]
    [0, 1]
    [1, 0]
    [1, 1]
    3
    [0, 0, 0]
    [0, 0, 1]
    [0, 1, 0]
    [0, 1, 1]
    [1, 0, 0]
    [1, 0, 1]
    [1, 1, 0]
    4
    [0, 0, 0, 1]
    [0, 0, 1, 0]
    [0, 0, 1, 1]
    [0, 1, 0, 0]
    [0, 1, 0, 1]
    [0, 1, 1, 0]
    [1, 0, 0, 0]
    [1, 0, 0, 1]
    [1, 0, 1, 0]
    [1, 1, 0, 0]
    5
    [0, 0, 0, 1, 1]
    [0, 0, 1, 0, 1]
    [0, 0, 1, 1, 0]
    [0, 1, 0, 0, 1]
    [0, 1, 0, 1, 0]
    [0, 1, 1, 0, 0]
    [1, 0, 0, 0, 1]
    [1, 0, 0, 1, 0]
    [1, 0, 1, 0, 0]
    [1, 1, 0, 0, 0]
    6\n''')
    raises(ValueError, lambda: list(multiset_permutations({0: 3, 1: -1})))
def test_partitions():
    """partitions() enumerates integer partitions as multiplicity dicts,
    with m (max parts) and k (max part size) limits; also cross-checks
    _set_partitions against RGS_unrank/RGS_enum."""
    ans = [[{}], [(0, {})]]
    for i in range(2):
        assert list(partitions(0, size=i)) == ans[i]
        assert list(partitions(1, 0, size=i)) == ans[i]
        assert list(partitions(6, 2, 2, size=i)) == ans[i]
        assert list(partitions(6, 2, None, size=i)) != ans[i]
        assert list(partitions(6, None, 2, size=i)) != ans[i]
        assert list(partitions(6, 2, 0, size=i)) == ans[i]
    assert [p for p in partitions(6, k=2)] == [
        {2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}]
    assert [p for p in partitions(6, k=3)] == [
        {3: 2}, {1: 1, 2: 1, 3: 1}, {1: 3, 3: 1}, {2: 3}, {1: 2, 2: 2},
        {1: 4, 2: 1}, {1: 6}]
    assert [p for p in partitions(8, k=4, m=3)] == [
        {4: 2}, {1: 1, 3: 1, 4: 1}, {2: 2, 4: 1}, {2: 1, 3: 2}] == [
        i for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i)
        and sum(i.values()) <=3]
    assert [p for p in partitions(S(3), m=2)] == [
        {3: 1}, {1: 1, 2: 1}]
    assert [i for i in partitions(4, k=3)] == [
        {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] == [
        i for i in partitions(4) if all(k <= 3 for k in i)]
    # Consistency check on output of _partitions and RGS_unrank.
    # This provides a sanity test on both routines.  Also verifies that
    # the total number of partitions is the same in each case.
    #    (from pkrathmann2)
    for n in range(2, 6):
        i = 0
        for m, q in _set_partitions(n):
            assert q == RGS_unrank(i, n)
            i += 1
        assert i == RGS_enum(n)
def test_binary_partitions():
    """binary_partitions() enumerates partitions into powers of two; the
    generator reuses its list, hence the defensive i[:] copies."""
    assert [i[:] for i in binary_partitions(10)] == [[8, 2], [8, 1, 1],
        [4, 4, 2], [4, 4, 1, 1], [4, 2, 2, 2], [4, 2, 2, 1, 1],
        [4, 2, 1, 1, 1, 1], [4, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2],
        [2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1],
        [2, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
    assert len([j[:] for j in binary_partitions(16)]) == 36
def test_bell_perm():
    """generate_bell() yields all n! permutations in the Steinhaus-
    Johnson-Trotter order, matching Permutation.next_trotterjohnson."""
    assert [len(set(generate_bell(i))) for i in range(1, 7)] == [
        factorial(i) for i in range(1, 7)]
    assert list(generate_bell(3)) == [
        (0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)]
    # generate_bell and trotterjohnson are advertised to return the same
    # permutations; this is not technically necessary so this test could
    # be removed
    for n in range(1, 5):
        p = Permutation(range(n))
        b = generate_bell(n)
        for bi in b:
            assert bi == tuple(p.array_form)
            p = p.next_trotterjohnson()
    raises(ValueError, lambda: list(generate_bell(0)))  # XXX is this consistent with other permutation algorithms?
def test_involutions():
    """generate_involutions() yields the self-inverse permutations; the
    expected counts are OEIS A000085."""
    lengths = [1, 2, 4, 10, 26, 76]
    for n, N in enumerate(lengths):
        i = list(generate_involutions(n + 1))
        assert len(i) == N
        # every involution squares to the identity
        assert len({Permutation(j)**2 for j in i}) == 1
def test_derangements():
    """generate_derangements() yields permutations with no fixed points
    (multisets supported); multiset_derangements is exercised directly."""
    assert len(list(generate_derangements(list(range(6))))) == 265
    assert ''.join(''.join(i) for i in generate_derangements('abcde')) == (
    'badecbaecdbcaedbcdeabceadbdaecbdeacbdecabeacdbedacbedcacabedcadebcaebd'
    'cdaebcdbeacdeabcdebaceabdcebadcedabcedbadabecdaebcdaecbdcaebdcbeadceab'
    'dcebadeabcdeacbdebacdebcaeabcdeadbceadcbecabdecbadecdabecdbaedabcedacb'
    'edbacedbca')
    assert list(generate_derangements([0, 1, 2, 3])) == [
        [1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1],
        [2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], [3, 2, 1, 0]]
    assert list(generate_derangements([0, 1, 2, 2])) == [
        [2, 2, 0, 1], [2, 2, 1, 0]]
    assert list(generate_derangements('ba')) == [list('ab')]
    # multiset_derangements
    D = multiset_derangements
    # a majority element makes derangement impossible
    assert list(D('abb')) == []
    assert [''.join(i) for i in D('ab')] == ['ba']
    assert [''.join(i) for i in D('abc')] == ['bca', 'cab']
    assert [''.join(i) for i in D('aabb')] == ['bbaa']
    assert [''.join(i) for i in D('aabbcccc')] == [
        'ccccaabb', 'ccccabab', 'ccccabba', 'ccccbaab', 'ccccbaba',
        'ccccbbaa']
    assert [''.join(i) for i in D('aabbccc')] == [
        'cccabba', 'cccabab', 'cccaabb', 'ccacbba', 'ccacbab',
        'ccacabb', 'cbccbaa', 'cbccaba', 'cbccaab', 'bcccbaa',
        'bcccaba', 'bcccaab']
def test_necklaces():
    """necklaces(n, k, f) counts length-n strings over k symbols up to
    rotation (f=0) or rotation+reflection, i.e. bracelets (f=1)."""
    def count(n, k, f):
        return len(list(necklaces(n, k, f)))
    m = []
    for i in range(1, 8):
        m.append((
            i, count(i, 2, 0), count(i, 2, 1), count(i, 3, 1)))
    assert Matrix(m) == Matrix([
        [1, 2, 2, 3],
        [2, 3, 3, 6],
        [3, 4, 4, 10],
        [4, 6, 6, 21],
        [5, 8, 8, 39],
        [6, 14, 13, 92],
        [7, 20, 18, 198]])
def test_bracelets():
    """bracelets(n, k) enumerates length-n strings over k symbols up to
    rotation and reflection."""
    bc = [i for i in bracelets(2, 4)]
    assert Matrix(bc) == Matrix([
        [0, 0],
        [0, 1],
        [0, 2],
        [0, 3],
        [1, 1],
        [1, 2],
        [1, 3],
        [2, 2],
        [2, 3],
        [3, 3]
        ])
    bc = [i for i in bracelets(4, 2)]
    assert Matrix(bc) == Matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 1, 1],
        [0, 1, 0, 1],
        [0, 1, 1, 1],
        [1, 1, 1, 1]
        ])
def test_generate_oriented_forest():
    """generate_oriented_forest(n) yields level sequences of oriented
    forests on n nodes; 1842 of them for n=10 (OEIS A000081 partial sums)."""
    assert list(generate_oriented_forest(5)) == [[0, 1, 2, 3, 4],
        [0, 1, 2, 3, 3], [0, 1, 2, 3, 2], [0, 1, 2, 3, 1], [0, 1, 2, 3, 0],
        [0, 1, 2, 2, 2], [0, 1, 2, 2, 1], [0, 1, 2, 2, 0], [0, 1, 2, 1, 2],
        [0, 1, 2, 1, 1], [0, 1, 2, 1, 0], [0, 1, 2, 0, 1], [0, 1, 2, 0, 0],
        [0, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 1, 1, 0, 1], [0, 1, 1, 0, 0],
        [0, 1, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0]]
    assert len(list(generate_oriented_forest(10))) == 1842
def test_unflatten():
    """unflatten() groups a flat sequence into tuples of size n (default 2);
    the length must be a positive multiple of n."""
    r = list(range(10))
    assert unflatten(r) == list(zip(r[::2], r[1::2]))
    assert unflatten(r, 5) == [tuple(r[:5]), tuple(r[5:])]
    raises(ValueError, lambda: unflatten(list(range(10)), 3))
    raises(ValueError, lambda: unflatten(list(range(10)), -2))
def test_common_prefix_suffix():
    """common_prefix/common_suffix return the longest shared leading or
    trailing sublist of the given sequences."""
    assert common_prefix([], [1]) == []
    assert common_prefix(list(range(3))) == [0, 1, 2]
    assert common_prefix(list(range(3)), list(range(4))) == [0, 1, 2]
    assert common_prefix([1, 2, 3], [1, 2, 5]) == [1, 2]
    assert common_prefix([1, 2, 3], [1, 3, 5]) == [1]
    assert common_suffix([], [1]) == []
    assert common_suffix(list(range(3))) == [0, 1, 2]
    assert common_suffix(list(range(3)), list(range(3))) == [0, 1, 2]
    assert common_suffix(list(range(3)), list(range(4))) == []
    assert common_suffix([1, 2, 3], [9, 2, 3]) == [2, 3]
    assert common_suffix([1, 2, 3], [9, 7, 3]) == [3]
def test_minlex():
    """minlex() returns the lexicographically smallest rotation of a
    sequence; directed=False also considers the reversed sequence."""
    assert minlex([1, 2, 0]) == (0, 1, 2)
    assert minlex((1, 2, 0)) == (0, 1, 2)
    assert minlex((1, 0, 2)) == (0, 2, 1)
    assert minlex((1, 0, 2), directed=False) == (0, 1, 2)
    # strings come back as strings
    assert minlex('aba') == 'aab'
    assert minlex(('bb', 'aaa', 'c', 'a'), key=len) == ('c', 'a', 'bb', 'aaa')
def test_ordered():
    """ordered() sorts with a sequence of tie-breaking keys; with
    default=False and warn=True an unresolved tie raises ValueError."""
    assert list(ordered((x, y), hash, default=False)) in [[x, y], [y, x]]
    assert list(ordered((x, y), hash, default=False)) == \
        list(ordered((y, x), hash, default=False))
    assert list(ordered((x, y))) == [x, y]
    seq, keys = [[[1, 2, 1], [0, 3, 1], [1, 1, 3], [2], [1]],
                 (lambda x: len(x), lambda x: sum(x))]
    assert list(ordered(seq, keys, default=False, warn=False)) == \
        [[1], [2], [1, 2, 1], [0, 3, 1], [1, 1, 3]]
    # [1,2,1] and [0,3,1] tie on both keys, so warn=True raises
    raises(ValueError, lambda:
        list(ordered(seq, keys, default=False, warn=True)))
def test_runs():
    """runs() splits a sequence into maximal runs under a comparison
    operator (default: strictly increasing)."""
    assert runs([]) == []
    assert runs([1]) == [[1]]
    assert runs([1, 1]) == [[1], [1]]
    assert runs([1, 1, 2]) == [[1], [1, 2]]
    assert runs([1, 2, 1]) == [[1, 2], [1]]
    assert runs([2, 1, 1]) == [[2], [1], [1]]
    from operator import lt
    # with lt, runs are strictly decreasing instead
    assert runs([2, 1, 1], lt) == [[2, 1], [1]]
def test_reshape():
    """reshape() regroups a flat sequence into the nested template shape;
    the template's container types (list/tuple/set) are preserved and the
    sequence length must be a multiple of the template's flat size."""
    seq = list(range(1, 9))
    assert reshape(seq, [4]) == \
        [[1, 2, 3, 4], [5, 6, 7, 8]]
    assert reshape(seq, (4,)) == \
        [(1, 2, 3, 4), (5, 6, 7, 8)]
    assert reshape(seq, (2, 2)) == \
        [(1, 2, 3, 4), (5, 6, 7, 8)]
    assert reshape(seq, (2, [2])) == \
        [(1, 2, [3, 4]), (5, 6, [7, 8])]
    assert reshape(seq, ((2,), [2])) == \
        [((1, 2), [3, 4]), ((5, 6), [7, 8])]
    assert reshape(seq, (1, [2], 1)) == \
        [(1, [2, 3], 4), (5, [6, 7], 8)]
    assert reshape(tuple(seq), ([[1], 1, (2,)],)) == \
        (([[1], 2, (3, 4)],), ([[5], 6, (7, 8)],))
    assert reshape(tuple(seq), ([1], 1, (2,))) == \
        (([1], 2, (3, 4)), ([5], 6, (7, 8)))
    assert reshape(list(range(12)), [2, [3], {2}, (1, (3,), 1)]) == \
        [[0, 1, [2, 3, 4], {5, 6}, (7, (8, 9, 10), 11)]]
    raises(ValueError, lambda: reshape([0, 1], [-1]))
    raises(ValueError, lambda: reshape([0, 1], [3]))
def test_uniq():
    """uniq() yields first occurrences, working for unhashable items too,
    and raises RuntimeError if the underlying sequence is mutated while
    being iterated."""
    assert list(uniq(p for p in partitions(4))) == \
        [{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}]
    assert list(uniq(x % 2 for x in range(5))) == [0, 1]
    assert list(uniq('a')) == ['a']
    assert list(uniq('ababc')) == list('abc')
    # unhashable (list) elements are handled by equality comparison
    assert list(uniq([[1], [2, 1], [1]])) == [[1], [2, 1]]
    assert list(uniq(permutations(i for i in [[1], 2, 2]))) == \
        [([1], 2, 2), (2, [1], 2), (2, 2, [1])]
    assert list(uniq([2, 3, 2, 4, [2], [1], [2], [3], [1]])) == \
        [2, 3, 4, [2], [1], [3]]
    # mutating the source during iteration must be detected
    f = [1]
    raises(RuntimeError, lambda: [f.remove(i) for i in uniq(f)])
    f = [[1]]
    raises(RuntimeError, lambda: [f.remove(i) for i in uniq(f)])
def test_kbins():
    """kbins() splits a sequence into k bins; the `ordered` flag selects
    which of bin-order and bin-content-order matter (None, 0, 1, 10, 11).
    The captured output pins the exact enumeration for each mode."""
    assert len(list(kbins('1123', 2, ordered=1))) == 24
    assert len(list(kbins('1123', 2, ordered=11))) == 36
    assert len(list(kbins('1123', 2, ordered=10))) == 10
    assert len(list(kbins('1123', 2, ordered=0))) == 5
    assert len(list(kbins('1123', 2, ordered=None))) == 3
    def test1():
        for orderedval in [None, 0, 1, 10, 11]:
            print('ordered =', orderedval)
            for p in kbins([0, 0, 1], 2, ordered=orderedval):
                print('   ', p)
    assert capture(lambda : test1()) == dedent('''\
    ordered = None
        [[0], [0, 1]]
        [[0, 0], [1]]
    ordered = 0
        [[0, 0], [1]]
        [[0, 1], [0]]
    ordered = 1
        [[0], [0, 1]]
        [[0], [1, 0]]
        [[1], [0, 0]]
    ordered = 10
        [[0, 0], [1]]
        [[1], [0, 0]]
        [[0, 1], [0]]
        [[0], [0, 1]]
    ordered = 11
        [[0], [0, 1]]
        [[0, 0], [1]]
        [[0], [1, 0]]
        [[0, 1], [0]]
        [[1], [0, 0]]
        [[1, 0], [0]]\n''')
    def test2():
        for orderedval in [None, 0, 1, 10, 11]:
            print('ordered =', orderedval)
            for p in kbins(list(range(3)), 2, ordered=orderedval):
                print('   ', p)
    assert capture(lambda : test2()) == dedent('''\
    ordered = None
        [[0], [1, 2]]
        [[0, 1], [2]]
    ordered = 0
        [[0, 1], [2]]
        [[0, 2], [1]]
        [[0], [1, 2]]
    ordered = 1
        [[0], [1, 2]]
        [[0], [2, 1]]
        [[1], [0, 2]]
        [[1], [2, 0]]
        [[2], [0, 1]]
        [[2], [1, 0]]
    ordered = 10
        [[0, 1], [2]]
        [[2], [0, 1]]
        [[0, 2], [1]]
        [[1], [0, 2]]
        [[0], [1, 2]]
        [[1, 2], [0]]
    ordered = 11
        [[0], [1, 2]]
        [[0, 1], [2]]
        [[0], [2, 1]]
        [[0, 2], [1]]
        [[1], [0, 2]]
        [[1, 0], [2]]
        [[1], [2, 0]]
        [[1, 2], [0]]
        [[2], [0, 1]]
        [[2, 0], [1]]
        [[2], [1, 0]]
        [[2, 1], [0]]\n''')
def test_has_dups():
    """has_dups() reports whether a sequence contains repeated elements."""
    # a set and a range can never contain duplicates
    for dup_free in (set(), list(range(3))):
        assert has_dups(dup_free) is False
    assert has_dups([1, 2, 1]) is True
def test__partition():
    """_partition() regroups a sequence by a list of part indices; the
    (number_of_parts, indices) tuple form is also accepted via *output."""
    assert _partition('abcde', [1, 0, 1, 2, 0]) == [
        ['b', 'e'], ['a', 'c'], ['d']]
    assert _partition('abcde', [1, 0, 1, 2, 0], 3) == [
        ['b', 'e'], ['a', 'c'], ['d']]
    output = (3, [1, 0, 1, 2, 0])
    assert _partition('abcde', *output) == [['b', 'e'], ['a', 'c'], ['d']]
def test_ordered_partitions():
    """ordered_partitions() counts agree with nT() whether or not the
    yielded list is sorted (third argument 1 vs 0)."""
    from sympy.functions.combinatorial.numbers import nT
    f = ordered_partitions
    assert list(f(0, 1)) == [[]]
    assert list(f(1, 0)) == [[]]
    for i in range(1, 7):
        for j in [None] + list(range(1, i)):
            assert (
                sum(1 for p in f(i, j, 1)) ==
                sum(1 for p in f(i, j, 0)) ==
                nT(i, j))
def test_rotations():
    """rotations() yields all cyclic rotations; dir=-1 rotates the other way."""
    assert list(rotations('ab')) == [['a', 'b'], ['b', 'a']]
    assert list(rotations(range(3))) == [[0, 1, 2], [1, 2, 0], [2, 0, 1]]
    assert list(rotations(range(3), dir=-1)) == [[0, 1, 2], [2, 0, 1], [1, 2, 0]]
def test_ibin():
    """ibin() gives binary digit lists (optionally zero-padded or as a
    string); a non-integer bit count of 'all'/'' enumerates all values."""
    assert ibin(3) == [1, 1]
    assert ibin(3, 3) == [0, 1, 1]
    assert ibin(3, str=True) == '11'
    assert ibin(3, 3, str=True) == '011'
    assert list(ibin(2, 'all')) == [(0, 0), (0, 1), (1, 0), (1, 1)]
    assert list(ibin(2, '', str=True)) == ['00', '01', '10', '11']
    raises(ValueError, lambda: ibin(-.5))
    # padding narrower than the value is an error
    raises(ValueError, lambda: ibin(2, 1))
def test_iterable():
    """iterable() respects the _iterable attribute and the NotIterable
    marker class in addition to the __iter__ protocol."""
    assert iterable(0) is False
    assert iterable(1) is False
    assert iterable(None) is False
    class Test1(NotIterable):
        pass
    assert iterable(Test1()) is False
    class Test2(NotIterable):
        _iterable = True
    # an explicit _iterable = True overrides NotIterable
    assert iterable(Test2()) is True
    class Test3:
        pass
    assert iterable(Test3()) is False
    class Test4:
        _iterable = True
    assert iterable(Test4()) is True
    class Test5:
        def __iter__(self):
            yield 1
    assert iterable(Test5()) is True
    class Test6(Test5):
        _iterable = False
    # _iterable = False overrides an inherited __iter__
    assert iterable(Test6()) is False
| 36.678121 | 319 | 0.460462 |
8502eba08699283d28ee9873c5c3b9cac295c93e | 6,259 | py | Python | postgresqleu/invoices/forms.py | dlangille/pgeu-system | 3f1910010063bab118e94a55ed757b23f1d36bf5 | [
"MIT"
] | null | null | null | postgresqleu/invoices/forms.py | dlangille/pgeu-system | 3f1910010063bab118e94a55ed757b23f1d36bf5 | [
"MIT"
] | null | null | null | postgresqleu/invoices/forms.py | dlangille/pgeu-system | 3f1910010063bab118e94a55ed757b23f1d36bf5 | [
"MIT"
] | null | null | null | from django import forms
from django.core.validators import MinValueValidator, MaxValueValidator
from django.forms import ValidationError
from django.forms import widgets
from django.contrib.auth.models import User
from django.conf import settings
from decimal import Decimal
from postgresqleu.util.widgets import HtmlDateInput
from .models import Invoice, InvoiceRow, InvoicePaymentMethod
from postgresqleu.accounting.models import Account, Object
from postgresqleu.invoices.models import VatRate
class InvoiceForm(forms.ModelForm):
    """Admin form for creating and viewing invoices.

    Some fields only appear once an invoice is finalized, and a finalized
    invoice becomes read-only except for a small set of contact/payment
    fields (fully read-only once paid).
    """
    # Fields only shown once the invoice has been finalized
    hidden_until_finalized = ('total_amount', 'total_vat', 'remindersent', )
    # Fields still editable on a finalized (but unpaid) invoice
    available_in_finalized = ('recipient_user', 'recipient_email', 'allowedmethods', 'extra_bcc_list', )
    selectize_multiple_fields = ['recipient_user', ]
    accounting_account = forms.ChoiceField(choices=[], required=False)
    accounting_object = forms.ChoiceField(choices=[], required=False)
    def __init__(self, *args, **kwargs):
        super(InvoiceForm, self).__init__(*args, **kwargs)
        # Some fields are hidden until the invoice is final
        if not self.instance.finalized:
            for fld in self.hidden_until_finalized:
                del self.fields[fld]
        if not settings.EU_VAT:
            del self.fields['reverse_vat']
        # Limit the recipient_user queryset: a selectize widget only needs the
        # currently selected user available as a valid choice.
        if 'data' in kwargs and 'recipient_user' in kwargs['data'] and kwargs['data']['recipient_user'] != '':
            # Postback with this field, so allow this specific user
            self.fields['recipient_user'].queryset = User.objects.filter(pk=kwargs['data']['recipient_user'])
        elif self.instance and self.instance.recipient_user:
            self.fields['recipient_user'].queryset = User.objects.filter(pk=self.instance.recipient_user.pk)
        else:
            # No user selected yet -> empty queryset (pk=-1 never matches)
            self.fields['recipient_user'].queryset = User.objects.filter(pk=-1)
        self.fields['recipient_user'].label_from_instance = lambda u: '{0} {1} ({2})'.format(u.first_name, u.last_name, u.username)
        self.fields['canceltime'].widget = widgets.DateTimeInput()
        self.fields['allowedmethods'].widget = forms.CheckboxSelectMultiple()
        self.fields['allowedmethods'].queryset = InvoicePaymentMethod.objects.filter()
        self.fields['allowedmethods'].label_from_instance = lambda x: "{0}{1}".format(x.internaldescription, x.active and " " or " (INACTIVE)")
        self.fields['accounting_account'].choices = [(0, '----'), ] + [(a.num, "%s: %s" % (a.num, a.name)) for a in Account.objects.filter(availableforinvoicing=True)]
        self.fields['accounting_object'].choices = [('', '----'), ] + [(o.name, o.name) for o in Object.objects.filter(active=True)]
        if self.instance.finalized:
            # All fields should be read-only for finalized invoices
            for fn, f in list(self.fields.items()):
                if self.instance.ispaid or fn not in self.available_in_finalized:
                    f.required = False
                    # Text-like widgets get the readonly attribute; everything
                    # else (selects, checkboxes) must be disabled instead.
                    if type(f.widget).__name__ in ('TextInput', 'Textarea', 'DateInput', 'DateTimeInput'):
                        f.widget.attrs['readonly'] = "readonly"
                    else:
                        f.widget.attrs['disabled'] = True
    class Meta:
        model = Invoice
        exclude = ['finalized', 'pdf_invoice', 'pdf_receipt', 'paidat', 'paymentdetails', 'paidusing', 'processor', 'processorid', 'deleted', 'deletion_reason', 'refund', 'recipient_secret']
        widgets = {
            # Can't use HtmlDateInput since that truncates to just date
            # 'invoicedate': HtmlDateInput(),
            'duedate': HtmlDateInput(),
        }
    def clean(self):
        """Auto-fill recipient_user from a unique email match and map the
        accounting_account magic value 0 back to NULL."""
        if not self.cleaned_data['recipient_user'] and self.cleaned_data.get('recipient_email', None):
            # User not specified. If we can find one by email, auto-populate
            # the field.
            matches = User.objects.filter(email=self.cleaned_data['recipient_email'].lower())
            if len(matches) == 1:
                self.cleaned_data['recipient_user'] = matches[0]
        if self.cleaned_data['accounting_account'] == "0":
            # Can't figure out how to store NULL automatically, so overwrite
            # it when we've seen the magic value of zero.
            self.cleaned_data['accounting_account'] = None
        return self.cleaned_data
class InvoiceRowForm(forms.ModelForm):
    """Inline form for a single invoice row.

    The count/amount/vatrate fields are tagged with the ``sumfield`` CSS
    class so client-side JS can live-update the invoice totals.
    """
    class Meta:
        model = InvoiceRow
        exclude = []
    def __init__(self, *args, **kwargs):
        super(InvoiceRowForm, self).__init__(*args, **kwargs)
        self.fields['rowcount'].widget.attrs['class'] = "sumfield"
        self.fields['rowamount'].widget.attrs['class'] = "sumfield"
        self.fields['vatrate'].widget.attrs['class'] = "sumfield"
        self.fields['vatrate'].required = False
    def clean_rowamount(self):
        # Zero is rejected; negative amounts (credit rows) are allowed
        if self.cleaned_data['rowamount'] == 0:
            raise ValidationError("Must specify an amount!")
        return self.cleaned_data['rowamount']
    def clean_rowcount(self):
        # Row count must be strictly positive
        if self.cleaned_data['rowcount'] <= 0:
            raise ValidationError("Must specify a count!")
        return self.cleaned_data['rowcount']
class RefundForm(forms.Form):
    """Admin form used to issue a (partial) refund of an invoice.

    The refundable amount is capped at what remains un-refunded on the
    invoice. The confirm checkbox is only rendered on postback of
    amount+reason, giving the operator a two-step confirmation; its help
    text differs depending on whether the payment provider supports
    automatic refunds.
    """
    amount = forms.DecimalField(required=True, label="Amount ex VAT", validators=[MinValueValidator(1), ])
    vatrate = forms.ModelChoiceField(queryset=VatRate.objects.all(), required=False)
    reason = forms.CharField(max_length=100, required=True, help_text="Note! Included in communication to invoice recipient!")
    confirm = forms.BooleanField()
    def __init__(self, invoice, *args, **kwargs):
        super(RefundForm, self).__init__(*args, **kwargs)
        self.invoice = invoice
        # Never allow refunding more than what remains on the invoice
        self.fields['amount'].validators.append(MaxValueValidator(invoice.total_refunds['remaining']['amount']))
        if self.data and 'amount' in self.data and 'reason' in self.data:
            # Second step: amount and reason have been posted, so show the
            # confirmation checkbox with the appropriate wording.
            if invoice.can_autorefund:
                self.fields['confirm'].help_text = "Check this box to confirm that you want to generate an <b>automatic</b> refund of this invoice."
            else:
                # Capitalization fixed ("check" -> "Check") for consistency
                # with the autorefund wording above.
                self.fields['confirm'].help_text = "Check this box to confirm that you have <b>already</b> manually refunded this invoice."
        else:
            # First step: don't render the confirmation checkbox yet
            del self.fields['confirm']
616fcb97e05fda43b4292875c40af8553011f484 | 7,835 | py | Python | contrib/bitrpc/bitrpc.py | BitTokens/BitToken | b5f3272537ab5b33b398092b8efa60347cf087ce | [
"MIT"
] | 3 | 2018-02-06T10:07:27.000Z | 2021-06-12T07:49:03.000Z | contrib/bitrpc/bitrpc.py | Penny-Admixture/BitToken | b5f3272537ab5b33b398092b8efa60347cf087ce | [
"MIT"
] | null | null | null | contrib/bitrpc/bitrpc.py | Penny-Admixture/BitToken | b5f3272537ab5b33b398092b8efa60347cf087ce | [
"MIT"
] | 4 | 2017-07-20T10:30:49.000Z | 2021-06-12T08:55:43.000Z | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8224")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8224")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | 24.182099 | 79 | 0.668283 |
dde97eb58f453de4c245aa53d3e8c90bb8219848 | 730 | py | Python | kitsune/notifications/tests/test_models.py | navgurukul-shivani18/kitsune | a7cf49ab1bfcf4e770938116968824b2b0fa5bb1 | [
"BSD-3-Clause"
] | 4 | 2021-05-17T11:38:08.000Z | 2021-08-19T06:42:39.000Z | kitsune/notifications/tests/test_models.py | navgurukul-shivani18/kitsune | a7cf49ab1bfcf4e770938116968824b2b0fa5bb1 | [
"BSD-3-Clause"
] | 32 | 2021-04-15T22:35:58.000Z | 2022-01-04T21:30:05.000Z | kitsune/notifications/tests/test_models.py | navgurukul-shivani18/kitsune | a7cf49ab1bfcf4e770938116968824b2b0fa5bb1 | [
"BSD-3-Clause"
] | 3 | 2020-06-14T06:59:46.000Z | 2020-06-15T14:45:56.000Z | from datetime import datetime
from nose.tools import eq_
from kitsune.notifications.tests import NotificationFactory
from kitsune.sumo.tests import TestCase
class TestNotificationModel(TestCase):
def test_is_read_false(self):
n = NotificationFactory(read_at=None)
eq_(n.is_read, False)
def test_is_read_true(self):
n = NotificationFactory(read_at=datetime.now())
eq_(n.is_read, True)
def test_set_is_read_true(self):
n = NotificationFactory(read_at=None)
n.is_read = True
assert n.read_at is not None
def test_set_is_read_false(self):
n = NotificationFactory(read_at=datetime.now())
n.is_read = False
assert n.read_at is None
| 26.071429 | 59 | 0.70274 |
9cecf3cf772671a8858d386c6fc70f5ad05f2f21 | 1,609 | py | Python | hub_module/modules/text/text_generation/plato2_en_large/tasks/next_sentence_prediction.py | T-baby/PaddleHub | 95f3a2a4cff30ac23e7e8d96a140521f8c8caeea | [
"Apache-2.0"
] | 4 | 2021-02-25T03:27:38.000Z | 2021-05-15T03:20:23.000Z | hub_module/modules/text/text_generation/plato2_en_large/tasks/next_sentence_prediction.py | KLGR123/PaddleHub | cc78bd12f4a7e3d76ce669cdbff28906d3c9ae43 | [
"Apache-2.0"
] | null | null | null | hub_module/modules/text/text_generation/plato2_en_large/tasks/next_sentence_prediction.py | KLGR123/PaddleHub | cc78bd12f4a7e3d76ce669cdbff28906d3c9ae43 | [
"Apache-2.0"
] | 2 | 2021-03-01T07:04:01.000Z | 2021-05-14T05:54:18.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Next sentence prediction task."""
from plato2_en_large.readers.nsp_reader import NSPReader
from plato2_en_large.tasks import register_task
from plato2_en_large.tasks.task_base import Task
from plato2_en_large.utils.args import str2bool
@register_task("NextSentencePrediction")
class NextSentencePrediction(Task):
"""
Define dialogue response generation.
"""
@classmethod
def add_cmdline_args(cls, parser):
"""Add cmdline argurments."""
group = NSPReader.add_cmdline_args(parser)
return group
def __init__(self, args):
super(NextSentencePrediction, self).__init__(args)
self.reader = NSPReader(args)
return
def _post_process_infer_output(self, predictions):
predictions = [{
"data_id": data_id.tolist()[0],
"score": score.tolist()[1]
} for data_id, score in zip(predictions["data_id"],
predictions["scores"])]
return predictions
| 34.978261 | 74 | 0.702921 |
550f9f2f31fce7736780e7878d951427781d791e | 12,780 | py | Python | perfkitbenchmarker/linux_packages/hpcc.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | 1 | 2018-08-28T19:33:21.000Z | 2018-08-28T19:33:21.000Z | perfkitbenchmarker/linux_packages/hpcc.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/hpcc.py | msidana/PerfKitBenchmarker | 2784642d3e6b20b3f474c4e27edb1ef163804f66 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing HPCC installation and cleanup functions.
The HPC Challenge is a collection of High Performance Computing benchmarks,
including High Performance Linpack (HPL). More information can be found here:
http://icl.cs.utk.edu/hpcc/
"""
import os
import posixpath
import re
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import amdblis
from perfkitbenchmarker.linux_packages import INSTALL_DIR
from perfkitbenchmarker.linux_packages import openblas
PACKAGE_NAME = 'hpcc'
HPCC_TAR = 'hpcc-1.5.0.tar.gz'
HPCC_URL = 'https://icl.cs.utk.edu/projectsfiles/hpcc/download/' + HPCC_TAR
PREPROVISIONED_DATA = {
HPCC_TAR: '0a6fef7ab9f3347e549fed65ebb98234feea9ee18aea0c8f59baefbe3cf7ffb8'
}
PACKAGE_DATA_URL = {
HPCC_TAR: HPCC_URL
}
HPCC_DIR = '%s/hpcc-1.5.0' % INSTALL_DIR
HPCC_VERSION = '1.5.0'
MAKE_FLAVOR_CBLAS = 'Linux_PII_CBLAS'
MAKE_FLAVOR_MKL = 'intel64'
MAKE_FLAVOR_OPEN_BLAS = 'OPEN_BLAS'
MAKE_FLAVOR_AMD_BLIS = 'AMD_BLIS'
HPCC_MAKEFILE_CBLAS = 'Make.%s' % MAKE_FLAVOR_CBLAS
HPCC_MAKEFILE_MKL = 'Make.%s' % MAKE_FLAVOR_MKL
HPCC_MAKEFILE_OPEN_BLAS = 'Make.%s' % MAKE_FLAVOR_OPEN_BLAS
HPCC_MAKEFILE_AMD_BLIS = 'Make.%s' % MAKE_FLAVOR_AMD_BLIS
HPCC_MAKEFILE_PATH_MKL = '%s/hpl/%s' % (HPCC_DIR, HPCC_MAKEFILE_MKL)
HPCC_MAKEFILE_PATH_OPEN_BLAS = '%s/hpl/%s' % (HPCC_DIR, HPCC_MAKEFILE_OPEN_BLAS)
HPCC_MAKEFILE_PATH_AMD_BLIS = '%s/hpl/%s' % (HPCC_DIR, HPCC_MAKEFILE_AMD_BLIS)
HPCC_MATH_LIBRARY_OPEN_BLAS = 'openblas'
HPCC_MATH_LIBRARY_AMD_BLIS = 'amdblis'
HPCC_MATH_LIBRARY_MKL = 'mkl'
# A dict mapping HPCC benchmarks to dicts mapping summary result names to units.
# The name of the summary result is added as a metric with that name and the
# specified units.
HPCC_METRIC_MAP = {
'MPI RandomAccess': {
'MPIRandomAccess_time': 'seconds',
'MPIRandomAccess_CheckTime': 'seconds',
'MPIRandomAccess_ExeUpdates': 'updates',
'MPIRandomAccess_GUPs': 'GUP/s',
},
'StarRandomAccess': {
'StarRandomAccess_GUPs': 'GUP/s',
},
'SingleRandomAccess': {
'SingleRandomAccess_GUPs': 'GUP/s',
},
'MPI RandomAccess LCG': {
'MPIRandomAccess_LCG_time': 'seconds',
'MPIRandomAccess_LCG_CheckTime': 'seconds',
'MPIRandomAccess_LCG_ExeUpdates': 'updates',
'MPIRandomAccess_LCG_GUPs': 'GUP/s',
},
'StarRandomAccess LCG': {
'StarRandomAccess_LCG_GUPs': 'GUP/s',
},
'SingleRandomAccess LCG': {
'SingleRandomAccess_LCG_GUPs': 'GUP/s',
},
'PTRANS': {
'PTRANS_GBs': 'GB/s',
'PTRANS_time': 'seconds',
},
'StarDGEMM': {
'StarDGEMM_Gflops': 'Gflop/s',
},
'SingleDGEMM': {
'SingleDGEMM_Gflops': 'Gflop/s',
},
'StarSTREAM': {
'StarSTREAM_Copy': 'GB/s',
'StarSTREAM_Scale': 'GB/s',
'StarSTREAM_Add': 'GB/s',
'StarSTREAM_Triad': 'GB/s',
},
'SingleSTREAM': {
'SingleSTREAM_Copy': 'GB/s',
'SingleSTREAM_Scale': 'GB/s',
'SingleSTREAM_Add': 'GB/s',
'SingleSTREAM_Triad': 'GB/s',
},
'MPIFFT': {
'MPIFFT_Gflops': 'Gflop/s',
'MPIFFT_time0': 'seconds',
'MPIFFT_time1': 'seconds',
'MPIFFT_time2': 'seconds',
'MPIFFT_time3': 'seconds',
'MPIFFT_time4': 'seconds',
'MPIFFT_time5': 'seconds',
'MPIFFT_time6': 'seconds',
},
'StarFFT': {
'StarFFT_Gflops': 'Gflop/s',
},
'SingleFFT': {
'SingleFFT_Gflops': 'Gflop/s',
},
'Latency/Bandwidth': {
'MaxPingPongLatency_usec': 'usec',
'RandomlyOrderedRingLatency_usec': 'usec',
'MinPingPongBandwidth_GBytes': 'GB',
'NaturallyOrderedRingBandwidth_GBytes': 'GB',
'RandomlyOrderedRingBandwidth_GBytes': 'GB',
'MinPingPongLatency_usec': 'usec',
'AvgPingPongLatency_usec': 'usec',
'MaxPingPongBandwidth_GBytes': 'GB',
'AvgPingPongBandwidth_GBytes': 'GB',
'NaturallyOrderedRingLatency_usec': 'usec',
},
'HPL': {
'HPL_Tflops': 'Tflop/s',
'HPL_time': 'seconds',
},
}
# A dict mapping HPCC benchmarks to sets of summary result names that should be
# added to the metadata for a benchmark.
HPCC_METADATA_MAP = {
'MPI RandomAccess': {
'MPIRandomAccess_N',
'MPIRandomAccess_Errors',
'MPIRandomAccess_ErrorsFraction',
'MPIRandomAccess_TimeBound',
'MPIRandomAccess_Algorithm',
},
'StarRandomAccess': {'RandomAccess_N'},
'SingleRandomAccess': {'RandomAccess_N'},
'MPI RandomAccess LCG': {
'MPIRandomAccess_LCG_N',
'MPIRandomAccess_LCG_Errors',
'MPIRandomAccess_LCG_ErrorsFraction',
'MPIRandomAccess_LCG_TimeBound',
'MPIRandomAccess_LCG_Algorithm',
},
'StarRandomAccess LCG': {'RandomAccess_LCG_N'},
'SingleRandomAccess LCG': {'RandomAccess_LCG_N'},
'PTRANS': {
'PTRANS_residual',
'PTRANS_n',
'PTRANS_nb',
'PTRANS_nprow',
'PTRANS_npcol',
},
'StarDGEMM': {'DGEMM_N'},
'SingleDGEMM': {'DGEMM_N'},
'StarSTREAM': {
'STREAM_Threads',
'STREAM_VectorSize',
},
'SingleSTREAM': {
'STREAM_Threads',
'STREAM_VectorSize',
},
'MPIFFT': {
'MPIFFT_N',
'MPIFFT_maxErr',
'MPIFFT_Procs',
},
'StarFFT': {'FFT_N'},
'SingleFFT': {'FFT_N'},
'Latency/Bandwidth': {},
'HPL': {
'HPL_N',
'HPL_NB',
'HPL_nprow',
'HPL_npcol',
'HPL_depth',
'HPL_nbdiv',
'HPL_nbmin',
'HPL_ctop',
},
}
# The names of the benchmarks.
HPCC_BENCHMARKS = sorted(HPCC_METRIC_MAP)
flags.DEFINE_enum(
'hpcc_math_library', HPCC_MATH_LIBRARY_OPEN_BLAS, [
HPCC_MATH_LIBRARY_OPEN_BLAS, HPCC_MATH_LIBRARY_MKL,
HPCC_MATH_LIBRARY_AMD_BLIS
], 'The math library to use when compiling hpcc: openblas, mkl, or '
'amdblis. The default is openblas.')
flags.DEFINE_list(
'hpcc_benchmarks', [], 'A list of benchmarks in HPCC to run. If none are '
'specified (the default), then all of the benchmarks are run. In 1.5.0, '
'the benchmarks may include the following: %s' % ', '.join(HPCC_BENCHMARKS))
flags.register_validator(
'hpcc_benchmarks',
lambda hpcc_benchmarks: set(hpcc_benchmarks).issubset(set(HPCC_BENCHMARKS)))
FLAGS = flags.FLAGS
def _LimitBenchmarksToRun(vm, selected_hpcc_benchmarks):
"""Limits the benchmarks to run.
This function copies hpcc.c to the local machine, comments out code that runs
benchmarks not listed in selected_hpcc_benchmarks, and then copies hpcc.c back
to the remote machine.
Args:
vm: The machine where hpcc.c was installed.
selected_hpcc_benchmarks: A set of benchmarks to run.
"""
remote_hpcc_path = posixpath.join(HPCC_DIR, 'src', 'hpcc.c')
local_hpcc_path = os.path.join(vm_util.GetTempDir(), 'hpcc.c')
vm.PullFile(local_hpcc_path, remote_hpcc_path)
with open(local_hpcc_path) as f:
lines = f.readlines()
# Process the main file, commenting out benchmarks that should not be run.
commenting = False
with open(local_hpcc_path, 'w') as f:
for line in lines:
# Decide whether to continue commenting out code for each benchmark. This
# is determined by searching for the comment that starts each benchmark.
match = re.search(r'\/\*\s+(.*?)\s+\*\/', line)
if match and match.group(1) in HPCC_BENCHMARKS:
commenting = match.group(1) not in selected_hpcc_benchmarks
# Start writing once the per-benchmark code is complete. This happens at
# the hpcc_end: label.
if re.search('hpcc_end:', line):
commenting = False
f.write('// %s' % line if commenting else line)
vm.PushFile(local_hpcc_path, remote_hpcc_path)
def _Install(vm):
"""Installs the HPCC package on the VM."""
vm.Install('wget')
vm.Install('openmpi')
vm.InstallPreprovisionedPackageData(
PACKAGE_NAME, PREPROVISIONED_DATA.keys(), INSTALL_DIR)
vm.RemoteCommand('cd %s && tar xvfz %s' % (INSTALL_DIR, HPCC_TAR))
if FLAGS.hpcc_benchmarks:
_LimitBenchmarksToRun(vm, set(FLAGS.hpcc_benchmarks))
if FLAGS.hpcc_math_library == HPCC_MATH_LIBRARY_OPEN_BLAS:
_CompileHpccOpenblas(vm)
elif FLAGS.hpcc_math_library == HPCC_MATH_LIBRARY_MKL:
_CompileHpccMKL(vm)
elif FLAGS.hpcc_math_library == HPCC_MATH_LIBRARY_AMD_BLIS:
_CompileHpccAmdBlis(vm)
else:
raise errors.Setup.InvalidFlagConfigurationError(
'Unexpected hpcc_math_library option encountered.')
def _CompileHpccOpenblas(vm):
"""Compile HPCC with OpenBlas."""
vm.Install('openblas')
vm.RemoteCommand(
'cp %s/hpl/setup/%s %s' %
(HPCC_DIR, HPCC_MAKEFILE_CBLAS, HPCC_MAKEFILE_PATH_OPEN_BLAS))
sed_cmd = ('sed -i -e "/^MP/d" -e "s/gcc/mpicc/" -e "s/g77/mpicc/" '
'-e "s/\\$(HOME)\\/netlib\\/ARCHIVES\\/Linux_PII/%s/" '
'-e "s/libcblas.*/libopenblas.a/" '
'-e "s/-funroll-loops/-funroll-loops -std=c99/" '
'-e "s/\\-lm/\\-lgfortran \\-lm/" %s' %
(re.escape(openblas.OPENBLAS_DIR), HPCC_MAKEFILE_PATH_OPEN_BLAS))
vm.RemoteCommand(sed_cmd)
vm.RemoteCommand('cd %s; make arch=OPEN_BLAS' % HPCC_DIR)
def _CompileHpccAmdBlis(vm):
"""Compile HPCC with AMD BLIS."""
vm.Install('amdblis')
vm.RemoteCommand('cp %s/hpl/setup/%s %s' %
(HPCC_DIR, HPCC_MAKEFILE_CBLAS, HPCC_MAKEFILE_PATH_AMD_BLIS))
sed_cmd = ('sed -i -e "/^MP/d" -e "s/gcc/mpicc/" -e "s/g77/mpicc/" '
'-e "s/\\$(HOME)\\/netlib\\/ARCHIVES\\/Linux_PII/%s/" '
'-e "s/libcblas.*/lib\\/zen\\/libblis.a/" '
'-e "s/-funroll-loops/-funroll-loops -std=c99/" '
'-e "s/\\-lm/\\-lgfortran \\-lm/" %s' %
(re.escape(amdblis.AMDBLIS_DIR), HPCC_MAKEFILE_PATH_AMD_BLIS))
vm.RemoteCommand(sed_cmd)
vm.RemoteCommand('cd %s; make arch=AMD_BLIS' % HPCC_DIR)
def _CompileHpccMKL(vm):
"""Compiling HPCC with Intel MKL.
The following link provides instructions of using intel MKL in hpcc_benchmark.
https://software.intel.com/en-us/articles/performance-tools-for-software-developers-use-of-intel-mkl-in-hpcc-benchmark
TODO(yanfeiren):The free version MKL pacakage does not have
'interfaces/fftw2x_cdft' which is the MPI FFTW 2.x interfaces to the
Intel MKL Cluster FFT. Such that we have to at first install OpenBlas and
build hpcc binary using OpenBlas. Need to investigate how to build hpcc
binary without 'interfaces/fftw2x_cdft'.
Args:
vm: VirtualMachine object. The VM to install hpcc.
"""
_CompileHpccOpenblas(vm)
vm.RemoteCommand('cd %s; rm hpcc' % HPCC_DIR)
vm.Install('mkl')
vm.RemoteCommand('cp %s/hpl/setup/%s %s' % (HPCC_DIR, HPCC_MAKEFILE_CBLAS,
HPCC_MAKEFILE_PATH_MKL))
mkl_lalib = ('-Wl,--start-group $(LAdir)/libfftw2xc_double_gnu.a '
'$(LAdir)/libfftw2xf_double_gnu.a '
'$(LAdir)/libmkl_intel_lp64.a '
'$(LAdir)/libmkl_intel_thread.a '
'$(LAdir)/libmkl_core.a '
'$(LAdir)/libmkl_blas95_lp64.a '
'-Wl,--end-group')
mkl_ccflags = (' -Wl,--no-as-needed -ldl -lmpi -liomp5 -lpthread -lm '
'-DUSING_FFTW -DMKL_INT=long -DLONG_IS_64BITS')
sed_cmd_mkl = (
'sed -i -e "/^MP/d" -e "s/gcc/mpicc/" -e "s/g77/mpicc/" '
'-e "s/\\$(HOME)\\/netlib\\/ARCHIVES\\/Linux_PII/'
'\\/opt\\/intel\\/mkl\\/lib\\/intel64/" '
'-e "s/\\$(LAdir)\\/libcblas.*/%s/" '
'-e "s/\\-lm/\\-lgfortran \\-lm/" '
'-e "/CCFLAGS / s/$/%s/" %s' %
(re.escape(mkl_lalib), re.escape(mkl_ccflags), HPCC_MAKEFILE_PATH_MKL))
vm.RemoteCommand(sed_cmd_mkl)
vm.RemoteCommand('source /opt/intel/compilers_and_libraries/linux/bin/'
'compilervars.sh -arch intel64 -platform linux && '
'cd %s; make arch=intel64' % HPCC_DIR)
def YumInstall(vm):
"""Installs the HPCC package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the HPCC package on the VM."""
_Install(vm)
| 34.918033 | 120 | 0.657903 |
1de23bd35acabf9dfbe451c9730462e793d53928 | 13,914 | py | Python | aesara/tensor/random/op.py | fshart/aesara | 1ddf96a7b8e8503fb8773b09c3ca77483fd884c4 | [
"BSD-3-Clause"
] | 111 | 2021-01-29T06:12:58.000Z | 2021-06-04T20:27:51.000Z | aesara/tensor/random/op.py | fshart/aesara | 1ddf96a7b8e8503fb8773b09c3ca77483fd884c4 | [
"BSD-3-Clause"
] | 253 | 2020-02-07T15:19:38.000Z | 2021-01-27T20:26:55.000Z | aesara/tensor/random/op.py | fshart/aesara | 1ddf96a7b8e8503fb8773b09c3ca77483fd884c4 | [
"BSD-3-Clause"
] | 38 | 2020-07-20T12:09:06.000Z | 2021-01-27T13:38:50.000Z | from collections.abc import Sequence
from copy import copy
from typing import List, Optional, Tuple
import numpy as np
import aesara
from aesara.configdefaults import config
from aesara.graph.basic import Apply, Variable
from aesara.graph.op import Op
from aesara.misc.safe_asarray import _asarray
from aesara.scalar import ScalarVariable
from aesara.tensor.basic import (
as_tensor_variable,
constant,
get_scalar_constant_value,
get_vector_length,
infer_broadcastable,
)
from aesara.tensor.random.type import RandomType
from aesara.tensor.random.utils import normalize_size_param, params_broadcast_shapes
from aesara.tensor.shape import shape_tuple
from aesara.tensor.type import TensorType, all_dtypes
from aesara.tensor.var import TensorVariable
def default_shape_from_params(
ndim_supp, dist_params, rep_param_idx=0, param_shapes=None
):
"""Infer the dimensions for the output of a `RandomVariable`.
This is a function that derives a random variable's support
shape/dimensions from one of its parameters.
XXX: It's not always possible to determine a random variable's support
shape from its parameters, so this function has fundamentally limited
applicability and must be replaced by custom logic in such cases.
XXX: This function is not expected to handle `ndim_supp = 0` (i.e.
scalars), since that is already definitively handled in the `Op` that
calls this.
TODO: Consider using `aesara.compile.ops.shape_i` alongside `ShapeFeature`.
Parameters
----------
ndim_supp: int
Total number of dimensions for a single draw of the random variable
(e.g. a multivariate normal draw is 1D, so `ndim_supp = 1`).
dist_params: list of `aesara.graph.basic.Variable`
The distribution parameters.
param_shapes: list of tuple of `ScalarVariable` (optional)
Symbolic shapes for each distribution parameter. These will
be used in place of distribution parameter-generated shapes.
rep_param_idx: int (optional)
The index of the distribution parameter to use as a reference
In other words, a parameter in `dist_param` with a shape corresponding
to the support's shape.
The default is the first parameter (i.e. the value 0).
Results
-------
out: a tuple representing the support shape for a distribution with the
given `dist_params`.
"""
if ndim_supp <= 0:
raise ValueError("ndim_supp must be greater than 0")
if param_shapes is not None:
ref_param = param_shapes[rep_param_idx]
return (ref_param[-ndim_supp],)
else:
ref_param = dist_params[rep_param_idx]
if ref_param.ndim < ndim_supp:
raise ValueError(
(
"Reference parameter does not match the "
f"expected dimensions; {ref_param} has less than {ndim_supp} dim(s)."
)
)
return ref_param.shape[-ndim_supp:]
class RandomVariable(Op):
"""An `Op` that produces a sample from a random variable.
This is essentially `RandomFunction`, except that it removes the
`outtype` dependency and handles shape dimension information more
directly.
"""
__props__ = ("name", "ndim_supp", "ndims_params", "dtype", "inplace")
default_output = 1
def __init__(
self,
name=None,
ndim_supp=None,
ndims_params=None,
dtype=None,
inplace=None,
):
"""Create a random variable `Op`.
Parameters
----------
name: str
The `Op`'s display name.
ndim_supp: int
Total number of dimensions for a single draw of the random variable
(e.g. a multivariate normal draw is 1D, so ``ndim_supp = 1``).
ndims_params: list of int
Number of dimensions for each distribution parameter when the
parameters only specify a single drawn of the random variable
(e.g. a multivariate normal's mean is 1D and covariance is 2D, so
``ndims_params = [1, 2]``).
dtype: str (optional)
The dtype of the sampled output. If the value ``"floatX"`` is
given, then ``dtype`` is set to ``aesara.config.floatX``. If
``None`` (the default), the `dtype` keyword must be set when
`RandomVariable.make_node` is called.
inplace: boolean (optional)
Determine whether or not the underlying rng state is updated
in-place or not (i.e. copied).
"""
super().__init__()
self.name = name or getattr(self, "name")
self.ndim_supp = (
ndim_supp if ndim_supp is not None else getattr(self, "ndim_supp")
)
self.ndims_params = (
ndims_params if ndims_params is not None else getattr(self, "ndims_params")
)
self.dtype = dtype or getattr(self, "dtype", None)
self.inplace = (
inplace if inplace is not None else getattr(self, "inplace", False)
)
if not isinstance(self.ndims_params, Sequence):
raise TypeError("Parameter ndims_params must be sequence type.")
self.ndims_params = tuple(self.ndims_params)
if self.inplace:
self.destroy_map = {0: [0]}
def _shape_from_params(self, dist_params, **kwargs):
"""Determine the shape of a `RandomVariable`'s output given its parameters.
This does *not* consider the extra dimensions added by the `size` parameter.
Defaults to `param_supp_shape_fn`.
"""
return default_shape_from_params(self.ndim_supp, dist_params, **kwargs)
def rng_fn(self, rng, *args, **kwargs):
"""Sample a numeric random variate."""
return getattr(rng, self.name)(*args, **kwargs)
def __str__(self):
props_str = ", ".join((f"{getattr(self, prop)}" for prop in self.__props__[1:]))
return f"{self.name}_rv{{{props_str}}}"
def _infer_shape(
self,
size: Tuple[TensorVariable],
dist_params: List[TensorVariable],
param_shapes: Optional[List[Tuple[TensorVariable]]] = None,
) -> Tuple[ScalarVariable]:
"""Compute the output shape given the size and distribution parameters.
Parameters
----------
size
The size parameter specified for this `RandomVariable`.
dist_params
The symbolic parameter for this `RandomVariable`'s distribution.
param_shapes
The shapes of the `dist_params` as given by `ShapeFeature`'s
via `Op.infer_shape`'s `input_shapes` argument. This parameter's
values are essentially more accurate versions of ``[d.shape for d
in dist_params]``.
"""
size_len = get_vector_length(size)
if self.ndim_supp == 0 and size_len > 0:
# In this case, we have a univariate distribution with a non-empty
# `size` parameter, which means that the `size` parameter
# completely determines the shape of the random variable. More
# importantly, the `size` parameter may be the only correct source
# of information for the output shape, in that we would be misled
# by the `dist_params` if we tried to infer the relevant parts of
# the output shape from those.
return size
# Broadcast the parameters
param_shapes = params_broadcast_shapes(
param_shapes or [shape_tuple(p) for p in dist_params], self.ndims_params
)
def slice_ind_dims(p, ps, n):
shape = tuple(ps)
if n == 0:
return (p, shape)
ind_slice = (slice(None),) * (p.ndim - n) + (0,) * n
ind_shape = [
s if b is False else constant(1, "int64")
for s, b in zip(shape[:-n], p.broadcastable[:-n])
]
return (
p[ind_slice],
ind_shape,
)
# These are versions of our actual parameters with the anticipated
# dimensions (i.e. support dimensions) removed so that only the
# independent variate dimensions are left.
params_ind_slice = tuple(
slice_ind_dims(p, ps, n)
for p, ps, n in zip(dist_params, param_shapes, self.ndims_params)
)
if len(params_ind_slice) == 1:
ind_param, ind_shape = params_ind_slice[0]
ndim_ind = len(ind_shape)
shape_ind = ind_shape
elif len(params_ind_slice) > 1:
# If there are multiple parameters, the dimensions of their
# independent variates should broadcast together.
p_slices, p_shapes = zip(*params_ind_slice)
shape_ind = aesara.tensor.extra_ops.broadcast_shape_iter(
p_shapes, arrays_are_shapes=True
)
ndim_ind = len(shape_ind)
else:
ndim_ind = 0
if self.ndim_supp == 0:
shape_supp = tuple()
shape_reps = tuple(size)
if ndim_ind > 0:
shape_reps = shape_reps[:-ndim_ind]
ndim_reps = len(shape_reps)
else:
shape_supp = self._shape_from_params(
dist_params,
param_shapes=param_shapes,
)
ndim_reps = size_len
shape_reps = size
ndim_shape = self.ndim_supp + ndim_ind + ndim_reps
if ndim_shape == 0:
shape = constant([], dtype="int64")
else:
shape = tuple(shape_reps) + tuple(shape_ind) + tuple(shape_supp)
# if shape is None:
# raise ShapeError()
return shape
def infer_shape(self, fgraph, node, input_shapes):
_, size, _, *dist_params = node.inputs
_, size_shape, _, *param_shapes = input_shapes
try:
size_len = get_vector_length(size)
except ValueError:
size_len = get_scalar_constant_value(size_shape[0])
size = tuple(size[n] for n in range(size_len))
shape = self._infer_shape(size, dist_params, param_shapes=param_shapes)
return [None, [s for s in shape]]
def __call__(self, *args, size=None, name=None, rng=None, dtype=None, **kwargs):
res = super().__call__(rng, size, dtype, *args, **kwargs)
if name is not None:
res.name = name
return res
def make_node(self, rng, size, dtype, *dist_params):
"""Create a random variable node.
Parameters
----------
rng: RandomGeneratorType or RandomStateType
Existing Aesara `Generator` or `RandomState` object to be used. Creates a
new one, if `None`.
size: int or Sequence
Numpy-like size of the output (i.e. replications).
dtype: str
The dtype of the sampled output. If the value ``"floatX"`` is
given, then ``dtype`` is set to ``aesara.config.floatX``. This
value is only used when `self.dtype` isn't set.
dist_params: list
Distribution parameters.
Results
-------
out: `Apply`
A node with inputs `(rng, size, dtype) + dist_args` and outputs
`(rng_var, out_var)`.
"""
size = normalize_size_param(size)
dist_params = tuple(
as_tensor_variable(p) if not isinstance(p, Variable) else p
for p in dist_params
)
if rng is None:
rng = aesara.shared(np.random.default_rng())
elif not isinstance(rng.type, RandomType):
raise TypeError(
"The type of rng should be an instance of either RandomGeneratorType or RandomStateType"
)
shape = self._infer_shape(size, dist_params)
_, bcast = infer_broadcastable(shape)
dtype = self.dtype or dtype
if dtype == "floatX":
dtype = config.floatX
elif dtype is None or (isinstance(dtype, str) and dtype not in all_dtypes):
raise TypeError("dtype is unspecified")
if isinstance(dtype, str):
dtype_idx = constant(all_dtypes.index(dtype), dtype="int64")
else:
dtype_idx = constant(dtype, dtype="int64")
dtype = all_dtypes[dtype_idx.data]
outtype = TensorType(dtype=dtype, broadcastable=bcast)
out_var = outtype()
inputs = (rng, size, dtype_idx) + dist_params
outputs = (rng.type(), out_var)
return Apply(self, inputs, outputs)
def perform(self, node, inputs, outputs):
rng_var_out, smpl_out = outputs
rng, size, dtype, *args = inputs
out_var = node.outputs[1]
# If `size == []`, that means no size is enforced, and NumPy is trusted
# to draw the appropriate number of samples, NumPy uses `size=None` to
# represent that. Otherwise, NumPy expects a tuple.
if np.size(size) == 0:
size = None
else:
size = tuple(size)
# Draw from `rng` if `self.inplace` is `True`, and from a copy of `rng`
# otherwise.
if not self.inplace:
rng = copy(rng)
rng_var_out[0] = rng
smpl_val = self.rng_fn(rng, *(args + [size]))
if (
not isinstance(smpl_val, np.ndarray)
or str(smpl_val.dtype) != out_var.type.dtype
):
smpl_val = _asarray(smpl_val, dtype=out_var.type.dtype)
smpl_out[0] = smpl_val
def grad(self, inputs, outputs):
return [
aesara.gradient.grad_undefined(
self, k, inp, "No gradient defined for random variables"
)
for k, inp in enumerate(inputs)
]
def R_op(self, inputs, eval_points):
return [None for i in eval_points]
| 34.61194 | 104 | 0.607877 |
cc07b918fefe214c5eeb71b38437cb681ab077ea | 632 | py | Python | PythonExercicio/numPrimos.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | PythonExercicio/numPrimos.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | PythonExercicio/numPrimos.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Tamanho dos números
num = 200
# Cria a tabela dos num primeiros números inteiros
is_prime = np.ones((num,), dtype=bool)
# Elinina os 0 e 1 que não são primos
is_prime[:2] = 0
N_max = int(np.sqrt(len(is_prime) - 1))
is_busca = np.ones((N_max+1))
is_busca[:2] = 0
for i in range(2, N_max+1):
is_busca[2*i::i] = False
busca = np.flatnonzero(is_busca)
#busca = np.arange(2,N_max+1)
print(busca)
for j in busca:
is_prime[2*j::j] = False
#print("Após retirar os múltiplos de {} a lista primos ficou:".format(j))
#print(is_prime)
print(np.flatnonzero(is_prime)) | 21.066667 | 77 | 0.686709 |
eccea58eca9705da2d857800dd36b1240e6cb71e | 7,733 | py | Python | hack/monitor_prow.py | aliok/release | 4d70335b71653ebcdcb93bd16e911c5803dd5019 | [
"Apache-2.0"
] | 1 | 2020-09-13T20:51:56.000Z | 2020-09-13T20:51:56.000Z | hack/monitor_prow.py | aliok/release | 4d70335b71653ebcdcb93bd16e911c5803dd5019 | [
"Apache-2.0"
] | null | null | null | hack/monitor_prow.py | aliok/release | 4d70335b71653ebcdcb93bd16e911c5803dd5019 | [
"Apache-2.0"
] | null | null | null | #!/bin/env python3
import glob
import multiprocessing.dummy as multiprocessing
import subprocess
import sys
import tempfile
import time
import json
import os
exec_cmd = lambda *cmd: subprocess.check_output(cmd).decode('utf-8')
RED = exec_cmd('tput', 'setaf', '1')
GREEN = exec_cmd('tput', 'setaf', '2')
YELLOW = exec_cmd('tput', 'setaf', '3')
BOLD = exec_cmd('tput', 'bold')
RESET = exec_cmd('tput', 'sgr0')
CLEAR = exec_cmd('tput', 'clear')
BLACKLIST = [
"Failed to GET .",
"The following repos define a policy or require context",
"requested job is unknown to prow: rehearse",
"requested job is unknown to prow: promote",
"Not enough reviewers found in OWNERS files for files touched by this PR",
"failed to get path: failed to resolve sym link: failed to read",
"nil pointer evaluating *v1.Refs.Repo",
"unrecognized directory name (expected int64)",
"failed to get reader for GCS object: storage: object doesn't exist",
"failed to get reader for GCS object: storage: object doesn't exist",
"googleapi: Error 401: Anonymous caller does not have storage.objects.list access to origin-ci-private., required",
"has required jobs but has protect: false",
"Couldn't find/suggest approvers for each files.",
"remote error: upload-pack: not our ref",
"fatal: remote error: upload-pack: not our ref",
"Error getting ProwJob name for source",
"the cache is not started, can not read objects",
"owner mismatch request by",
"Get : unsupported protocol scheme",
"No available resource",
"context deadline exceeded",
"owner mismatch request by ci-op"
]
def run_oc(args):
    """Run `oc` with `args` in the ci namespace and return its stdout as text.

    On a nonzero exit the captured stderr is printed before the
    CalledProcessError is re-raised, so failures are visible on screen.
    """
    full_cmd = ['oc', '--loglevel', '10', '--namespace', 'ci'] + args
    try:
        result = subprocess.run(full_cmd, capture_output=True, check=True)
    except subprocess.CalledProcessError as exc:
        print(exc.stderr.decode('utf-8'))
        raise
    return result.stdout.decode('utf-8')
def debug(msg):
    """Print `msg` only when the DEBUG environment variable is exactly "true"."""
    if os.environ.get("DEBUG") == "true":
        print(msg)
def main():
    """Monitor every prow deployment: one worker per deployment plus a display worker."""
    names = run_oc(['get', 'deployment', '--selector', 'app=prow',
                    '--output', 'jsonpath={.items[*].metadata.name}']).split()
    with tempfile.TemporaryDirectory() as log_dir:
        tasks = [(display, log_dir)]
        tasks.extend((highlight, log_dir, name) for name in names)
        with multiprocessing.Pool(len(tasks)) as pool:
            # Iterating the results surfaces any exception raised in a worker.
            for _ in pool.imap_unordered(lambda task: task[0](*task[1:]), tasks):
                pass
def display(log_dir):
    """Every 5s, clear the terminal and dump each per-deployment log file."""
    pattern = log_dir + '/*.log'
    while True:
        sys.stdout.write(CLEAR)
        for path in sorted(glob.glob(pattern)):
            with open(path) as handle:
                # write() returns the character count; nonzero means the file
                # had content, so separate it from the next one.
                if sys.stdout.write(handle.read()):
                    sys.stdout.write('\n')
        time.sleep(5)
def highlight(log_dir, dc):
    """Worker loop: every 60s, write a status file for deployment `dc`.

    The file `<log_dir>/<dc>.log` holds a colored header plus any abnormal pod
    conditions and the last few warning/error log lines; `display` renders it.
    """
    # Substrings that identify structured-log severity in the JSON log lines.
    warn = '"level":"warning"'
    error = '"level":"error"'
    fatal = '"level":"fatal"'
    log = '{}/{}.log'.format(log_dir, dc)
    while True:
        debug("deployment/{}: gathering info".format(dc))
        header = renderHeader(dc)
        lines = []        # pod-condition lines from renderFlavor
        log_lines = []    # highlighted warning/error log lines
        for pod in run_oc(['get', 'pods', '--selector', 'component={}'.format(dc), '--output', 'jsonpath={.items[*].metadata.name}']).split():
            debug("deployment/{}: pod/{}: gathering info".format(dc, pod))
            lines.extend(renderFlavor(pod, dc))
            cmd = ['logs', '--since', '20m', 'pod/{}'.format(pod)]
            # These pods have several containers; pick the interesting one.
            if dc == 'deck-internal':
                cmd += ['--container', 'deck']
            if dc == 'boskos':
                cmd += ['--container', 'boskos']
            debug("deployment/{}: pod/{}: getting logs".format(dc, pod))
            try:
                for l in run_oc(cmd).splitlines():
                    # Skip known-noisy messages.
                    if any(word in l for word in BLACKLIST):
                        continue
                    if warn in l:
                        log_lines.append(YELLOW + l + RESET)
                    elif error in l or fatal in l:
                        log_lines.append(RED + l + RESET)
            except subprocess.CalledProcessError:
                # Pod may have vanished between listing and log fetch; best-effort.
                debug("deployment/{}: pod/{}: getting logs failed".format(dc, pod))
        if not log_lines and not lines:
            header = "{} {}{}{}".format(header, GREEN, "OK", RESET)
        # Only the 5 most recent highlighted log lines are kept.
        with open(log, 'w') as f:
            f.write('\n'.join([header, *lines, *log_lines[-5:]]))
        time.sleep(60)
def renderHeader(dc):
    """Return a one-line, colorized replica summary for deployment `dc`."""
    debug("deployment/{}: rendering header".format(dc))
    payload = json.loads(run_oc(['get', 'deployment/{}'.format(dc), '--output', 'json']))
    spec = payload.get("spec", {})
    status = payload.get("status", {})

    desired = spec.get("replicas", 0)
    current = status.get("replicas", 0)
    updated = status.get("updatedReplicas", 0)
    available = status.get("availableReplicas", 0)

    # A few deployments name their primary container differently than the
    # deployment itself; map those, defaulting to the deployment name.
    container_name = {
        "boskos-metrics": "metrics",
        "jenkins-dev-operator": "jenkins-operator",
        "deck-internal": "deck",
    }.get(dc, dc)

    # Version is the image tag of the matching container.
    version = "<unknown-version>"
    for container in spec.get("template", {}).get("spec", {}).get("containers", []):
        if container.get("name") == container_name:
            version = container.get("image", "").split(":")[-1]

    headerColor = RED if desired != current else ''

    message = '{} at {} [{}/{}]'.format(dc, version, current, desired)
    if updated != desired:
        message += ' ({} stale replicas)'.format(desired - updated)
    if available != desired:
        message += ' ({} unavailable replicas)'.format(desired - available)

    header = '{}{}{}:{}'.format(BOLD, headerColor, message, RESET)
    debug("deployment/{}: got header {}".format(dc, header))
    return header
def renderFlavor(pod, dc):
    """Return colorized lines describing abnormal conditions of `pod`.

    Reports a non-Running phase, waiting/terminated container state for the
    container matching the deployment name, and a nonzero restart count.
    Returns an empty list for a healthy pod.
    """
    debug("deployment/{}: pod/{}: rendering flavor".format(dc, pod))
    lines = []
    raw = json.loads(run_oc(['get', 'pod/{}'.format(pod), '--output', 'json']))
    status = raw.get("status", {})
    phase = status.get("phase", "")
    if phase != "Running":
        reason = status.get("reason", "")
        message = status.get("message", "")
        color = YELLOW
        if phase in ["Failed", "Unknown", "CrashLoopBackOff"]:
            color = RED
        # Bug fix: this append previously lacked the trailing RESET that every
        # other colored line has, so the color escape bled into later output.
        lines.append(color + "pod {} is {}: {}, {}".format(pod, phase, reason, message) + RESET)
    for container in status.get("containerStatuses", []):
        debug("pod/{}: handling status for container {}".format(pod, container.get("name", "")))
        if container.get("name") == dc:
            state = container.get("state", {})
            if "running" not in state:
                if "waiting" in state:
                    reason = state["waiting"].get("reason")
                    message = state["waiting"].get("message")
                    lines.append(YELLOW + "pod {} is waiting: {}".format(pod, reason) + RESET)
                    lines.append(YELLOW + "\t{}".format(message) + RESET)
                if "terminated" in state:
                    reason = state["terminated"].get("reason")
                    message = state["terminated"].get("message")
                    lines.append(RED + "pod {} is terminated: {}".format(pod, reason) + RESET)
                    lines.append(RED + "\t{}".format(message) + RESET)
            restartCount = container.get("restartCount", 0)
            if restartCount != 0:
                lines.append(RED + "pod {} has restarted {} times".format(pod, restartCount) + RESET)
    debug("deployment/{}: got flavor {}".format(dc, lines))
    return lines
# Script entry point: start the monitoring loop when run directly.
if __name__ == '__main__':
    main()
| 39.860825 | 142 | 0.574163 |
9e30b18f1d7616d4eaf9d01afe9afc40e64801f8 | 38,497 | py | Python | environment/lib/python3.8/site-packages/seaborn/regression.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | null | null | null | environment/lib/python3.8/site-packages/seaborn/regression.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | 3 | 2020-07-27T19:51:37.000Z | 2020-11-04T14:25:09.000Z | environment/lib/python3.8/site-packages/seaborn/regression.py | 123972/PCA-nutricion | aff3c51a71c887c3fa367dbf9d599be5915c80cc | [
"MIT"
] | 3 | 2020-07-26T20:09:46.000Z | 2020-11-04T16:17:57.000Z | """Plotting functions for linear models (broadly construed)."""
import copy
from textwrap import dedent
import warnings
import numpy as np
import pandas as pd
from scipy.spatial import distance
import matplotlib as mpl
import matplotlib.pyplot as plt
# Optional-dependency probe: statsmodels enables the logistic/robust/lowess
# regression options; code elsewhere checks _has_statsmodels before using them.
try:
    import statsmodels
    assert statsmodels  # reference it so linters don't flag an unused import
    _has_statsmodels = True
except ImportError:
    _has_statsmodels = False
from . import utils
from . import algorithms as algo
from .axisgrid import FacetGrid, _facet_docs
__all__ = ["lmplot", "regplot", "residplot"]
class _LinearPlotter(object):
    """Base class for plotting relational data in tidy format.

    To get anything useful done you'll have to inherit from this, but setup
    code that can be abstracted out should be put here.
    """
    def establish_variables(self, data, **kws):
        """Extract variables from data or use directly.

        Each keyword maps a variable name to either a column name in ``data``
        (string), a list, or an array-like; the resolved 1d vector is stored
        as an attribute of the same name on ``self``.
        """
        self.data = data

        # Validate the inputs
        any_strings = any([isinstance(v, str) for v in kws.values()])
        if any_strings and data is None:
            raise ValueError("Must pass `data` if using named variables.")

        # Set the variables
        for var, val in kws.items():
            if isinstance(val, str):
                vector = data[val]
            elif isinstance(val, list):
                vector = np.asarray(val)
            else:
                vector = val
            # Length-1 inputs are kept as-is so squeeze doesn't drop to 0-d.
            if vector is not None and vector.shape != (1,):
                vector = np.squeeze(vector)
            if np.ndim(vector) > 1:
                err = "regplot inputs must be 1d"
                raise ValueError(err)
            setattr(self, var, vector)

    def dropna(self, *vars):
        """Remove observations with missing data.

        Filters every named attribute (ignoring those set to None) down to
        the rows where all of them are non-null, keeping them aligned.
        """
        vals = [getattr(self, var) for var in vars]
        vals = [v for v in vals if v is not None]
        not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)
        for var in vars:
            val = getattr(self, var)
            if val is not None:
                setattr(self, var, val[not_na])

    def plot(self, ax):
        # Subclasses must implement the actual drawing.
        raise NotImplementedError
class _RegressionPlotter(_LinearPlotter):
    """Plotter for numeric independent variables with regression model.

    This does the computations and drawing for the `regplot` function, and
    is thus also used indirectly by `lmplot`.
    """
    def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
                 x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
                 units=None, seed=None, order=1, logistic=False, lowess=False,
                 robust=False, logx=False, x_partial=None, y_partial=None,
                 truncate=False, dropna=True, x_jitter=None, y_jitter=None,
                 color=None, label=None):

        # Set member attributes
        self.x_estimator = x_estimator
        self.ci = ci
        # x_ci="ci" means "use the same interval size as the fit CI".
        self.x_ci = ci if x_ci == "ci" else x_ci
        self.n_boot = n_boot
        self.seed = seed
        self.scatter = scatter
        self.fit_reg = fit_reg
        self.order = order
        self.logistic = logistic
        self.lowess = lowess
        self.robust = robust
        self.logx = logx
        self.truncate = truncate
        self.x_jitter = x_jitter
        self.y_jitter = y_jitter
        self.color = color
        self.label = label

        # Validate the regression options:
        if sum((order > 1, logistic, robust, lowess, logx)) > 1:
            raise ValueError("Mutually exclusive regression options.")

        # Extract the data vals from the arguments or passed dataframe
        self.establish_variables(data, x=x, y=y, units=units,
                                 x_partial=x_partial, y_partial=y_partial)

        # Drop null observations
        if dropna:
            self.dropna("x", "y", "units", "x_partial", "y_partial")

        # Regress nuisance variables out of the data
        if self.x_partial is not None:
            self.x = self.regress_out(self.x, self.x_partial)
        if self.y_partial is not None:
            self.y = self.regress_out(self.y, self.y_partial)

        # Possibly bin the predictor variable, which implies a point estimate
        if x_bins is not None:
            self.x_estimator = np.mean if x_estimator is None else x_estimator
            x_discrete, x_bins = self.bin_predictor(x_bins)
            self.x_discrete = x_discrete
        else:
            self.x_discrete = self.x

        # Disable regression in case of singleton inputs
        if len(self.x) <= 1:
            self.fit_reg = False

        # Save the range of the x variable for the grid later
        if self.fit_reg:
            self.x_range = self.x.min(), self.x.max()

    @property
    def scatter_data(self):
        """Data where each observation is a point."""
        # Jitter only affects the drawn points, never the fitted model.
        x_j = self.x_jitter
        if x_j is None:
            x = self.x
        else:
            x = self.x + np.random.uniform(-x_j, x_j, len(self.x))

        y_j = self.y_jitter
        if y_j is None:
            y = self.y
        else:
            y = self.y + np.random.uniform(-y_j, y_j, len(self.y))

        return x, y

    @property
    def estimate_data(self):
        """Data with a point estimate and CI for each discrete x value."""
        x, y = self.x_discrete, self.y
        vals = sorted(np.unique(x))
        points, cis = [], []

        for val in vals:

            # Get the point estimate of the y variable
            _y = y[x == val]
            est = self.x_estimator(_y)
            points.append(est)

            # Compute the confidence interval for this estimate
            if self.x_ci is None:
                cis.append(None)
            else:
                units = None
                if self.x_ci == "sd":
                    # +/- one standard deviation instead of a bootstrap CI.
                    sd = np.std(_y)
                    _ci = est - sd, est + sd
                else:
                    if self.units is not None:
                        units = self.units[x == val]
                    boots = algo.bootstrap(_y,
                                           func=self.x_estimator,
                                           n_boot=self.n_boot,
                                           units=units,
                                           seed=self.seed)
                    _ci = utils.ci(boots, self.x_ci)
                cis.append(_ci)

        return vals, points, cis

    def fit_regression(self, ax=None, x_range=None, grid=None):
        """Fit the regression model.

        Returns (grid, yhat, err_bands); err_bands is None when no CI is
        requested (or for lowess, which has no bootstrap CI).
        """
        # Create the grid for the regression
        if grid is None:
            if self.truncate:
                x_min, x_max = self.x_range
            else:
                if ax is None:
                    x_min, x_max = x_range
                else:
                    x_min, x_max = ax.get_xlim()
            grid = np.linspace(x_min, x_max, 100)
        ci = self.ci

        # Fit the regression
        if self.order > 1:
            yhat, yhat_boots = self.fit_poly(grid, self.order)
        elif self.logistic:
            from statsmodels.genmod.generalized_linear_model import GLM
            from statsmodels.genmod.families import Binomial
            yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
                                                    family=Binomial())
        elif self.lowess:
            # Lowess returns its own grid and has no bootstrap CI.
            ci = None
            grid, yhat = self.fit_lowess()
        elif self.robust:
            from statsmodels.robust.robust_linear_model import RLM
            yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
        elif self.logx:
            yhat, yhat_boots = self.fit_logx(grid)
        else:
            yhat, yhat_boots = self.fit_fast(grid)

        # Compute the confidence interval at each grid point
        if ci is None:
            err_bands = None
        else:
            err_bands = utils.ci(yhat_boots, ci, axis=0)

        return grid, yhat, err_bands

    def fit_fast(self, grid):
        """Low-level regression and prediction using linear algebra."""
        def reg_func(_x, _y):
            return np.linalg.pinv(_x).dot(_y)

        # Design matrices with an intercept column of ones.
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), grid]
        yhat = grid.dot(reg_func(X, y))
        if self.ci is None:
            return yhat, None

        beta_boots = algo.bootstrap(X, y,
                                    func=reg_func,
                                    n_boot=self.n_boot,
                                    units=self.units,
                                    seed=self.seed).T
        yhat_boots = grid.dot(beta_boots).T
        return yhat, yhat_boots

    def fit_poly(self, grid, order):
        """Regression using numpy polyfit for higher-order trends."""
        def reg_func(_x, _y):
            return np.polyval(np.polyfit(_x, _y, order), grid)

        x, y = self.x, self.y
        yhat = reg_func(x, y)
        if self.ci is None:
            return yhat, None

        yhat_boots = algo.bootstrap(x, y,
                                    func=reg_func,
                                    n_boot=self.n_boot,
                                    units=self.units,
                                    seed=self.seed)
        return yhat, yhat_boots

    def fit_statsmodels(self, grid, model, **kwargs):
        """More general regression function using statsmodels objects."""
        import statsmodels.genmod.generalized_linear_model as glm
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), grid]

        def reg_func(_x, _y):
            try:
                yhat = model(_y, _x, **kwargs).fit().predict(grid)
            except glm.PerfectSeparationError:
                # A perfectly separated bootstrap sample yields NaNs rather
                # than aborting the whole fit.
                yhat = np.empty(len(grid))
                yhat.fill(np.nan)
            return yhat

        yhat = reg_func(X, y)
        if self.ci is None:
            return yhat, None

        yhat_boots = algo.bootstrap(X, y,
                                    func=reg_func,
                                    n_boot=self.n_boot,
                                    units=self.units,
                                    seed=self.seed)
        return yhat, yhat_boots

    def fit_lowess(self):
        """Fit a locally-weighted regression, which returns its own grid."""
        from statsmodels.nonparametric.smoothers_lowess import lowess
        grid, yhat = lowess(self.y, self.x).T
        return grid, yhat

    def fit_logx(self, grid):
        """Fit the model in log-space."""
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), np.log(grid)]

        def reg_func(_x, _y):
            # Log-transform the predictor column before the linear solve.
            _x = np.c_[_x[:, 0], np.log(_x[:, 1])]
            return np.linalg.pinv(_x).dot(_y)

        yhat = grid.dot(reg_func(X, y))
        if self.ci is None:
            return yhat, None

        beta_boots = algo.bootstrap(X, y,
                                    func=reg_func,
                                    n_boot=self.n_boot,
                                    units=self.units,
                                    seed=self.seed).T
        yhat_boots = grid.dot(beta_boots).T
        return yhat, yhat_boots

    def bin_predictor(self, bins):
        """Discretize a predictor by assigning value to closest bin."""
        x = self.x
        if np.isscalar(bins):
            # Interpret an int as a number of evenly-populated bins.
            percentiles = np.linspace(0, 100, bins + 2)[1:-1]
            bins = np.c_[np.percentile(x, percentiles)]
        else:
            bins = np.c_[np.ravel(bins)]

        dist = distance.cdist(np.c_[x], bins)
        x_binned = bins[np.argmin(dist, axis=1)].ravel()

        return x_binned, bins.ravel()

    def regress_out(self, a, b):
        """Regress b from a keeping a's original mean."""
        a_mean = a.mean()
        a = a - a_mean
        b = b - b.mean()
        b = np.c_[b]
        a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
        return np.asarray(a_prime + a_mean).reshape(a.shape)

    def plot(self, ax, scatter_kws, line_kws):
        """Draw the full plot."""
        # Insert the plot label into the correct set of keyword arguments
        if self.scatter:
            scatter_kws["label"] = self.label
        else:
            line_kws["label"] = self.label

        # Use the current color cycle state as a default
        if self.color is None:
            lines, = ax.plot([], [])
            color = lines.get_color()
            lines.remove()
        else:
            color = self.color

        # Ensure that color is hex to avoid matplotlib weirdness
        color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))

        # Let color in keyword arguments override overall plot color
        scatter_kws.setdefault("color", color)
        line_kws.setdefault("color", color)

        # Draw the constituent plots
        if self.scatter:
            self.scatterplot(ax, scatter_kws)

        if self.fit_reg:
            self.lineplot(ax, line_kws)

        # Label the axes
        if hasattr(self.x, "name"):
            ax.set_xlabel(self.x.name)
        if hasattr(self.y, "name"):
            ax.set_ylabel(self.y.name)

    def scatterplot(self, ax, kws):
        """Draw the data."""
        # Treat the line-based markers specially, explicitly setting larger
        # linewidth than is provided by the seaborn style defaults.
        # This would ideally be handled better in matplotlib (i.e., distinguish
        # between edgewidth for solid glyphs and linewidth for line glyphs
        # but this should do for now.
        line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
        if self.x_estimator is None:
            if "marker" in kws and kws["marker"] in line_markers:
                lw = mpl.rcParams["lines.linewidth"]
            else:
                lw = mpl.rcParams["lines.markeredgewidth"]
            kws.setdefault("linewidths", lw)

            # Only apply default alpha for non-RGBA color arrays.
            if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:
                kws.setdefault("alpha", .8)

            x, y = self.scatter_data
            ax.scatter(x, y, **kws)
        else:
            # TODO abstraction
            ci_kws = {"color": kws["color"]}
            ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
            kws.setdefault("s", 50)

            xs, ys, cis = self.estimate_data
            if [ci for ci in cis if ci is not None]:
                for x, ci in zip(xs, cis):
                    ax.plot([x, x], ci, **ci_kws)
            ax.scatter(xs, ys, **kws)

    def lineplot(self, ax, kws):
        """Draw the model."""
        # Fit the regression model
        grid, yhat, err_bands = self.fit_regression(ax)
        edges = grid[0], grid[-1]

        # Get set default aesthetics
        fill_color = kws["color"]
        lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
        kws.setdefault("linewidth", lw)

        # Draw the regression line and confidence interval
        line, = ax.plot(grid, yhat, **kws)
        line.sticky_edges.x[:] = edges  # Prevent mpl from adding margin
        if err_bands is not None:
            ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)
_regression_docs = dict(
model_api=dedent("""\
There are a number of mutually exclusive options for estimating the
regression model. See the :ref:`tutorial <regression_tutorial>` for more
information.\
"""),
regplot_vs_lmplot=dedent("""\
The :func:`regplot` and :func:`lmplot` functions are closely related, but
the former is an axes-level function while the latter is a figure-level
function that combines :func:`regplot` and :class:`FacetGrid`.\
"""),
x_estimator=dedent("""\
x_estimator : callable that maps vector -> scalar, optional
Apply this function to each unique value of ``x`` and plot the
resulting estimate. This is useful when ``x`` is a discrete variable.
If ``x_ci`` is given, this estimate will be bootstrapped and a
confidence interval will be drawn.\
"""),
x_bins=dedent("""\
x_bins : int or vector, optional
Bin the ``x`` variable into discrete bins and then estimate the central
tendency and a confidence interval. This binning only influences how
the scatterplot is drawn; the regression is still fit to the original
data. This parameter is interpreted either as the number of
evenly-sized (not necessary spaced) bins or the positions of the bin
centers. When this parameter is used, it implies that the default of
``x_estimator`` is ``numpy.mean``.\
"""),
x_ci=dedent("""\
x_ci : "ci", "sd", int in [0, 100] or None, optional
Size of the confidence interval used when plotting a central tendency
for discrete values of ``x``. If ``"ci"``, defer to the value of the
``ci`` parameter. If ``"sd"``, skip bootstrapping and show the
standard deviation of the observations in each bin.\
"""),
scatter=dedent("""\
scatter : bool, optional
If ``True``, draw a scatterplot with the underlying observations (or
the ``x_estimator`` values).\
"""),
fit_reg=dedent("""\
fit_reg : bool, optional
If ``True``, estimate and plot a regression model relating the ``x``
and ``y`` variables.\
"""),
ci=dedent("""\
ci : int in [0, 100] or None, optional
Size of the confidence interval for the regression estimate. This will
be drawn using translucent bands around the regression line. The
confidence interval is estimated using a bootstrap; for large
datasets, it may be advisable to avoid that computation by setting
this parameter to None.\
"""),
n_boot=dedent("""\
n_boot : int, optional
Number of bootstrap resamples used to estimate the ``ci``. The default
value attempts to balance time and stability; you may want to increase
this value for "final" versions of plots.\
"""),
units=dedent("""\
units : variable name in ``data``, optional
If the ``x`` and ``y`` observations are nested within sampling units,
those can be specified here. This will be taken into account when
computing the confidence intervals by performing a multilevel bootstrap
that resamples both units and observations (within unit). This does not
otherwise influence how the regression is estimated or drawn.\
"""),
seed=dedent("""\
seed : int, numpy.random.Generator, or numpy.random.RandomState, optional
Seed or random number generator for reproducible bootstrapping.\
"""),
order=dedent("""\
order : int, optional
If ``order`` is greater than 1, use ``numpy.polyfit`` to estimate a
polynomial regression.\
"""),
logistic=dedent("""\
logistic : bool, optional
If ``True``, assume that ``y`` is a binary variable and use
``statsmodels`` to estimate a logistic regression model. Note that this
is substantially more computationally intensive than linear regression,
so you may wish to decrease the number of bootstrap resamples
(``n_boot``) or set ``ci`` to None.\
"""),
lowess=dedent("""\
lowess : bool, optional
If ``True``, use ``statsmodels`` to estimate a nonparametric lowess
model (locally weighted linear regression). Note that confidence
intervals cannot currently be drawn for this kind of model.\
"""),
robust=dedent("""\
robust : bool, optional
If ``True``, use ``statsmodels`` to estimate a robust regression. This
will de-weight outliers. Note that this is substantially more
computationally intensive than standard linear regression, so you may
wish to decrease the number of bootstrap resamples (``n_boot``) or set
``ci`` to None.\
"""),
logx=dedent("""\
logx : bool, optional
If ``True``, estimate a linear regression of the form y ~ log(x), but
plot the scatterplot and regression model in the input space. Note that
``x`` must be positive for this to work.\
"""),
xy_partial=dedent("""\
{x,y}_partial : strings in ``data`` or matrices
Confounding variables to regress out of the ``x`` or ``y`` variables
before plotting.\
"""),
truncate=dedent("""\
truncate : bool, optional
If ``True``, the regression line is bounded by the data limits. If
``False``, it extends to the ``x`` axis limits.
"""),
xy_jitter=dedent("""\
{x,y}_jitter : floats, optional
Add uniform random noise of this size to either the ``x`` or ``y``
variables. The noise is added to a copy of the data after fitting the
regression, and only influences the look of the scatterplot. This can
be helpful when plotting variables that take discrete values.\
"""),
scatter_line_kws=dedent("""\
{scatter,line}_kws : dictionaries
Additional keyword arguments to pass to ``plt.scatter`` and
``plt.plot``.\
"""),
)
_regression_docs.update(_facet_docs)
def lmplot(x, y, data, hue=None, col=None, row=None, palette=None,
           col_wrap=None, height=5, aspect=1, markers="o", sharex=True,
           sharey=True, hue_order=None, col_order=None, row_order=None,
           legend=True, legend_out=True, x_estimator=None, x_bins=None,
           x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
           units=None, seed=None, order=1, logistic=False, lowess=False,
           robust=False, logx=False, x_partial=None, y_partial=None,
           truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,
           line_kws=None, size=None):
    # Full docstring is attached below from the shared _regression_docs
    # templates; combines regplot() with a FacetGrid.
    # Fix in this revision: corrected the "singeton" typo in the markers
    # ValueError message.

    # Handle deprecations
    if size is not None:
        height = size
        msg = ("The `size` parameter has been renamed to `height`; "
               "please update your code.")
        warnings.warn(msg, UserWarning)

    # Reduce the dataframe to only needed columns
    need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
    cols = np.unique([a for a in need_cols if a is not None]).tolist()
    data = data[cols]

    # Initialize the grid
    facets = FacetGrid(data, row, col, hue, palette=palette,
                       row_order=row_order, col_order=col_order,
                       hue_order=hue_order, height=height, aspect=aspect,
                       col_wrap=col_wrap, sharex=sharex, sharey=sharey,
                       legend_out=legend_out)

    # Add the markers here as FacetGrid has figured out how many levels of the
    # hue variable are needed and we don't want to duplicate that process
    if facets.hue_names is None:
        n_markers = 1
    else:
        n_markers = len(facets.hue_names)
    if not isinstance(markers, list):
        markers = [markers] * n_markers
    if len(markers) != n_markers:
        raise ValueError(("markers must be a singleton or a list of markers "
                          "for each level of the hue variable"))
    facets.hue_kws = {"marker": markers}

    # Hack to set the x limits properly, which needs to happen here
    # because the extent of the regression estimate is determined
    # by the limits of the plot
    if sharex:
        for ax in facets.axes.flat:
            ax.scatter(data[x], np.ones(len(data)) * data[y].mean()).remove()

    # Draw the regression plot on each facet
    regplot_kws = dict(
        x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
        scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
        seed=seed, order=order, logistic=logistic, lowess=lowess,
        robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,
        truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,
        scatter_kws=scatter_kws, line_kws=line_kws,
    )
    facets.map_dataframe(regplot, x, y, **regplot_kws)

    # Add a legend
    if legend and (hue is not None) and (hue not in [col, row]):
        facets.add_legend()
    return facets
lmplot.__doc__ = dedent("""\
Plot data and regression model fits across a FacetGrid.
This function combines :func:`regplot` and :class:`FacetGrid`. It is
intended as a convenient interface to fit regression models across
conditional subsets of a dataset.
When thinking about how to assign variables to different facets, a general
rule is that it makes sense to use ``hue`` for the most important
comparison, followed by ``col`` and ``row``. However, always think about
your particular dataset and the goals of the visualization you are
creating.
{model_api}
The parameters to this function span most of the options in
:class:`FacetGrid`, although there may be occasional cases where you will
want to use that class and :func:`regplot` directly.
Parameters
----------
x, y : strings, optional
Input variables; these should be column names in ``data``.
{data}
hue, col, row : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``*_order`` parameters to control
the order of levels of this variable.
{palette}
{col_wrap}
{height}
{aspect}
markers : matplotlib marker code or list of marker codes, optional
Markers for the scatterplot. If a list, each marker in the list will be
used for each level of the ``hue`` variable.
{share_xy}
{{hue,col,row}}_order : lists, optional
Order for the levels of the faceting variables. By default, this will
be the order that the levels appear in ``data`` or, if the variables
are pandas categoricals, the category order.
legend : bool, optional
If ``True`` and there is a ``hue`` variable, add a legend.
{legend_out}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{seed}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
{scatter_line_kws}
See Also
--------
regplot : Plot data and a conditional model fit.
FacetGrid : Subplot grid for plotting conditional relationships.
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
Notes
-----
{regplot_vs_lmplot}
Examples
--------
These examples focus on basic regression model plots to exhibit the
various faceting options; see the :func:`regplot` docs for demonstrations
of the other options for plotting the data and models. There are also
other examples for how to manipulate plot using the returned object on
the :class:`FacetGrid` docs.
Plot a simple linear relationship between two variables:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> g = sns.lmplot(x="total_bill", y="tip", data=tips)
Condition on a third variable and plot the levels in different colors:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips)
Use different markers as well as colors so the plot will reproduce to
black-and-white more easily:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... markers=["o", "x"])
Use a different color palette:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette="Set1")
Map ``hue`` levels to colors with a dictionary:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips,
... palette=dict(Yes="g", No="m"))
Plot the levels of the third variable across different columns:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="smoker", data=tips)
Change the height and aspect ratio of the facets:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="size", y="total_bill", hue="day", col="day",
... data=tips, height=6, aspect=.4, x_jitter=.1)
Wrap the levels of the column variable into multiple rows:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", col="day", hue="day",
... data=tips, col_wrap=2, height=3)
Condition on two variables to make a full grid:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, height=3)
Use methods on the returned :class:`FacetGrid` instance to further tweak
the plot:
.. plot::
:context: close-figs
>>> g = sns.lmplot(x="total_bill", y="tip", row="sex", col="time",
... data=tips, height=3)
>>> g = (g.set_axis_labels("Total bill (US Dollars)", "Tip")
... .set(xlim=(0, 60), ylim=(0, 12),
... xticks=[10, 30, 50], yticks=[2, 6, 10])
... .fig.subplots_adjust(wspace=.02))
""").format(**_regression_docs)
def regplot(x, y, data=None, x_estimator=None, x_bins=None, x_ci="ci",
            scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
            seed=None, order=1, logistic=False, lowess=False, robust=False,
            logx=False, x_partial=None, y_partial=None,
            truncate=True, dropna=True, x_jitter=None, y_jitter=None,
            label=None, color=None, marker="o",
            scatter_kws=None, line_kws=None, ax=None):
    # All statistical options are delegated to the plotter object; this
    # function only resolves the target axes and the keyword dictionaries.
    # (The public docstring is attached below from _regression_docs.)
    plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
                                 scatter, fit_reg, ci, n_boot, units, seed,
                                 order, logistic, lowess, robust, logx,
                                 x_partial, y_partial, truncate, dropna,
                                 x_jitter, y_jitter, color, label)
    target_ax = plt.gca() if ax is None else ax

    # Shallow-copy the user dictionaries so the caller's objects are not
    # mutated when the marker is injected.
    if scatter_kws is None:
        scatter_opts = {}
    else:
        scatter_opts = copy.copy(scatter_kws)
    scatter_opts["marker"] = marker
    if line_kws is None:
        line_opts = {}
    else:
        line_opts = copy.copy(line_kws)

    plotter.plot(target_ax, scatter_opts, line_opts)
    return target_ax
regplot.__doc__ = dedent("""\
Plot data and a linear regression model fit.
{model_api}
Parameters
----------
x, y: string, series, or vector array
Input variables. If strings, these should correspond with column names
in ``data``. When pandas objects are used, axes will be labeled with
the series name.
{data}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{seed}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
label : string
Label to apply to either the scatterplot or regression line (if
``scatter`` is ``False``) for use in a legend.
color : matplotlib color
Color to apply to all plot elements; will be superseded by colors
passed in ``scatter_kws`` or ``line_kws``.
marker : matplotlib marker code
Marker to use for the scatterplot glyphs.
{scatter_line_kws}
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
Returns
-------
ax : matplotlib Axes
The Axes object containing the plot.
See Also
--------
lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
linear relationships in a dataset.
jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
``kind="reg"``).
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
residplot : Plot the residuals of a linear regression model.
Notes
-----
{regplot_vs_lmplot}
It's also easy to combine combine :func:`regplot` and :class:`JointGrid` or
:class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
functions, although these do not directly accept all of :func:`regplot`'s
parameters.
Examples
--------
Plot the relationship between two variables in a DataFrame:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set(color_codes=True)
>>> tips = sns.load_dataset("tips")
>>> ax = sns.regplot(x="total_bill", y="tip", data=tips)
Plot with two variables defined as numpy arrays; use a different color:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(8)
>>> mean, cov = [4, 6], [(1.5, .7), (.7, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 80).T
>>> ax = sns.regplot(x=x, y=y, color="g")
Plot with two variables defined as pandas Series; use a different marker:
.. plot::
:context: close-figs
>>> import pandas as pd
>>> x, y = pd.Series(x, name="x_var"), pd.Series(y, name="y_var")
>>> ax = sns.regplot(x=x, y=y, marker="+")
Use a 68% confidence interval, which corresponds with the standard error
of the estimate, and extend the regression line to the axis limits:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x=x, y=y, ci=68, truncate=False)
Plot with a discrete ``x`` variable and add some jitter:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips, x_jitter=.1)
Plot with a discrete ``x`` variable showing means and confidence intervals
for unique values:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips,
... x_estimator=np.mean)
Plot with a continuous variable divided into discrete bins:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x=x, y=y, x_bins=4)
Fit a higher-order polynomial regression:
.. plot::
:context: close-figs
>>> ans = sns.load_dataset("anscombe")
>>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "II"],
... scatter_kws={{"s": 80}},
... order=2, ci=None)
Fit a robust regression and don't plot a confidence interval:
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="x", y="y", data=ans.loc[ans.dataset == "III"],
... scatter_kws={{"s": 80}},
... robust=True, ci=None)
Fit a logistic regression; jitter the y variable and use fewer bootstrap
iterations:
.. plot::
:context: close-figs
>>> tips["big_tip"] = (tips.tip / tips.total_bill) > .175
>>> ax = sns.regplot(x="total_bill", y="big_tip", data=tips,
... logistic=True, n_boot=500, y_jitter=.03)
Fit the regression model using log(x):
.. plot::
:context: close-figs
>>> ax = sns.regplot(x="size", y="total_bill", data=tips,
... x_estimator=np.mean, logx=True)
""").format(**_regression_docs)
def residplot(x, y, data=None, lowess=False, x_partial=None, y_partial=None,
              order=1, robust=False, dropna=True, label=None, color=None,
              scatter_kws=None, line_kws=None, ax=None):
    """Plot the residuals of a linear regression.
    This function regresses ``y`` on ``x`` (optionally with a polynomial or
    robust fit), subtracts the fitted values, and draws a scatterplot of
    the residuals. A lowess smoother can be overlaid on the residual plot,
    which can help in determining whether structure remains in the residuals.
    Parameters
    ----------
    x : vector or string
        Data or column name in `data` for the predictor variable.
    y : vector or string
        Data or column name in `data` for the response variable.
    data : DataFrame, optional
        DataFrame to use if `x` and `y` are column names.
    lowess : boolean, optional
        Fit a lowess smoother to the residual scatterplot.
    {x, y}_partial : matrix or string(s) , optional
        Matrix with same first dimension as `x`, or column name(s) in `data`.
        These variables are treated as confounding and are removed from
        the `x` or `y` variables before plotting.
    order : int, optional
        Order of the polynomial to fit when calculating the residuals.
    robust : boolean, optional
        Fit a robust linear regression when calculating the residuals.
    dropna : boolean, optional
        If True, ignore observations with missing data when fitting and
        plotting.
    label : string, optional
        Label that will be used in any plot legends.
    color : matplotlib color, optional
        Color to use for all elements of the plot.
    {scatter, line}_kws : dictionaries, optional
        Additional keyword arguments passed to scatter() and plot() for drawing
        the components of the plot.
    ax : matplotlib axis, optional
        Plot into this axis, otherwise grab the current axis or make a new
        one if not existing.
    Returns
    -------
    ax: matplotlib axes
        Axes with the regression plot.
    See Also
    --------
    regplot : Plot a simple linear regression model.
    jointplot : Draw a :func:`residplot` with univariate marginal distributions
        (when used with ``kind="resid"``).
    """
    # No confidence interval is needed: only the point estimates are used
    # to form the residuals.
    regression = _RegressionPlotter(x, y, data, ci=None,
                                    order=order, robust=robust,
                                    x_partial=x_partial, y_partial=y_partial,
                                    dropna=dropna, color=color, label=label)
    target_ax = plt.gca() if ax is None else ax

    # Replace y with the residuals around the fitted regression.
    _, fitted, _ = regression.fit_regression(grid=regression.x)
    regression.y = regression.y - fitted

    # With lowess, draw a smoother through the residuals; otherwise suppress
    # the regression line entirely.
    if lowess:
        regression.lowess = True
    else:
        regression.fit_reg = False

    # Reference line at zero residual, drawn first so the points sit on top.
    target_ax.axhline(0, ls=":", c=".2")

    # Copy the user dictionaries so the caller's objects are not mutated.
    scatter_opts = {} if scatter_kws is None else scatter_kws.copy()
    line_opts = {} if line_kws is None else line_kws.copy()
    regression.plot(target_ax, scatter_opts, line_opts)
    return target_ax
| 35.978505 | 79 | 0.586799 |
66c2d1e004e1bf49569cecc21328f18ab18c931f | 19,020 | py | Python | test/unit/common/middleware/test_ratelimit.py | citrix-openstack-build/swift | 34340ddf49a84f3b3398012c2b60be1215033559 | [
"Apache-2.0"
] | 1 | 2016-03-14T23:38:37.000Z | 2016-03-14T23:38:37.000Z | test/unit/common/middleware/test_ratelimit.py | vimeo/swift | 5eea524d3ea6d29c2b6f34927c0130090e7ed44d | [
"Apache-2.0"
] | null | null | null | test/unit/common/middleware/test_ratelimit.py | vimeo/swift | 5eea524d3ea6d29c2b6f34927c0130090e7ed44d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
import eventlet
from contextlib import contextmanager
from threading import Thread
from test.unit import FakeLogger
from swift.common.middleware import ratelimit
from swift.proxy.controllers.base import get_container_memcache_key, \
headers_to_container_info
from swift.common.memcached import MemcacheConnectionError
from swift.common.swob import Request
class FakeMemcache(object):
    """In-memory stand-in for swift's memcache client.

    Exposes the subset of the memcache API the ratelimit middleware uses,
    plus two failure-injection toggles: ``error_on_incr`` makes incr()
    raise, and ``init_incr_return_neg`` makes the next incr() return a
    bogus large negative value once.
    """

    def __init__(self):
        # Backing store plus the failure-injection flags.
        self.store = {}
        self.error_on_incr = False
        self.init_incr_return_neg = False

    def get(self, key):
        """Return the stored value, or None when the key is absent."""
        return self.store.get(key)

    def set(self, key, value, serialize=False, time=0):
        """Store *value* under *key*; extra args accepted for API parity."""
        self.store[key] = value
        return True

    def incr(self, key, delta=1, time=0):
        """Increment the counter (clamped at zero) and return its new value."""
        if self.error_on_incr:
            raise MemcacheConnectionError('Memcache restarting')
        if self.init_incr_return_neg:
            # One-shot simulation of a bogus initial counter value.
            self.init_incr_return_neg = False
            return -10000000
        updated = int(self.store.setdefault(key, 0)) + int(delta)
        # Counters never go below zero.
        self.store[key] = updated if updated >= 0 else 0
        return int(self.store[key])

    def decr(self, key, delta=1, time=0):
        """Decrement by delegating to incr() with a negated delta."""
        return self.incr(key, delta=-delta, time=time)

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        # Locking always "succeeds" in the fake.
        yield True

    def delete(self, key):
        """Remove *key* if present; always report success."""
        try:
            self.store.pop(key)
        except Exception:
            pass
        return True
def mock_http_connect(response, headers=None, with_exc=False):
    """Build a replacement for http_connect that yields canned responses.

    Returns a callable with the http_connect signature; every call produces
    a fresh connection double whose status is *response*. When *with_exc*
    is true, getresponse() raises instead of returning.
    """

    class FakeConn(object):
        """Minimal HTTP connection double with a fixed status and headers."""

        def __init__(self, status, headers, with_exc):
            self.status = status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.with_exc = with_exc
            self.headers = {} if headers is None else headers

        def getresponse(self):
            if self.with_exc:
                raise Exception('test')
            # The connection object doubles as its own response.
            return self

        def getheader(self, header):
            return self.headers[header]

        def read(self, amt=None):
            # Responses carry no body.
            return ''

        def close(self):
            return

    def _connect(*args, **kwargs):
        return FakeConn(response, headers, with_exc)

    return _connect
class FakeApp(object):
    """WSGI application double that always answers with a 204 marker body."""

    def __call__(self, env, start_response):
        body = '204 No Content'
        return [body]
def start_response(*args):
    """No-op stand-in for the WSGI start_response callable."""
    pass
def dummy_filter_factory(global_conf, **local_conf):
    """Return a paste-style filter factory for the ratelimit middleware.

    Merges *local_conf* over *global_conf* and wires the middleware to a
    FakeLogger so tests can run without real logging configuration.
    """
    conf = global_conf.copy()
    conf.update(local_conf)
    def limit_filter(app):
        # Wrap *app* in RateLimitMiddleware configured from the merged conf.
        return ratelimit.RateLimitMiddleware(app, conf, logger=FakeLogger())
    return limit_filter
# Simulated wall clock (seconds) shared by mock_sleep()/mock_time() below.
time_ticker = 0
# Optional FIFO of canned time.time() return values; a None entry switches
# back to the ticker (see mock_time()).
time_override = []
def mock_sleep(x):
    """Advance the simulated clock by *x* seconds instead of sleeping."""
    global time_ticker
    time_ticker += x
def mock_time():
    """Replacement for time.time() driven by the module-level test clock.

    While ``time_override`` holds canned values, each call pops and returns
    the next one. A ``None`` entry means "fall through to the ticker": the
    remaining canned values are rebased onto the current ticker and the
    ticker itself is returned. With no overrides, the simulated
    ``time_ticker`` is returned directly.
    """
    global time_override
    global time_ticker
    if time_override:
        cur_time = time_override.pop(0)
        if cur_time is None:
            # Rebase the remaining canned values relative to the ticker.
            time_override = [None if i is None else i + time_ticker
                             for i in time_override]
            return time_ticker
        return cur_time
    return time_ticker
class TestRateLimit(unittest.TestCase):
    """Exercise the ratelimit middleware against the mocked clock/memcache.

    Uses the non-deprecated unittest assertion names (assertEqual instead of
    the deprecated assertEquals/assert_ aliases).
    """

    def _reset_time(self):
        # Rewind the simulated clock shared with mock_sleep()/mock_time().
        global time_ticker
        time_ticker = 0

    def setUp(self):
        # Patch eventlet.sleep and time.time so the tests fully control time.
        self.was_sleep = eventlet.sleep
        eventlet.sleep = mock_sleep
        self.was_time = time.time
        time.time = mock_time
        self._reset_time()

    def tearDown(self):
        eventlet.sleep = self.was_sleep
        time.time = self.was_time

    def _run(self, callable_func, num, rate, check_time=True):
        """Invoke callable_func num times; verify the simulated elapsed time."""
        global time_ticker
        begin = time.time()
        for x in range(0, num):
            callable_func()
        end = time.time()
        total_time = float(num) / rate - 1.0 / rate  # 1st request not limited
        # Allow for one second of variation in the total time.
        time_diff = abs(total_time - (end - begin))
        if check_time:
            self.assertEqual(round(total_time, 1), round(time_ticker, 1))
        return time_diff

    def test_get_maxrate(self):
        conf_dict = {'container_ratelimit_10': 200,
                     'container_ratelimit_50': 100,
                     'container_ratelimit_75': 30}
        test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        self.assertEqual(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 0), None)
        self.assertEqual(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 5), None)
        self.assertEqual(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 10), 200)
        self.assertEqual(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 60), 72)
        self.assertEqual(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 160), 30)

    def test_get_ratelimitable_key_tuples(self):
        current_rate = 13
        conf_dict = {'account_ratelimit': current_rate,
                     'container_ratelimit_3': 200}
        fake_memcache = FakeMemcache()
        fake_memcache.store[get_container_memcache_key('a', 'c')] = \
            {'object_count': '5'}
        the_app = ratelimit.RateLimitMiddleware(None, conf_dict,
                                                logger=FakeLogger())
        the_app.memcache_client = fake_memcache
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            'DELETE', 'a', None, None)), 0)
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            'PUT', 'a', 'c', None)), 1)
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            'DELETE', 'a', 'c', None)), 1)
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            'GET', 'a', 'c', 'o')), 0)
        self.assertEqual(len(the_app.get_ratelimitable_key_tuples(
            'PUT', 'a', 'c', 'o')), 1)

    def test_memcached_container_info_dict(self):
        mdict = headers_to_container_info({'x-container-object-count': '45'})
        self.assertEqual(mdict['object_count'], '45')

    def test_ratelimit_old_memcache_format(self):
        # The pre-rename cache entries used 'container_size' as the key.
        current_rate = 13
        conf_dict = {'account_ratelimit': current_rate,
                     'container_ratelimit_3': 200}
        fake_memcache = FakeMemcache()
        fake_memcache.store[get_container_memcache_key('a', 'c')] = \
            {'container_size': 5}
        the_app = ratelimit.RateLimitMiddleware(None, conf_dict,
                                                logger=FakeLogger())
        the_app.memcache_client = fake_memcache
        tuples = the_app.get_ratelimitable_key_tuples('PUT', 'a', 'c', 'o')
        self.assertEqual(tuples, [('ratelimit/a/c', 200.0)])

    def test_account_ratelimit(self):
        # Only PUT/DELETE to containers are limited; GET/POST pass straight
        # through (expected simulated delay 0).
        current_rate = 5
        num_calls = 50
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        for meth, exp_time in [
                ('DELETE', 9.8), ('GET', 0), ('POST', 0), ('PUT', 9.8)]:
            req = Request.blank('/v/a%s/c' % meth)
            req.method = meth
            req.environ['swift.cache'] = FakeMemcache()
            make_app_call = lambda: self.test_ratelimit(req.environ,
                                                        start_response)
            begin = time.time()
            self._run(make_app_call, num_calls, current_rate,
                      check_time=bool(exp_time))
            self.assertEqual(round(time.time() - begin, 1), exp_time)
            self._reset_time()

    def test_ratelimit_set_incr(self):
        # A bogus negative initial counter must be reset and not break limiting.
        current_rate = 5
        num_calls = 50
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].init_incr_return_neg = True
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        self._run(make_app_call, num_calls, current_rate, check_time=False)
        self.assertEqual(round(time.time() - begin, 1), 9.8)

    def test_ratelimit_whitelist(self):
        # Whitelisted accounts are never slowed down.
        global time_ticker
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a/c')
        req.environ['swift.cache'] = FakeMemcache()

        class rate_caller(Thread):
            def __init__(self, parent):
                Thread.__init__(self)
                self.parent = parent

            def run(self):
                self.result = self.parent.test_ratelimit(req.environ,
                                                         start_response)
        nt = 5
        threads = []
        for i in range(nt):
            rc = rate_caller(self)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        the_498s = [
            t for t in threads if ''.join(t.result).startswith('Slow down')]
        self.assertEqual(len(the_498s), 0)
        self.assertEqual(time_ticker, 0)

    def test_ratelimit_blacklist(self):
        # Blacklisted accounts get an immediate 497-style refusal.
        global time_ticker
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        self.test_ratelimit.BLACK_LIST_SLEEP = 0
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/b/c')
        req.environ['swift.cache'] = FakeMemcache()

        class rate_caller(Thread):
            def __init__(self, parent):
                Thread.__init__(self)
                self.parent = parent

            def run(self):
                self.result = self.parent.test_ratelimit(req.environ,
                                                         start_response)
        nt = 5
        threads = []
        for i in range(nt):
            rc = rate_caller(self)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        the_497s = [
            t for t in threads if ''.join(t.result).startswith('Your account')]
        self.assertEqual(len(the_497s), 5)
        self.assertEqual(time_ticker, 0)

    def test_ratelimit_max_rate_double(self):
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        # simulates 4 requests coming in at same time, then sleeping
        time_override = [0, 0, 0, 0, None]
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], 'Slow down')
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], 'Slow down')
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], '204 No Content')

    def test_ratelimit_max_rate_double_container(self):
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'container_ratelimit_0': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v/a/c/o')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].set(
            ratelimit.get_container_memcache_key('a', 'c'),
            {'container_size': 1})
        # simulates 4 requests coming in at same time, then sleeping
        time_override = [0, 0, 0, 0, None]
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], 'Slow down')
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], 'Slow down')
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], '204 No Content')

    def test_ratelimit_max_rate_double_container_listing(self):
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'container_listing_ratelimit_0': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = dummy_filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v/a/c')
        req.method = 'GET'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].set(
            ratelimit.get_container_memcache_key('a', 'c'),
            {'container_size': 1})
        # simulates 4 requests coming in at same time, then sleeping
        time_override = [0, 0, 0, 0, None]
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], 'Slow down')
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], 'Slow down')
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEqual(r[0], '204 No Content')

    def test_ratelimit_max_rate_multiple_acc(self):
        # Fifteen accounts, limited independently, hammered from threads.
        num_calls = 4
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2}
        fake_memcache = FakeMemcache()
        the_app = ratelimit.RateLimitMiddleware(None, conf_dict,
                                                logger=FakeLogger())
        the_app.memcache_client = fake_memcache
        req = lambda: None
        req.method = 'PUT'

        class rate_caller(Thread):
            def __init__(self, name):
                self.myname = name
                Thread.__init__(self)

            def run(self):
                for j in range(num_calls):
                    self.result = the_app.handle_ratelimit(req, self.myname,
                                                           'c', None)
        nt = 15
        begin = time.time()
        threads = []
        for i in range(nt):
            rc = rate_caller('a%s' % i)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        time_took = time.time() - begin
        self.assertEqual(1.5, round(time_took, 1))

    def test_call_invalid_path(self):
        # A malformed path must bypass rate limiting and hit the wrapped app.
        env = {'REQUEST_METHOD': 'GET',
               'SCRIPT_NAME': '',
               'PATH_INFO': '//v1/AUTH_1234567890',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '80',
               'swift.cache': FakeMemcache(),
               'SERVER_PROTOCOL': 'HTTP/1.0'}
        app = lambda *args, **kwargs: ['fake_app']
        rate_mid = ratelimit.RateLimitMiddleware(app, {},
                                                 logger=FakeLogger())

        class a_callable(object):
            def __call__(self, *args, **kwargs):
                pass
        resp = rate_mid.__call__(env, a_callable())
        self.assertEqual('fake_app', resp[0])

    def test_no_memcache(self):
        # Without a cache there is nothing to count against: no limiting.
        current_rate = 13
        num_calls = 5
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a')
        req.environ['swift.cache'] = None
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        self._run(make_app_call, num_calls, current_rate, check_time=False)
        time_took = time.time() - begin
        self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting

    def test_restarting_memcache(self):
        # Memcache connection errors during incr must disable limiting
        # rather than failing the request.
        current_rate = 2
        num_calls = 5
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].error_on_incr = True
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        self._run(make_app_call, num_calls, current_rate, check_time=False)
        time_took = time.time() - begin
        self.assertEqual(round(time_took, 1), 0)  # no memcache, no limiting
# Allow running this test module directly: python test_ratelimit.py
if __name__ == '__main__':
    unittest.main()
| 37.076023 | 79 | 0.597371 |
3678b8353c7cda3455f77902af137aa7bea8f3d1 | 8,365 | py | Python | blink_env/lib/python2.7/site-packages/djutils/management/commands/queue_consumer.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | null | null | null | blink_env/lib/python2.7/site-packages/djutils/management/commands/queue_consumer.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | 10 | 2019-12-26T17:31:31.000Z | 2022-03-21T22:17:33.000Z | blink_env/lib/python2.7/site-packages/djutils/management/commands/queue_consumer.py | naamara/blink | 326c035b2f0ef0feae4cd7aa2d4e73fa4a40171a | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
import os
import Queue
import sys
import time
import threading
from logging.handlers import RotatingFileHandler
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db.models.loading import get_apps
from djutils.queue import autodiscover
from djutils.queue.exceptions import QueueException
from djutils.queue.queue import invoker, queue_name, registry
from djutils.utils.helpers import ObjectDict
class Command(BaseCommand):
    """
    Queue consumer. Example usage::
        To start the consumer (note you must export the settings module):
        django-admin.py queue_consumer
    """
    help = "Run the queue consumer"

    option_list = BaseCommand.option_list + (
        make_option('--delay', '-d',
            dest='delay',
            default=0.1,
            type='float',
            help='Default interval between invoking, in seconds'
        ),
        make_option('--backoff', '-b',
            dest='backoff',
            default=1.15,
            type='float',
            help='Backoff factor when no message found'
        ),
        make_option('--max', '-m',
            dest='max_delay',
            default=60,
            type='int',
            help='Maximum time to wait, in seconds, between polling'
        ),
        make_option('--logfile', '-l',
            dest='logfile',
            default='',
            help='Destination for log file, e.g. /var/log/myapp.log'
        ),
        make_option('--no-periodic', '-n',
            dest='no_periodic',
            action='store_true',
            default=False,
            help='Do not enqueue periodic commands'
        ),
        make_option('--threads', '-t',
            dest='threads',
            default=1,
            type='int',
            help='Number of worker threads'
        ),
    )

    def initialize_options(self, options):
        """Validate the parsed options and build the consumer's shared state.

        Raises CommandError for an invalid backoff factor or thread count.
        """
        self.queue_name = queue_name
        self.logfile = options.logfile or '/var/log/djutils-%s.log' % self.queue_name
        self.default_delay = options.delay
        self.max_delay = options.max_delay
        self.backoff_factor = options.backoff
        self.threads = options.threads
        self.periodic_commands = not options.no_periodic
        if self.backoff_factor < 1.0:
            raise CommandError('backoff must be greater than or equal to 1')
        if self.threads < 1:
            raise CommandError('threads must be at least 1')
        # current polling interval; reset to default whenever a message is seen
        self.delay = self.default_delay
        self.logger = self.get_logger()
        # queue to track messages to be processed
        self._queue = Queue.Queue()
        # queue to track ids of threads that errored out
        self._errors = Queue.Queue()
        # list of worker threads
        self._threads = []

    def get_logger(self, verbosity=1):
        """Return the consumer logger, attaching a rotating file handler once."""
        log = logging.getLogger('djutils.queue.logger')
        if verbosity == 2:
            log.setLevel(logging.DEBUG)
        elif verbosity == 1:
            log.setLevel(logging.INFO)
        else:
            log.setLevel(logging.WARNING)
        if not log.handlers:
            handler = RotatingFileHandler(self.logfile, maxBytes=1024*1024, backupCount=3)
            handler.setFormatter(logging.Formatter("%(asctime)s:%(name)s:%(levelname)s:%(message)s"))
            log.addHandler(handler)
        return log

    def start_periodic_command_thread(self):
        """Start (and return) the daemon thread that enqueues periodic commands."""
        periodic_command_thread = threading.Thread(
            target=self.enqueue_periodic_commands
        )
        periodic_command_thread.daemon = True
        self.logger.info('Starting periodic command execution thread')
        periodic_command_thread.start()
        return periodic_command_thread

    def _queue_worker(self):
        """
        A worker thread that will chew on dequeued messages
        """
        while 1:
            message = self._queue.get()
            # task_done() is called before executing the command, so the
            # producer's join() unblocks as soon as a worker has picked the
            # message up (not when execution finishes).
            self._queue.task_done()
            try:
                command = registry.get_command_for_message(message)
                command.execute()
            except QueueException:
                # known queue-level failures are logged but do not kill the worker
                self.logger.warn('queue exception raised', exc_info=1)
            except:
                # put the thread's id into the queue of errors for removal
                current = threading.current_thread()
                self._errors.put(current.ident)
                # log the error and raise, killing the worker thread
                self.logger.error('exception encountered, exiting thread %s' % current, exc_info=1)
                raise

    def create_worker_thread(self):
        """Spawn, start and return one daemon worker thread."""
        thread = threading.Thread(target=self._queue_worker)
        thread.daemon = True
        thread.start()
        self.logger.info('created thread "%s"' % (thread.ident))
        return thread

    def remove_dead_worker(self, ident):
        """Drop the thread with the given ident from the worker pool."""
        self.logger.info('removing thread "%s"' % (ident))
        self._threads = [w for w in self._threads if w.ident != ident]

    def check_worker_health(self):
        """Purge workers that reported errors and respawn up to the target count."""
        while not self._errors.empty():
            error_ident = self._errors.get()
            self.remove_dead_worker(error_ident)
            self._errors.task_done()
        while len(self._threads) < self.threads:
            self._threads.append(self.create_worker_thread())

    def initialize_threads(self):
        self.check_worker_health()

    def run_with_periodic_commands(self):
        """
        Pull messages from the queue so long as:
        - no unhandled exceptions when dequeue-ing and processing messages
        - no unhandled exceptions while enqueue-ing periodic commands
        """
        while 1:
            t = self.start_periodic_command_thread()
            while t.is_alive():
                self.check_worker_health()
                self.process_message()
            # the periodic thread only exits on error; restart it
            self.logger.error('Periodic command thread died')

    def run_only_queue(self):
        """
        Pull messages from the queue until shut down or an unhandled exception
        is encountered while dequeue-ing and processing messages
        """
        while 1:
            self.check_worker_health()
            self.process_message()

    def process_message(self):
        """Read one message and hand it to a worker, or sleep with backoff."""
        message = invoker.read()
        if message:
            self.logger.info('Processing: %s' % message)
            # a hit resets the polling interval
            self.delay = self.default_delay
            self._queue.put(message)
            self._queue.join()
        else:
            if self.delay > self.max_delay:
                self.delay = self.max_delay
            self.logger.debug('No messages, sleeping for: %s' % self.delay)
            time.sleep(self.delay)
            self.delay *= self.backoff_factor

    def enqueue_periodic_commands(self):
        """Loop forever, enqueueing periodic commands roughly once a minute."""
        while True:
            start = time.time()
            self.logger.debug('Enqueueing periodic commands')
            try:
                invoker.enqueue_periodic_commands()
            except:
                # log, then re-raise so the supervising loop restarts the thread
                self.logger.error('Error enqueueing periodic commands', exc_info=1)
                raise
            end = time.time()
            # Sleep out the remainder of the minute.  Clamp at zero: when the
            # enqueue itself takes longer than 60 seconds, the unclamped
            # "60 - elapsed" would be negative and time.sleep() would raise,
            # silently killing this daemon thread.
            time.sleep(max(0, 60 - (end - start)))

    def handle(self, *args, **options):
        """
        Entry-point of the consumer -- in what might be a premature optimization,
        I've chosen to keep the code paths separate depending on whether the
        periodic command thread is started.
        """
        autodiscover()
        self.initialize_options(ObjectDict(options))
        self.logger.info('Initializing consumer with options:\nlogfile: %s\ndelay: %s\nbackoff: %s\nthreads: %s' % (
            self.logfile, self.delay, self.backoff_factor, self.threads))
        self.logger.info('Loaded classes:\n%s' % '\n'.join([
            klass for klass in registry._registry
        ]))
        try:
            if self.periodic_commands:
                self.run_with_periodic_commands()
            else:
                self.run_only_queue()
        except:
            # Intentionally broad: record whatever brought the consumer down.
            # NOTE(review): this also swallows KeyboardInterrupt/SystemExit,
            # so the process exits quietly after logging — confirm intended.
            self.logger.error('error', exc_info=1)
| 32.933071 | 116 | 0.572385 |
e04bc94d6b47867d00821fedb207cef88ffa3be1 | 3,307 | py | Python | nornir_cli/common_commands/cmd_filter.py | timeforplanb123/nornir_cli | fe42b9ff070d72bc61634f1442f932e34ff058f2 | [
"MIT"
] | 16 | 2021-03-26T12:12:14.000Z | 2021-09-08T15:55:46.000Z | nornir_cli/common_commands/cmd_filter.py | timeforplanb123/nornir_cli | fe42b9ff070d72bc61634f1442f932e34ff058f2 | [
"MIT"
] | null | null | null | nornir_cli/common_commands/cmd_filter.py | timeforplanb123/nornir_cli | fe42b9ff070d72bc61634f1442f932e34ff058f2 | [
"MIT"
] | 3 | 2021-03-29T02:34:09.000Z | 2021-04-02T22:30:08.000Z | from itertools import takewhile, dropwhile
import click
from nornir.core.filter import F
from nornir_cli.common_commands import (
cmd_show_inventory,
common_options,
_pickle_to_hidden_file,
_json_loads,
_get_lists,
SHOW_INVENTORY_OPTIONS,
)
ERROR_MESSAGE = (
"Filter optiions. There should be something like...\n\n"
"Simple filtering:\n"
" nornir_cli nornir-netmiko <init> filter site=cmh role=spine\n\n"
"Simple filtering with json:\n"
" nornir_cli nornir-netmiko <init> filter --hosts "
'\'primary_ip={"address": "10.1.129.71/32", "family": 4, "id": 4482, '
'"url": "http://netbox-domain/api/ipam/ip-addresses/4482/"} name=spine_1\'\n\n'
"Advanced filtering:\n"
" nornir_cli nornir-netmiko <init> filter -a "
"'name__contains=cmh device_role__name__contains=access'\n"
"The same:\n"
" nornir_cli nornir-netmiko <init> filter -a "
"'name__contains=cmh & device_role__name__contains=access'\n"
"Or:\n"
" nornir_cli nornir-netmiko <init> filter -a "
"'name__contains=cmh | name__contains=access'\n\n"
"where <init> is optional command"
)
# add quotes for filter values
def _get_quotes(t):
return ", ".join(["{}='{}'".format(*_) for _ in [__.split("=") for __ in t]])
def _get_obj_after_adv_filter(nr, t):
    """Apply an advanced filter expression *t* to the Nornir object *nr*.

    Tokens longer than two characters are treated as "key=value" filter
    clauses and grouped into ``F(...)`` terms; short tokens ("&", "|", "~")
    are kept as boolean operators between terms. The assembled expression
    is then passed to ``nr.filter()``.

    Returns the filtered Nornir object.
    """
    body = ""
    t = t.split()
    while True:
        try:
            # consume one run of "key=value" clauses into a single F(...) term
            begin = takewhile(lambda x: len(x) > 2, t)
            body += f"F({_get_quotes(begin)}) "
            # the next short token (if any) is the boolean operator
            t = dropwhile(lambda x: len(x) > 2, t)
            body += f"{next(t)} "
            t = tuple(t)
        except StopIteration:
            # no operator left: the expression is complete
            break
    # Evaluate in an explicit namespace.  The previous approach of calling
    # exec() with no namespaces and fishing the result out of locals() relies
    # on CPython implementation details and breaks under PEP 667 semantics
    # (Python 3.13+), where locals() inside a function is a snapshot.
    # exec() on a user-supplied expression is this CLI command's design.
    namespace = {"nr": nr, "F": F}
    exec(f"result = nr.filter({body})", namespace)
    return namespace["result"]
@click.command(
    context_settings=dict(
        ignore_unknown_options=True,
    ),
    short_help="Do simple or advanced filtering",
)
@click.option(
    "-a",
    "--advanced_filter",
    is_flag=True,
    help="Use an advanced filtering (string)",
)
@common_options(SHOW_INVENTORY_OPTIONS)
@click.option(
    "-s",
    "--save",
    is_flag=True,
    help="Save filtered Nornir object to pickle file for later use",
)
@click.argument("f", required=False)
@click.pass_context
# Leftover arguments via ctx.args don't work. Oh, really? :'( https://github.com/pallets/click/issues/473
def cli(ctx, advanced_filter, f, save, **kwargs):
    """
    Do simple or advanced filtering
    that will enable us to operate on groups of hosts
    based on their properties.

    F is the filter argument: either "key=value" pairs (simple mode) or an
    advanced expression string handled by _get_obj_after_adv_filter.
    """
    try:
        # Reload the previously initialized Nornir object from the hidden
        # pickle file, then replace it with the filtered result.
        ctx.obj["nornir"] = _pickle_to_hidden_file("temp.pkl", mode="rb", dump=False)
        if advanced_filter:
            ctx.obj["nornir"] = _get_obj_after_adv_filter(ctx.obj["nornir"], f)
        else:
            # Simple mode: parse each "key=value" token (values may be JSON)
            # into a dict of keyword filters.
            d = dict(
                [_json_loads(i) for i in (value.split("=") for value in _get_lists(f))]
            )
            ctx.obj["nornir"] = ctx.obj["nornir"].filter(**d)
        if save:
            # Persist the filtered object so later commands can reuse it.
            _pickle_to_hidden_file("temp.pkl", obj=ctx.obj["nornir"])
        # run show_inventory command
        if any(kwargs.values()):
            ctx.invoke(cmd_show_inventory.cli, **kwargs)
    except (ValueError, IndexError):
        # Malformed filter arguments: abort with the usage help text.
        raise ctx.fail(
            click.BadParameter(
                f"{ERROR_MESSAGE}",
            ).format_message(),
        )
dbd5f2b7f39997f24b9a9f8882ffd5321a4de794 | 2,103 | py | Python | daroca/apps/checkout/migrations/0001_initial.py | Yuri-Lima/DaRocaOfficial | 04347de59ab0fe1ebbac27cc37db67e0ab8faead | [
"MIT"
] | null | null | null | daroca/apps/checkout/migrations/0001_initial.py | Yuri-Lima/DaRocaOfficial | 04347de59ab0fe1ebbac27cc37db67e0ab8faead | [
"MIT"
] | null | null | null | daroca/apps/checkout/migrations/0001_initial.py | Yuri-Lima/DaRocaOfficial | 04347de59ab0fe1ebbac27cc37db67e0ab8faead | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-09-28 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial checkout-app schema: delivery options and payment selections.

    Auto-generated by Django; change the schema through new migrations
    rather than by editing this file.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DeliveryOptions',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('delivery_name', models.CharField(help_text='Required', max_length=255, verbose_name='delivery_name')),
                ('delivery_price', models.DecimalField(decimal_places=2, error_messages={'name': {'max_length': 'The price must be between 0 and 999.99.'}}, help_text='Maximum 999.99', max_digits=5, verbose_name='delivery price')),
                ('delivery_method', models.CharField(choices=[('IS', 'In Store'), ('HD', 'Home Delivery')], help_text='Required', max_length=255, verbose_name='delivery_method')),
                ('delivery_timeframe', models.CharField(help_text='Required', max_length=255, verbose_name='delivery timeframe')),
                ('delivery_window', models.CharField(help_text='Required', max_length=255, verbose_name='delivery window')),
                ('order', models.IntegerField(default=0, help_text='Required', verbose_name='list order')),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
                'verbose_name': 'Delivery Option',
                'verbose_name_plural': 'Delivery Options',
            },
        ),
        migrations.CreateModel(
            name='PaymentSelections',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Required', max_length=255, verbose_name='name')),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
                'verbose_name': 'Payment Selection',
                'verbose_name_plural': 'Payment Selections',
            },
        ),
    ]
| 47.795455 | 231 | 0.605801 |
2c953a432671352ce6662ba1deab1221c683e359 | 1,291 | py | Python | garnahata_site/catalog/management/commands/reindex.py | dchaplinsky/garnahata.in.ua | 02cd9772cdc4a99a24525b60ba1040b7343d76f6 | [
"MIT"
] | 9 | 2015-07-04T22:54:02.000Z | 2016-10-27T05:05:03.000Z | garnahata_site/catalog/management/commands/reindex.py | dchaplinsky/garnahata.in.ua | 02cd9772cdc4a99a24525b60ba1040b7343d76f6 | [
"MIT"
] | 12 | 2019-12-04T21:33:53.000Z | 2022-02-10T09:20:07.000Z | garnahata_site/catalog/management/commands/reindex.py | dchaplinsky/garnahata.in.ua | 02cd9772cdc4a99a24525b60ba1040b7343d76f6 | [
"MIT"
] | 4 | 2015-08-04T23:52:46.000Z | 2019-02-07T11:16:16.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from elasticsearch_dsl import Index
from elasticsearch_dsl.connections import connections
from catalog.models import Address, Ownership
from catalog.elastic_models import (
Address as ElasticAddress,
Ownership as ElasticOwnership,
ownership_idx
)
class Command(BaseCommand):
    """Drop and rebuild the Elasticsearch indices from the relational DB."""

    def handle(self, *args, **options):
        # Recreate the address index from scratch (ignore 404 if absent).
        Index(ElasticAddress._doc_type.index).delete(ignore=404)
        ElasticAddress.init()
        es = connections.get_connection('default')
        # NOTE(review): replicas disabled -- presumably a single-node
        # cluster; the raised result window allows deep pagination.
        es.indices.put_settings(
            index=ElasticAddress._doc_type.index,
            body={
                "number_of_replicas": 0,
                'index.max_result_window': 50000
            }
        )
        Address.objects.reindex()
        self.stdout.write(
            'Loaded {} addresses to persistence storage'.format(
                Address.objects.count()))
        # Ownerships also get a fresh index before bulk reindexing;
        # select_related avoids one query per ownership row.
        ownership_idx.delete(ignore=404)
        ownership_idx.create()
        ElasticOwnership.init()
        Ownership.objects.select_related("prop__address").reindex()
        self.stdout.write(
            'Loaded {} ownerships to persistence storage'.format(
                Ownership.objects.count()))
| 29.340909 | 67 | 0.656081 |
14010fc2f89a891004d18ba047f6319b7a6a3c68 | 833 | py | Python | oscar/apps/wishlists/forms.py | Idematica/django-oscar | 242a0654210d63ba75f798788916c8b2f7abb7fb | [
"BSD-3-Clause"
] | 1 | 2015-08-02T05:36:11.000Z | 2015-08-02T05:36:11.000Z | oscar/apps/wishlists/forms.py | elliotthill/django-oscar | 5a71a1f896f2c14f8ed3e68535a36b26118a65c5 | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/wishlists/forms.py | elliotthill/django-oscar | 5a71a1f896f2c14f8ed3e68535a36b26118a65c5 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django import forms
from django.db.models import get_model
from django.forms.models import inlineformset_factory
WishList = get_model('wishlists', 'WishList')
Line = get_model('wishlists', 'Line')
class WishListForm(forms.ModelForm):
    """Form for naming a wish list, bound to its owner at construction."""

    class Meta:
        model = WishList
        fields = ('name', )

    def __init__(self, user, *args, **kwargs):
        super(WishListForm, self).__init__(*args, **kwargs)
        # Attach the owner up front so saving the form needs no extra step.
        self.instance.owner = user
class WishListLineForm(forms.ModelForm):
    """Wish-list line form that renders the quantity input with a narrow widget."""

    def __init__(self, *args, **kwargs):
        super(WishListLineForm, self).__init__(*args, **kwargs)
        quantity_field = self.fields['quantity']
        quantity_field.widget.attrs['class'] = 'input-mini'
# Formset for editing the quantities of all lines in a wish list at once;
# lines cannot be removed through this formset (can_delete=False) and no
# blank extra rows are rendered (extra=0).
LineFormset = inlineformset_factory(
    WishList, Line, fields=('quantity', ), form=WishListLineForm,
    extra=0, can_delete=False)
| 26.870968 | 68 | 0.683073 |
eed51719897eaf231351dce329657063f6e33f8b | 410 | py | Python | chatbot_response.py | 106research/research | beee90909f29343ff88ba2e49779c0fbc394d560 | [
"Apache-2.0"
] | 1 | 2018-11-08T09:47:59.000Z | 2018-11-08T09:47:59.000Z | chatbot_response.py | 106research/research | beee90909f29343ff88ba2e49779c0fbc394d560 | [
"Apache-2.0"
] | null | null | null | chatbot_response.py | 106research/research | beee90909f29343ff88ba2e49779c0fbc394d560 | [
"Apache-2.0"
] | null | null | null | #from chatbot import chatbot
def Chat_with_Bot(sentence_in, project):
    """Return the chatbot's reply to ``sentence_in``.

    ``project`` is an already-initialised chatbot instance exposing a
    ``daemonPredict(sentence=...)`` method.
    """
    return project.daemonPredict(sentence=sentence_in)
if __name__ == "__main__":
    # NOTE(review): Chat_with_Bot requires (sentence_in, project); this bare
    # call raises TypeError as written -- construct a chatbot instance and
    # pass a sentence before using this as a smoke test.
    print(Chat_with_Bot())
178b6ced6ede424167938396f1e886c379692a42 | 1,741 | py | Python | gravatar/templatetags/gravatar_tags.py | pureYun/django-gravatar | 21363be4f5b44b0f5da2e52c8e22934546390573 | [
"BSD-3-Clause"
] | 55 | 2016-04-05T15:42:21.000Z | 2018-07-19T07:13:09.000Z | gravatar/templatetags/gravatar_tags.py | pureYun/django-gravatar | 21363be4f5b44b0f5da2e52c8e22934546390573 | [
"BSD-3-Clause"
] | 10 | 2019-12-26T17:31:31.000Z | 2022-03-21T22:17:33.000Z | gravatar/templatetags/gravatar_tags.py | pureYun/django-gravatar | 21363be4f5b44b0f5da2e52c8e22934546390573 | [
"BSD-3-Clause"
] | 18 | 2016-04-05T15:40:13.000Z | 2018-03-15T23:50:27.000Z | from django import template
from django.utils.html import escape
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.hashcompat import md5_constructor
import urllib
# Base Gravatar URL and fallback image, both overridable via Django settings.
GRAVATAR_URL_PREFIX = getattr(settings, "GRAVATAR_URL_PREFIX", "http://www.gravatar.com/")
GRAVATAR_DEFAULT_IMAGE = getattr(settings, "GRAVATAR_DEFAULT_IMAGE", "")
# Template tag registry for this module (tags registered at the bottom).
register = template.Library()
def get_user(user):
    """Coerce ``user`` to a User instance; accepts a username string.

    Raises a generic Exception when no matching user exists.
    """
    if not isinstance(user, User):
        try:
            user = User.objects.get(username=user)
        except User.DoesNotExist:
            # TODO: make better? smarter? strong? maybe give it wheaties?
            raise Exception, "Bad user for gravatar."
    return user
def gravatar_for_email(email, size=80):
    """Return the HTML-escaped Gravatar URL for ``email`` at ``size`` px."""
    # NOTE(review): Gravatar expects the email lowercased and stripped before
    # hashing -- confirm callers normalise, otherwise hashes may not match.
    url = "%savatar/%s/?" % (GRAVATAR_URL_PREFIX, md5_constructor(email).hexdigest())
    url += urllib.urlencode({"s": str(size), "default": GRAVATAR_DEFAULT_IMAGE})
    return escape(url)
def gravatar_for_user(user, size=80):
    """Return the Gravatar URL for a User instance or username."""
    user = get_user(user)
    return gravatar_for_email(user.email, size)
def gravatar_img_for_email(email, size=80):
    """Return an ``<img>`` tag with the Gravatar for ``email`` (no alt text)."""
    url = gravatar_for_email(email, size)
    return """<img src="%s" height="%s" width="%s"/>""" % (escape(url), size, size)
def gravatar_img_for_user(user, size=80):
    """Return an ``<img>`` tag (with alt text) for the user's Gravatar.

    ``user`` may be a User instance or a username string.
    """
    user = get_user(user)
    # Bug fix: forward ``size`` so the requested image matches the rendered
    # width/height (previously the URL always used the default size of 80).
    url = gravatar_for_user(user, size)
    return """<img src="%s" alt="Avatar for %s" height="%s" width="%s"/>""" % (escape(url), user.username, size, size)
def gravatar(user, size=80):
    """Deprecated alias of ``gravatar_img_for_user`` kept for old templates."""
    # backward compatibility
    return gravatar_img_for_user(user, size)
# Expose every helper as a Django template tag,
# e.g. {% gravatar_img_for_user user 64 %}.
register.simple_tag(gravatar)
register.simple_tag(gravatar_for_user)
register.simple_tag(gravatar_for_email)
register.simple_tag(gravatar_img_for_user)
register.simple_tag(gravatar_img_for_email)
| 34.82 | 118 | 0.723722 |
a2f7439129b63abeda8d4f1a9540a1a39eb774b3 | 16,390 | py | Python | backend/database/wrapper/field_wrapper.py | binury/DistributedReplays | c09d7e41b9c9e57586beb03f26fa4eee59aecf15 | [
"Apache-2.0"
] | null | null | null | backend/database/wrapper/field_wrapper.py | binury/DistributedReplays | c09d7e41b9c9e57586beb03f26fa4eee59aecf15 | [
"Apache-2.0"
] | null | null | null | backend/database/wrapper/field_wrapper.py | binury/DistributedReplays | c09d7e41b9c9e57586beb03f26fa4eee59aecf15 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, List
from backend.database.utils.dynamic_field_manager import DynamicFieldResult
class FieldExplanation:
    """Describes how a stat field is presented: display names, a plain-text
    explanation, and optional LaTeX/math and provenance details."""

    def __init__(self, field_name: str, simple_explanation: str,
                 field_rename: str = None, math_explanation: str = None, file_creation: str = None,
                 short_name: str = None):
        self.field_name = field_name
        self.simple_explanation = simple_explanation
        self.field_rename = field_rename
        self.math_explanation = math_explanation
        self.file_creation = file_creation
        self.short_name = short_name
class QueryFieldWrapper:
    """Couples a database query with the dynamic field it populates and the
    presentation metadata (explanation, percent/boolean/cumulative flags)."""

    def __init__(self, query: any, dynamic_field: DynamicFieldResult,
                 explanation: FieldExplanation = None, is_percent=False, is_boolean=False, is_cumulative=False):
        self.query = query
        self.dynamic_field = dynamic_field
        self.explanation = explanation
        self.is_percent = is_percent
        self.is_boolean = is_boolean
        self.is_cumulative = is_cumulative
        # Filled in later by the stats pipeline; None until then.
        self.is_averaged = None

    def get_query_key(self) -> str:
        """Return the raw field name used to key this query's results."""
        return self.dynamic_field.field_name

    def get_field_name(self) -> str:
        """Return the display name: the explanation's rename when present,
        otherwise the field name with underscores turned into spaces."""
        explanation = self.explanation
        if explanation is not None and explanation.field_rename is not None:
            return explanation.field_rename
        return self.dynamic_field.field_name.replace('_', ' ')
def get_explanations(dynamic_field_list) -> (List[FieldExplanation], Dict[str, FieldExplanation]):
    """Build the explanation objects for every stat field.

    Args:
        dynamic_field_list: Iterable of dynamic field results; any field
            without a hand-written explanation below receives a stub whose
            explanation text is just the field name.

    Returns:
        Tuple ``(field_list, explanation_map)``: the list of all
        FieldExplanation objects and a dict keyed by field name.
        (Annotation order corrected to match the actual return order.)
    """
    field_list = [
        FieldExplanation('goals',
                         'Total number of goals scored by the player obtained from the match data.'),
        FieldExplanation('saves',
                         'Total number of saves by the player obtained from the match data.'),
        FieldExplanation('score',
                         'In-game player score for the match obtained from the match data.'),
        FieldExplanation('assists',
                         'Total number of passes that led to goals obtained from the match data.'),
        FieldExplanation('shots',
                         'Total number of hits towards the enemy goal obtained from the match data.'),
        FieldExplanation('total_passes',
                         'Total hits followed by a teammate hit.', field_rename='passes'),
        FieldExplanation('total_hits',
                         'Total number of hits (using hit detection).',
                         field_rename='hits'),
        FieldExplanation('total_goals',
                         'Total number of goals used for hit analysis.'),
        FieldExplanation('total_saves',
                         'Total number of saves used for hit analysis.'),
        FieldExplanation('total_shots',
                         'Total number of shots used for hit analysis. Shots are defined as any hit '
                         'where the ball will be inside the goal within the next 5 seconds using ball '
                         'prediction under the assumption that no other player will touch it.'),
        FieldExplanation('total_dribble_conts',
                         'Total amount of dribble continuations or consecutive touches with the ball.'),
        FieldExplanation('total_aerials',
                         'Number of hits > than the height of the goal.',
                         field_rename='aerials'),
        FieldExplanation('total_dribbles',
                         'Number of dribbles.',
                         field_rename='dribbles'),
        FieldExplanation('useful/hits',
                         'Number of shots/passes/saves out of total hits.',
                         math_explanation='\\frac{100 \\times (\\textrm{shots} + \\textrm{passes} + \\textrm{saves} + \\textrm{goals})}{\\textrm{total hits} - \\textrm{total dribble hits}}'),
        FieldExplanation('shots/hit',
                         'Number of shots per hit.',
                         math_explanation='\\frac{100 \\times \\textrm{shots}}{\\textrm{total hits} - \\textrm{total dribble hits}}'),
        FieldExplanation('assists/hit',
                         'Number of assists per hit.',
                         math_explanation='\\frac{100 \\times \\textrm{assists}}{\\textrm{total hits} - \\textrm{total dribble hits}}'),
        FieldExplanation('passes/hit',
                         'Number of passes per hit.',
                         math_explanation='\\frac{100 \\times \\textrm{passes}}{\\textrm{total hits} - \\textrm{total dribble hits}}'),
        # turnovers
        FieldExplanation('turnovers',
                         'Number of lost possessions to the other team. Defined as the other team hitting the ball '
                         'twice in a row after the player\'s hit.'),
        FieldExplanation('turnovers_on_my_half',
                         'Turnovers that occur on the defending half.'),
        FieldExplanation('turnovers_on_their_half',
                         'Turnovers that occur on the offensive half.'),
        FieldExplanation('won_turnovers',
                         'Number of first hits that gained possession back from the other team.',
                         field_rename='takeaways'),
        FieldExplanation('possession_time',
                         'Total time this player had the ball. '
                         'Possession starts after the ball is hit twice. '
                         'This continues until another hits the ball or play is stopped.',
                         field_rename='possession time',
                         short_name='possession'),
        FieldExplanation('turnover_efficiency',
                         'Percentage of hits that were not turnovers.',
                         short_name='turnover eff'),
        # averages
        FieldExplanation('average_speed',
                         'The average speed of your car during the entire game.', field_rename='speed'),
        FieldExplanation('average_hit_distance',
                         'Average distance the ball went after being hit, before being touched by another player.',
                         math_explanation='\\frac{\\textrm{total hit distance}}{\\textrm{total hits}}',
                         short_name='avg hit dist'),
        FieldExplanation('average_distance_from_center',
                         'Average distance from the team\'s positional center.'),
        # boost
        FieldExplanation('boost_usage',
                         'Total boost used during the game. Accurate within 3% in the worst case.'),
        FieldExplanation('num_small_boosts',
                         'The number of small boost pads collected.'),
        FieldExplanation('num_large_boosts',
                         'The number of large boost pads collected.'),
        FieldExplanation('num_stolen_boosts',
                         'The number of large pads collected on the enemy\'s half.'),
        FieldExplanation('wasted_collection',
                         'The amount of boost collected that goes beyond 100%. '
                         'Ex: if you were at 95% and collect a full 100 boost pad you just wasted 95 collected boost.'),
        FieldExplanation('wasted_usage',
                         'The amount of boost used when supersonic.'),
        FieldExplanation('time_full_boost',
                         'Total time in the game with 100 boost.'),
        FieldExplanation('time_low_boost',
                         'Total time in the game with <25 boost.'),
        FieldExplanation('time_no_boost',
                         'Total time in the game with 0 boost.'),
        # tendencies
        FieldExplanation('time_on_ground',
                         'Total time spent on the ground.'),
        FieldExplanation('time_low_in_air',
                         'Total time spent above ground but below the max height of '
                         'double jumping (roughly goal height).'),
        FieldExplanation('time_high_in_air',
                         'Total time spent above the max height of '
                         'double jumping (roughly goal height).'),
        FieldExplanation('time_in_defending_half',
                         'Total time the player is in the defending half.',
                         short_name='def 1/2'),
        FieldExplanation('time_in_attacking_half',
                         'Total time the player is in the offensive half.',
                         short_name='att 1/2'),
        FieldExplanation('time_in_defending_third',
                         'Total time the player is in the defending third of the field.',
                         short_name='def 1/3'),
        FieldExplanation('time_in_neutral_third',
                         'Total time the player is in the midfield.',
                         short_name='mid 1/3'),
        FieldExplanation('time_in_attacking_third',
                         'Total time the player is in the offensive third of the field.',
                         short_name='att 1/3'),
        FieldExplanation('time_behind_ball',
                         '(< ball) Time the player is between the ball and their own goal.',
                         short_name='< ball'),
        FieldExplanation('time_in_front_ball',
                         '(> ball) Time the player is between the ball and the opponents\' goal.',
                         short_name='> ball'),
        FieldExplanation('time_closest_to_ball',
                         'Total time being the closest player to the ball.'),
        FieldExplanation('time_furthest_from_ball',
                         'Total time being the further player from the ball.'),
        FieldExplanation('time_close_to_ball',
                         'Total time being near the ball (equivalent to half the length of a goal).'),
        FieldExplanation('time_closest_to_team_center',
                         'Total time being the player closest to the positional center of their team.'),
        FieldExplanation('time_furthest_from_team_center',
                         'Total time being the player furthest from the positional center of their team.'),
        FieldExplanation('time_in_corner',
                         'Total time spent in the corners of the field.'),
        FieldExplanation('time_near_wall',
                         'Total time spent near the walls of the field.'),
        # distance
        FieldExplanation('ball_hit_forward',
                         'Summed distance of hits towards opponent goal.'),
        FieldExplanation('ball_hit_backward',
                         'Summed distance of hits towards own goal.'),
        # team positioning
        FieldExplanation('time_in_front_of_center_of_mass',
                         'Total time the player is in front of the relative center of the team\'s positioning.'),
        FieldExplanation('time_behind_center_of_mass',
                         'Total time the player is behind the relative center of the team\'s positioning.'),
        FieldExplanation('time_between_players',
                         'Total time the player positioned between teammates (when the player is not the most forward or most backward player).',
                         math_explanation='\\textrm{time in game}-(\\textrm{time most back player}+\\textrm{time most forward player})'),
        FieldExplanation('time_most_back_player',
                         'Total time the player is positioned as the most back player of the team.'),
        FieldExplanation('time_most_forward_player',
                         'Total time the player is positioned as the most forward player of the team.'),
        # speed
        FieldExplanation('time_at_boost_speed',
                         'Total time driving at any speed higher than maximum speed obtained by just pure throttle. '
                         'Achieved via dodges or boosting.'),
        FieldExplanation('time_at_slow_speed',
                         'Total time at half the maximum car speed obtained by just pure throttle.'),
        FieldExplanation('time_at_super_sonic',
                         'Total time at true max car speed of 2300 uu/s (83 kph or 51 mph). '
                         'Achieved by tapping boost just once after the supersonic trail is showing.'),
        FieldExplanation('boost_ratio',
                         'Ratio of small boost pad pickups to large pickups.',
                         math_explanation='\\frac{\\textrm{num small boosts}}{\\textrm{num large boosts}}'),
        FieldExplanation('collection_boost_efficiency',
                         'How efficient the player is at collecting boost.',
                         math_explanation='1 - '
                                          '\\frac{\\textrm{wasted collected boost}}{\\textrm{total boost collected}}',
                         short_name='bst clct eff'),
        FieldExplanation('used_boost_efficiency',
                         'How efficient the player is at using boost.',
                         math_explanation='1 - \\frac{\\textrm{wasted used boost}}{\\textrm{total boost usage}}',
                         short_name='bst use eff'),
        FieldExplanation('total_boost_efficiency',
                         'How efficient the player is at using and collecting boost.',
                         math_explanation='1 - \\frac{\\textrm{wasted used boost} + '
                                          '\\textrm{wasted collected boost}}{100 \\times \\textrm{num large boosts}'
                                          ' + 12 \\times \\textrm{num small boosts}}',
                         short_name='boost efficiency'),
        FieldExplanation('average_boost_level',
                         'Average amount of boost this player possessed over the entire game.'),
        FieldExplanation('wasted_big',
                         'Amount of wasted boost from big boosts.',
                         math_explanation='100 - \\textrm{amount of boost in tank}'),
        FieldExplanation('wasted_small',
                         'Amount of wasted boost from small boosts.'),
        FieldExplanation('aerial_efficiency',
                         'Ratio of aerials to time in the air.',
                         math_explanation='\\frac{\\textrm{total aerials}}'
                                          '{\\textrm{time high in air + time low in air}}',
                         short_name='aerial eff'),
        FieldExplanation('shot_%',
                         'Ratio of goals to shots.',
                         math_explanation='\\frac{\\textrm{total goals}}{\\textrm{total shots}}'),
        FieldExplanation('rank',
                         'Average rank of all games in all playlists.'),
        FieldExplanation('mmr',
                         'MMR of the player for the given playlist.'),
        #misc stats
        FieldExplanation('first_frame_in_game',
                         'First frame where the player is completely loaded in the game and is able to interact with physics.'),
        FieldExplanation('is_keyboard',
                         'How likely the player is using a keyboard.'),
        FieldExplanation('time_in_game',
                         'Total amount of time spent in the match in seconds. Typically, 300 seconds but can differ if the match went '
                         'into overtime, or the player joined late/left early in unranked matches.'),
        FieldExplanation('num_demos_inflicted',
                         'Total number of demos inflicted on other players.'),
        FieldExplanation('num_demos_taken',
                         'Total number of times the player was demoed.'),
    ]
    # Fill in defaults: the display name falls back to the field name with
    # underscores turned into spaces; the short name falls back to the
    # display name.
    explanation_map = dict()
    for field in field_list:
        if field.field_rename is None:
            field.field_rename = field.field_name.replace('_', ' ')
        if field.short_name is None:
            field.short_name = field.field_rename
        explanation_map[field.field_name] = field
    # Any dynamic field without a curated explanation gets a stub whose
    # explanation text is simply the field name itself.
    for field in dynamic_field_list:
        if field.field_name not in explanation_map:
            rename = field.field_name.replace('_', ' ')
            explanation = FieldExplanation(field.field_name, field.field_name, field_rename=rename)
            field_list.append(explanation)
            explanation_map[explanation.field_name] = explanation
    return field_list, explanation_map
| 57.915194 | 191 | 0.575595 |
159678e834975ecb527eb23ed47ed9fc3c1df572 | 25 | py | Python | Game/Pieces/Major/__init__.py | ritwikd/interom | 0b626351fd742f2a99d0a6d11ba8c1a214aab576 | [
"MIT"
] | null | null | null | Game/Pieces/Major/__init__.py | ritwikd/interom | 0b626351fd742f2a99d0a6d11ba8c1a214aab576 | [
"MIT"
] | 1 | 2021-03-06T22:08:32.000Z | 2021-03-06T22:09:07.000Z | Game/Pieces/Major/__init__.py | ritwikd/interom | 0b626351fd742f2a99d0a6d11ba8c1a214aab576 | [
"MIT"
] | 1 | 2021-03-03T22:48:07.000Z | 2021-03-03T22:48:07.000Z | from . import Queen, Rook | 25 | 25 | 0.76 |
5e9581bf68f3fd96ca45d5c9c23b4823ab8619dc | 2,969 | py | Python | xirl/xirl/evaluators/reward_visualizer.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | xirl/xirl/evaluators/reward_visualizer.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | xirl/xirl/evaluators/reward_visualizer.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reward visualizer."""
from .base import Evaluator
from .base import EvaluatorOutput
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import cdist
class RewardVisualizer(Evaluator):
  """Distance to goal state visualizer."""

  def __init__(self, distance, num_plots):
    """Constructor.

    Args:
      distance: The distance metric to use when calculating nearest-neighbours.
      num_plots: The number of reward plots to display.

    Raises:
      ValueError: If the distance metric is invalid.
    """
    super().__init__(inter_class=False)

    if distance not in ["sqeuclidean", "cosine"]:
      raise ValueError(
          "{} is not a supported distance metric.".format(distance))

    # For plotting, we don't want to display squared euclidean distances so we
    # will override to `euclidean` if it was selected.
    if distance == "sqeuclidean":
      distance = "euclidean"

    self.distance = distance
    self.num_plots = num_plots

  def _gen_reward_plot(self, rewards):
    """Create a pyplot plot and save to buffer.

    Returns the rendered figure as an RGB numpy array (alpha dropped).
    """
    fig, axes = plt.subplots(1, len(rewards), figsize=(6.4 * len(rewards), 4.8))
    # subplots returns a bare Axes (not a list) when there is only one plot.
    if len(rewards) == 1:
      axes = [axes]
    for i, rew in enumerate(rewards):
      axes[i].plot(rew)
    fig.text(0.5, 0.04, "Timestep", ha="center")
    fig.text(0.04, 0.5, "Reward", va="center", rotation="vertical")
    fig.canvas.draw()
    img_arr = np.array(fig.canvas.renderer.buffer_rgba())[:, :, :3]
    plt.close()
    return img_arr

  def _compute_goal_emb(self, embs):
    """Compute the mean of all last frame embeddings."""
    goal_emb = [emb[-1, :] for emb in embs]
    goal_emb = np.stack(goal_emb, axis=0)
    goal_emb = np.mean(goal_emb, axis=0, keepdims=True)
    return goal_emb

  def evaluate(self, outs):
    """Plot per-trajectory rewards (negative distance to the mean goal
    embedding) for a random subset of trajectories."""
    embs = [o.embs for o in outs]
    goal_emb = self._compute_goal_emb(embs)

    # Make sure we sample only as many as are available.
    num_plots = min(len(embs), self.num_plots)
    rand_idxs = np.random.choice(
        np.arange(len(embs)), size=num_plots, replace=False)

    # Compute rewards as distances to the goal embedding.
    rewards = []
    for idx in rand_idxs:
      emb = embs[idx]
      dists = cdist(emb, goal_emb, self.distance)
      rewards.append(-dists)

    image = self._gen_reward_plot(rewards)
    return EvaluatorOutput(image=image)
| 32.626374 | 80 | 0.687437 |
421f4cd5a3ce6d269c4fe1c3d9e091319f4e1728 | 1,957 | py | Python | lisa/tools/stat.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | [
"MIT"
] | 48 | 2018-05-19T17:46:34.000Z | 2020-09-28T21:09:06.000Z | lisa/tools/stat.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | [
"MIT"
] | 1,261 | 2018-05-17T04:32:22.000Z | 2020-11-23T17:29:13.000Z | lisa/tools/stat.py | anirudhrb/lisa | fe009802577c81e45ca2ff5a34d353878caa725d | [
"MIT"
] | 133 | 2018-05-15T23:12:14.000Z | 2020-11-13T10:37:49.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import cast
from lisa.executable import Tool
from lisa.operating_system import Posix
class Stat(Tool):
    """Wrapper around the coreutils ``stat`` command for file and
    filesystem size queries."""

    @property
    def command(self) -> str:
        return "stat"

    @property
    def can_install(self) -> bool:
        return True

    def install(self) -> bool:
        posix_os: Posix = cast(Posix, self.node.os)
        # `stat` ships with coreutils on POSIX systems.
        posix_os.install_packages("coreutils")
        return self._check_exists()

    def _query_int(self, parameters: str, failure_message: str) -> int:
        # Shared pattern for every query: run `stat`, require success, and
        # parse the single-line output as an integer.
        cmd_result = self.run(
            parameters,
            force_run=True,
            expected_exit_code=0,
            expected_exit_code_failure_message=failure_message,
        )
        return int(cmd_result.stdout)

    def get_fs_block_size(self, file: str) -> int:
        """Return the fundamental block size of the filesystem holding ``file``."""
        return self._query_int(
            f"-f --format='%S' {file}",
            f"fail to get block size of {file} in file system",
        )

    def get_fs_available_size(self, file: str) -> int:
        """Return the number of free blocks available to non-superusers."""
        return self._query_int(
            f"-f --format='%a' {file}",
            f"fail to get available size of {file} in filesystem",
        )

    def get_total_size(self, file: str) -> int:
        """Return the total size of ``file`` in bytes."""
        return self._query_int(
            f"{file} --format='%s'",
            f"fail to get total size of {file}",
        )

    def get_fs_free_blocks(self, file: str) -> int:
        """Return the number of free blocks in the filesystem holding ``file``."""
        return self._query_int(
            f"-f --format='%f' {file}",
            f"fail to get free blocks of {file} in file system",
        )
| 29.651515 | 85 | 0.571794 |
f27090412699d265e919f8d9a96fdb43b6c382de | 2,594 | py | Python | ml/machine_learning.py | ShiNik/web_ml | d5586f09f952be455316a6f8ff464e4b67b8fcc1 | [
"MIT"
] | null | null | null | ml/machine_learning.py | ShiNik/web_ml | d5586f09f952be455316a6f8ff464e4b67b8fcc1 | [
"MIT"
] | null | null | null | ml/machine_learning.py | ShiNik/web_ml | d5586f09f952be455316a6f8ff464e4b67b8fcc1 | [
"MIT"
] | null | null | null | # ML Packages
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import pandas as pd
import numpy as np
# Function to make predictions
def prediction(X_test, model):
    """Return ``model``'s predictions for the test features ``X_test``."""
    return model.predict(X_test)
# Function to calculate accuracy
def cal_accuracy(y_test, y_pred):
    """Return (accuracy as a percentage string, classification report)."""
    accuracy_pct = accuracy_score(y_test, y_pred) * 100
    accuracy = str(round(accuracy_pct, 4))
    report = classification_report(y_test, y_pred)
    return accuracy, report
# Function to split the dataset
def splitdataset(data):
    """Split ``data`` into features/target and a 70/30 train/test partition.

    The last column is the target; every other column is a feature.
    The split is deterministic (fixed random_state).
    """
    features = data.iloc[:, 0:-1]
    target = data[data.columns[-1]]
    x_train, x_test, y_train, y_test = train_test_split(
        features, target, test_size=0.3, random_state=100)
    return features, target, x_train, x_test, y_train, y_test
def perform_analysis(df):
    """Train several classifiers on ``df`` and report each model's accuracy.

    The last column of ``df`` is the target; every other column is a
    feature (see ``splitdataset``).

    Args:
        df: pandas DataFrame of features plus a trailing target column.

    Returns:
        Dict mapping model name to {"name", "results", "report"}, where
        "results" is a human-readable accuracy summary and "report" is the
        sklearn classification report.
    """
    models = [
        ('Logistic Regression', LogisticRegression()),
        ('Linear Discriminant Analysis', LinearDiscriminantAnalysis()),
        ('K Neighbors Classifier', KNeighborsClassifier()),
        ('Decision Tree Classifier', DecisionTreeClassifier()),
        ('Gaussian NB', GaussianNB()),
        ('SVM', SVC()),
    ]

    # The split is deterministic (fixed random_state inside splitdataset), so
    # split once instead of re-splitting identically for every model.
    _, _, X_train, X_test, y_train, y_test = splitdataset(df)

    all_models_results = {}
    for name, model in models:
        model.fit(X_train, y_train)
        y_pred = prediction(X_test, model)
        accuracy, report = cal_accuracy(y_test, y_pred)
        msg = "ML Algorithm: %s | Accuracy: %s" % (name, accuracy)
        all_models_results[name] = {"name": name, "results": msg, "report": report}
    return all_models_results
| 31.253012 | 81 | 0.720894 |
a6aed5c4b3c48c2b7c21dfade3ae56408a71cd55 | 925 | py | Python | src/emcee/moves/stretch.py | hamogu/emcee | 1d19dd94ca11b298b319d470b4d818b61083e184 | [
"MIT"
] | null | null | null | src/emcee/moves/stretch.py | hamogu/emcee | 1d19dd94ca11b298b319d470b4d818b61083e184 | [
"MIT"
] | null | null | null | src/emcee/moves/stretch.py | hamogu/emcee | 1d19dd94ca11b298b319d470b4d818b61083e184 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from .red_blue import RedBlueMove
__all__ = ["StretchMove"]
class StretchMove(RedBlueMove):
    """
    A `Goodman & Weare (2010)
    <http://msp.berkeley.edu/camcos/2010/5-1/p04.xhtml>`_ "stretch move" with
    parallelization as described in `Foreman-Mackey et al. (2013)
    <http://arxiv.org/abs/1202.3665>`_.

    :param a: (optional)
        The stretch scale parameter. (default: ``2.0``)

    """

    def __init__(self, a=2.0, **kwargs):
        self.a = a
        super(StretchMove, self).__init__(**kwargs)

    def get_proposal(self, s, c, random):
        """Propose stretch moves for the walkers ``s`` using the
        complementary ensemble ``c``; returns (proposals, log factors)."""
        # Flatten the complementary ensemble into one array of walkers.
        c = np.concatenate(c, axis=0)
        Ns, Nc = len(s), len(c)
        ndim = s.shape[1]
        # Inverse-CDF sample of the stretch factor z: this maps u~U(0,1)
        # onto [1/a, a] with density proportional to 1/sqrt(z).
        zz = ((self.a - 1.0) * random.rand(Ns) + 1) ** 2.0 / self.a
        # Log proposal weight (ndim - 1) * log(z) required for detailed
        # balance of the stretch move.
        factors = (ndim - 1.0) * np.log(zz)
        # For each walker pick a random partner X_j from the complementary
        # set and propose Y = X_j + z * (X_k - X_j).
        rint = random.randint(Nc, size=(Ns,))
        return c[rint] - (c[rint] - s) * zz[:, None], factors
| 27.205882 | 77 | 0.570811 |
a1e86d1af18e8b26fa46628fb911eee17c492738 | 298 | py | Python | src/user/serializers.py | shunsukeaihara/my-django-scaffold | d3b6866c3fb89f9e6af224b657c31ad149ea9aab | [
"MIT"
] | 1 | 2019-01-29T08:15:31.000Z | 2019-01-29T08:15:31.000Z | src/user/serializers.py | shunsukeaihara/my-django-scaffold | d3b6866c3fb89f9e6af224b657c31ad149ea9aab | [
"MIT"
] | null | null | null | src/user/serializers.py | shunsukeaihara/my-django-scaffold | d3b6866c3fb89f9e6af224b657c31ad149ea9aab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializer exposing ``User`` model fields over the REST API.

    Credential and permission-related columns are excluded from the
    payload, and ``email`` is read-only so clients cannot modify it
    through this serializer.
    """

    class Meta:
        model = User
        # Never expose credentials or authorization flags to API clients.
        exclude = ["password", "is_active", "is_staff", "is_superuser", "groups"]
        # Email is write-protected here — presumably the login identifier;
        # confirm against the User model before relying on that.
        read_only_fields = ["email"]
| 24.833333 | 81 | 0.667785 |
7eed930ef9fb2aa09ea837c9e273e1adcf5b23b7 | 244 | py | Python | docs/conf.py | gcmt/i3pie | a6df121dc0f101202ff0071fd30b6e770f150731 | [
"MIT"
] | null | null | null | docs/conf.py | gcmt/i3pie | a6df121dc0f101202ff0071fd30b6e770f150731 | [
"MIT"
] | null | null | null | docs/conf.py | gcmt/i3pie | a6df121dc0f101202ff0071fd30b6e770f150731 | [
"MIT"
] | null | null | null | import os
import sys
sys.path.insert(0, os.path.abspath('..'))
project = 'i3pie'
copyright = '2019, Giacomo Comitti'
author = 'Giacomo Comitti'
release = '0.1'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
html_theme = 'classic'
| 20.333333 | 58 | 0.704918 |
bbda947710df633ed20c7624b53758a90879b034 | 1,527 | py | Python | tests/unit/modules/test_pecl.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 19 | 2016-01-29T14:37:52.000Z | 2022-03-30T18:08:01.000Z | tests/unit/modules/test_pecl.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 223 | 2016-03-02T16:39:41.000Z | 2022-03-03T12:26:35.000Z | tests/unit/modules/test_pecl.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 64 | 2016-02-04T19:45:26.000Z | 2021-12-15T02:02:31.000Z | # -*- coding: utf-8 -*-
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.pecl as pecl
from tests.support.mock import patch
# Import Salt Testing Libs
from tests.support.unit import TestCase
class PeclTestCase(TestCase):
"""
Test cases for salt.modules.pecl
"""
def test_install(self):
"""
Test to installs one or several pecl extensions.
"""
with patch.object(pecl, "_pecl", return_value="A"):
self.assertEqual(pecl.install("fuse", force=True), "A")
self.assertFalse(pecl.install("fuse"))
with patch.object(pecl, "list_", return_value={"A": ["A", "B"]}):
self.assertTrue(pecl.install(["A", "B"]))
def test_uninstall(self):
"""
Test to uninstall one or several pecl extensions.
"""
with patch.object(pecl, "_pecl", return_value="A"):
self.assertEqual(pecl.uninstall("fuse"), "A")
def test_update(self):
"""
Test to update one or several pecl extensions.
"""
with patch.object(pecl, "_pecl", return_value="A"):
self.assertEqual(pecl.update("fuse"), "A")
def test_list_(self):
"""
Test to list installed pecl extensions.
"""
with patch.object(pecl, "_pecl", return_value="A\nB"):
self.assertDictEqual(pecl.list_("channel"), {})
| 28.277778 | 77 | 0.603798 |
5bd70a032e4424bf007ac4030ab865e18fa9e892 | 982 | py | Python | clients/keto/python/test/test_health_status.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | clients/keto/python/test/test_health_status.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | clients/keto/python/test/test_health_status.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
ORY Keto
A cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
The version of the OpenAPI document: v0.0.0-alpha.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import ory_keto_client
from ory_keto_client.models.health_status import HealthStatus # noqa: E501
from ory_keto_client.rest import ApiException
class TestHealthStatus(unittest.TestCase):
    """HealthStatus unit test stubs (auto-generated by openapi-generator)."""

    # No fixtures needed for this generated model test.
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testHealthStatus(self):
        """Test HealthStatus"""
        # FIXME: construct object with mandatory attributes with example values
        # model = ory_keto_client.models.health_status.HealthStatus()  # noqa: E501
        pass
# Allow running this test module directly with `python test_health_status.py`.
if __name__ == '__main__':
    unittest.main()
| 23.95122 | 161 | 0.702648 |
53e6d1aa6441b6063b3393dff1aa140f3a219e91 | 4,038 | py | Python | pyro/distributions/__init__.py | stjordanis/pyro | a2863febce935021dc5d5903bd89ec2f805294c7 | [
"Apache-2.0"
] | null | null | null | pyro/distributions/__init__.py | stjordanis/pyro | a2863febce935021dc5d5903bd89ec2f805294c7 | [
"Apache-2.0"
] | null | null | null | pyro/distributions/__init__.py | stjordanis/pyro | a2863febce935021dc5d5903bd89ec2f805294c7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import pyro.distributions.torch_patch # noqa F403
from pyro.distributions.avf_mvn import AVFMultivariateNormal
from pyro.distributions.coalescent import CoalescentRateLikelihood, CoalescentTimes, CoalescentTimesWithRate
from pyro.distributions.conditional import (ConditionalDistribution, ConditionalTransform,
ConditionalTransformedDistribution, ConditionalTransformModule)
from pyro.distributions.conjugate import BetaBinomial, DirichletMultinomial, GammaPoisson
from pyro.distributions.delta import Delta
from pyro.distributions.diag_normal_mixture import MixtureOfDiagNormals
from pyro.distributions.diag_normal_mixture_shared_cov import MixtureOfDiagNormalsSharedCovariance
from pyro.distributions.distribution import Distribution
from pyro.distributions.empirical import Empirical
from pyro.distributions.extended import ExtendedBetaBinomial, ExtendedBinomial
from pyro.distributions.folded import FoldedDistribution
from pyro.distributions.gaussian_scale_mixture import GaussianScaleMixture
from pyro.distributions.hmm import DiscreteHMM, GammaGaussianHMM, GaussianHMM, GaussianMRF, IndependentHMM, LinearHMM
from pyro.distributions.inverse_gamma import InverseGamma
from pyro.distributions.lkj import LKJCorrCholesky
from pyro.distributions.mixture import MaskedMixture
from pyro.distributions.multivariate_studentt import MultivariateStudentT
from pyro.distributions.omt_mvn import OMTMultivariateNormal
from pyro.distributions.polya_gamma import TruncatedPolyaGamma
from pyro.distributions.rejector import Rejector
from pyro.distributions.relaxed_straight_through import (RelaxedBernoulliStraightThrough,
RelaxedOneHotCategoricalStraightThrough)
from pyro.distributions.spanning_tree import SpanningTree
from pyro.distributions.stable import Stable
from pyro.distributions.torch import * # noqa F403
from pyro.distributions.torch import __all__ as torch_dists
from pyro.distributions.torch_distribution import MaskedDistribution, TorchDistribution
from pyro.distributions.torch_transform import ComposeTransformModule, TransformModule
from pyro.distributions.unit import Unit
from pyro.distributions.util import enable_validation, is_validation_enabled, validation_enabled
from pyro.distributions.von_mises_3d import VonMises3D
from pyro.distributions.zero_inflated import ZeroInflatedDistribution, ZeroInflatedNegativeBinomial, ZeroInflatedPoisson
from . import constraints, kl, transforms
# Public API of pyro.distributions. Torch-wrapped distributions imported via
# the star-import above are appended at the end.
__all__ = [
    "AVFMultivariateNormal",
    "BetaBinomial",
    "CoalescentRateLikelihood",
    "CoalescentTimes",
    "CoalescentTimesWithRate",
    "ComposeTransformModule",
    "ConditionalDistribution",
    "ConditionalTransform",
    "ConditionalTransformModule",
    "ConditionalTransformedDistribution",
    "Delta",
    "DirichletMultinomial",
    "DiscreteHMM",
    "Distribution",
    "Empirical",
    "ExtendedBetaBinomial",
    "ExtendedBinomial",
    "FoldedDistribution",
    "GammaGaussianHMM",
    "GammaPoisson",
    "GaussianHMM",
    "GaussianMRF",
    "GaussianScaleMixture",
    "IndependentHMM",
    "InverseGamma",
    "LinearHMM",
    "LKJCorrCholesky",
    "MaskedDistribution",
    "MaskedMixture",
    "MixtureOfDiagNormals",
    "MixtureOfDiagNormalsSharedCovariance",
    "MultivariateStudentT",
    "OMTMultivariateNormal",
    "Rejector",
    "RelaxedBernoulliStraightThrough",
    "RelaxedOneHotCategoricalStraightThrough",
    "SpanningTree",
    "Stable",
    "TorchDistribution",
    "TransformModule",
    "TruncatedPolyaGamma",
    "Unit",
    "VonMises3D",
    "ZeroInflatedPoisson",
    "ZeroInflatedNegativeBinomial",
    "ZeroInflatedDistribution",
    "constraints",
    "enable_validation",
    "is_validation_enabled",
    "kl",
    "transforms",
    "validation_enabled",
]

# Import all torch distributions from `pyro.distributions.torch_distribution`
__all__.extend(torch_dists)
# Drop the helper name so it is not exported from this module.
del torch_dists
| 40.787879 | 120 | 0.795939 |
7770d8448b9db7219a715d2701819da1326d9321 | 3,027 | py | Python | wikidata_panglaodb/plotting.py | jvfe/wikidata_panglaodb | a1dd854393c9c81229dcf639d62fb758cf145973 | [
"BSD-2-Clause"
] | 1 | 2020-11-12T21:28:34.000Z | 2020-11-12T21:28:34.000Z | wikidata_panglaodb/plotting.py | jvfe/wikidata_panglaodb | a1dd854393c9c81229dcf639d62fb758cf145973 | [
"BSD-2-Clause"
] | 2 | 2020-09-16T21:09:36.000Z | 2020-12-25T19:02:41.000Z | wikidata_panglaodb/plotting.py | jvfe/wikidata_panglaodb | a1dd854393c9c81229dcf639d62fb758cf145973 | [
"BSD-2-Clause"
] | null | null | null | """Plotting functions"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="whitegrid", palette="muted")
def plot_matched_item_types(reconciled_table, summary_table, data_type, ax):
    """Draw a bar chart of reconciled item types.

    Counts the values of the ``type`` column in *reconciled_table*,
    converts the counts into percentages of the total matches recorded
    in *summary_table* for *data_type*, and plots them on *ax*.

    Args:
        reconciled_table (DataFrame): The table containing the reconciled items.
        summary_table (DataFrame): A summary table created by summarize_histology()
        data_type (str): The data type being analysed, "cells", "organs" or "tissues".
        ax (matplotlib.axes.Axes): A matplotlib figure axis to plot the final figure.

    Returns:
        matplotlib.Figure: A bar plot for the item type counts.
    """
    # Frequency of each reconciled item type; the literal "[]" marks
    # items with no type and is relabelled "None".
    counts = (
        reconciled_table["type"]
        .value_counts()
        .reset_index()
        .replace("[]", "None")
        .rename(columns={"index": "Item type", "type": "# of items"})
    )
    total_matched = summary_table.loc[data_type, "n_item_matches"]
    counts["% of matched items"] = (counts["# of items"] / total_matched) * 100
    bars = sns.barplot(
        x="Item type",
        y="% of matched items",
        data=counts,
        edgecolor="w",
        color="rosybrown",
        dodge=False,
        ax=ax,
    )
    bars.set(xlabel=None)
    # Slant the category labels so long type names stay readable.
    bars.set_xticklabels(bars.get_xticklabels(), rotation=45, horizontalalignment="right")
    return bars
def plot_gene_violin(data, file_to, miniplot=False):
    """Make a violin plot for gene data.

    Plots the distribution of the number of statements for reconciled
    gene data, split by species, and saves the figure to *file_to*.

    Fixes over the previous version: the non-idiomatic ``miniplot == True``
    comparison is replaced with plain truthiness, and the duplicated
    ``fig.savefig`` call in both branches is hoisted to a single call.

    Args:
        data (DataFrame): Gene data with "species" and "statements" columns.
        file_to (str): Path where to save the final image.
        miniplot (bool): Whether to add an inset restricted to genes with
            fewer than 70 statements (useful when outliers squash the scale).
    """
    fig, ax = plt.subplots(figsize=(10, 10))
    sns.violinplot(
        x="species",
        y="statements",
        cut=0,
        data=data,
        palette="Set2",
        split=True,
        scale="count",
        inner="quartiles",
        ax=ax,
    )
    ax.set_xlabel("")
    ax.set_ylabel("# of statements")
    if miniplot:
        # Inset axis zooming in on the bulk of the distribution (< 70).
        ax2 = plt.axes([0.2, 0.6, 0.2, 0.2])
        sns.violinplot(
            x="species",
            y="statements",
            cut=0,
            data=data[data["statements"] < 70],
            palette="Set2",
            split=True,
            scale="count",
            inner="quartiles",
            ax=ax2,
        )
        ax2.set_xticklabels("")
        ax2.set_xlabel("")
        ax2.set_ylabel("")
        ax2.grid(False)
        ax.grid(False)
    # Single save point for both the plain and the inset variant.
    fig.savefig(file_to)
| 28.28972 | 99 | 0.58738 |
e3f4b9f3b8b0dfe27319beed3872cd2ea5d06f99 | 46,441 | py | Python | transformers/modeling_utils.py | ajbouh/transformers | adcdbe95930272abdb305e95b3794672f992d5f8 | [
"Apache-2.0"
] | 1 | 2020-12-15T10:50:04.000Z | 2020-12-15T10:50:04.000Z | transformers/modeling_utils.py | ajbouh/transformers | adcdbe95930272abdb305e95b3794672f992d5f8 | [
"Apache-2.0"
] | null | null | null | transformers/modeling_utils.py | ajbouh/transformers | adcdbe95930272abdb305e95b3794672f992d5f8 | [
"Apache-2.0"
] | 1 | 2020-07-01T01:16:11.000Z | 2020-07-01T01:16:11.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import json
import logging
import os
from io import open
import six
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .configuration_utils import PretrainedConfig
from .file_utils import (TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, DUMMY_INPUTS,
cached_path, hf_bucket_url, is_remote_url)
logger = logging.getLogger(__name__)
try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility: nn.Identity is missing from early torch
    # releases, so define a drop-in replacement here.
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive.
        """
        def __init__(self, *args, **kwargs):
            super(Identity, self).__init__()

        def forward(self, input):
            # Pass the input through unchanged.
            return input
class PreTrainedModel(nn.Module):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
load_tf_weights = lambda model, config, path: None
base_model_prefix = ""
    @property
    def dummy_inputs(self):
        """ Dummy inputs to do a forward pass in the network.

        Returns:
            torch.Tensor with dummy inputs
        """
        # DUMMY_INPUTS comes from file_utils — presumably a small fixed batch
        # of token ids; confirm its shape there before relying on it.
        return {'input_ids': torch.tensor(DUMMY_INPUTS)}
    def __init__(self, config, *inputs, **kwargs):
        """Store *config* on the model; *config* must be a PretrainedConfig instance."""
        super(PreTrainedModel, self).__init__()
        # Fail fast when callers pass a model name/path instead of a config
        # object — the error message points them at from_pretrained().
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        # Save config in model
        self.config = config
    @property
    def base_model(self):
        # The attribute named by ``base_model_prefix`` holds the bare
        # transformer in derived classes; fall back to ``self`` when this
        # instance IS the base model.
        return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self):
""" Get model's input embeddings
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
""" Set model's input embeddings
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
    def get_output_embeddings(self):
        """ Get model's output embeddings
            Return None if the model doesn't have output embeddings
        """
        return None  # Overwrite for models with output embeddings
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        output_embeddings = self.get_output_embeddings()
        # Models without an output (LM head) embedding return None here and
        # need no tying at all.
        if output_embeddings is not None:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """ Tie or clone module weights depending of weither we are using TorchScript or not
        """
        if self.config.torchscript:
            # TorchScript cannot trace shared parameters, so copy instead.
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
        else:
            # Plain PyTorch: share the same Parameter object.
            output_embeddings.weight = input_embeddings.weight

        if hasattr(output_embeddings, 'bias') and output_embeddings.bias is not None:
            # After tying, the bias may be shorter than the (possibly resized)
            # weight matrix; zero-pad it to the new vocab size.
            output_embeddings.bias.data = torch.nn.functional.pad(
                output_embeddings.bias.data,
                (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
                'constant',
                0
            )
        if hasattr(output_embeddings, 'out_features') and hasattr(input_embeddings, 'num_embeddings'):
            # Keep the Linear layer's reported size in sync with the embedding.
            output_embeddings.out_features = input_embeddings.num_embeddings
    def resize_token_embeddings(self, new_num_tokens=None):
        """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
        Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.

        Arguments:

            new_num_tokens: (`optional`) int:
                New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
                If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.

        Return: ``torch.nn.Embeddings``
            Pointer to the input tokens Embeddings Module of the model
        """
        base_model = getattr(self, self.base_model_prefix, self)  # get the base model if needed
        model_embeds = base_model._resize_token_embeddings(new_num_tokens)
        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
        base_model.vocab_size = new_num_tokens

        # Tie weights again if needed (must run AFTER the config update so the
        # output embedding is padded to the new vocab size).
        self.tie_weights()

        return model_embeds
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy word embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
""" Initialize and prunes weights if needed. """
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
    def prune_heads(self, heads_to_prune):
        """ Prunes heads of the base model.

            Arguments:

                heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
                E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
        """
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
        # (so repeated calls accumulate rather than overwrite).
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)
    def save_pretrained(self, save_directory):
        """ Save a model and its configuration file to a directory, so that it
            can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.

            Arguments:
                save_directory: path to an existing directory; receives the
                    config JSON plus a weights file named ``WEIGHTS_NAME``.
        """
        assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"

        # Only save the model itself if we are using distributed training
        # (DataParallel/DDP wrap the real model in a ``.module`` attribute).
        model_to_save = self.module if hasattr(self, 'module') else self

        # Save configuration file
        model_to_save.config.save_pretrained(save_directory)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with ``model.train()``
The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop('config', None)
state_dict = kwargs.pop('state_dict', None)
cache_dir = kwargs.pop('cache_dir', None)
from_tf = kwargs.pop('from_tf', False)
force_download = kwargs.pop('force_download', False)
resume_download = kwargs.pop('resume_download', False)
proxies = kwargs.pop('proxies', None)
output_loading_info = kwargs.pop('output_loading_info', False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path, *model_args,
cache_dir=cache_dir, return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
**kwargs
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError("Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path))
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index")
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(pretrained_model_name_or_path, postfix=WEIGHTS_NAME)
if from_tf:
raise EnvironmentError("Loading a PyTorch model from a TF checkpoint is not supported when using a model identifier name.")
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download,
proxies=proxies, resume_download=resume_download)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file)
else:
msg = "Model name '{}' was not found in model name list ({}). " \
"We assumed '{}' was a path or url to model weight files named one of {} but " \
"couldn't find any such file at this path or url.".format(
pretrained_model_name_or_path,
', '.join(cls.pretrained_model_archive_map.keys()),
archive_file,
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME])
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
except:
raise OSError("Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. ")
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith('.index'):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from transformers import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError as e:
logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")
raise e
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ''
model_to_load = model
if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
start_prefix = cls.base_model_prefix + '.'
if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
model.tie_weights() # make sure word embedding weights are still tied if needed
# Set model in evaluation mode to desactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs}
return model, loading_info
return model
class Conv1D(nn.Module):
    """1D-convolution layer as defined by Radford et al. for OpenAI GPT (also used in GPT-2).

    Functionally equivalent to a fully-connected layer, except that the weight
    matrix is stored transposed relative to ``nn.Linear``: shape ``(nx, nf)``.
    """
    def __init__(self, nf, nx):
        """
        Args:
            nf: number of output features.
            nx: number of input features.
        """
        super(Conv1D, self).__init__()
        self.nf = nf
        weight = torch.empty(nx, nf)
        nn.init.normal_(weight, std=0.02)  # same init scale as the original GPT code
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(torch.zeros(nf))
    def forward(self, x):
        """Apply ``x @ weight + bias`` over the last dimension of ``x``."""
        out_shape = x.size()[:-1] + (self.nf,)
        flat = x.view(-1, x.size(-1))
        out = torch.addmm(self.bias, flat, self.weight)
        return out.view(*out_shape)
class PoolerStartLogits(nn.Module):
    """Compute SQuAD start logits from the sequence of hidden states.

    Each token's hidden state is projected down to a single scalar logit;
    masked positions are optionally pushed toward minus infinity.
    """
    def __init__(self, config):
        super(PoolerStartLogits, self).__init__()
        # One scalar logit per token.
        self.dense = nn.Linear(config.hidden_size, 1)
    def forward(self, hidden_states, p_mask=None):
        """ Args:
            **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
                invalid position mask such as query and special symbols (PAD, SEP, CLS)
                1.0 means token should be masked.
        """
        logits = self.dense(hidden_states).squeeze(-1)
        if p_mask is None:
            return logits
        # Use a large-but-finite penalty so fp16 does not overflow to inf/NaN.
        penalty = 65500 if next(self.parameters()).dtype == torch.float16 else 1e30
        return logits * (1 - p_mask) - penalty * p_mask
class PoolerEndLogits(nn.Module):
    """Compute SQuAD end logits from the hidden states and a start-token state.

    End-position scores are conditioned on a representation of the start token,
    either gathered via ``start_positions`` (training) or supplied directly as
    ``start_states`` (beam search at inference).
    """
    def __init__(self, config):
        super(PoolerEndLogits, self).__init__()
        # Fuses [token_state ; start_state] back down to hidden_size.
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)
    def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
        """ Args:
            One of ``start_states``, ``start_positions`` should be not None.
            If both are set, ``start_positions`` overrides ``start_states``.
            **start_states**: ``torch.LongTensor`` of shape identical to hidden_states
                hidden states of the first tokens for the labeled span.
            **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
                position of the first token for the labeled span.
            **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
                Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
                1.0 means token should be masked.
        """
        assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
        if start_positions is not None:
            seq_len, hidden_size = hidden_states.shape[-2:]
            gather_index = start_positions[:, None, None].expand(-1, -1, hidden_size)  # (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, gather_index)  # (bsz, 1, hsz)
            # Broadcast the single start state to every sequence position.
            start_states = start_states.expand(-1, seq_len, -1)  # (bsz, slen, hsz)
        fused = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        fused = self.LayerNorm(self.activation(fused))
        logits = self.dense_1(fused).squeeze(-1)
        if p_mask is None:
            return logits
        # Use a large-but-finite penalty so fp16 does not overflow to inf/NaN.
        penalty = 65500 if next(self.parameters()).dtype == torch.float16 else 1e30
        return logits * (1 - p_mask) - penalty * p_mask
class PoolerAnswerClass(nn.Module):
    """Compute the SQuAD 2.0 answerability logit from the CLS and start-token states."""
    def __init__(self, config):
        super(PoolerAnswerClass, self).__init__()
        # Fuses [start_state ; cls_state] down to hidden_size, then to a scalar.
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
    def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
        """
        Args:
            One of ``start_states``, ``start_positions`` should be not None.
            If both are set, ``start_positions`` overrides ``start_states``.
            **start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``.
                hidden states of the first tokens for the labeled span.
            **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
                position of the first token for the labeled span.
            **cls_index**: torch.LongTensor of shape ``(batch_size,)``
                position of the CLS token. If None, take the last token.
            note(Original repo):
                no dependency on end_feature so that we can obtain one single `cls_logits`
                for each sample
        """
        hidden_size = hidden_states.shape[-1]
        assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
        if start_positions is not None:
            gather_index = start_positions[:, None, None].expand(-1, -1, hidden_size)  # (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, gather_index).squeeze(-2)  # (bsz, hsz)
        if cls_index is None:
            # Default: use the final token of each sequence as the CLS state.
            cls_token_state = hidden_states[:, -1, :]  # (bsz, hsz)
        else:
            gather_index = cls_index[:, None, None].expand(-1, -1, hidden_size)  # (bsz, 1, hsz)
            cls_token_state = hidden_states.gather(-2, gather_index).squeeze(-2)  # (bsz, hsz)
        features = self.activation(self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)))
        return self.dense_1(features).squeeze(-1)
class SQuADHead(nn.Module):
    r""" A SQuAD head inspired by XLNet.
    Parameters:
        config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
    Inputs:
        **hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
            hidden states of sequence tokens
        **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
            position of the first token for the labeled span.
        **end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
            position of the last token for the labeled span.
        **cls_index**: torch.LongTensor of shape ``(batch_size,)``
            position of the CLS token. If None, take the last token.
        **is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
            Whether the question has a possible answer in the paragraph or not.
        **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
            Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
            1.0 means token should be masked.
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
            Indices for the top config.start_n_top start token possibilities (beam-search).
        **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size,)``
            Log probabilities for the ``is_impossible`` label of the answers.
    """
    def __init__(self, config):
        super(SQuADHead, self).__init__()
        # Beam widths kept during inference-time beam search.
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top
        # Sub-modules: per-token start scores, start-conditioned end scores,
        # and the answerability classifier.
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)
    def forward(self, hidden_states, start_positions=None, end_positions=None,
                cls_index=None, is_impossible=None, p_mask=None):
        """Training mode (both ``start_positions`` and ``end_positions`` given)
        returns ``(total_loss,)``; otherwise runs beam search over start/end
        candidates. See the class docstring for output shapes."""
        outputs = ()
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)
            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)
                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5
            outputs = (total_loss,) + outputs
        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
            start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            # Score every (position, start-candidate) pair in a single batched call.
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
            end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
            # Weighted sum of hidden states with the (softmax) start probabilities
            # as weights, i.e. an expected start-token representation.
            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs
class SequenceSummary(nn.Module):
    r"""Collapse a sequence of hidden states into a single summary vector.

    Behaviour is driven by (optional) attributes on the config:
        summary_type:
            - 'last' => [default] take the last token hidden state (like XLNet)
            - 'first' => take the first token hidden state (like Bert)
            - 'mean' => take the mean of all tokens hidden states
            - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
            - 'attn' => Not implemented now, use multi-head attention
        summary_use_proj: add a linear projection after extracting the vector
        summary_proj_to_labels: If True, the projection outputs to config.num_labels classes
            (otherwise to hidden_size). Default: False.
        summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default
        summary_first_dropout: dropout probability applied before the projection
        summary_last_dropout: dropout probability applied after the activation
    """
    def __init__(self, config):
        super(SequenceSummary, self).__init__()
        self.summary_type = getattr(config, 'summary_type', 'last')
        if self.summary_type == 'attn':
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError
        if getattr(config, 'summary_use_proj', False):
            if getattr(config, 'summary_proj_to_labels', False) and config.num_labels > 0:
                proj_size = config.num_labels
            else:
                proj_size = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, proj_size)
        else:
            self.summary = Identity()
        if getattr(config, 'summary_activation', None) == 'tanh':
            self.activation = nn.Tanh()
        else:
            self.activation = Identity()
        if getattr(config, 'summary_first_dropout', 0) > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)
        else:
            self.first_dropout = Identity()
        if getattr(config, 'summary_last_dropout', 0) > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout)
        else:
            self.last_dropout = Identity()
    def forward(self, hidden_states, cls_index=None):
        """ hidden_states: float Tensor in shape [bsz, ..., seq_len, hidden_size], the hidden-states of the last layer.
            cls_index: [optional] position of the classification token if summary_type == 'cls_index',
                shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
                if summary_type == 'cls_index' and cls_index is None:
                    we take the last token of the sequence as classification token
        """
        if self.summary_type == 'last':
            summary = hidden_states[:, -1]
        elif self.summary_type == 'first':
            summary = hidden_states[:, 0]
        elif self.summary_type == 'mean':
            summary = hidden_states.mean(dim=1)
        elif self.summary_type == 'cls_index':
            if cls_index is None:
                # Default: index of the last token, broadcast to every leading dim.
                cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long)
            else:
                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
                cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
            # cls_index now has shape (bsz, XX, 1, hidden_size); gather picks one
            # token per sequence, giving (bsz, XX, hidden_size).
            summary = hidden_states.gather(-2, cls_index).squeeze(-2)
        elif self.summary_type == 'attn':
            raise NotImplementedError
        return self.last_dropout(self.activation(self.summary(self.first_dropout(summary))))
def prune_linear_layer(layer, index, dim=0):
    """Prune an ``nn.Linear`` layer along ``dim``, keeping only the entries in ``index``.

    Returns a brand-new layer on the same device whose parameters require grad.
    Used to remove attention heads.

    Args:
        layer: the ``nn.Linear`` to shrink.
        index: ``torch.LongTensor`` of the entries to keep.
        dim: 0 prunes output features, 1 prunes input features.
    """
    index = index.to(layer.weight.device)
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    kept_bias = None
    if layer.bias is not None:
        # Pruning input features (dim == 1) leaves the bias untouched.
        kept_bias = layer.bias.clone().detach() if dim == 1 else layer.bias[index].clone().detach()
    new_shape = list(layer.weight.size())
    new_shape[dim] = len(index)
    pruned = nn.Linear(new_shape[1], new_shape[0], bias=layer.bias is not None).to(layer.weight.device)
    # Temporarily disable grad so the in-place copy is legal, then re-enable.
    pruned.weight.requires_grad = False
    pruned.weight.copy_(kept_weight.contiguous())
    pruned.weight.requires_grad = True
    if kept_bias is not None:
        pruned.bias.requires_grad = False
        pruned.bias.copy_(kept_bias.contiguous())
        pruned.bias.requires_grad = True
    return pruned
def prune_conv1d_layer(layer, index, dim=1):
    """Prune a ``Conv1D`` layer along ``dim``, keeping only the entries in ``index``.

    A ``Conv1D`` works like a Linear layer (see e.g. BERT/GPT) but stores the
    weight transposed, so the output dimension is ``dim=1`` here.
    Returns a brand-new layer on the same device whose parameters require grad.
    Used to remove attention heads.
    """
    index = index.to(layer.weight.device)
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    # With the transposed layout, the bias only shrinks when pruning dim 1.
    if dim == 0:
        kept_bias = layer.bias.clone().detach()
    else:
        kept_bias = layer.bias[index].clone().detach()
    new_shape = list(layer.weight.size())
    new_shape[dim] = len(index)
    pruned = Conv1D(new_shape[1], new_shape[0]).to(layer.weight.device)
    # Temporarily disable grad so the in-place copies are legal, then re-enable.
    for param, source in ((pruned.weight, kept_weight), (pruned.bias, kept_bias)):
        param.requires_grad = False
        param.copy_(source.contiguous())
        param.requires_grad = True
    return pruned
def prune_layer(layer, index, dim=None):
    """Prune a ``Conv1D`` or ``nn.Linear`` layer, keeping only the entries in ``index``.

    Dispatches to the type-specific helper, defaulting ``dim`` to each layer
    type's output dimension. Returns a new layer with ``requires_grad=True``.
    Used to remove heads.

    Raises:
        ValueError: if ``layer`` is neither ``nn.Linear`` nor ``Conv1D``.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=dim if dim is not None else 0)
    if isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=dim if dim is not None else 1)
    raise ValueError("Can't prune layer of class {}".format(layer.__class__))
| 53.01484 | 472 | 0.649964 |
29c2729bbbd17142d71480c1024fde3bbb152053 | 3,583 | py | Python | armstrong/apps/embeds/migrations/0001_initial.py | armstrong/armstrong.apps.embeds | 6042f4ab39e752c4e78826e44f7c2aa82bc04e6a | [
"Apache-2.0"
] | 1 | 2016-08-02T09:33:35.000Z | 2016-08-02T09:33:35.000Z | armstrong/apps/embeds/migrations/0001_initial.py | armstrong/armstrong.apps.embeds | 6042f4ab39e752c4e78826e44f7c2aa82bc04e6a | [
"Apache-2.0"
] | null | null | null | armstrong/apps/embeds/migrations/0001_initial.py | armstrong/armstrong.apps.embeds | 6042f4ab39e752c4e78826e44f7c2aa82bc04e6a | [
"Apache-2.0"
] | 1 | 2018-03-04T20:30:15.000Z | 2018-03-04T20:30:15.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import armstrong.apps.embeds.fields
import model_utils.fields
import armstrong.apps.embeds.mixins
import django_extensions.db.fields.json
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Backend',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('code_path', models.CharField(help_text=b'Full Python path for the actual Backend code.', unique=True, max_length=100)),
('description', models.CharField(max_length=255, null=True, blank=True)),
('regex', models.CharField(help_text=b'Used to match a URL when automatically assigning backends.', max_length=100)),
('priority', models.PositiveSmallIntegerField(default=1, help_text=b'A higher number means higher priority. Used when automatically assigning a backend.')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Embed',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', armstrong.apps.embeds.fields.EmbedURLField(unique=True, response_attr=b'response')),
('response_cache', django_extensions.db.fields.json.JSONField()),
('response_last_updated', model_utils.fields.MonitorField(default=None, null=True, monitor=b'response_cache', blank=True)),
('backend', armstrong.apps.embeds.fields.EmbedForeignKey(blank=True, response_attr=b'response', to='embeds.Backend', help_text=b'The most appropriate Backend will auto-assign if not explicitly provided')),
],
options={
},
bases=(models.Model, armstrong.apps.embeds.mixins.TemplatesByEmbedTypeMixin),
),
migrations.CreateModel(
name='EmbedType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'Automatically populated by the backends', unique=True, max_length=25, editable=False)),
('slug', models.SlugField(help_text=b'Used as a folder name in the template lookup.', unique=True, max_length=25, editable=False)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Provider',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'Automatically populated by the backends', unique=True, max_length=50, editable=False)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='embed',
name='provider',
field=models.ForeignKey(blank=True, to='embeds.Provider', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='embed',
name='type',
field=models.ForeignKey(blank=True, to='embeds.EmbedType', null=True),
preserve_default=True,
),
]
| 45.935897 | 221 | 0.607591 |
c79942c48509e193cb5223b256c21b317af0f7b7 | 5,163 | py | Python | pandas/_typing.py | erictleung/pandas | ca52e3968058dce48c20070b3e9efc2ec42d7cf4 | [
"BSD-3-Clause"
] | null | null | null | pandas/_typing.py | erictleung/pandas | ca52e3968058dce48c20070b3e9efc2ec42d7cf4 | [
"BSD-3-Clause"
] | null | null | null | pandas/_typing.py | erictleung/pandas | ca52e3968058dce48c20070b3e9efc2ec42d7cf4 | [
"BSD-3-Clause"
] | null | null | null | from datetime import datetime, timedelta, tzinfo
from io import BufferedIOBase, RawIOBase, TextIOBase, TextIOWrapper
from mmap import mmap
from os import PathLike
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Collection,
Dict,
Hashable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
# To prevent import cycles place any internal imports in the branch below
# and use a string literal forward reference to it in subsequent types
# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
from typing import final
from pandas._libs import Period, Timedelta, Timestamp
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas import Interval
from pandas.core.arrays.base import ExtensionArray # noqa: F401
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame # noqa: F401
from pandas.core.groupby.generic import DataFrameGroupBy, SeriesGroupBy
from pandas.core.indexes.base import Index
from pandas.core.resample import Resampler
from pandas.core.series import Series
from pandas.core.window.rolling import BaseWindow
from pandas.io.formats.format import EngFormatter
else:
# typing.final does not exist until py38
final = lambda x: x
# array-like
AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray)
ArrayLike = TypeVar("ArrayLike", "ExtensionArray", np.ndarray)
# scalars
PythonScalar = Union[str, int, float, bool]
DatetimeLikeScalar = TypeVar("DatetimeLikeScalar", "Period", "Timestamp", "Timedelta")
PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"]
Scalar = Union[PythonScalar, PandasScalar]
# timestamp and timedelta convertible types
TimestampConvertibleTypes = Union[
"Timestamp", datetime, np.datetime64, int, np.int64, float, str
]
TimedeltaConvertibleTypes = Union[
"Timedelta", timedelta, np.timedelta64, int, np.int64, float, str
]
Timezone = Union[str, tzinfo]
# other
Dtype = Union[
"ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool, object]]
]
DtypeObj = Union[np.dtype, "ExtensionDtype"]
# FrameOrSeriesUnion means either a DataFrame or a Series. E.g.
# `def func(a: FrameOrSeriesUnion) -> FrameOrSeriesUnion: ...` means that if a Series
# is passed in, either a Series or DataFrame is returned, and if a DataFrame is passed
# in, either a DataFrame or a Series is returned.
FrameOrSeriesUnion = Union["DataFrame", "Series"]
# FrameOrSeries is stricter and ensures that the same subclass of NDFrame always is
# used. E.g. `def func(a: FrameOrSeries) -> FrameOrSeries: ...` means that if a
# Series is passed into a function, a Series is always returned and if a DataFrame is
# passed in, a DataFrame is always returned.
FrameOrSeries = TypeVar("FrameOrSeries", bound="NDFrame")
Axis = Union[str, int]
Label = Optional[Hashable]
IndexLabel = Union[Label, Sequence[Label]]
Level = Union[Label, int]
Shape = Tuple[int, ...]
Suffixes = Tuple[str, str]
Ordered = Optional[bool]
JSONSerializable = Optional[Union[PythonScalar, List, Dict]]
Axes = Collection
# For functions like rename that convert one label to another
Renamer = Union[Mapping[Label, Any], Callable[[Label], Label]]
# to maintain type information across generic functions and parametrization
T = TypeVar("T")
# used in decorators to preserve the signature of the function it decorates
# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
# types of vectorized key functions for DataFrame::sort_values and
# DataFrame::sort_index, among others
ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]]
IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]]
# types of `func` kwarg for DataFrame.aggregate and Series.aggregate
AggFuncTypeBase = Union[Callable, str]
AggFuncTypeDict = Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]]
AggFuncType = Union[
AggFuncTypeBase,
List[AggFuncTypeBase],
AggFuncTypeDict,
]
AggObjType = Union[
"Series",
"DataFrame",
"SeriesGroupBy",
"DataFrameGroupBy",
"BaseWindow",
"Resampler",
]
PythonFuncType = Callable[[Any], Any]
# filenames and file-like-objects
Buffer = Union[IO[AnyStr], RawIOBase, BufferedIOBase, TextIOBase, TextIOWrapper, mmap]
FileOrBuffer = Union[str, Buffer[T]]
FilePathOrBuffer = Union["PathLike[str]", FileOrBuffer[T]]
# for arbitrary kwargs passed during reading/writing files
StorageOptions = Optional[Dict[str, Any]]
# compression keywords and compression
CompressionDict = Dict[str, Any]
CompressionOptions = Optional[Union[str, CompressionDict]]
# types in DataFrameFormatter
FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
ColspaceType = Mapping[Label, Union[str, int]]
FloatFormatType = Union[str, Callable, "EngFormatter"]
ColspaceArgType = Union[
str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]]
]
| 32.068323 | 88 | 0.743173 |
71d5059fd7a3071c9966e650fc6b217907f1bd25 | 152 | py | Python | exercicios_programacao/exs2/ex2_2.py | JellyWellyBelly/PythonTeaching | a56545b744e9835500372e96a1518d537bd46850 | [
"Unlicense"
] | null | null | null | exercicios_programacao/exs2/ex2_2.py | JellyWellyBelly/PythonTeaching | a56545b744e9835500372e96a1518d537bd46850 | [
"Unlicense"
] | null | null | null | exercicios_programacao/exs2/ex2_2.py | JellyWellyBelly/PythonTeaching | a56545b744e9835500372e96a1518d537bd46850 | [
"Unlicense"
] | null | null | null | x = int(input())
y = int(input())
z = int(input())
if (x>y):
if (x<z):
print (z)
else:
print (x)
else:
if (y<z):
print(z)
else:
print (y)
| 10.133333 | 16 | 0.493421 |
71a23a442d74acb57b24def11c98a6944a559c8a | 1,060 | py | Python | magenta/music/__init__.py | Bogatom/MagentaTraining | 9ffa8c566f70868720d69d9839bb3222c4c1c93b | [
"Apache-2.0"
] | 17 | 2017-06-29T18:32:25.000Z | 2021-10-03T12:30:49.000Z | magenta/music/__init__.py | Bogatom/MagentaTraining | 9ffa8c566f70868720d69d9839bb3222c4c1c93b | [
"Apache-2.0"
] | 12 | 2021-02-15T07:42:08.000Z | 2022-02-08T02:05:27.000Z | magenta/music/__init__.py | Bogatom/MagentaTraining | 9ffa8c566f70868720d69d9839bb3222c4c1c93b | [
"Apache-2.0"
] | 6 | 2017-07-06T06:12:36.000Z | 2021-07-06T13:07:32.000Z | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper around the `note_seq` package for backward compatibility.
These utilities have moved to https://github.com/magenta/note_seq.
"""
import sys
import warnings
import note_seq
warnings.warn(
'`magenta.music` is deprecated, please use the `note_seq` package in its '
'place (https://github.com/magenta/note-seq). Importing `magenta.music` '
'will become a failure in a future version.',
DeprecationWarning)
sys.modules['magenta.music'] = note_seq
| 34.193548 | 78 | 0.751887 |
1761a8a51d3a2b919d5f9b9a2a038c0b3130d09e | 5,085 | py | Python | word_counting/main.py | ropok/explore-jupyter3 | e04a70b76f70b7da81a272595c736c33052b7d80 | [
"MIT"
] | null | null | null | word_counting/main.py | ropok/explore-jupyter3 | e04a70b76f70b7da81a272595c736c33052b7d80 | [
"MIT"
] | null | null | null | word_counting/main.py | ropok/explore-jupyter3 | e04a70b76f70b7da81a272595c736c33052b7d80 | [
"MIT"
] | null | null | null | '''
text-mining: menghitung frekuensi kemunculan kata
'''
import os
from os import system, name
import csv
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
import pandas as pd
from datetime import datetime
# NLTK
# NLTK
class Main():
    """Tokenizes text with NLTK and logs word-frequency results to disk.

    On construction, opens two files for writing:
      - `<log>.txt`: a running process log (success/error lines), and
      - `output_<log>.txt`: the word-frequency table written by `freqword`.
    Call `closeLog()` when done to release both file handles.
    """

    def __init__(self, log):
        # `log` is the base file name (without extension) for both logs.
        # NOTE: the original no-op `super(Main, self)` call was removed.
        self.log = open(log + '.txt', "w")
        self.log_output = open('output_' + log + '.txt', "w")

    def logSukses(self, textInfo, textContent):
        """Append a SUCCESS line to the process log and echo it to stdout."""
        message = "SUCCESS: \t {textInfo} \"{textContent}\"\n".format(
            textInfo=textInfo, textContent=textContent)
        self.log.write(message)
        self.log.flush()
        print(message)

    def logError(self, errorId, textInfo, textContent):
        """Append an ERROR line (with numeric id) to the process log and echo it."""
        message = "ERROR {errorId}: \t {textInfo} \"{textContent}\"\n".format(
            errorId=errorId, textInfo=textInfo, textContent=textContent)
        self.log.write(message)
        self.log.flush()
        print(message)

    def tokenize(self, text):
        """Split `text` into word tokens (stored in `self.token`); log the outcome."""
        try:
            self.text = text
            self.token = word_tokenize(self.text)
            self.logSukses('tokenize', self.text)
        except Exception:
            # BUG FIX: the original `except expression as identifier:` raised a
            # NameError (undefined name `expression`) instead of handling errors.
            self.logError('101', 'tokenize', self.text)

    def freqdist(self):
        """Return an NLTK `FreqDist` over the most recently tokenized text."""
        try:
            self.fdist = FreqDist(self.token)
            self.logSukses('freqdist', self.text)
            return self.fdist
        except Exception:
            # BUG FIX: same undefined-name `except` clause as in `tokenize`.
            self.logError('102', 'freqdist', self.text)

    def freqword(self, dictWords):
        """Rewrite the output file with one `word count` line per entry."""
        try:
            self.dictWords = dictWords
            self.logSukses('writing frequency of words', '')
            # Rewrite from the start, then truncate any leftover tail from a
            # previous (longer) write of the same file.
            self.log_output.seek(0)
            for word in self.dictWords:
                self.log_output.write(word + " " + str(self.dictWords[word]) + "\n")
            self.log_output.truncate()
            self.log_output.flush()
        except Exception:
            # BUG FIX: same undefined-name `except` clause as above.
            self.logError('103', 'writing frequency of words failed', '')

    def closeLog(self):
        """Close both the process log and the output file."""
        self.log.close()
        self.log_output.close()
# Dataframe (read .xlsx)
# Dataframe (read .xlsx)
class readData():
    """Thin wrapper around a pandas DataFrame loaded from an .xlsx file.

    Usage: call `open(path)` first; the other methods operate on the
    DataFrame stored in `self.data_file`.
    """
    # NOTE: the original no-op `__init__` (bare `super(readData, self)`,
    # never called) was removed; the default constructor suffices.

    def open(self, data_file):
        """Load the Excel file at `data_file` into `self.data_file`."""
        self.data_file = pd.read_excel(data_file)

    def count(self):
        """Return (and cache in `self.count_index`) the number of rows."""
        self.count_index = len(self.data_file.index)
        return self.count_index

    def column(self, column_name):
        """Return the values of column `column_name` as a Python list."""
        data = pd.DataFrame(self.data_file, columns=[column_name])
        # Keep the original element types (first cell of each row array).
        self.data_value = [row[0] for row in data.values]
        return self.data_value

    def readLastColumn(self):
        """Return the name of the right-most column."""
        return self.data_file.columns.tolist()[-1]
if __name__ == '__main__':
    # Log files are named after today's date (YYYYMMDD).
    main = Main(datetime.today().strftime('%Y%m%d'))
    read_data = readData()
    # Clear the console (cross-platform).
    def clear():
        # for windows
        if name == 'nt':
            _ = system('cls')
        # for mac and linux (here, os.name is 'posix')
        else:
            _ = system('clear')
    # Seed the accumulator FreqDist with an empty tokenization so that
    # per-file distributions can be merged into it via `update` below.
    main.tokenize("")
    fdist_collect = main.freqdist()
    main.freqword(fdist_collect)
    # Root directory of the .xlsx batch files.
    # NOTE(review): hard-coded, machine-specific Windows path — must be
    # edited before running anywhere else.
    path = r"C:\Users\jalerse\Documents\dev-jalerse\python-helping-tools\rekaman-stt\text-mining\rekaman-text-tervalidasi\ALL\anggaa"
    # First pass: count the total number of rows across all batch files,
    # so the second pass can report progress.
    index_total = 0
    for root, dirs, files in os.walk(path):
        for filename in files:
            if filename.startswith("batch") and filename.endswith(".xlsx"):
                input_file = root + "/" + filename
                read_data.open(input_file)
                index_total+=read_data.count()
                print(read_data.count())
    print('index total', index_total)
    # Second pass: compute word frequencies (original comment: "hitung
    # frekuensi Kata" = "count word frequency").
    index_current = 0
    for root, dirs, files in os.walk(path):
        for filename in files:
            if filename.startswith("batch") and filename.endswith(".xlsx"):
                input_file = root + "/" + filename
                print(input_file)
                read_data.open(input_file)
                data_list = read_data.column('transcript_training')
                for data in data_list:
                    main.tokenize(data)
                    fdist = main.freqdist()
                    # Merge this row's counts into the running total and
                    # rewrite the output file.
                    fdist_collect.update(fdist)
                    main.freqword(fdist_collect)
                    # Progress display: clear screen, show "current of total".
                    index_current+=1
                    clear()
                    print(index_current, 'of', index_total)
    main.closeLog()
e2b6ac0864634ff13b380b449567aa046378fc5c | 64,216 | py | Python | tensorflow/python/ops/sparse_ops.py | abdulazizali77/tensorflow | f7d07f5d9683a7d5ce91b108c6c31a47e1372eaa | [
"Apache-2.0"
] | 8 | 2017-04-20T18:08:52.000Z | 2021-10-21T11:19:02.000Z | tensorflow/python/ops/sparse_ops.py | Skywice/tensorflow | f7d07f5d9683a7d5ce91b108c6c31a47e1372eaa | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/sparse_ops.py | Skywice/tensorflow | f7d07f5d9683a7d5ce91b108c6c31a47e1372eaa | [
"Apache-2.0"
] | 9 | 2017-02-02T03:29:29.000Z | 2021-02-10T17:04:21.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Sparse Tensor Representation
TensorFlow supports a `SparseTensor` representation for data that is sparse
in multiple dimensions. Contrast this representation with `IndexedSlices`,
which is efficient for representing tensors that are sparse in their first
dimension, and dense along all other dimensions.
@@SparseTensor
@@SparseTensorValue
## Conversion
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
@@sparse_merge
## Manipulation
@@sparse_concat
@@sparse_reorder
@@sparse_reshape
@@sparse_split
@@sparse_retain
@@sparse_reset_shape
@@sparse_fill_empty_rows
@@sparse_transpose
## Reduction
@@sparse_reduce_sum
@@sparse_reduce_sum_sparse
## Math Operations
@@sparse_add
@@sparse_softmax
@@sparse_tensor_dense_matmul
@@sparse_maximum
@@sparse_minimum
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
def _convert_to_sparse_tensor(sp_input):
  """Coerce `sp_input` into a `SparseTensor`.

  Args:
    sp_input: `SparseTensor` or `SparseTensorValue`.

  Returns:
    `sp_input` converted to `SparseTensor`.

  Raises:
    ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
  """
  if isinstance(sp_input, sparse_tensor.SparseTensorValue):
    return sparse_tensor.SparseTensor.from_value(sp_input)
  if isinstance(sp_input, sparse_tensor.SparseTensor):
    return sp_input
  raise TypeError("Input must be a SparseTensor.")
def _convert_to_sparse_tensors(sp_inputs):
  """Coerce every element of `sp_inputs` into a `SparseTensor`.

  Args:
    sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
      objects.

  Returns:
    For a list input, a list of `SparseTensor` objects; for a tuple input,
    a lazy generator of `SparseTensor` objects (matching the original
    contract of this helper).

  Raises:
    ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor
      `SparseTensorValue`.
  """
  if isinstance(sp_inputs, list):
    return [_convert_to_sparse_tensor(elem) for elem in sp_inputs]
  if isinstance(sp_inputs, tuple):
    return (_convert_to_sparse_tensor(elem) for elem in sp_inputs)
  raise TypeError("Inputs must be a list or tuple.")
# pylint: disable=protected-access
def sparse_concat(axis,
                  sp_inputs,
                  name=None,
                  expand_nonconcat_dim=False,
                  concat_dim=None):
  """Concatenates a list of `SparseTensor` along the specified dimension.

  Concatenation is with respect to the dense versions of each sparse input.
  It is assumed that each inputs is a `SparseTensor` whose elements are ordered
  along increasing dimension number.

  If expand_nonconcat_dim is False, all inputs' shapes must match, except for
  the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
  allowed to vary among all inputs.

  The `indices`, `values`, and `shapes` lists must have the same length.

  If expand_nonconcat_dim is False, then the output shape is identical to the
  inputs', except along the concat dimension, where it is the sum of the inputs'
  sizes along that dimension.

  If expand_nonconcat_dim is True, then the output shape along the non-concat
  dimensions will be expand to be the largest among all inputs, and it is the
  sum of the inputs sizes along the concat dimension.

  The output elements will be resorted to preserve the sort order along
  increasing dimension number.

  This op runs in `O(M log M)` time, where `M` is the total number of non-empty
  values across all inputs. This is due to the need for an internal sort in
  order to concatenate efficiently across an arbitrary dimension.

  For example, if `axis = 1` and the inputs are

      sp_inputs[0]: shape = [2, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  then the output will be

      shape = [2, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [1, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b c  ]        [       ]   [b c          ]

  Another example, if 'axis = 1' and the inputs are

      sp_inputs[0]: shape = [3, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [2, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  if expand_nonconcat_dim = False, this will result in an error. But if
  expand_nonconcat_dim = True, this will result in:

      shape = [3, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [2, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b    ]        [       ]   [b            ]
      [  c  ]                    [  c          ]

  Args:
    axis: Dimension to concatenate along. Must be in range [-rank, rank),
      where rank is the number of dimensions in each input `SparseTensor`.
    sp_inputs: List of `SparseTensor` to concatenate.
    name: A name prefix for the returned tensors (optional).
    expand_nonconcat_dim: Whether to allow the expansion in the non-concat
      dimensions. Defaulted to False.
    concat_dim: The old (deprecated) name for axis.

  Returns:
    A `SparseTensor` with the concatenated output.

  Raises:
    TypeError: If `sp_inputs` is not a list of `SparseTensor`.
  """
  # `concat_dim` is the deprecated alias of `axis`; resolve whichever was set.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
                                                concat_dim)
  sp_inputs = _convert_to_sparse_tensors(sp_inputs)

  if len(sp_inputs) == 1:  # Degenerate case of one tensor.
    return sp_inputs[0]

  inds = [sp_input.indices for sp_input in sp_inputs]
  vals = [sp_input.values for sp_input in sp_inputs]
  shapes = [sp_input.dense_shape for sp_input in sp_inputs]

  if expand_nonconcat_dim:
    # Elementwise max over all input shapes: every non-concat dimension is
    # grown to the largest size seen among the inputs.
    max_shape = math_ops.reduce_max(
        array_ops.concat(
            [array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
    # Rebuild each shape as [max before axis, own size at axis, max after
    # axis]; the `axis == -1` cases handle the negative-index slicing.
    shapes = [
        array_ops.concat([
            max_shape[:axis], shape[-1:] if axis == -1 else
            shape[axis:axis + 1], [] if axis == -1 else max_shape[axis + 1:]
        ], 0) for shape in shapes
    ]

  output_ind, output_val, output_shape = (gen_sparse_ops._sparse_concat(
      inds, vals, shapes, axis, name=name))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
def sparse_add(a, b, thresh=0):
  """Adds two tensors, at least one of each is a `SparseTensor`.

  If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
  both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
  of arguments does not matter. Use vanilla `tf.add()` for adding two dense
  `Tensor`s.

  The indices of any input `SparseTensor` are assumed ordered in standard
  lexicographic order. If this is not the case, before this step run
  `SparseReorder` to restore index ordering.

  If both arguments are sparse, we perform "clipping" as follows. By default,
  if two values sum to zero at some index, the output `SparseTensor` would still
  include that particular location in its index, storing a zero in the
  corresponding value slot. To override this, callers can specify `thresh`,
  indicating that if the sum has a magnitude strictly smaller than `thresh`, its
  corresponding value and index would then not be included. In particular,
  `thresh == 0.0` (default) means everything is kept and actual thresholding
  happens only for a positive value.

  For example, suppose the logical sum of two sparse operands is (densified):

      [       2]
      [.1      0]
      [ 6   -.2]

  Then,

  * `thresh == 0` (the default): all 5 index/value pairs will be returned.
  * `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
      index/value pairs will be returned.
  * `thresh == 0.21`: .1, 0, and -.2 will vanish.

  Args:
    a: The first operand; `SparseTensor` or `Tensor`.
    b: The second operand; `SparseTensor` or `Tensor`. At least one operand
      must be sparse.
    thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
      output value/index pair takes space. Its dtype should match that of the
      values if they are real; if the latter are complex64/complex128, then the
      dtype should be float32/float64, correspondingly.

  Returns:
    A `SparseTensor` or a `Tensor`, representing the sum.

  Raises:
    TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
  """
  sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
  if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
    raise TypeError("At least one input should be SparseTensor; do you mean to"
                    " use tf.add()?")

  if all(isinstance(inp, sparse_classes) for inp in [a, b]):
    a = _convert_to_sparse_tensor(a)
    # `real_dtype` so a float threshold can apply to complex-valued tensors
    # (thresholding is on the magnitude, per the docstring above).
    thresh = ops.convert_to_tensor(
        thresh, dtype=a.values.dtype.real_dtype, name="thresh")
    output_ind, output_val, output_shape = (gen_sparse_ops._sparse_add(
        a.indices, a.values, a.dense_shape,
        b.indices, b.values, b.dense_shape,
        thresh))
    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
  else:
    # swap to make `a` the SparseTensor.
    if isinstance(b, sparse_classes):
      a, b = b, a
    return gen_sparse_ops._sparse_tensor_dense_add(
        a.indices, a.values, a.dense_shape, b)
def sparse_dense_cwise_add(sp_t, dense_t):
  """Adds up a SparseTensor and a dense Tensor, using these special rules:

  (1) Broadcasts the dense side to have the same shape as the sparse side, if
      eligible;
  (2) Then, only the dense values pointed to by the indices of the SparseTensor
      participate in the cwise addition.

  By the rules, the result is a logical SparseTensor with exactly the same
  indices and shape, but possibly with different non-zero values. The output of
  this Op is the resultant non-zero values.

  Args:
    sp_t: the SparseTensor operand.
    dense_t: the dense Tensor operand; must have the same dtype and a
      broadcast-compatible shape as `sp_t`.

  Returns:
    output: the SparseTensor output.
  """
  # Only the values change; the sparsity pattern of `sp_t` is reused verbatim.
  summed_values = gen_sparse_ops.sparse_dense_cwise_add(
      sp_t.indices, sp_t.values, sp_t.dense_shape, dense_t)
  return sparse_tensor.SparseTensor(
      sp_t.indices, summed_values, sp_t.dense_shape)
def sparse_reorder(sp_input, name=None):
  """Reorders a `SparseTensor` into the canonical, row-major ordering.

  Note that by convention, all sparse ops preserve the canonical ordering
  along increasing dimension number. The only time ordering can be violated
  is during manual manipulation of the indices and values to add entries.

  Reordering does not affect the shape of the `SparseTensor`.

  For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:

      [0, 3]: b
      [0, 1]: a
      [3, 1]: d
      [2, 0]: c

  then the output will be a `SparseTensor` of shape `[4, 5]` and
  `indices` / `values`:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` with the same shape and non-empty values, but in
    canonical ordering.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  sorted_ind, sorted_val = gen_sparse_ops._sparse_reorder(
      sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)

  # The shape is unchanged; `identity` gives the output its own tensor node.
  return sparse_tensor.SparseTensor(
      sorted_ind, sorted_val, array_ops.identity(sp_input.dense_shape))
def sparse_reshape(sp_input, shape, name=None):
  """Reshapes a `SparseTensor` to represent values in a new dense shape.

  This operation has the same semantics as `reshape` on the represented dense
  tensor. The indices of non-empty values in `sp_input` are recomputed based
  on the new dense shape, and a new `SparseTensor` is returned containing the
  new indices and new shape. The order of non-empty values in `sp_input` is
  unchanged.

  If one component of `shape` is the special value -1, the size of that
  dimension is computed so that the total dense size remains constant. At
  most one component of `shape` can be -1. The number of dense elements
  implied by `shape` must be the same as the number of dense elements
  originally represented by `sp_input`.

  For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:

      [0, 0, 0]: a
      [0, 0, 1]: b
      [0, 1, 0]: c
      [1, 0, 0]: d
      [1, 2, 3]: e

  and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
  shape `[9, 4]` and `indices` / `values`:

      [0, 0]: a
      [0, 1]: b
      [1, 2]: c
      [4, 2]: d
      [8, 1]: e

  Args:
    sp_input: The input `SparseTensor`.
    shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
      represented `SparseTensor`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` with the same non-empty values but with indices calculated
    by the new dense shape.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
    # Only indices and shape are recomputed; values pass through unchanged
    # (`identity` gives the output its own tensor node).
    reshaped_ind, reshaped_shape = gen_sparse_ops._sparse_reshape(
        sp_input.indices, sp_input.dense_shape, shape, name=name)

    return sparse_tensor.SparseTensor(
        reshaped_ind, array_ops.identity(sp_input.values),
        reshaped_shape)
# TODO(aselle): Remove keyword required once for 1.0 final
class KeywordRequired(object):
  """Sentinel default standing in for Python 2's missing keyword-only `*`."""

  def __repr__(self):
    # Render without a fully qualified module path so generated docs stay
    # clean.
    return "KeywordRequired()"
def sparse_split(keyword_required=KeywordRequired(),
                 sp_input=None, num_split=None, axis=None,
                 name=None, split_dim=None):
  """Split a `SparseTensor` into `num_split` tensors along `axis`.

  If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`
  each slice starting from 0:`shape[axis] % num_split` gets extra one
  dimension. For example, if `axis = 1` and `num_split = 2` and the
  input is:

      input_tensor = shape = [2, 7]
      [    a   d e  ]
      [b c          ]

  Graphically the output tensors are:

      output_tensor[0] =
      [    a ]
      [b c   ]

      output_tensor[1] =
      [ d e  ]
      [      ]

  Args:
    keyword_required: Python 2 standin for * (temporary for argument reorder)
    sp_input: The `SparseTensor` to split.
    num_split: A Python integer. The number of ways to split.
    axis: A 0-D `int32` `Tensor`. The dimension along which to split.
    name: A name for the operation (optional).
    split_dim: Deprecated old name for axis.

  Returns:
    `num_split` `SparseTensor` objects resulting from splitting `value`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If the deprecated `split_dim` and `axis` are both non None.
  """
  # Any positional argument lands in `keyword_required` and trips this check,
  # forcing callers to pass everything by keyword.
  if not isinstance(keyword_required, KeywordRequired):
    raise ValueError("Keyword arguments are required for this function.")
  if sp_input is None:
    raise ValueError("sp_input is required")
  if num_split is None:
    raise ValueError("num_split is required")
  if axis is None:
    raise ValueError("axis is required")

  # `split_dim` is the deprecated alias of `axis`; resolve whichever was set.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
                                                split_dim)
  sp_input = _convert_to_sparse_tensor(sp_input)

  output_inds, output_vals, output_shapes = (gen_sparse_ops._sparse_split(
      axis,
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      num_split,
      name=name))
  sparse_tensors = []
  for i in range(0, num_split):
    sparse_tensors.append(
        sparse_tensor.SparseTensor(
            output_inds[i], output_vals[i], output_shapes[i]))
  return sparse_tensors
def sparse_to_dense(sparse_indices,
                    output_shape,
                    sparse_values,
                    default_value=0,
                    validate_indices=True,
                    name=None):
  """Converts a sparse representation into a dense tensor.

  Builds an array `dense` with shape `output_shape` such that

  ```python
  # If sparse_indices is scalar
  dense[i] = (i == sparse_indices ? sparse_values : default_value)

  # If sparse_indices is a vector, then for each i
  dense[sparse_indices[i]] = sparse_values[i]

  # If sparse_indices is an n by d matrix, then for each i in [0, n)
  dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
  ```

  All other values in `dense` are set to `default_value`. If `sparse_values`
  is a scalar, all sparse indices are set to this single value.

  Indices should be sorted in lexicographic order, and indices must not
  contain any repeats. If `validate_indices` is True, these properties
  are checked during execution.

  Args:
    sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
      `sparse_indices[i]` contains the complete index where `sparse_values[i]`
      will be placed.
    output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
      of the dense output tensor.
    sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
      `sparse_indices`, or a scalar value to be used for all sparse indices.
    default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
      to set for indices not specified in `sparse_indices`. Defaults to zero.
    validate_indices: A boolean value. If True, indices are checked to make
      sure they are sorted in lexicographic order and that there are no repeats.
    name: A name for the operation (optional).

  Returns:
    Dense `Tensor` of shape `output_shape`. Has the same type as
    `sparse_values`.
  """
  # Pure delegation to the generated kernel binding.
  return gen_sparse_ops._sparse_to_dense(
      sparse_indices,
      output_shape,
      sparse_values,
      default_value=default_value,
      validate_indices=validate_indices,
      name=name)
def sparse_reduce_sum(sp_input, axis=None, keep_dims=False,
                      reduction_axes=None):
  """Computes the sum of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
  instead of a sparse one.

  Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned. Additionally, the axes can be negative,
  similar to the indexing rules in Python.

  For example:

  ```python
  # 'x' represents [[1, ?, 1]
  #                 [?, 1, ?]]
  # where ? is implicitly-zero.
  tf.sparse_reduce_sum(x) ==> 3
  tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
  tf.sparse_reduce_sum(x, 1) ==> [2, 1]  # Can also use -1 as the axis.
  tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]
  tf.sparse_reduce_sum(x, [0, 1]) ==> 3
  ```

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keep_dims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of axis.

  Returns:
    The reduced Tensor.
  """
  # `math_ops._ReductionDims` resolves `axis`/`reduction_axes`/None into the
  # concrete dimension list expected by the kernel.
  return gen_sparse_ops.sparse_reduce_sum(
      sp_input.indices, sp_input.values,
      sp_input.dense_shape,
      math_ops._ReductionDims(sp_input, axis, reduction_axes),
      keep_dims)
def sparse_reduce_sum_sparse(sp_input, axis=None, keep_dims=False,
                             reduction_axes=None):
  """Computes the sum of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
  SparseTensor.

  Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned. Additionally, the axes can be negative,
  which are interpreted according to the indexing rules in Python.

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keep_dims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of axis

  Returns:
    The reduced SparseTensor.
  """
  # Same reduction-axis resolution as `sparse_reduce_sum`, but the kernel
  # returns sparse components instead of a dense tensor.
  output_ind, output_val, output_shape = (
      gen_sparse_ops.sparse_reduce_sum_sparse(
          sp_input.indices, sp_input.values,
          sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis,
                                                        reduction_axes),
          keep_dims))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
def sparse_tensor_to_dense(sp_input,
                           default_value=0,
                           validate_indices=True,
                           name=None):
  """Converts a `SparseTensor` into a dense tensor.

  This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s.

  For example, if `sp_input` has shape `[3, 5]` and non-empty string values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c

  and `default_value` is `x`, then the output will be a dense `[3, 5]`
  string tensor with values:

      [[x a x b x]
       [x x x x x]
       [c x x x x]]

  Indices must be without repeats. This is only
  tested if validate_indices is True.

  Args:
    sp_input: The input `SparseTensor`.
    default_value: Scalar value to set for indices not specified in
      `sp_input`. Defaults to zero.
    validate_indices: A boolean value. If `True`, indices are checked to make
      sure they are sorted in lexicographic order and that there are no repeats.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A dense tensor with shape `sp_input.dense_shape` and values specified by
    the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
    `default_value`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  # Unpack the SparseTensor's components and delegate.
  return sparse_to_dense(
      sp_input.indices,
      sp_input.dense_shape,
      sp_input.values,
      default_value=default_value,
      validate_indices=validate_indices,
      name=name)
def sparse_to_indicator(sp_input, vocab_size, name=None):
  """Converts a `SparseTensor` of ids into a dense bool indicator tensor.

  The last dimension of `sp_input.indices` is discarded and replaced with
  the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
  then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where

      output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True

  and False elsewhere in `output`.

  For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:

      [0, 0, 0]: 0
      [0, 1, 0]: 10
      [1, 0, 3]: 103
      [1, 1, 2]: 150
      [1, 1, 3]: 149
      [1, 1, 4]: 150
      [1, 2, 1]: 121

  and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
  tensor with False everywhere except at positions

      (0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
      (1, 2, 121).

  Note that repeats are allowed in the input SparseTensor.
  This op is useful for converting `SparseTensor`s into dense formats for
  compatibility with ops that expect dense tensors.

  The input `SparseTensor` must be in row-major order.

  Args:
    sp_input: A `SparseTensor` with `values` property of type `int32` or
      `int64`.
    vocab_size: A scalar int64 Tensor (or Python int) containing the new size
      of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense bool indicator tensor representing the indices with specified value.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
    # Build a companion SparseTensor with the same sparsity pattern but
    # all-True values; `sparse_merge` then moves the ids into the last
    # index dimension.
    num_entries = array_ops.shape(sp_input.indices)[0]
    new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
    sp_values = sparse_tensor.SparseTensor(
        sp_input.indices, new_values, sp_input.dense_shape)

    sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)

    # validate_indices may be False because we allow duplicates in new_indices:
    # repeated indices are allowed when creating an indicator matrix.
    return sparse_tensor_to_dense(
        sp_new, default_value=False, validate_indices=False, name=name)
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
                 already_sorted=False):
  """Combines a batch of feature ids and values into a single `SparseTensor`.

  The most common use case for this function occurs when feature ids and
  their corresponding values are stored in `Example` protos on disk.
  `parse_example` will return a batch of ids and a batch of values, and this
  function joins them into a single logical `SparseTensor` for use in
  functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.

  The `SparseTensor` returned by this function has the following properties:

    - `indices` is equivalent to `sp_ids.indices` with the last
      dimension discarded and replaced with `sp_ids.values`.
    - `values` is simply `sp_values.values`.
    - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
      `output.shape = [D0, D1, ..., Dn, vocab_size]`.

  For example, consider the following feature vectors:

  ```python
  vector1 = [-3, 0, 0, 0, 0, 0]
  vector2 = [ 0, 1, 0, 4, 1, 0]
  vector3 = [ 5, 0, 0, 9, 0, 0]
  ```

  These might be stored sparsely in the following Example protos by storing
  only the feature ids (column number if the vectors are treated as a matrix)
  of the non-zero elements and the corresponding values:

  ```python
  examples = [Example(features={
                  "ids": Feature(int64_list=Int64List(value=[0])),
                  "values": Feature(float_list=FloatList(value=[-3]))}),
              Example(features={
                  "ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
                  "values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
              Example(features={
                  "ids": Feature(int64_list=Int64List(value=[0, 3])),
                  "values": Feature(float_list=FloatList(value=[5, 9]))})]
  ```

  The result of calling parse_example on these examples will produce a
  dictionary with entries for "ids" and "values". Passing those two objects
  to this function along with vocab_size=6, will produce a `SparseTensor` that
  sparsely represents all three instances. Namely, the `indices` property will
  contain the coordinates of the non-zero entries in the feature matrix (the
  first dimension is the row number in the matrix, i.e., the index within the
  batch, and the second dimension is the column number, i.e., the feature id);
  `values` will contain the actual values. `shape` will be the shape of the
  original matrix, i.e., (3, 6). For our example above, the output will be
  equal to:

  ```python
  SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
               values=[-3, 1, 4, 1, 5, 9],
               dense_shape=[3, 6])
  ```

  Args:
    sp_ids: A `SparseTensor` with `values` property of type `int32`
      or `int64`.
    sp_values: A`SparseTensor` of any type.
    vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
      of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
    name: A name prefix for the returned tensors (optional)
    already_sorted: A boolean to specify whether the per-batch values in
      `sp_values` are already sorted. If so skip sorting, False by default
      (optional).

  Returns:
    A `SparseTensor` compactly representing a batch of feature ids and values,
    useful for passing to functions that expect such a `SparseTensor`.

  Raises:
    TypeError: If `sp_ids` or `sp_values` are not a `SparseTensor`.
  """
  sp_ids = _convert_to_sparse_tensor(sp_ids)
  sp_values = _convert_to_sparse_tensor(sp_values)

  with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
    indices_shape = array_ops.shape(sp_ids.indices)
    rank = indices_shape[1]
    ids = sp_ids.values
    # The output indices tensor must be int64.
    if ids.dtype != dtypes.int64:
      ids = math_ops.cast(ids, dtypes.int64)

    # Slice off the last dimension of indices, then tack on the ids
    indices_columns_to_preserve = array_ops.slice(
        sp_ids.indices, [0, 0], array_ops.stack([-1, rank - 1]))
    new_indices = array_ops.concat(
        [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)

    new_values = sp_values.values
    # New shape = original shape minus its last dimension, with `vocab_size`
    # appended as the new last dimension.
    new_shape = array_ops.concat([
        array_ops.slice(sp_ids.dense_shape, [0],
                        array_ops.expand_dims(rank - 1, 0)),
        math_ops.cast(array_ops.stack([vocab_size]), dtypes.int64)
    ], 0)

    result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
    # Replacing an index column with ids may break canonical ordering, so
    # reorder unless the caller promises the values are already sorted.
    return result if already_sorted else sparse_reorder(result)
def sparse_retain(sp_input, to_retain):
  """Retains specified non-empty values within a `SparseTensor`.

  For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  and `to_retain = [True, False, False, True]`, then the output will
  be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:

      [0, 1]: a
      [3, 1]: d

  Args:
    sp_input: The input `SparseTensor` with `N` non-empty elements.
    to_retain: A bool vector of length `N` with `M` true values.

  Returns:
    A `SparseTensor` with the same shape as the input and `M` non-empty
    elements corresponding to the true positions in `to_retain`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  to_retain = ops.convert_to_tensor(to_retain)

  # Static shape checks, when shapes are known at graph-construction time:
  # `to_retain` must be rank-1 and the same length as the values vector.
  retain_shape = to_retain.get_shape()
  retain_shape.assert_has_rank(1)
  sp_input.values.get_shape()[0].merge_with(retain_shape[0])

  # Positions of the True entries, flattened to a 1-D index vector.
  kept_positions = array_ops.reshape(array_ops.where(to_retain), [-1])
  kept_indices = array_ops.gather(sp_input.indices, kept_positions)
  kept_values = array_ops.gather(sp_input.values, kept_positions)
  return sparse_tensor.SparseTensor(kept_indices, kept_values,
                                    array_ops.identity(sp_input.dense_shape))
def sparse_reset_shape(sp_input, new_shape=None):
  """Resets the shape of a `SparseTensor`, leaving indices and values alone.

  When `new_shape` is None the returned tensor's shape is the tight bounding
  box of `sp_input` (one past the largest index used in each dimension).
  Otherwise `new_shape` must have the same rank as, and be elementwise at
  least as large as, `sp_input`'s shape.

  For example, for a `sp_input` with shape [2, 3, 5]:

  - `new_shape = [3, 7]` is an error: it is rank 2 while `sp_input` is
    rank 3 (a `ValueError` at graph construction if both ranks are known,
    otherwise an `OpError` at run time).
  - `new_shape = [2, 3, 6]` is fine: every dimension covers the original.
  - `new_shape = [2, 3, 4]` raises `InvalidArgumentError` at run time: the
    third dimension is smaller than the original.
  - `new_shape = None` yields the tight bounding box of `sp_input`.

  Args:
    sp_input: The input `SparseTensor`.
    new_shape: None or a vector representing the new shape for the returned
      `SparseTensor`.

  Returns:
    A `SparseTensor` with indices and values unchanged from `sp_input`. Its
    shape is `new_shape` if that is set, otherwise the tight bounding box
    of `sp_input`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If `new_shape` has a different rank from `sp_input` (when
      both ranks are statically known).
    OpError: If `new_shape` has dimension sizes that are too small, or if a
      rank mismatch is only discovered at run time.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  in_indices = array_ops.identity(sp_input.indices)
  in_values = array_ops.identity(sp_input.values)
  in_shape = array_ops.identity(sp_input.dense_shape)

  if new_shape is None:
    # Tight bounding box: one past the largest index used per dimension.
    max_index_per_dim = math_ops.reduce_max(in_indices, 0)
    out_shape = math_ops.add(max_index_per_dim,
                             array_ops.ones_like(in_shape))
  else:
    out_shape = ops.convert_to_tensor(new_shape)
    out_shape.get_shape().assert_has_rank(1)
    out_shape = math_ops.cast(out_shape, dtypes.int64)
    # Static rank check; catches mismatches before SparseTensor would.
    out_shape.get_shape()[0].merge_with(in_shape.get_shape()[0])
    # Dynamic rank check, for shapes only known at run time.
    out_shape = control_flow_ops.with_dependencies(
        [check_ops.assert_equal(
            array_ops.shape(in_shape), array_ops.shape(out_shape))],
        out_shape)
    # Every requested dimension must cover the original shape.
    out_shape = control_flow_ops.with_dependencies(
        [check_ops.assert_less_equal(in_shape, out_shape)],
        out_shape)

  return sparse_tensor.SparseTensor(in_indices, in_values, out_shape)
def sparse_fill_empty_rows(sp_input, default_value, name=None):
  """Fills empty rows in the input 2-D `SparseTensor` with a default value.

  For every row of `sp_input` that holds no non-empty value, an entry with
  value `default_value` is inserted at index `[row, 0]`.  For example, if
  `sp_input` has shape `[5, 6]` and non-empty values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  then rows 1 and 4 are empty and the output has values:

      [0, 1]: a
      [0, 3]: b
      [1, 0]: default_value
      [2, 0]: c
      [3, 1]: d
      [4, 0]: default_value

  Trailing empty columns in the input have no effect.  The output is in
  row-major order and has the same shape as the input.  An indicator
  vector is also returned, with `empty_row_indicator[i] = True` iff row i
  was empty.

  Args:
    sp_input: A `SparseTensor` with shape `[N, M]`.
    default_value: The value to fill for empty rows, with the same type as
      `sp_input.`
    name: A name prefix for the returned tensors (optional)

  Returns:
    sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all
      empty rows filled in with `default_value`.
    empty_row_indicator: A bool vector of length `N` indicating whether
      each input row was empty.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
    default_value = ops.convert_to_tensor(
        default_value, dtype=sp_input.values.dtype)

    # Rows that appear in no index tuple are the empty ones.
    num_rows = math_ops.cast(sp_input.dense_shape[0], dtypes.int32)
    all_rows = math_ops.cast(math_ops.range(num_rows), dtypes.int64)
    empty_rows, _ = array_ops.setdiff1d(all_rows, sp_input.indices[:, 0])
    empty_row_indicator = sparse_to_dense(
        empty_rows,
        array_ops.expand_dims(sp_input.dense_shape[0], -1), True,
        False)

    # One filler entry per empty row, placed at column 0.
    empty_rows_as_column = array_ops.reshape(empty_rows, [-1, 1])
    filler_indices = array_ops.concat([
        empty_rows_as_column,
        array_ops.zeros_like(empty_rows_as_column)
    ], 1)
    filler_values = array_ops.fill(
        array_ops.shape(empty_rows), default_value)

    combined_indices = array_ops.concat(
        [sp_input.indices, filler_indices], 0)
    combined_values = array_ops.concat(
        [sp_input.values, filler_values], 0)
    unordered = sparse_tensor.SparseTensor(
        combined_indices, combined_values, sp_input.dense_shape)
    # Appending the fillers breaks row-major order; restore it.
    sp_ordered_output = sparse_reorder(unordered)

    return sp_ordered_output, empty_row_indicator
def serialize_sparse(sp_input, name=None):
  """Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string 3-vector (1D `Tensor`), whose elements are the serialized
    indices, values, and shape of `sp_input`, in that order.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp = _convert_to_sparse_tensor(sp_input)
  return gen_sparse_ops._serialize_sparse(
      sp.indices,
      sp.values,
      sp.dense_shape,
      name=name)
def serialize_many_sparse(sp_input, name=None):
  """Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.

  `sp_input` must have rank `R` greater than 1; its first dimension is
  treated as the minibatch dimension, and its elements must be sorted in
  increasing order of that dimension.  Each row of the output holds one
  serialized rank `R-1` `SparseTensor`.  The minibatch size `N` is
  extracted from `sparse_shape[0]`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string matrix (2-D `Tensor`) with `N` rows and `3` columns, whose
    columns represent the serialized indices, values, and shape
    (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp = _convert_to_sparse_tensor(sp_input)
  return gen_sparse_ops._serialize_many_sparse(
      sp.indices,
      sp.values,
      sp.dense_shape,
      name=name)
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
  """Deserialize and concatenate `SparseTensors` from a serialized minibatch.

  `serialized_sparse` must be a string matrix of shape `[N, 3]` whose rows
  are packed outputs of `serialize_sparse`; all of the original
  `SparseTensor` objects must share the same rank.  The result has rank one
  higher than the inputs: a new minibatch dimension of size `N` is
  prepended, and each remaining shape value is the max across the inputs'
  corresponding shape values.

  The inputs' indices are assumed ordered in standard lexicographic order;
  if this is not the case, run `sparse_reorder` on the result afterwards.

  For example, if the serialized input is a `[2, 3]` matrix representing
  two original `SparseTensor` objects

      index = [[0], [10], [20]], values = [1, 2, 3], shape = [50]

  and

      index = [[2], [10]], values = [4, 5], shape = [30]

  then the final deserialized `SparseTensor` is

      index = [[0, 0], [0, 10], [0, 20], [1, 2], [1, 10]]
      values = [1, 2, 3, 4, 5]
      shape = [2, 50]

  Args:
    serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
      The serialized and packed `SparseTensor` objects.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.  All of the
    serialized `SparseTensor`s must have had the same rank and type.
  """
  indices, values, shape = (
      gen_sparse_ops._deserialize_many_sparse(
          serialized_sparse, dtype, name=name))

  # Re-attach statically-known rank information, when provided.
  indices.set_shape([None, rank])
  shape.set_shape([rank])

  return sparse_tensor.SparseTensor(indices, values, shape)
def sparse_tensor_dense_matmul(sp_a,
                               b,
                               adjoint_a=False,
                               adjoint_b=False,
                               name=None):
  """Multiply SparseTensor (of rank 2) "A" by dense matrix "B".

  No validity checking is performed on the indices of A.  However, for
  optimal behavior, A should be sorted in lexicographically increasing
  order when `adjoint_a` is false (use `sparse_reorder` if unsure), and in
  order of increasing dimension 1 ("column major" order) when `adjoint_a`
  is true.

  Deciding when to use `sparse_tensor_dense_matmul` vs `matmul(sp_a=True)`:
  consider whether the SparseTensor A would fit in memory if densified,
  whether the column count of the product is large (>> 1), and whether the
  density of A exceeds roughly 15%.  If the answer to several of these is
  yes, a densified A with `tf.matmul(sp_a=True)` may be faster.  This op
  tends to perform well when A is sparse, the column size of the product
  is small (e.g. matrix-vector multiplication), and `sp_a.dense_shape`
  takes on large values.  Detailed CPU/GPU timings are produced by
  `tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks`.

  Args:
    sp_a: SparseTensor A, of rank 2.
    b: A dense Matrix with the same dtype as sp_a.
    adjoint_a: Use the adjoint of A in the matrix multiply.  If A is
      complex, this is transpose(conj(A)).  Otherwise it's transpose(A).
    adjoint_b: Use the adjoint of B in the matrix multiply.  If B is
      complex, this is transpose(conj(B)).  Otherwise it's transpose(B).
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense matrix (pseudo-code in dense np.matrix notation):
      A = A.H if adjoint_a else A
      B = B.H if adjoint_b else B
      return A*B
  """
  sp_a = _convert_to_sparse_tensor(sp_a)
  with ops.name_scope(name, "SparseTensorDenseMatMul",
                      [sp_a.indices, sp_a.values, b]) as name:
    dense_b = ops.convert_to_tensor(b, name="b")
    return gen_sparse_ops._sparse_tensor_dense_mat_mul(
        a_indices=sp_a.indices,
        a_values=sp_a.values,
        a_shape=sp_a.dense_shape,
        b=dense_b,
        adjoint_a=adjoint_a,
        adjoint_b=adjoint_b)
def sparse_softmax(sp_input, name=None):
  """Applies softmax to a batched N-D `SparseTensor`.

  The input represents an N-D SparseTensor with logical shape `[..., B, C]`
  (where `N >= 2`) whose indices are sorted in canonical lexicographic
  order.  The op is equivalent to applying `tf.nn.softmax()` to a densified
  view of each innermost `[B, C]` submatrix along the size-C dimension,
  except that *the implicitly zero elements do not participate*: they are
  masked out and the remaining elements renormalized.  The result therefore
  has exactly the same non-zero indices and shape as the input.

  Example:

  ```python
  # First batch:
  # [?   e.]
  # [1.  ? ]
  # Second batch:
  # [e   ? ]
  # [e   e ]
  shape = [2, 2, 2]  # 3-D SparseTensor
  values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
  indices = np.vstack(np.where(values)).astype(np.int64).T

  result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))
  # ...returning a 3-D SparseTensor, equivalent to:
  # [?   1.]     [1    ?]
  # [1.  ? ] and [.5  .5]
  # where ? means implicitly zero.
  ```

  Args:
    sp_input: N-D `SparseTensor`, where `N >= 2`.
    name: optional name of the operation.

  Returns:
    output: N-D `SparseTensor` representing the results.
  """
  with ops.name_scope(name, "SparseSoftmax",
                      [sp_input.indices, sp_input.values]) as name:
    softmax_values = gen_sparse_ops.sparse_softmax(
        sp_input.indices, sp_input.values, sp_input.dense_shape)
    return sparse_tensor.SparseTensor(
        sp_input.indices, softmax_values, sp_input.dense_shape)
def sparse_maximum(sp_a, sp_b, name=None):
  """Returns the element-wise max of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Example:

  ```python
  sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
  sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
  res = tf.sparse_maximum(sp_zero, sp_one).eval()
  # "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
  ```

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and
      the same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  scope_inputs = [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]
  with ops.name_scope(name, "SparseSparseMaximum", scope_inputs) as name:
    max_indices, max_values = gen_sparse_ops.sparse_sparse_maximum(
        sp_a.indices, sp_a.values, sp_a.dense_shape,
        sp_b.indices, sp_b.values, sp_b.dense_shape,
        name=name)
  return sparse_tensor.SparseTensor(max_indices, max_values, sp_a.dense_shape)
def sparse_minimum(sp_a, sp_b, name=None):
  """Returns the element-wise min of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Example:

  ```python
  sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
  sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
  res = tf.sparse_minimum(sp_zero, sp_one).eval()
  # "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
  ```

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and
      the same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  scope_inputs = [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]
  with ops.name_scope(name, "SparseSparseMinimum", scope_inputs) as name:
    min_indices, min_values = gen_sparse_ops.sparse_sparse_minimum(
        sp_a.indices, sp_a.values, sp_a.dense_shape,
        sp_b.indices, sp_b.values, sp_b.dense_shape,
        name=name)
  return sparse_tensor.SparseTensor(min_indices, min_values, sp_a.dense_shape)
def sparse_transpose(sp_input, perm=None, name=None):
  """Transposes a `SparseTensor`.

  The returned tensor's dimension i will correspond to the input dimension
  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
  the rank of the input tensor. Hence by default, this operation performs a
  regular matrix transpose on 2-D input Tensors.

  For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:

      [0, 3]: b
      [0, 1]: a
      [3, 1]: d
      [2, 0]: c

  then the output will be a `SparseTensor` of shape `[5, 4]` and
  `indices` / `values`:

      [0, 2]: c
      [1, 0]: a
      [1, 3]: d
      [3, 0]: b

  Args:
    sp_input: The input `SparseTensor`.
    perm: A permutation of the dimensions of `sp_input`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A transposed `SparseTensor`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  # Use ops.name_scope here for consistency with every other op in this
  # file: ops.op_scope is deprecated (it takes its arguments in a
  # different order) and has been removed in later TensorFlow versions.
  with ops.name_scope(name, "SparseTranspose", [sp_input]) as name:
    if perm is None:
      rank = array_ops.rank(sp_input)
      perm = (rank - 1) - math_ops.range(0, rank, 1)
    indices = sp_input.indices
    # Permute the columns of the [nnz, rank] indices matrix per `perm`.
    transposed_indices = array_ops.transpose(
        array_ops.gather(array_ops.transpose(indices), perm))
    dense_shape = sp_input.dense_shape
    transposed_dense_shape = array_ops.gather(dense_shape, perm)
    transposed_st = sparse_tensor.SparseTensor(
        transposed_indices, sp_input.values,
        transposed_dense_shape)
    # Transposition breaks lexicographic index order; restore it.
    transposed_st = sparse_reorder(transposed_st)
    return transposed_st
def _add_sparse_to_tensors_map(sp_input, container=None,
                               shared_name=None, name=None):
  """Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.

  Args:
    sp_input: The input `SparseTensor`.
    container: The container for the underlying `SparseTensorsMap` (optional).
    shared_name: The shared name for the underlying `SparseTensorsMap`
      (optional, defaults to the name of the newly created op).
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string 1-vector (1-D `Tensor`) whose single element is a unique
    handle to a `SparseTensor` stored by the `SparseTensorMap` underlying
    this op.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp = _convert_to_sparse_tensor(sp_input)
  return gen_sparse_ops._add_sparse_to_tensors_map(
      sp.indices,
      sp.values,
      sp.dense_shape,
      container=container,
      shared_name=shared_name,
      name=name)
def _add_many_sparse_to_tensors_map(sp_input, container=None,
                                    shared_name=None, name=None):
  """Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.

  `sp_input` must have rank `R` greater than 1; its first dimension is
  treated as the minibatch dimension, and its elements must be sorted in
  increasing order of that dimension.  The serialized `SparseTensor`
  objects going into each row of the output `Tensor` have rank `R-1`.
  The minibatch size `N` is extracted from `sparse_shape[0]`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    container: The container for the underlying `SparseTensorsMap` (optional).
    shared_name: The shared name for the underlying `SparseTensorsMap`
      (optional, defaults to the name of the newly created op).
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string matrix (2-D `Tensor`) with `N` rows and `1` column.  Each row
    is a unique handle to a `SparseTensor` stored by the `SparseTensorMap`
    underlying this op.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp = _convert_to_sparse_tensor(sp_input)
  return gen_sparse_ops._add_many_sparse_to_tensors_map(
      sp.indices,
      sp.values,
      sp.dense_shape,
      container=container,
      shared_name=shared_name,
      name=name)
def _take_many_sparse_from_tensors_map(
    sparse_map_op, sparse_handles, rank=None, name=None):
  """Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.

  The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
  `N` is the minibatch size and the rows correspond to packed outputs of
  `add_sparse_to_tensors_map`. The ranks of the original `SparseTensor`
  objects must all match. When the final `SparseTensor` is created, it has
  rank one higher than the ranks of the incoming `SparseTensor` objects
  (they have been concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions. Its first shape value is `N`, the
  minibatch size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order. If this is not the case, after this
  step run `sparse_reorder` to restore index ordering.

  For example, if the serialized input is a `[2, 3]` matrix representing two
  original `SparseTensor` objects

      index = [[0], [10], [20]], values = [1, 2, 3], shape = [50]

  and

      index = [[2], [10]], values = [4, 5], shape = [30]

  then the final deserialized `SparseTensor` will be

      index = [[0, 0], [0, 10], [0, 20], [1, 2], [1, 10]]
      values = [1, 2, 3, 4, 5]
      shape = [2, 50]

  Args:
    sparse_map_op: The `Operation` that created the original handles.
      Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
    sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
      The serialized and packed `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.
    All of the serialized `SparseTensor`s must have had the same rank and
    type.

  Raises:
    TypeError: If `sparse_map_op` is not an `Operation`, or is not one of
      the two tensors-map-producing op types.
  """
  if not isinstance(sparse_map_op, ops.Operation):
    # Message fixed: previously read "sparse_map_op be an Operation".
    raise TypeError("sparse_map_op must be an Operation")
  if sparse_map_op.type not in ("AddSparseToTensorsMap",
                                "AddManySparseToTensorsMap"):
    # Message fixed: previously listed "AddSparseToTensorsMap" twice
    # instead of naming AddManySparseToTensorsMap as the second option.
    raise TypeError("sparse_map_op must be one of AddSparseToTensorsMap or "
                    "AddManySparseToTensorsMap. Instead, found `%s`." %
                    sparse_map_op.type)
  with ops.colocate_with(sparse_map_op):
    shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
    output_indices, output_values, output_shape = (
        gen_sparse_ops._take_many_sparse_from_tensors_map(
            sparse_handles, dtype=sparse_map_op.get_attr("T"),
            container=sparse_map_op.get_attr("container"),
            shared_name=shared_name, name=name))

  # Feed rank data back in, if available
  output_indices.set_shape([None, rank])
  output_shape.set_shape([rank])

  return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
| 37.885546 | 80 | 0.674224 |
3a57632dfda4670ecbd16a034480b2d69e54711c | 23,082 | py | Python | runtime/bindings/python/tests/test_ngraph/test_ops.py | akhakimova/openvino | 3a588476cd7a34bdc8ad02b85c14dc939747a282 | [
"Apache-2.0"
] | 2,406 | 2020-04-22T15:47:54.000Z | 2022-03-31T10:27:37.000Z | runtime/bindings/python/tests/test_ngraph/test_ops.py | akhakimova/openvino | 3a588476cd7a34bdc8ad02b85c14dc939747a282 | [
"Apache-2.0"
] | 4,948 | 2020-04-22T15:12:39.000Z | 2022-03-31T18:45:42.000Z | runtime/bindings/python/tests/test_ngraph/test_ops.py | sbalandi/openvino | 519951a4a9f979c1b04529dda821111c56113716 | [
"Apache-2.0"
] | 991 | 2020-04-23T18:21:09.000Z | 2022-03-31T18:40:57.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# flake8: noqa
import numpy as np
import openvino.opset8 as ov
from openvino.impl import AxisSet, Function, Shape, Type
from openvino.impl.op import Constant, Parameter
from tests.runtime import get_runtime
from tests import xfail_issue_67415
def binary_op(op_str, a, b):
    """Build the ngraph node for the binary operation named by `op_str`."""
    dispatch = {
        "+": lambda x, y: x + y,
        "Add": ov.add,
        "-": lambda x, y: x - y,
        "Sub": ov.subtract,
        "*": lambda x, y: x * y,
        "Mul": ov.multiply,
        "/": lambda x, y: x / y,
        "Div": ov.divide,
        "Equal": ov.equal,
        "Greater": ov.greater,
        "GreaterEq": ov.greater_equal,
        "Less": ov.less,
        "LessEq": ov.less_equal,
        "Maximum": ov.maximum,
        "Minimum": ov.minimum,
        "NotEqual": ov.not_equal,
        "Power": ov.power,
    }
    handler = dispatch.get(op_str)
    # Unknown operation names fall through to None, as before.
    return handler(a, b) if handler is not None else None
def binary_op_ref(op_str, a, b):
    """NumPy reference result for the binary operation named by `op_str`."""
    dispatch = {
        "+": lambda x, y: x + y,
        "Add": lambda x, y: x + y,
        "-": lambda x, y: x - y,
        "Sub": lambda x, y: x - y,
        "*": lambda x, y: x * y,
        "Mul": lambda x, y: x * y,
        "/": lambda x, y: x / y,
        "Div": lambda x, y: x / y,
        "Dot": np.dot,
        "Equal": np.equal,
        "Greater": np.greater,
        "GreaterEq": np.greater_equal,
        "Less": np.less,
        "LessEq": np.less_equal,
        "Maximum": np.maximum,
        "Minimum": np.minimum,
        "NotEqual": np.not_equal,
        "Power": np.power,
    }
    handler = dispatch.get(op_str)
    # Unknown operation names fall through to None, as before.
    return handler(a, b) if handler is not None else None
def binary_op_exec(op_str):
    """Run `op_str` through the runtime and compare with the NumPy reference."""
    element_type = Type.f32
    shape = Shape([2, 2])
    param_a = Parameter(element_type, shape)
    param_b = Parameter(element_type, shape)
    model = Function([binary_op(op_str, param_a, param_b)],
                     [param_a, param_b], "test")

    lhs = np.array([[1, 6], [7, 4]], dtype=np.float32)
    rhs = np.array([[5, 2], [3, 8]], dtype=np.float32)

    computation = get_runtime().computation(model, param_a, param_b)
    result = computation(lhs, rhs)[0]

    expected = binary_op_ref(op_str, lhs, rhs)
    assert np.allclose(result, expected)
def binary_op_comparison(op_str):
    """Run comparison op `op_str` through the runtime and check the result."""
    element_type = Type.f32
    shape = Shape([2, 2])
    param_a = Parameter(element_type, shape)
    param_b = Parameter(element_type, shape)
    model = Function([binary_op(op_str, param_a, param_b)],
                     [param_a, param_b], "test")

    lhs = np.array([[1, 5], [3, 2]], dtype=np.float32)
    rhs = np.array([[2, 4], [3, 1]], dtype=np.float32)

    computation = get_runtime().computation(model, param_a, param_b)
    result = computation(lhs, rhs)[0]

    expected = binary_op_ref(op_str, lhs, rhs)
    assert np.allclose(result, expected)
# One pytest case per supported binary operation; each delegates to
# binary_op_exec (arithmetic) or binary_op_comparison (comparisons).
def test_add():
    """`+` operator maps to ngraph Add."""
    binary_op_exec("+")
def test_add_op():
    """Explicit `Add` op name."""
    binary_op_exec("Add")
def test_sub():
    """`-` operator maps to ngraph Subtract."""
    binary_op_exec("-")
def test_sub_op():
    """Explicit `Sub` op name."""
    binary_op_exec("Sub")
def test_mul():
    """`*` operator maps to ngraph Multiply."""
    binary_op_exec("*")
def test_mul_op():
    """Explicit `Mul` op name."""
    binary_op_exec("Mul")
def test_div():
    """`/` operator maps to ngraph Divide."""
    binary_op_exec("/")
def test_div_op():
    """Explicit `Div` op name."""
    binary_op_exec("Div")
def test_maximum():
    """Element-wise maximum."""
    binary_op_exec("Maximum")
def test_minimum():
    """Element-wise minimum."""
    binary_op_exec("Minimum")
def test_power():
    """Element-wise power."""
    binary_op_exec("Power")
def test_greater():
    """Element-wise greater-than comparison."""
    binary_op_comparison("Greater")
def test_greater_eq():
    """Element-wise greater-or-equal comparison."""
    binary_op_comparison("GreaterEq")
def test_less():
    """Element-wise less-than comparison."""
    binary_op_comparison("Less")
def test_less_eq():
    """Element-wise less-or-equal comparison."""
    binary_op_comparison("LessEq")
def test_not_equal():
    """Element-wise not-equal comparison."""
    binary_op_comparison("NotEqual")
def test_add_with_mul():
    """Check that (A + B) * C computed by the runtime matches NumPy."""
    element_type = Type.f32
    shape = Shape([4])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
    C = Parameter(element_type, shape)
    parameter_list = [A, B, C]
    function = Function([ov.multiply(ov.add(A, B), C)], parameter_list, "test")

    # Define each input once and reuse it for both execution and the NumPy
    # reference (the original duplicated these array literals).
    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    b_arr = np.array([5, 6, 7, 8], dtype=np.float32)
    c_arr = np.array([9, 10, 11, 12], dtype=np.float32)

    runtime = get_runtime()
    computation = runtime.computation(function, A, B, C)
    result = computation(a_arr, b_arr, c_arr)[0]

    result_arr_ref = (a_arr + b_arr) * c_arr
    assert np.allclose(result, result_arr_ref)
def unary_op(op_str, a):
    """Return the OpenVINO node for the unary operation named *op_str*.

    Unknown names yield None, matching the original if/elif fallthrough.
    """
    builders = {
        "Abs": ov.abs,
        "Acos": ov.acos,
        "Acosh": ov.acosh,
        "Asin": ov.asin,
        "Asinh": ov.asinh,
        "Atan": ov.atan,
        "Atanh": ov.atanh,
        "Ceiling": ov.ceiling,
        "Cos": ov.cos,
        "Cosh": ov.cosh,
        "Floor": ov.floor,
        "log": ov.log,
        "exp": ov.exp,
        "negative": ov.negative,
        "Sign": ov.sign,
        "Sin": ov.sin,
        "Sinh": ov.sinh,
        "Sqrt": ov.sqrt,
        "Tan": ov.tan,
        "Tanh": ov.tanh,
    }
    build = builders.get(op_str)
    if build is not None:
        return build(a)
def unary_op_ref(op_str, a):
    """Numpy reference implementation for the unary op named *op_str*.

    Returns None for unrecognised names, like the original if/elif chain.
    """
    references = {
        "Abs": np.abs,
        "Acos": np.arccos,
        "Acosh": np.arccosh,
        "Asin": np.arcsin,
        "Asinh": np.arcsinh,
        "Atan": np.arctan,
        "Atanh": np.arctanh,
        "Ceiling": np.ceil,
        "Cos": np.cos,
        "Cosh": np.cosh,
        "Floor": np.floor,
        "log": np.log,
        "exp": np.exp,
        "negative": np.negative,
        "Reverse": np.fliplr,
        "Sign": np.sign,
        "Sin": np.sin,
        "Sinh": np.sinh,
        "Sqrt": np.sqrt,
        "Tan": np.tan,
        "Tanh": np.tanh,
    }
    fn = references.get(op_str)
    if fn is not None:
        return fn(a)
def unary_op_exec(op_str, input_list):
    """Run the single-input op named *op_str* and compare against numpy.

    input_list needs to have deep length of 4
    """
    data = np.array(input_list, dtype=np.float32)
    A = Parameter(Type.f32, Shape(data.shape))
    params = [A]
    model = Function([unary_op(op_str, A)], params, "test")
    computation = get_runtime().computation(model, *params)
    actual = computation(data)[0]
    assert np.allclose(actual, unary_op_ref(op_str, data))
# Thin pytest wrappers: each runs one elementwise unary operator through
# unary_op_exec with a small hand-picked 4-element input in the op's domain.
def test_abs():
    input_list = [-1, 0, 1, 2]
    op_str = "Abs"
    unary_op_exec(op_str, input_list)
def test_acos():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Acos"
    unary_op_exec(op_str, input_list)
def test_acosh():
    input_list = [2., 3., 1.5, 1.0]
    op_str = "Acosh"
    unary_op_exec(op_str, input_list)
def test_asin():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Asin"
    unary_op_exec(op_str, input_list)
def test_asinh():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Asinh"
    unary_op_exec(op_str, input_list)
def test_atan():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Atan"
    unary_op_exec(op_str, input_list)
def test_atanh():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Atanh"
    unary_op_exec(op_str, input_list)
def test_ceiling():
    input_list = [0.5, 0, 0.4, 0.5]
    op_str = "Ceiling"
    unary_op_exec(op_str, input_list)
def test_cos():
    input_list = [0, 0.7, 1.7, 3.4]
    op_str = "Cos"
    unary_op_exec(op_str, input_list)
def test_cosh():
    input_list = [-1, 0.0, 0.5, 1]
    op_str = "Cosh"
    unary_op_exec(op_str, input_list)
def test_floor():
    input_list = [-0.5, 0, 0.4, 0.5]
    op_str = "Floor"
    unary_op_exec(op_str, input_list)
def test_log():
    input_list = [1, 2, 3, 4]
    op_str = "log"
    unary_op_exec(op_str, input_list)
def test_exp():
    input_list = [-1, 0, 1, 2]
    op_str = "exp"
    unary_op_exec(op_str, input_list)
def test_negative():
    input_list = [-1, 0, 1, 2]
    op_str = "negative"
    unary_op_exec(op_str, input_list)
def test_sign():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Sign"
    unary_op_exec(op_str, input_list)
def test_sin():
    input_list = [0, 0.7, 1.7, 3.4]
    op_str = "Sin"
    unary_op_exec(op_str, input_list)
def test_sinh():
    input_list = [-1, 0.0, 0.5, 1]
    op_str = "Sinh"
    unary_op_exec(op_str, input_list)
def test_sqrt():
    input_list = [0.0, 0.5, 1, 2]
    op_str = "Sqrt"
    unary_op_exec(op_str, input_list)
def test_tan():
    input_list = [-np.pi / 4, 0, np.pi / 8, np.pi / 8]
    op_str = "Tan"
    unary_op_exec(op_str, input_list)
def test_tanh():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Tanh"
    unary_op_exec(op_str, input_list)
def test_reshape():
    """Reshape a 2x3 tensor to 3x2 and compare against np.reshape."""
    element_type = Type.f32
    shape = Shape([2, 3])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function([ov.reshape(A, Shape([3, 2]), special_zero=False)], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    # The input was previously wrapped in a redundant second np.array() call;
    # build it once and reuse it for the expected value.
    input_arr = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    result = computation(input_arr)[0]
    expected = np.reshape(input_arr, (3, 2))
    assert np.allclose(result, expected)
def test_broadcast():
    """Broadcast a length-3 vector to a 3x3 tensor."""
    element_type = Type.f32
    A = Parameter(element_type, Shape([3]))
    parameter_list = [A]
    function = Function([ov.broadcast(A, [3, 3])], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(np.array([1, 2, 3], dtype=np.float32))[0]
    # Expected value via numpy broadcasting: adding a 3x1 zero column to the
    # 1x3 row replicates [1, 2, 3] into every row of a 3x3 array.
    a_arr = np.array([[0], [0], [0]], dtype=np.float32)
    b_arr = np.array([[1, 2, 3]], dtype=np.float32)
    expected = np.add(a_arr, b_arr)
    assert np.allclose(result, expected)
def test_constant():
    """A parameterless graph holding a 3x3 Constant should evaluate to it."""
    element_type = Type.f32
    parameter_list = []
    function = Function([Constant(element_type, Shape([3, 3]), list(range(9)))], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation()[0]
    expected = np.arange(9).reshape(3, 3)
    assert np.allclose(result, expected)
def test_concat():
    """Concatenate three 1x2 tensors along axis 0 and compare with numpy."""
    axis = 0
    pieces = [Parameter(Type.f32, Shape([1, 2])) for _ in range(3)]
    model = Function([ov.concat(pieces, axis)], pieces, "test")
    arrays = [
        np.array([[1, 2]], dtype=np.float32),
        np.array([[5, 6]], dtype=np.float32),
        np.array([[7, 8]], dtype=np.float32),
    ]
    computation = get_runtime().computation(model, *pieces)
    actual = computation(*arrays)[0]
    assert np.allclose(actual, np.concatenate(arrays, axis))
def test_axisset():
    """AxisSet should accept set, list, and tuple initializers equivalently."""
    expected = {1, 2, 3}
    for initializer in ({1, 2, 3}, [1, 2, 3], (1, 2, 3)):
        axisset = AxisSet(initializer)
        assert len(axisset) == 3
        assert set(axisset) == expected
@xfail_issue_67415
def test_select():
    """Elementwise select: take from B where the mask is True, else from C."""
    element_type = Type.f32
    A = Parameter(Type.boolean, Shape([1, 2]))
    B = Parameter(element_type, Shape([1, 2]))
    C = Parameter(element_type, Shape([1, 2]))
    parameter_list = [A, B, C]
    function = Function([ov.select(A, B, C)], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(
        # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin bool is the supported spelling and behaves identically.
        np.array([[True, False]], dtype=bool),
        np.array([[5, 6]], dtype=np.float32),
        np.array([[7, 8]], dtype=np.float32),
    )[0]
    expected = np.array([[5, 8]])
    assert np.allclose(result, expected)
def test_max_pool():
    """Exercise ov.max_pool in four configurations: 1-D, 1-D strided, 2-D, 2-D strided.

    Each section rebuilds the model with new pooling attributes; `runtime`,
    `input_arr`, and the attribute variables are deliberately reused between
    sections.
    """
    # test 1d
    element_type = Type.f32
    shape = Shape([1, 1, 10])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    input_arr = np.arange(10, dtype=np.float32).reshape([1, 1, 10])
    window_shape = [3]
    strides = [1] * len(window_shape)
    dilations = [1] * len(window_shape)
    pads_begin = [0] * len(window_shape)
    pads_end = [0] * len(window_shape)
    rounding_type = "floor"
    auto_pad = "explicit"
    idx_elem_type = "i32"
    model = ov.max_pool(
        A,
        strides,
        dilations,
        pads_begin,
        pads_end,
        window_shape,
        rounding_type,
        auto_pad,
        idx_elem_type,
    )
    function = Function([model], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(input_arr)[0]
    # For a monotonically increasing input the window max is its last element.
    expected = (np.arange(8) + 2).reshape(1, 1, 8)
    assert np.allclose(result, expected)
    # test 1d with strides
    strides = [2]
    pads_begin = [0] * len(window_shape)
    pads_end = [0] * len(window_shape)
    model = ov.max_pool(
        A,
        strides,
        dilations,
        pads_begin,
        pads_end,
        window_shape,
        rounding_type,
        auto_pad,
        idx_elem_type,
    )
    function = Function([model], parameter_list, "test")
    size = 4
    computation = runtime.computation(function, *parameter_list)
    result = computation(input_arr)[0]
    expected = ((np.arange(size) + 1) * 2).reshape(1, 1, size)
    assert np.allclose(result, expected)
    # test 2d
    element_type = Type.f32
    shape = Shape([1, 1, 10, 10])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    input_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    window_shape = [3, 3]
    strides = [1, 1]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    model = ov.max_pool(
        A,
        strides,
        dilations,
        pads_begin,
        pads_end,
        window_shape,
        rounding_type,
        auto_pad,
        idx_elem_type,
    )
    function = Function([model], parameter_list, "test")
    computation = runtime.computation(function, *parameter_list)
    result = computation(input_arr)[0]
    # Row-major increasing input: each 3x3 window's max is its bottom-right cell.
    expected = ((np.arange(100).reshape(10, 10))[2:, 2:]).reshape(1, 1, 8, 8)
    assert np.allclose(result, expected)
    # test 2d with strides
    strides = [2, 2]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    model = ov.max_pool(
        A,
        strides,
        dilations,
        pads_begin,
        pads_end,
        window_shape,
        rounding_type,
        auto_pad,
        idx_elem_type,
    )
    function = Function([model], parameter_list, "test")
    computation = runtime.computation(function, *parameter_list)
    result = computation(input_arr)[0]
    size = 4
    expected = ((np.arange(100).reshape(10, 10))[2::2, 2::2]).reshape(1, 1, size, size)
    assert np.allclose(result, expected)
def convolution2d(
    image,
    filterit,
    strides=(1, 1),
    dilation=(1, 1),
    padding_below=(0, 0),
    padding_above=(0, 0),
    data_dilation=(1, 1),
):
    """Reference 2-D convolution (cross-correlation) of *image* with *filterit*.

    Supports strides, filter dilation, asymmetric zero padding, and data
    dilation, matching the attribute set of ov.convolution. Returns a
    float32 array of shape (out_rows, out_cols).
    """
    def dilate(arr, dil=(1, 1)):
        # Insert dil-1 zeros between neighbouring elements along each axis.
        m, n = arr.shape
        out = np.zeros(((m - 1) * dil[0] + 1, (n - 1) * dil[1] + 1), dtype=np.float32)
        out[:: dil[0], :: dil[1]] = arr
        return out
    # Zero-pad the image, then apply data dilation and filter dilation.
    i_m, i_n = image.shape
    padded = np.zeros(
        (i_m + padding_below[0] + padding_above[0], i_n + padding_below[1] + padding_above[1]),
        dtype=np.float32,
    )
    padded[padding_below[0] : padding_below[0] + i_m, padding_below[1] : padding_below[1] + i_n] = image
    image = padded
    image = image if data_dilation[0] == data_dilation[1] == 1 else dilate(image, data_dilation)
    i_m, i_n = image.shape
    filterit = filterit if dilation[0] == dilation[1] == 1 else dilate(filterit, dilation)
    f_m, f_n = filterit.shape
    # Output size is floor((i - f) / stride) + 1. The previous formula,
    # (i - f + 1) // stride, undercounted by one whenever (i - f) was an
    # exact multiple of the stride (e.g. i=6, f=2, stride=2: 2 vs 3 windows).
    r_m = (i_m - f_m) // strides[0] + 1
    r_n = (i_n - f_n) // strides[1] + 1
    result = np.zeros((r_m, r_n), dtype=np.float32)
    for i in range(r_m):
        for j in range(r_n):
            window = image[i * strides[0] : i * strides[0] + f_m, j * strides[1] : j * strides[1] + f_n]
            result[i][j] = np.sum(window * filterit)
    return result
def test_convolution_simple():
    """2-D convolution with a hand-built 3x3 kernel; no strides/dilation/padding."""
    element_type = Type.f32
    image_shape = Shape([1, 1, 16, 16])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16)
    # Kernel of ones with -1 placed on both diagonals of the 3x3 window.
    filter_arr = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)
    filter_arr[0][0][0][0] = -1
    filter_arr[0][0][1][1] = -1
    filter_arr[0][0][2][2] = -1
    filter_arr[0][0][0][2] = -1
    filter_arr[0][0][2][0] = -1
    strides = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    dilations = [1, 1]
    model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(image_arr, filter_arr)[0]
    # Compare against the pure-numpy reference implementation above.
    expected = convolution2d(image_arr[0][0], filter_arr[0][0]).reshape(1, 1, 14, 14)
    assert np.allclose(result, expected)
def test_convolution_with_strides():
    """2-D convolution with stride 2; the kernel selects the window's center."""
    element_type = Type.f32
    image_shape = Shape([1, 1, 10, 10])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(100, dtype=np.float32).reshape([1, 1, 10, 10])
    # All-zero kernel except the center, so the output picks center pixels.
    filter_arr = np.zeros(9, dtype=np.float32).reshape([1, 1, 3, 3])
    filter_arr[0][0][1][1] = 1
    strides = [2, 2]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    dilations = [1, 1]
    model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(image_arr, filter_arr)[0]
    expected = convolution2d(image_arr[0][0], filter_arr[0][0], strides).reshape(1, 1, 4, 4)
    assert np.allclose(result, expected)
def test_convolution_with_filter_dilation():
    """2-D convolution with filter dilation 2 (effective kernel size 5x5)."""
    element_type = Type.f32
    image_shape = Shape([1, 1, 10, 10])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(100, dtype=np.float32).reshape([1, 1, 10, 10])
    filter_arr = np.ones(9, dtype=np.float32).reshape([1, 1, 3, 3])
    strides = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    dilations = [2, 2]
    model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(image_arr, filter_arr)[0]
    expected = convolution2d(image_arr[0][0], filter_arr[0][0], strides, dilations).reshape([1, 1, 6, 6])
    assert np.allclose(result, expected)
def test_convolution_with_padding():
    """Dilated 2-D convolution with explicitly supplied (zero) pads.

    NOTE(review): despite the name, pads_begin/pads_end are all zero here —
    only the filter dilation differs from the plain case; the non-zero
    padding variant is covered by test_convolution_with_non_zero_padding.
    Confirm whether non-trivial pads were intended in this test.
    """
    element_type = Type.f32
    image_shape = Shape([1, 1, 10, 10])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    filter_arr = np.zeros(9, dtype=np.float32).reshape(1, 1, 3, 3)
    filter_arr[0][0][1][1] = 1
    strides = [1, 1]
    dilations = [2, 2]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(image_arr, filter_arr)[0]
    expected = convolution2d(
        image_arr[0][0], filter_arr[0][0], strides, dilations, pads_begin, pads_end
    ).reshape([1, 1, 6, 6])
    assert np.allclose(result, expected)
def test_convolution_with_non_zero_padding():
    """Dilated 2-D convolution with asymmetric non-zero padding."""
    element_type = Type.f32
    image_shape = Shape([1, 1, 10, 10])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    # Kernel of -1s with a +1 at the center.
    filter_arr = (np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)) * -1
    filter_arr[0][0][1][1] = 1
    strides = [1, 1]
    dilations = [2, 2]
    pads_begin = [2, 1]
    pads_end = [1, 2]
    model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(image_arr, filter_arr)[0]
    expected = convolution2d(
        image_arr[0][0], filter_arr[0][0], strides, dilations, pads_begin, pads_end
    ).reshape([1, 1, 9, 9])
    assert np.allclose(result, expected)
| 26.839535 | 107 | 0.610952 |
66fab9ecb82e9b05093340fce82cd06d18ad7f60 | 776 | py | Python | etk/data_extractors/digPriceExtractor/digpe/unit/time_units.py | linqyd/etk | dcf0cae4076619f5261573d47b4f5f26baaf15b7 | [
"MIT"
] | null | null | null | etk/data_extractors/digPriceExtractor/digpe/unit/time_units.py | linqyd/etk | dcf0cae4076619f5261573d47b4f5f26baaf15b7 | [
"MIT"
] | null | null | null | etk/data_extractors/digPriceExtractor/digpe/unit/time_units.py | linqyd/etk | dcf0cae4076619f5261573d47b4f5f26baaf15b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: ZwEin
# @Date: 2016-07-04 11:51:22
# @Last Modified by: ZwEin
# @Last Modified time: 2016-07-05 23:08:11
UNIT_TIME_UNITS = [
'hourly',
'half hour',
'half hr',
'half',
'hlf hr',
'hlf hour',
'hf hr',
'h hour',
'h hr',
'h h',
'full hour',
'full hr',
'f hour',
'f hr',
'f h',
'fh',
'hhr',
'hh',
'hf',
'hr',
'hour',
'hummer',
'qk',
'qv',
'qq',
'q',
'minute',
'mins',
'min',
# 'ss',
# 'second',
r'\bh\b'
]
# '30',
UNIT_TIME_HOUR = [
'full hour',
'full hr',
'f hour',
'f hr',
'f h',
'fh',
'hourly',
'hour',
'hr',
r'h'
]
UNIT_TIME_MINUTE = [
'minute',
'min'
] | 12.516129 | 42 | 0.400773 |
285f82be269eb56cdace2a6306669423ca4a4ffa | 929 | py | Python | ws/handler/appliance/light/zone/home_event_presence.py | fabaff/automate-ws | a9442f287692787e3f253e1ff23758bec8f3902e | [
"MIT"
] | null | null | null | ws/handler/appliance/light/zone/home_event_presence.py | fabaff/automate-ws | a9442f287692787e3f253e1ff23758bec8f3902e | [
"MIT"
] | 1 | 2021-12-21T11:34:47.000Z | 2021-12-21T11:34:47.000Z | ws/handler/appliance/light/zone/home_event_presence.py | fabaff/automate-ws | a9442f287692787e3f253e1ff23758bec8f3902e | [
"MIT"
] | 1 | 2021-12-21T10:10:13.000Z | 2021-12-21T10:10:13.000Z | import home
from ws.handler.event.enum import Handler as Parent
class Handler(Parent):
KLASS = home.event.presence.Event
APPLIANCE_KLASS = home.appliance.light.zone.Appliance
TEMPLATE = "event/enum.html"
LABEL = "Is someone in here?"
LABEL1 = "Someone is in here"
LABEL2 = "No one is in here"
def _get_str(self, e):
if e == home.event.presence.Event.On:
return self.YES
elif e == home.event.presence.Event.Off:
return self.NO
return e
def get_description(self, e):
if e == home.event.presence.Event.On:
return self.LABEL1
elif e == home.event.presence.Event.Off:
return self.LABEL2
def get_icon(self, e):
if e == home.event.presence.Event.On:
return "fas fa-sign-in-alt"
elif e == home.event.presence.Event.Off:
return "fas fa-sign-out-alt"
return e
| 27.323529 | 57 | 0.606028 |
35b292794eb168607d0978a80d81c08547d6791d | 1,328 | py | Python | lib/surface/composer/environments/describe.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | lib/surface/composer/environments/describe.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/surface/composer/environments/describe.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to show metadata for an environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.composer import environments_util as environments_api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import resource_args
class Describe(base.DescribeCommand):
  """Get details about a Cloud Composer environment."""

  @staticmethod
  def Args(parser):
    # Register the positional ENVIRONMENT resource argument.
    resource_args.AddEnvironmentResourceArg(parser, 'to describe')

  def Run(self, args):
    # Resolve the environment resource reference and fetch it via the
    # Composer API for the command's release track.
    env_ref = args.CONCEPTS.environment.Parse()
    return environments_api_util.Get(env_ref, release_track=self.ReleaseTrack())
| 36.888889 | 86 | 0.781627 |
c1196050d89ea2c56ef3c159170b595e29443d1d | 348 | py | Python | examples/configs/generic_st7735/landscape_128x160.py | russhughes/lv_st7789 | aa5134383250abd7d26285c01cb344374449e5b8 | [
"MIT"
] | 8 | 2021-08-28T01:41:38.000Z | 2022-01-30T15:49:33.000Z | examples/configs/generic_st7735/landscape_128x160.py | russhughes/lv_st7789 | aa5134383250abd7d26285c01cb344374449e5b8 | [
"MIT"
] | 9 | 2021-08-29T03:01:28.000Z | 2022-01-28T11:57:45.000Z | examples/configs/generic_st7735/landscape_128x160.py | russhughes/lv_st7789 | aa5134383250abd7d26285c01cb344374449e5b8 | [
"MIT"
] | 1 | 2022-01-28T11:58:10.000Z | 2022-01-28T11:58:10.000Z | '''
Generic st7735 128x160 LCD module on esp32
'''
from ili9XXX import ili9341, COLOR_MODE_RGB, MADCTL_MY, MADCTL_MV
disp = ili9341(
mhz=3,
mosi=18,
clk=19,
cs=13,
dc=12,
rst=4,
power=-1,
backlight=15,
backlight_on=1,
width=128,
height=160,
colormode=COLOR_MODE_RGB,
rot=MADCTL_MY | MADCTL_MV)
| 16.571429 | 65 | 0.635057 |
128f1b4515d552527eb7dd38a8f1efede5325474 | 214,483 | py | Python | pyloxi3/loxi/of13/oxm.py | floodlight/loxigen-artifacts | 1822ec984cb6da342bbaa381677071cbbe53cee6 | [
"Apache-2.0"
] | 1 | 2017-06-01T09:41:07.000Z | 2017-06-01T09:41:07.000Z | pyloxi3/loxi/of13/oxm.py | floodlight/loxigen-artifacts | 1822ec984cb6da342bbaa381677071cbbe53cee6 | [
"Apache-2.0"
] | 2 | 2017-07-03T08:50:56.000Z | 2018-03-12T16:16:19.000Z | pyloxi3/loxi/of13/oxm.py | floodlight/loxigen-artifacts | 1822ec984cb6da342bbaa381677071cbbe53cee6 | [
"Apache-2.0"
] | 20 | 2015-02-16T15:23:04.000Z | 2022-03-15T20:06:10.000Z | # Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
from . import util
import functools
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of13']
class oxm(loxi.OFObject):
    """Base class for OpenFlow 1.3 OXM match-field TLVs.

    LOXI-generated (see file header: "Do not modify") — regenerate instead of
    hand-editing. Concrete field classes register themselves in `subtypes`,
    keyed by their 32-bit type/length header word, so `unpack` can dispatch
    to the right subclass from the wire bytes.
    """
    # type_len header word -> concrete subclass; populated at import time.
    subtypes = {}
    def __init__(self, type_len=None):
        if type_len != None:
            self.type_len = type_len
        else:
            self.type_len = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        # Peek at the header word without consuming it; the subclass's own
        # unpack re-reads it (and asserts on the expected value).
        subtype, = reader.peek('!L', 0)
        subclass = oxm.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = oxm()
        obj.type_len = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.type_len != other.type_len: return False
        return True
    def pretty_print(self, q):
        q.text("oxm {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')
class arp_op(oxm):
    """OXM TLV for the ARP opcode field: a 16-bit value ("!H"), unmasked. LOXI-generated."""
    type_len = 2147494402
    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_op()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147494402)
        obj.value = reader.read("!H")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True
    def pretty_print(self, q):
        q.text("arp_op {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[2147494402] = arp_op
class arp_op_masked(oxm):
    """Masked OXM TLV for the ARP opcode: 16-bit value plus 16-bit mask. LOXI-generated."""
    type_len = 2147494660
    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_op_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147494660)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True
    def pretty_print(self, q):
        q.text("arp_op_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[2147494660] = arp_op_masked
class arp_sha(oxm):
    """OXM TLV for the ARP source hardware address: 6-byte value ("!6B"), unmasked. LOXI-generated."""
    type_len = 2147495942
    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = [0,0,0,0,0,0]
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!6B", *self.value))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_sha()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147495942)
        obj.value = list(reader.read('!6B'))
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True
    def pretty_print(self, q):
        q.text("arp_sha {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_mac(self.value))
            q.breakable()
        q.text('}')
oxm.subtypes[2147495942] = arp_sha
class arp_sha_masked(oxm):
    """Masked OXM TLV for the ARP source hardware address: 6-byte value plus 6-byte mask. LOXI-generated."""
    type_len = 2147496204
    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = [0,0,0,0,0,0]
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = [0,0,0,0,0,0]
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!6B", *self.value))
        packed.append(struct.pack("!6B", *self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_sha_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147496204)
        obj.value = list(reader.read('!6B'))
        obj.value_mask = list(reader.read('!6B'))
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True
    def pretty_print(self, q):
        q.text("arp_sha_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_mac(self.value))
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text(util.pretty_mac(self.value_mask))
            q.breakable()
        q.text('}')
oxm.subtypes[2147496204] = arp_sha_masked
class arp_spa(oxm):
    """OXM TLV for the ARP source protocol address: 32-bit value ("!L"), unmasked. LOXI-generated."""
    type_len = 2147494916
    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_spa()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147494916)
        obj.value = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True
    def pretty_print(self, q):
        q.text("arp_spa {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[2147494916] = arp_spa
class arp_spa_masked(oxm):
    """Masked OXM TLV for the ARP source protocol address: 32-bit value plus 32-bit mask. LOXI-generated."""
    type_len = 2147495176
    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_spa_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147495176)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True
    def pretty_print(self, q):
        q.text("arp_spa_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[2147495176] = arp_spa_masked
class arp_tha(oxm):
    """OXM TLV for the ARP target hardware address: 6-byte value ("!6B"), unmasked. LOXI-generated."""
    type_len = 2147496454
    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = [0,0,0,0,0,0]
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!6B", *self.value))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_tha()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147496454)
        obj.value = list(reader.read('!6B'))
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True
    def pretty_print(self, q):
        q.text("arp_tha {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_mac(self.value))
            q.breakable()
        q.text('}')
oxm.subtypes[2147496454] = arp_tha
class arp_tha_masked(oxm):
    """Masked OXM TLV for the ARP target hardware address: 6-byte value plus 6-byte mask. LOXI-generated."""
    type_len = 2147496716
    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = [0,0,0,0,0,0]
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = [0,0,0,0,0,0]
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!6B", *self.value))
        packed.append(struct.pack("!6B", *self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_tha_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147496716)
        obj.value = list(reader.read('!6B'))
        obj.value_mask = list(reader.read('!6B'))
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True
    def pretty_print(self, q):
        q.text("arp_tha_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_mac(self.value))
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text(util.pretty_mac(self.value_mask))
            q.breakable()
        q.text('}')
oxm.subtypes[2147496716] = arp_tha_masked
class arp_tpa(oxm):
    """OXM TLV for the ARP target protocol address: 32-bit value ("!L"), unmasked. LOXI-generated."""
    type_len = 2147495428
    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        return functools.reduce(lambda x,y: x+y, packed)
    @staticmethod
    def unpack(reader):
        obj = arp_tpa()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147495428)
        obj.value = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True
    def pretty_print(self, q):
        q.text("arp_tpa {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[2147495428] = arp_tpa
class arp_tpa_masked(oxm):
    """Maskable OXM TLV ``arp_tpa``: a 32-bit value and a 32-bit mask."""
    type_len = 2147495688

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = arp_tpa_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147495688)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("arp_tpa_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[2147495688] = arp_tpa_masked
class bsn_egr_port_group_id(oxm):
    """BSN-extension OXM TLV ``bsn_egr_port_group_id``: a 32-bit value."""
    type_len = 200196

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_egr_port_group_id()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 200196)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_egr_port_group_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[200196] = bsn_egr_port_group_id
class bsn_egr_port_group_id_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_egr_port_group_id``: a 32-bit value and mask."""
    type_len = 200456

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_egr_port_group_id_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 200456)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_egr_port_group_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[200456] = bsn_egr_port_group_id_masked
class bsn_global_vrf_allowed(oxm):
    """BSN-extension OXM TLV ``bsn_global_vrf_allowed``: an 8-bit value."""
    type_len = 198145

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!B", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_global_vrf_allowed()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 198145)
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_global_vrf_allowed {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[198145] = bsn_global_vrf_allowed
class bsn_global_vrf_allowed_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_global_vrf_allowed``: an 8-bit value and mask."""
    type_len = 198402

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!B", self.value))
        packed.append(struct.pack("!B", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_global_vrf_allowed_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 198402)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_global_vrf_allowed_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[198402] = bsn_global_vrf_allowed_masked
class bsn_in_ports_128(oxm):
    """BSN OXM TLV ``bsn_in_ports_128``: a set of ports packed as a 128-bit bitmap."""
    type_len = 196624

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = set()

    def pack(self):
        """Serialize to wire format: type_len header followed by the bitmap."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(util.pack_bitmap_128(self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_in_ports_128()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 196624)
        obj.value = util.unpack_bitmap_128(reader)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_in_ports_128 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.pp(self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[196624] = bsn_in_ports_128
class bsn_in_ports_128_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_in_ports_128``: two 128-bit port bitmaps (value, mask)."""
    type_len = 196896

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = set()
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = set()

    def pack(self):
        """Serialize to wire format: type_len header, value bitmap, then mask bitmap."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(util.pack_bitmap_128(self.value))
        packed.append(util.pack_bitmap_128(self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_in_ports_128_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 196896)
        obj.value = util.unpack_bitmap_128(reader)
        obj.value_mask = util.unpack_bitmap_128(reader)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_in_ports_128_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.pp(self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.pp(self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[196896] = bsn_in_ports_128_masked
class bsn_in_ports_512(oxm):
    """BSN OXM TLV ``bsn_in_ports_512``: a set of ports packed as a 512-bit bitmap."""
    type_len = 206400

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = set()

    def pack(self):
        """Serialize to wire format: type_len header followed by the bitmap."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(util.pack_bitmap_512(self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_in_ports_512()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 206400)
        obj.value = util.unpack_bitmap_512(reader)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_in_ports_512 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.pp(self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[206400] = bsn_in_ports_512
class bsn_in_ports_512_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_in_ports_512``: two 512-bit port bitmaps (value, mask)."""
    type_len = 206720

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = set()
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = set()

    def pack(self):
        """Serialize to wire format: type_len header, value bitmap, then mask bitmap."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(util.pack_bitmap_512(self.value))
        packed.append(util.pack_bitmap_512(self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_in_ports_512_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 206720)
        obj.value = util.unpack_bitmap_512(reader)
        obj.value_mask = util.unpack_bitmap_512(reader)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_in_ports_512_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.pp(self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.pp(self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[206720] = bsn_in_ports_512_masked
class bsn_ingress_port_group_id(oxm):
    """BSN-extension OXM TLV ``bsn_ingress_port_group_id``: a 32-bit value."""
    type_len = 206852

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_ingress_port_group_id()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 206852)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_ingress_port_group_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[206852] = bsn_ingress_port_group_id
class bsn_ingress_port_group_id_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_ingress_port_group_id``: a 32-bit value and mask."""
    type_len = 207112

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_ingress_port_group_id_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 207112)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_ingress_port_group_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[207112] = bsn_ingress_port_group_id_masked
class bsn_inner_eth_dst(oxm):
    """BSN OXM TLV ``bsn_inner_eth_dst``: a 6-byte MAC stored as a list of ints."""
    type_len = 207878

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = [0,0,0,0,0,0]

    def pack(self):
        """Serialize to wire format: type_len header followed by the 6 bytes."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!6B", *self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_inner_eth_dst()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 207878)
        obj.value = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_inner_eth_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_mac(self.value))
            q.breakable()
        q.text('}')
oxm.subtypes[207878] = bsn_inner_eth_dst
class bsn_inner_eth_dst_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_inner_eth_dst``: a 6-byte MAC value and mask."""
    type_len = 208140

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = [0,0,0,0,0,0]
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = [0,0,0,0,0,0]

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!6B", *self.value))
        packed.append(struct.pack("!6B", *self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_inner_eth_dst_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 208140)
        obj.value = list(reader.read('!6B'))
        obj.value_mask = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_inner_eth_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_mac(self.value))
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text(util.pretty_mac(self.value_mask))
            q.breakable()
        q.text('}')
oxm.subtypes[208140] = bsn_inner_eth_dst_masked
class bsn_inner_eth_src(oxm):
    """BSN OXM TLV ``bsn_inner_eth_src``: a 6-byte MAC stored as a list of ints."""
    type_len = 208390

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = [0,0,0,0,0,0]

    def pack(self):
        """Serialize to wire format: type_len header followed by the 6 bytes."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!6B", *self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_inner_eth_src()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 208390)
        obj.value = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_inner_eth_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_mac(self.value))
            q.breakable()
        q.text('}')
oxm.subtypes[208390] = bsn_inner_eth_src
class bsn_inner_eth_src_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_inner_eth_src``: a 6-byte MAC value and mask."""
    type_len = 208652

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = [0,0,0,0,0,0]
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = [0,0,0,0,0,0]

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!6B", *self.value))
        packed.append(struct.pack("!6B", *self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_inner_eth_src_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 208652)
        obj.value = list(reader.read('!6B'))
        obj.value_mask = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_inner_eth_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_mac(self.value))
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text(util.pretty_mac(self.value_mask))
            q.breakable()
        q.text('}')
oxm.subtypes[208652] = bsn_inner_eth_src_masked
class bsn_inner_vlan_vid(oxm):
    """BSN-extension OXM TLV ``bsn_inner_vlan_vid``: a 16-bit value."""
    type_len = 208898

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_inner_vlan_vid()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 208898)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_inner_vlan_vid {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[208898] = bsn_inner_vlan_vid
class bsn_inner_vlan_vid_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_inner_vlan_vid``: a 16-bit value and mask."""
    type_len = 209156

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_inner_vlan_vid_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 209156)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_inner_vlan_vid_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[209156] = bsn_inner_vlan_vid_masked
class bsn_ip_fragmentation(oxm):
    """BSN-extension OXM TLV ``bsn_ip_fragmentation``: an 8-bit value."""
    type_len = 209921

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!B", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_ip_fragmentation()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 209921)
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_ip_fragmentation {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[209921] = bsn_ip_fragmentation
class bsn_ip_fragmentation_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_ip_fragmentation``: an 8-bit value and mask."""
    type_len = 210178

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!B", self.value))
        packed.append(struct.pack("!B", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_ip_fragmentation_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 210178)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_ip_fragmentation_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[210178] = bsn_ip_fragmentation_masked
class bsn_l2_cache_hit(oxm):
    """BSN-extension OXM TLV ``bsn_l2_cache_hit``: an 8-bit value."""
    type_len = 205825

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!B", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_l2_cache_hit()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 205825)
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_l2_cache_hit {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[205825] = bsn_l2_cache_hit
class bsn_l2_cache_hit_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_l2_cache_hit``: an 8-bit value and mask."""
    type_len = 206082

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!B", self.value))
        packed.append(struct.pack("!B", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_l2_cache_hit_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 206082)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_l2_cache_hit_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[206082] = bsn_l2_cache_hit_masked
class bsn_l3_dst_class_id(oxm):
    """BSN-extension OXM TLV ``bsn_l3_dst_class_id``: a 32-bit value."""
    type_len = 199684

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_l3_dst_class_id()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 199684)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_l3_dst_class_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[199684] = bsn_l3_dst_class_id
class bsn_l3_dst_class_id_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_l3_dst_class_id``: a 32-bit value and mask."""
    type_len = 199944

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_l3_dst_class_id_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 199944)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_l3_dst_class_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[199944] = bsn_l3_dst_class_id_masked
class bsn_l3_interface_class_id(oxm):
    """BSN-extension OXM TLV ``bsn_l3_interface_class_id``: a 32-bit value."""
    type_len = 198660

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_l3_interface_class_id()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 198660)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_l3_interface_class_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[198660] = bsn_l3_interface_class_id
class bsn_l3_interface_class_id_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_l3_interface_class_id``: a 32-bit value and mask."""
    type_len = 198920

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_l3_interface_class_id_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 198920)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_l3_interface_class_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[198920] = bsn_l3_interface_class_id_masked
class bsn_l3_src_class_id(oxm):
    """BSN-extension OXM TLV ``bsn_l3_src_class_id``: a 32-bit value."""
    type_len = 199172

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_l3_src_class_id()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 199172)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_l3_src_class_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[199172] = bsn_l3_src_class_id
class bsn_l3_src_class_id_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_l3_src_class_id``: a 32-bit value and mask."""
    type_len = 199432

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_l3_src_class_id_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 199432)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_l3_src_class_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[199432] = bsn_l3_src_class_id_masked
class bsn_lag_id(oxm):
    """BSN-extension OXM TLV ``bsn_lag_id``: a 32-bit value."""
    type_len = 197124

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_lag_id()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 197124)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_lag_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[197124] = bsn_lag_id
class bsn_lag_id_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_lag_id``: a 32-bit value and mask."""
    type_len = 197384

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_lag_id_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 197384)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_lag_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[197384] = bsn_lag_id_masked
class bsn_tcp_flags(oxm):
    """BSN-extension OXM TLV ``bsn_tcp_flags``: a 16-bit value."""
    type_len = 204802

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_tcp_flags()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 204802)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_tcp_flags {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[204802] = bsn_tcp_flags
class bsn_tcp_flags_masked(oxm):
    """Maskable BSN OXM TLV ``bsn_tcp_flags``: a 16-bit value and mask."""
    type_len = 205060

    def __init__(self, value=None, value_mask=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0

    def pack(self):
        """Serialize to wire format: type_len header, value, then mask."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_tcp_flags_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 205060)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_tcp_flags_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[205060] = bsn_tcp_flags_masked
class bsn_udf0(oxm):
    """BSN-extension OXM TLV ``bsn_udf0``: a 32-bit value."""
    type_len = 200708

    def __init__(self, value=None):
        if value is not None:
            self.value = value
        else:
            self.value = 0

    def pack(self):
        """Serialize to wire format: type_len header followed by the value."""
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        # join is linear; repeated '+' concatenation is quadratic
        return b''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; the leading header must equal type_len."""
        obj = bsn_udf0()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 200708)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_udf0 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[200708] = bsn_udf0
class bsn_udf0_masked(oxm):
    """OXM TLV for the bsn_udf0 match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 200968

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 200968
        obj = bsn_udf0_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_udf0_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[200968] = bsn_udf0_masked
class bsn_udf1(oxm):
    """OXM TLV for the bsn_udf1 match field (32-bit value, no mask)."""
    type_len = 201220

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 201220
        obj = bsn_udf1()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_udf1 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[201220] = bsn_udf1
class bsn_udf1_masked(oxm):
    """OXM TLV for the bsn_udf1 match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 201480

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 201480
        obj = bsn_udf1_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_udf1_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[201480] = bsn_udf1_masked
class bsn_udf2(oxm):
    """OXM TLV for the bsn_udf2 match field (32-bit value, no mask)."""
    type_len = 201732

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 201732
        obj = bsn_udf2()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_udf2 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[201732] = bsn_udf2
class bsn_udf2_masked(oxm):
    """OXM TLV for the bsn_udf2 match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 201992

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 201992
        obj = bsn_udf2_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_udf2_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[201992] = bsn_udf2_masked
class bsn_udf3(oxm):
    """OXM TLV for the bsn_udf3 match field (32-bit value, no mask)."""
    type_len = 202244

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 202244
        obj = bsn_udf3()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_udf3 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[202244] = bsn_udf3
class bsn_udf3_masked(oxm):
    """OXM TLV for the bsn_udf3 match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 202504

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 202504
        obj = bsn_udf3_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_udf3_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[202504] = bsn_udf3_masked
class bsn_udf4(oxm):
    """OXM TLV for the bsn_udf4 match field (32-bit value, no mask)."""
    type_len = 202756

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 202756
        obj = bsn_udf4()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_udf4 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[202756] = bsn_udf4
class bsn_udf4_masked(oxm):
    """OXM TLV for the bsn_udf4 match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 203016

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 203016
        obj = bsn_udf4_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_udf4_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[203016] = bsn_udf4_masked
class bsn_udf5(oxm):
    """OXM TLV for the bsn_udf5 match field (32-bit value, no mask)."""
    type_len = 203268

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 203268
        obj = bsn_udf5()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_udf5 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[203268] = bsn_udf5
class bsn_udf5_masked(oxm):
    """OXM TLV for the bsn_udf5 match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 203528

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 203528
        obj = bsn_udf5_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_udf5_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[203528] = bsn_udf5_masked
class bsn_udf6(oxm):
    """OXM TLV for the bsn_udf6 match field (32-bit value, no mask)."""
    type_len = 203780

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 203780
        obj = bsn_udf6()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_udf6 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[203780] = bsn_udf6
class bsn_udf6_masked(oxm):
    """OXM TLV for the bsn_udf6 match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 204040

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 204040
        obj = bsn_udf6_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_udf6_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[204040] = bsn_udf6_masked
class bsn_udf7(oxm):
    """OXM TLV for the bsn_udf7 match field (32-bit value, no mask)."""
    type_len = 204292

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 204292
        obj = bsn_udf7()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_udf7 {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[204292] = bsn_udf7
class bsn_udf7_masked(oxm):
    """OXM TLV for the bsn_udf7 match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 204552

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 204552
        obj = bsn_udf7_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_udf7_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[204552] = bsn_udf7_masked
class bsn_vfi(oxm):
    """OXM TLV for the bsn_vfi match field (16-bit value, no mask)."""
    type_len = 209410

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 16-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!H", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 209410
        obj = bsn_vfi()
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_vfi {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[209410] = bsn_vfi
class bsn_vfi_masked(oxm):
    """OXM TLV for the bsn_vfi match field with a mask (16-bit value + 16-bit mask)."""
    type_len = 209668

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!H", self.value),
                 struct.pack("!H", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 209668
        obj = bsn_vfi_masked()
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_vfi_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[209668] = bsn_vfi_masked
class bsn_vlan_xlate_port_group_id(oxm):
    """OXM TLV for the bsn_vlan_xlate_port_group_id match field (32-bit value, no mask)."""
    type_len = 205316

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 205316
        obj = bsn_vlan_xlate_port_group_id()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_vlan_xlate_port_group_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[205316] = bsn_vlan_xlate_port_group_id
class bsn_vlan_xlate_port_group_id_masked(oxm):
    """OXM TLV for the bsn_vlan_xlate_port_group_id match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 205576

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 205576
        obj = bsn_vlan_xlate_port_group_id_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_vlan_xlate_port_group_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[205576] = bsn_vlan_xlate_port_group_id_masked
class bsn_vrf(oxm):
    """OXM TLV for the bsn_vrf match field (32-bit value, no mask)."""
    type_len = 197636

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 197636
        obj = bsn_vrf()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_vrf {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[197636] = bsn_vrf
class bsn_vrf_masked(oxm):
    """OXM TLV for the bsn_vrf match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 197896

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 197896
        obj = bsn_vrf_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_vrf_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[197896] = bsn_vrf_masked
class bsn_vxlan_network_id(oxm):
    """OXM TLV for the bsn_vxlan_network_id match field (32-bit value, no mask)."""
    type_len = 207364

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 207364
        obj = bsn_vxlan_network_id()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("bsn_vxlan_network_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[207364] = bsn_vxlan_network_id
class bsn_vxlan_network_id_masked(oxm):
    """OXM TLV for the bsn_vxlan_network_id match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 207624

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 207624
        obj = bsn_vxlan_network_id_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("bsn_vxlan_network_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[207624] = bsn_vxlan_network_id_masked
class conn_tracking_ipv6_dst(oxm):
    """OXM TLV for the conn_tracking_ipv6_dst match field (16-byte value, no mask)."""
    type_len = 128528

    def __init__(self, value=None):
        # Default to the all-zero 16-byte address when not given.
        if value is None:
            value = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self.value = value

    def pack(self):
        # Header word followed by the 16-byte value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!16s", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 128528
        obj = conn_tracking_ipv6_dst()
        obj.value = reader.read('!16s')[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_ipv6_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
            q.breakable()
        q.text('}')
oxm.subtypes[128528] = conn_tracking_ipv6_dst
class conn_tracking_ipv6_dst_masked(oxm):
    """OXM TLV for the conn_tracking_ipv6_dst match field with a mask (16-byte value + 16-byte mask)."""
    type_len = 128800

    def __init__(self, value=None, value_mask=None):
        # Default both the value and the mask to the all-zero 16-byte address.
        if value is None:
            value = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        if value_mask is None:
            value_mask = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self.value = value
        self.value_mask = value_mask

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!16s", self.value),
                 struct.pack("!16s", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 128800
        obj = conn_tracking_ipv6_dst_masked()
        obj.value = reader.read('!16s')[0]
        obj.value_mask = reader.read('!16s')[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_ipv6_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_ipv6(self.value_mask))
            q.breakable()
        q.text('}')
oxm.subtypes[128800] = conn_tracking_ipv6_dst_masked
class conn_tracking_ipv6_src(oxm):
    """OXM TLV for the conn_tracking_ipv6_src match field (16-byte value, no mask)."""
    type_len = 128016

    def __init__(self, value=None):
        # Default to the all-zero 16-byte address when not given.
        if value is None:
            value = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self.value = value

    def pack(self):
        # Header word followed by the 16-byte value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!16s", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 128016
        obj = conn_tracking_ipv6_src()
        obj.value = reader.read('!16s')[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_ipv6_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
            q.breakable()
        q.text('}')
oxm.subtypes[128016] = conn_tracking_ipv6_src
class conn_tracking_ipv6_src_masked(oxm):
    """OXM TLV for the conn_tracking_ipv6_src match field with a mask (16-byte value + 16-byte mask)."""
    type_len = 128288

    def __init__(self, value=None, value_mask=None):
        # Default both the value and the mask to the all-zero 16-byte address.
        if value is None:
            value = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        if value_mask is None:
            value_mask = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self.value = value
        self.value_mask = value_mask

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!16s", self.value),
                 struct.pack("!16s", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 128288
        obj = conn_tracking_ipv6_src_masked()
        obj.value = reader.read('!16s')[0]
        obj.value_mask = reader.read('!16s')[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_ipv6_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_ipv6(self.value_mask))
            q.breakable()
        q.text('}')
oxm.subtypes[128288] = conn_tracking_ipv6_src_masked
class conn_tracking_label(oxm):
    """OXM TLV for the conn_tracking_label match field.

    The 128-bit value type is not supported by the generator: init, pack
    and unpack all delegate to loxi.unimplemented (calls kept verbatim).
    """
    type_len = 120848

    def __init__(self, value=None):
        # Only touch loxi.unimplemented when no explicit value is supplied.
        if value is not None:
            self.value = value
        else:
            self.value = loxi.unimplemented('init uint128_t')

    def pack(self):
        # Header word plus the (unimplemented) 128-bit value.
        return struct.pack("!L", self.type_len) + loxi.unimplemented('pack uint128_t')

    @staticmethod
    def unpack(reader):
        obj = conn_tracking_label()
        _type_len = reader.read("!L")[0]
        assert _type_len == 120848
        obj.value = loxi.unimplemented('unpack uint128_t')
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_label {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[120848] = conn_tracking_label
class conn_tracking_label_masked(oxm):
    """OXM TLV for the conn_tracking_label match field with a mask.

    The 128-bit value type is not supported by the generator: init, pack
    and unpack all delegate to loxi.unimplemented (calls kept verbatim).
    """
    type_len = 121120

    def __init__(self, value=None, value_mask=None):
        # Only touch loxi.unimplemented when a field is not supplied.
        if value is not None:
            self.value = value
        else:
            self.value = loxi.unimplemented('init uint128_t')
        if value_mask is not None:
            self.value_mask = value_mask
        else:
            self.value_mask = loxi.unimplemented('init uint128_t')

    def pack(self):
        # Header word plus the (unimplemented) 128-bit value and mask.
        return (struct.pack("!L", self.type_len)
                + loxi.unimplemented('pack uint128_t')
                + loxi.unimplemented('pack uint128_t'))

    @staticmethod
    def unpack(reader):
        obj = conn_tracking_label_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 121120
        obj.value = loxi.unimplemented('unpack uint128_t')
        obj.value_mask = loxi.unimplemented('unpack uint128_t')
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_label_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[121120] = conn_tracking_label_masked
class conn_tracking_mark(oxm):
    """OXM TLV for the conn_tracking_mark match field (32-bit value, no mask)."""
    type_len = 120324

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 120324
        obj = conn_tracking_mark()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_mark {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[120324] = conn_tracking_mark
class conn_tracking_mark_masked(oxm):
    """OXM TLV for the conn_tracking_mark match field with a mask (32-bit value + 32-bit mask)."""
    type_len = 120584

    def __init__(self, value=None, value_mask=None):
        # Both value and mask default to zero when omitted.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        # Header word, then value, then mask.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value),
                 struct.pack("!L", self.value_mask)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 120584
        obj = conn_tracking_mark_masked()
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_mark_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')
oxm.subtypes[120584] = conn_tracking_mark_masked
class conn_tracking_nw_dst(oxm):
    """OXM TLV for the conn_tracking_nw_dst match field (32-bit value, no mask)."""
    type_len = 127492

    def __init__(self, value=None):
        # Default the field to zero when not given.
        self.value = value if value is not None else 0

    def pack(self):
        # Header word followed by the 32-bit value.
        parts = [struct.pack("!L", self.type_len),
                 struct.pack("!L", self.value)]
        return b"".join(parts)

    @staticmethod
    def unpack(reader):
        _type_len = reader.read("!L")[0]
        assert _type_len == 127492
        obj = conn_tracking_nw_dst()
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_nw_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')
oxm.subtypes[127492] = conn_tracking_nw_dst
class conn_tracking_nw_dst_masked(oxm):
    """OXM match field ``conn_tracking_nw_dst`` with mask (type_len 127752)."""
    type_len = 127752

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, value, then mask (big-endian)."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!L", self.value),
                         struct.pack("!L", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_nw_dst_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 127752
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_nw_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[127752] = conn_tracking_nw_dst_masked
class conn_tracking_nw_proto(oxm):
    """OXM match field ``conn_tracking_nw_proto`` (type_len 126465)."""
    type_len = 126465

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 8-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_nw_proto()
        _type_len = reader.read("!L")[0]
        assert _type_len == 126465
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_nw_proto {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[126465] = conn_tracking_nw_proto
class conn_tracking_nw_proto_masked(oxm):
    """OXM match field ``conn_tracking_nw_proto`` with mask (type_len 126722)."""
    type_len = 126722

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 8-bit value, 8-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value),
                         struct.pack("!B", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_nw_proto_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 126722
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_nw_proto_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[126722] = conn_tracking_nw_proto_masked
class conn_tracking_nw_src(oxm):
    """OXM match field ``conn_tracking_nw_src`` (type_len 126980)."""
    type_len = 126980

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the value (big-endian)."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!L", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_nw_src()
        _type_len = reader.read("!L")[0]
        assert _type_len == 126980
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_nw_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[126980] = conn_tracking_nw_src
class conn_tracking_nw_src_masked(oxm):
    """OXM match field ``conn_tracking_nw_src`` with mask (type_len 127240)."""
    type_len = 127240

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, value, then mask (big-endian)."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!L", self.value),
                         struct.pack("!L", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_nw_src_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 127240
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_nw_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[127240] = conn_tracking_nw_src_masked
class conn_tracking_state(oxm):
    """OXM match field ``conn_tracking_state`` (type_len 119300)."""
    type_len = 119300

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the value (big-endian)."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!L", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_state()
        _type_len = reader.read("!L")[0]
        assert _type_len == 119300
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_state {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[119300] = conn_tracking_state
class conn_tracking_state_masked(oxm):
    """OXM match field ``conn_tracking_state`` with mask (type_len 119560)."""
    type_len = 119560

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, value, then mask (big-endian)."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!L", self.value),
                         struct.pack("!L", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_state_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 119560
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_state_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[119560] = conn_tracking_state_masked
class conn_tracking_tp_dst(oxm):
    """OXM match field ``conn_tracking_tp_dst`` (type_len 129538)."""
    type_len = 129538

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 16-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!H", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_tp_dst()
        _type_len = reader.read("!L")[0]
        assert _type_len == 129538
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_tp_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[129538] = conn_tracking_tp_dst
class conn_tracking_tp_dst_masked(oxm):
    """OXM match field ``conn_tracking_tp_dst`` with mask (type_len 129796)."""
    type_len = 129796

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 16-bit value, 16-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!H", self.value),
                         struct.pack("!H", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_tp_dst_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 129796
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_tp_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[129796] = conn_tracking_tp_dst_masked
class conn_tracking_tp_src(oxm):
    """OXM match field ``conn_tracking_tp_src`` (type_len 129026)."""
    type_len = 129026

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 16-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!H", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_tp_src()
        _type_len = reader.read("!L")[0]
        assert _type_len == 129026
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_tp_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[129026] = conn_tracking_tp_src
class conn_tracking_tp_src_masked(oxm):
    """OXM match field ``conn_tracking_tp_src`` with mask (type_len 129284)."""
    type_len = 129284

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 16-bit value, 16-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!H", self.value),
                         struct.pack("!H", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_tp_src_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 129284
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_tp_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[129284] = conn_tracking_tp_src_masked
class conn_tracking_zone(oxm):
    """OXM match field ``conn_tracking_zone`` (type_len 119810)."""
    type_len = 119810

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 16-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!H", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_zone()
        _type_len = reader.read("!L")[0]
        assert _type_len == 119810
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("conn_tracking_zone {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[119810] = conn_tracking_zone
class conn_tracking_zone_masked(oxm):
    """OXM match field ``conn_tracking_zone`` with mask (type_len 120068)."""
    type_len = 120068

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 16-bit value, 16-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!H", self.value),
                         struct.pack("!H", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = conn_tracking_zone_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 120068
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("conn_tracking_zone_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[120068] = conn_tracking_zone_masked
class eth_dst(oxm):
    """OXM match field ``eth_dst`` — 6-byte MAC address (type_len 2147485190)."""
    type_len = 2147485190

    def __init__(self, value=None):
        # Unset field defaults to the all-zero MAC; a fresh list per instance.
        self.value = value if value is not None else [0, 0, 0, 0, 0, 0]

    def pack(self):
        """Serialize: 32-bit type_len header then six MAC octets."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!6B", *self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = eth_dst()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147485190
        obj.value = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("eth_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_mac(self.value))
                q.breakable()
            q.text('}')
oxm.subtypes[2147485190] = eth_dst
class eth_dst_masked(oxm):
    """OXM match field ``eth_dst`` with mask — MAC + mask (type_len 2147485452)."""
    type_len = 2147485452

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to the all-zero MAC; fresh lists per instance.
        self.value = value if value is not None else [0, 0, 0, 0, 0, 0]
        self.value_mask = value_mask if value_mask is not None else [0, 0, 0, 0, 0, 0]

    def pack(self):
        """Serialize: 32-bit type_len header, six value octets, six mask octets."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!6B", *self.value),
                         struct.pack("!6B", *self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = eth_dst_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147485452
        obj.value = list(reader.read('!6B'))
        obj.value_mask = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("eth_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_mac(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_mac(self.value_mask))
                q.breakable()
            q.text('}')
oxm.subtypes[2147485452] = eth_dst_masked
class eth_src(oxm):
    """OXM match field ``eth_src`` — 6-byte MAC address (type_len 2147485702)."""
    type_len = 2147485702

    def __init__(self, value=None):
        # Unset field defaults to the all-zero MAC; a fresh list per instance.
        self.value = value if value is not None else [0, 0, 0, 0, 0, 0]

    def pack(self):
        """Serialize: 32-bit type_len header then six MAC octets."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!6B", *self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = eth_src()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147485702
        obj.value = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("eth_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_mac(self.value))
                q.breakable()
            q.text('}')
oxm.subtypes[2147485702] = eth_src
class eth_src_masked(oxm):
    """OXM match field ``eth_src`` with mask — MAC + mask (type_len 2147485964)."""
    type_len = 2147485964

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to the all-zero MAC; fresh lists per instance.
        self.value = value if value is not None else [0, 0, 0, 0, 0, 0]
        self.value_mask = value_mask if value_mask is not None else [0, 0, 0, 0, 0, 0]

    def pack(self):
        """Serialize: 32-bit type_len header, six value octets, six mask octets."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!6B", *self.value),
                         struct.pack("!6B", *self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = eth_src_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147485964
        obj.value = list(reader.read('!6B'))
        obj.value_mask = list(reader.read('!6B'))
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("eth_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_mac(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_mac(self.value_mask))
                q.breakable()
            q.text('}')
oxm.subtypes[2147485964] = eth_src_masked
class eth_type(oxm):
    """OXM match field ``eth_type`` (type_len 2147486210)."""
    type_len = 2147486210

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 16-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!H", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = eth_type()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147486210
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("eth_type {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[2147486210] = eth_type
class eth_type_masked(oxm):
    """OXM match field ``eth_type`` with mask (type_len 2147486468)."""
    type_len = 2147486468

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 16-bit value, 16-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!H", self.value),
                         struct.pack("!H", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = eth_type_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147486468
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("eth_type_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[2147486468] = eth_type_masked
class icmpv4_code(oxm):
    """OXM match field ``icmpv4_code`` (type_len 2147493889)."""
    type_len = 2147493889

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 8-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = icmpv4_code()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147493889
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("icmpv4_code {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[2147493889] = icmpv4_code
class icmpv4_code_masked(oxm):
    """OXM match field ``icmpv4_code`` with mask (type_len 2147494146)."""
    type_len = 2147494146

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 8-bit value, 8-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value),
                         struct.pack("!B", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = icmpv4_code_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147494146
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("icmpv4_code_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[2147494146] = icmpv4_code_masked
class icmpv4_type(oxm):
    """OXM match field ``icmpv4_type`` (type_len 2147493377)."""
    type_len = 2147493377

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 8-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = icmpv4_type()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147493377
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("icmpv4_type {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[2147493377] = icmpv4_type
class icmpv4_type_masked(oxm):
    """OXM match field ``icmpv4_type`` with mask (type_len 2147493634)."""
    type_len = 2147493634

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 8-bit value, 8-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value),
                         struct.pack("!B", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = icmpv4_type_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147493634
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("icmpv4_type_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[2147493634] = icmpv4_type_masked
class icmpv6_code(oxm):
    """OXM match field ``icmpv6_code`` (type_len 2147499009)."""
    type_len = 2147499009

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 8-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = icmpv6_code()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147499009
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("icmpv6_code {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[2147499009] = icmpv6_code
class icmpv6_code_masked(oxm):
    """OXM match field ``icmpv6_code`` with mask (type_len 2147499266)."""
    type_len = 2147499266

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 8-bit value, 8-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value),
                         struct.pack("!B", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = icmpv6_code_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147499266
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("icmpv6_code_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[2147499266] = icmpv6_code_masked
class icmpv6_type(oxm):
    """OXM match field ``icmpv6_type`` (type_len 2147498497)."""
    type_len = 2147498497

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 8-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = icmpv6_type()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147498497
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("icmpv6_type {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[2147498497] = icmpv6_type
class icmpv6_type_masked(oxm):
    """OXM match field ``icmpv6_type`` with mask (type_len 2147498754)."""
    type_len = 2147498754

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, 8-bit value, 8-bit mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value),
                         struct.pack("!B", self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = icmpv6_type_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147498754
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("icmpv6_type_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
                q.breakable()
            q.text('}')
oxm.subtypes[2147498754] = icmpv6_type_masked
class in_phy_port(oxm):
    """OXM match field ``in_phy_port`` — OpenFlow port number (type_len 2147484164)."""
    type_len = 2147484164

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the port number."""
        return b"".join([struct.pack("!L", self.type_len),
                         util.pack_port_no(self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = in_phy_port()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147484164
        obj.value = util.unpack_port_no(reader)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("in_phy_port {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_port(self.value))
                q.breakable()
            q.text('}')
oxm.subtypes[2147484164] = in_phy_port
class in_phy_port_masked(oxm):
    """OXM match field ``in_phy_port`` with mask (type_len 2147484424)."""
    type_len = 2147484424

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, port number, then mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         util.pack_port_no(self.value),
                         util.pack_port_no(self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = in_phy_port_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147484424
        obj.value = util.unpack_port_no(reader)
        obj.value_mask = util.unpack_port_no(reader)
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("in_phy_port_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_port(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_port(self.value_mask))
                q.breakable()
            q.text('}')
oxm.subtypes[2147484424] = in_phy_port_masked
class in_port(oxm):
    """OXM match field ``in_port`` — OpenFlow ingress port (type_len 2147483652)."""
    type_len = 2147483652

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the port number."""
        return b"".join([struct.pack("!L", self.type_len),
                         util.pack_port_no(self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = in_port()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147483652
        obj.value = util.unpack_port_no(reader)
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("in_port {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_port(self.value))
                q.breakable()
            q.text('}')
oxm.subtypes[2147483652] = in_port
class in_port_masked(oxm):
    """OXM match field ``in_port`` with mask (type_len 2147483912)."""
    type_len = 2147483912

    def __init__(self, value=None, value_mask=None):
        # Unset fields default to zero.
        self.value = value if value is not None else 0
        self.value_mask = value_mask if value_mask is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header, port number, then mask."""
        return b"".join([struct.pack("!L", self.type_len),
                         util.pack_port_no(self.value),
                         util.pack_port_no(self.value_mask)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = in_port_masked()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147483912
        obj.value = util.unpack_port_no(reader)
        obj.value_mask = util.unpack_port_no(reader)
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("in_port_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_port(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_port(self.value_mask))
                q.breakable()
            q.text('}')
oxm.subtypes[2147483912] = in_port_masked
class ip_dscp(oxm):
    """OXM match field ``ip_dscp`` (type_len 2147487745)."""
    type_len = 2147487745

    def __init__(self, value=None):
        # Unset field defaults to zero.
        self.value = value if value is not None else 0

    def pack(self):
        """Serialize: 32-bit type_len header then the 8-bit value."""
        return b"".join([struct.pack("!L", self.type_len),
                         struct.pack("!B", self.value)])

    @staticmethod
    def unpack(reader):
        """Deserialize from ``reader``; the header must equal type_len."""
        obj = ip_dscp()
        _type_len = reader.read("!L")[0]
        assert _type_len == 2147487745
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ip_dscp {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.breakable()
            q.text('}')
oxm.subtypes[2147487745] = ip_dscp
class ip_dscp_masked(oxm):
    """OXM TLV `ip_dscp` with a mask; two unsigned bytes (type_len 2147488002)."""
    type_len = 2147488002

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value byte, mask byte.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!B", self.value)
                + struct.pack("!B", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ip_dscp_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147488002)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ip_dscp_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147488002] = ip_dscp_masked
class ip_ecn(oxm):
    """OXM TLV `ip_ecn` carrying one unsigned byte (type_len 2147488257)."""
    type_len = 2147488257

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 1-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!B", self.value)

    @staticmethod
    def unpack(reader):
        obj = ip_ecn()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147488257)
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ip_ecn {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[2147488257] = ip_ecn
class ip_ecn_masked(oxm):
    """OXM TLV `ip_ecn` with a mask; two unsigned bytes (type_len 2147488514)."""
    type_len = 2147488514

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value byte, mask byte.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!B", self.value)
                + struct.pack("!B", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ip_ecn_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147488514)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ip_ecn_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147488514] = ip_ecn_masked
class ip_proto(oxm):
    """OXM TLV `ip_proto` carrying one unsigned byte (type_len 2147488769)."""
    type_len = 2147488769

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 1-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!B", self.value)

    @staticmethod
    def unpack(reader):
        obj = ip_proto()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147488769)
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ip_proto {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[2147488769] = ip_proto
class ip_proto_masked(oxm):
    """OXM TLV `ip_proto` with a mask; two unsigned bytes (type_len 2147489026)."""
    type_len = 2147489026

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value byte, mask byte.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!B", self.value)
                + struct.pack("!B", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ip_proto_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147489026)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ip_proto_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147489026] = ip_proto_masked
class ipv4_dst(oxm):
    """OXM TLV `ipv4_dst` carrying one unsigned 32-bit value (type_len 2147489796)."""
    type_len = 2147489796

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 4-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!L", self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv4_dst()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147489796)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv4_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv4(self.value))
            q.breakable()
            q.text('}')

oxm.subtypes[2147489796] = ipv4_dst
class ipv4_dst_masked(oxm):
    """OXM TLV `ipv4_dst` with a mask; two unsigned 32-bit values (type_len 2147490056)."""
    type_len = 2147490056

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value word, mask word.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!L", self.value)
                + struct.pack("!L", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv4_dst_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147490056)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv4_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv4(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_ipv4(self.value_mask))
            q.breakable()
            q.text('}')

oxm.subtypes[2147490056] = ipv4_dst_masked
class ipv4_src(oxm):
    """OXM TLV `ipv4_src` carrying one unsigned 32-bit value (type_len 2147489284)."""
    type_len = 2147489284

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 4-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!L", self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv4_src()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147489284)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv4_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv4(self.value))
            q.breakable()
            q.text('}')

oxm.subtypes[2147489284] = ipv4_src
class ipv4_src_masked(oxm):
    """OXM TLV `ipv4_src` with a mask; two unsigned 32-bit values (type_len 2147489544)."""
    type_len = 2147489544

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value word, mask word.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!L", self.value)
                + struct.pack("!L", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv4_src_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147489544)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv4_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv4(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_ipv4(self.value_mask))
            q.breakable()
            q.text('}')

oxm.subtypes[2147489544] = ipv4_src_masked
class ipv6_dst(oxm):
    """OXM TLV `ipv6_dst` carrying a 16-byte string value (type_len 2147497488)."""
    type_len = 2147497488

    def __init__(self, value=None):
        # Missing value defaults to 16 zero bytes.
        self.value = b"\x00" * 16 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 16-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!16s", self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv6_dst()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147497488)
        obj.value = reader.read("!16s")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv6_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
            q.breakable()
            q.text('}')

oxm.subtypes[2147497488] = ipv6_dst
class ipv6_dst_masked(oxm):
    """OXM TLV `ipv6_dst` with a mask; two 16-byte string values (type_len 2147497760)."""
    type_len = 2147497760

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to 16 zero bytes.
        self.value = b"\x00" * 16 if value is None else value
        self.value_mask = b"\x00" * 16 if value_mask is None else value_mask

    def pack(self):
        # Header, value bytes, mask bytes.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!16s", self.value)
                + struct.pack("!16s", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv6_dst_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147497760)
        obj.value = reader.read("!16s")[0]
        obj.value_mask = reader.read("!16s")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv6_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_ipv6(self.value_mask))
            q.breakable()
            q.text('}')

oxm.subtypes[2147497760] = ipv6_dst_masked
class ipv6_exthdr(oxm):
    """OXM TLV `ipv6_exthdr` carrying one unsigned 16-bit value (type_len 2147503618)."""
    type_len = 2147503618

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 2-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!H", self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv6_exthdr()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147503618)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv6_exthdr {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[2147503618] = ipv6_exthdr
class ipv6_exthdr_masked(oxm):
    """OXM TLV `ipv6_exthdr` with a mask; two unsigned 16-bit values (type_len 2147503876)."""
    type_len = 2147503876

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value half-word, mask half-word.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!H", self.value)
                + struct.pack("!H", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv6_exthdr_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147503876)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv6_exthdr_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147503876] = ipv6_exthdr_masked
class ipv6_flabel(oxm):
    """OXM TLV `ipv6_flabel` carrying one unsigned 32-bit value (type_len 2147497988)."""
    type_len = 2147497988

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 4-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!L", self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv6_flabel()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147497988)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv6_flabel {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[2147497988] = ipv6_flabel
class ipv6_flabel_masked(oxm):
    """OXM TLV `ipv6_flabel` with a mask; two unsigned 32-bit values (type_len 2147498248)."""
    type_len = 2147498248

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value word, mask word.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!L", self.value)
                + struct.pack("!L", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv6_flabel_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147498248)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv6_flabel_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147498248] = ipv6_flabel_masked
class ipv6_nd_sll(oxm):
    """OXM TLV `ipv6_nd_sll` carrying six unsigned bytes as a list (type_len 2147500038)."""
    type_len = 2147500038

    def __init__(self, value=None):
        # Missing value defaults to six zero bytes.
        self.value = [0] * 6 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the six payload bytes.
        return struct.pack("!L", self.type_len) + struct.pack("!6B", *self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv6_nd_sll()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147500038)
        obj.value = list(reader.read("!6B"))
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv6_nd_sll {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_mac(self.value))
            q.breakable()
            q.text('}')

oxm.subtypes[2147500038] = ipv6_nd_sll
class ipv6_nd_sll_masked(oxm):
    """OXM TLV `ipv6_nd_sll` with a mask; two six-byte lists (type_len 2147500300)."""
    type_len = 2147500300

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to six zero bytes.
        self.value = [0] * 6 if value is None else value
        self.value_mask = [0] * 6 if value_mask is None else value_mask

    def pack(self):
        # Header, six value bytes, six mask bytes.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!6B", *self.value)
                + struct.pack("!6B", *self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv6_nd_sll_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147500300)
        obj.value = list(reader.read("!6B"))
        obj.value_mask = list(reader.read("!6B"))
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv6_nd_sll_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_mac(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_mac(self.value_mask))
            q.breakable()
            q.text('}')

oxm.subtypes[2147500300] = ipv6_nd_sll_masked
class ipv6_nd_target(oxm):
    """OXM TLV `ipv6_nd_target` carrying a 16-byte string value (type_len 2147499536)."""
    type_len = 2147499536

    def __init__(self, value=None):
        # Missing value defaults to 16 zero bytes.
        self.value = b"\x00" * 16 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 16-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!16s", self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv6_nd_target()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147499536)
        obj.value = reader.read("!16s")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv6_nd_target {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
            q.breakable()
            q.text('}')

oxm.subtypes[2147499536] = ipv6_nd_target
class ipv6_nd_target_masked(oxm):
    """OXM TLV `ipv6_nd_target` with a mask; two 16-byte string values (type_len 2147499808)."""
    type_len = 2147499808

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to 16 zero bytes.
        self.value = b"\x00" * 16 if value is None else value
        self.value_mask = b"\x00" * 16 if value_mask is None else value_mask

    def pack(self):
        # Header, value bytes, mask bytes.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!16s", self.value)
                + struct.pack("!16s", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv6_nd_target_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147499808)
        obj.value = reader.read("!16s")[0]
        obj.value_mask = reader.read("!16s")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv6_nd_target_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_ipv6(self.value_mask))
            q.breakable()
            q.text('}')

oxm.subtypes[2147499808] = ipv6_nd_target_masked
class ipv6_nd_tll(oxm):
    """OXM TLV `ipv6_nd_tll` carrying six unsigned bytes as a list (type_len 2147500550)."""
    type_len = 2147500550

    def __init__(self, value=None):
        # Missing value defaults to six zero bytes.
        self.value = [0] * 6 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the six payload bytes.
        return struct.pack("!L", self.type_len) + struct.pack("!6B", *self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv6_nd_tll()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147500550)
        obj.value = list(reader.read("!6B"))
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv6_nd_tll {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_mac(self.value))
            q.breakable()
            q.text('}')

oxm.subtypes[2147500550] = ipv6_nd_tll
class ipv6_nd_tll_masked(oxm):
    """OXM TLV `ipv6_nd_tll` with a mask; two six-byte lists (type_len 2147500812)."""
    type_len = 2147500812

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to six zero bytes.
        self.value = [0] * 6 if value is None else value
        self.value_mask = [0] * 6 if value_mask is None else value_mask

    def pack(self):
        # Header, six value bytes, six mask bytes.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!6B", *self.value)
                + struct.pack("!6B", *self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv6_nd_tll_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147500812)
        obj.value = list(reader.read("!6B"))
        obj.value_mask = list(reader.read("!6B"))
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv6_nd_tll_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_mac(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_mac(self.value_mask))
            q.breakable()
            q.text('}')

oxm.subtypes[2147500812] = ipv6_nd_tll_masked
class ipv6_src(oxm):
    """OXM TLV `ipv6_src` carrying a 16-byte string value (type_len 2147496976)."""
    type_len = 2147496976

    def __init__(self, value=None):
        # Missing value defaults to 16 zero bytes.
        self.value = b"\x00" * 16 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 16-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!16s", self.value)

    @staticmethod
    def unpack(reader):
        obj = ipv6_src()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147496976)
        obj.value = reader.read("!16s")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ipv6_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
            q.breakable()
            q.text('}')

oxm.subtypes[2147496976] = ipv6_src
class ipv6_src_masked(oxm):
    """OXM TLV `ipv6_src` with a mask; two 16-byte string values (type_len 2147497248)."""
    type_len = 2147497248

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to 16 zero bytes.
        self.value = b"\x00" * 16 if value is None else value
        self.value_mask = b"\x00" * 16 if value_mask is None else value_mask

    def pack(self):
        # Header, value bytes, mask bytes.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!16s", self.value)
                + struct.pack("!16s", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = ipv6_src_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147497248)
        obj.value = reader.read("!16s")[0]
        obj.value_mask = reader.read("!16s")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("ipv6_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text(util.pretty_ipv6(self.value))
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text(util.pretty_ipv6(self.value_mask))
            q.breakable()
            q.text('}')

oxm.subtypes[2147497248] = ipv6_src_masked
class metadata(oxm):
    """OXM TLV `metadata` carrying one unsigned 64-bit value (type_len 2147484680)."""
    type_len = 2147484680

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 8-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!Q", self.value)

    @staticmethod
    def unpack(reader):
        obj = metadata()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147484680)
        obj.value = reader.read("!Q")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("metadata {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[2147484680] = metadata
class metadata_masked(oxm):
    """OXM TLV `metadata` with a mask; two unsigned 64-bit values (type_len 2147484944)."""
    type_len = 2147484944

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value quad-word, mask quad-word.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!Q", self.value)
                + struct.pack("!Q", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = metadata_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147484944)
        obj.value = reader.read("!Q")[0]
        obj.value_mask = reader.read("!Q")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("metadata_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147484944] = metadata_masked
class mpls_bos(oxm):
    """OXM TLV `mpls_bos` carrying one unsigned byte (type_len 2147502081)."""
    type_len = 2147502081

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 1-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!B", self.value)

    @staticmethod
    def unpack(reader):
        obj = mpls_bos()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147502081)
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("mpls_bos {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[2147502081] = mpls_bos
class mpls_bos_masked(oxm):
    """OXM TLV `mpls_bos` with a mask; two unsigned bytes (type_len 2147502338)."""
    type_len = 2147502338

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value byte, mask byte.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!B", self.value)
                + struct.pack("!B", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = mpls_bos_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147502338)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("mpls_bos_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147502338] = mpls_bos_masked
class mpls_label(oxm):
    """OXM TLV `mpls_label` carrying one unsigned 32-bit value (type_len 2147501060)."""
    type_len = 2147501060

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 4-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!L", self.value)

    @staticmethod
    def unpack(reader):
        obj = mpls_label()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147501060)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("mpls_label {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[2147501060] = mpls_label
class mpls_label_masked(oxm):
    """OXM TLV `mpls_label` with a mask; two unsigned 32-bit values (type_len 2147501320)."""
    type_len = 2147501320

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value word, mask word.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!L", self.value)
                + struct.pack("!L", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = mpls_label_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147501320)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("mpls_label_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147501320] = mpls_label_masked
class mpls_tc(oxm):
    """OXM TLV `mpls_tc` carrying one unsigned byte (type_len 2147501569)."""
    type_len = 2147501569

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # 4-byte OXM header followed by the 1-byte payload.
        return struct.pack("!L", self.type_len) + struct.pack("!B", self.value)

    @staticmethod
    def unpack(reader):
        obj = mpls_tc()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147501569)
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("mpls_tc {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[2147501569] = mpls_tc
class mpls_tc_masked(oxm):
    """OXM TLV `mpls_tc` with a mask; two unsigned bytes (type_len 2147501826)."""
    type_len = 2147501826

    def __init__(self, value=None, value_mask=None):
        # Missing fields default to zero.
        self.value = 0 if value is None else value
        self.value_mask = 0 if value_mask is None else value_mask

    def pack(self):
        # Header, value byte, mask byte.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!B", self.value)
                + struct.pack("!B", self.value_mask))

    @staticmethod
    def unpack(reader):
        obj = mpls_tc_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147501826)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        return (type(self) == type(other)
                and self.value == other.value
                and self.value_mask == other.value_mask)

    def pretty_print(self, q):
        q.text("mpls_tc_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
                q.text(",")
                q.breakable()
                q.text("value_mask = ")
                q.text("%#x" % self.value_mask)
            q.breakable()
            q.text('}')

oxm.subtypes[2147501826] = mpls_tc_masked
class ovs_tcp_flags(oxm):
    """Experimenter OXM TLV `ovs_tcp_flags` (type_len 4294923270, experimenter 1330529792); 16-bit value."""
    type_len = 4294923270
    experimenter_id = 1330529792

    def __init__(self, value=None):
        # Missing value defaults to zero.
        self.value = 0 if value is None else value

    def pack(self):
        # Header, experimenter id, then the 2-byte payload.
        return (struct.pack("!L", self.type_len)
                + struct.pack("!L", self.experimenter_id)
                + struct.pack("!H", self.value))

    @staticmethod
    def unpack(reader):
        obj = ovs_tcp_flags()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 4294923270)
        _experimenter_id = reader.read("!L")[0]
        assert(_experimenter_id == 1330529792)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        return type(self) == type(other) and self.value == other.value

    def pretty_print(self, q):
        q.text("ovs_tcp_flags {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ")
                q.text("%#x" % self.value)
            q.breakable()
            q.text('}')

oxm.subtypes[4294923270] = ovs_tcp_flags
class ovs_tcp_flags_masked(oxm):
    """Masked experimenter OXM TLV for ``ovs_tcp_flags``; experimenter id word plus "!H" value and mask."""

    type_len = 4294923528
    experimenter_id = 1330529792

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.experimenter_id))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = ovs_tcp_flags_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 4294923528)
        _experimenter_id = reader.read("!L")[0]
        assert(_experimenter_id == 1330529792)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("ovs_tcp_flags_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[4294923528] = ovs_tcp_flags_masked
class sctp_dst(oxm):
    """OXM TLV for ``sctp_dst``; the value packs as a big-endian "!H"."""

    type_len = 2147492866

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = sctp_dst()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147492866)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("sctp_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147492866] = sctp_dst
class sctp_dst_masked(oxm):
    """OXM TLV for ``sctp_dst`` with an explicit bit mask; value and mask pack as big-endian "!H"."""

    type_len = 2147493124

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = sctp_dst_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147493124)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("sctp_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147493124] = sctp_dst_masked
class sctp_src(oxm):
    """OXM TLV for ``sctp_src``; the value packs as a big-endian "!H"."""

    type_len = 2147492354

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = sctp_src()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147492354)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("sctp_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147492354] = sctp_src
class sctp_src_masked(oxm):
    """OXM TLV for ``sctp_src`` with an explicit bit mask; value and mask pack as big-endian "!H"."""

    type_len = 2147492612

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = sctp_src_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147492612)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("sctp_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147492612] = sctp_src_masked
class tcp_dst(oxm):
    """OXM TLV for ``tcp_dst``; the value packs as a big-endian "!H"."""

    type_len = 2147490818

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tcp_dst()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147490818)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("tcp_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147490818] = tcp_dst
class tcp_dst_masked(oxm):
    """OXM TLV for ``tcp_dst`` with an explicit bit mask; value and mask pack as big-endian "!H"."""

    type_len = 2147491076

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tcp_dst_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147491076)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("tcp_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147491076] = tcp_dst_masked
class tcp_src(oxm):
    """OXM TLV for ``tcp_src``; the value packs as a big-endian "!H"."""

    type_len = 2147490306

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tcp_src()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147490306)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("tcp_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147490306] = tcp_src
class tcp_src_masked(oxm):
    """OXM TLV for ``tcp_src`` with an explicit bit mask; value and mask pack as big-endian "!H"."""

    type_len = 2147490564

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tcp_src_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147490564)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("tcp_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147490564] = tcp_src_masked
class tunnel_id(oxm):
    """OXM TLV for ``tunnel_id``; the value packs as a big-endian "!Q" (64-bit)."""

    type_len = 2147503112

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!Q", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tunnel_id()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147503112)
        obj.value = reader.read("!Q")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("tunnel_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147503112] = tunnel_id
class tunnel_id_masked(oxm):
    """OXM TLV for ``tunnel_id`` with an explicit bit mask; value and mask pack as big-endian "!Q"."""

    type_len = 2147503376

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!Q", self.value))
        packed.append(struct.pack("!Q", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tunnel_id_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147503376)
        obj.value = reader.read("!Q")[0]
        obj.value_mask = reader.read("!Q")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("tunnel_id_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147503376] = tunnel_id_masked
class tunnel_ipv4_dst(oxm):
    """OXM TLV for ``tunnel_ipv4_dst``; a 32-bit value rendered as a dotted-quad IPv4 address."""

    type_len = 81924

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tunnel_ipv4_dst()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 81924)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("tunnel_ipv4_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_ipv4(self.value))
            q.breakable()
        q.text('}')

oxm.subtypes[81924] = tunnel_ipv4_dst
class tunnel_ipv4_dst_masked(oxm):
    """Masked OXM TLV for ``tunnel_ipv4_dst``; 32-bit value and mask rendered as IPv4 addresses."""

    type_len = 82184

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tunnel_ipv4_dst_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 82184)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("tunnel_ipv4_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_ipv4(self.value))
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text(util.pretty_ipv4(self.value_mask))
            q.breakable()
        q.text('}')

oxm.subtypes[82184] = tunnel_ipv4_dst_masked
class tunnel_ipv4_src(oxm):
    """OXM TLV for ``tunnel_ipv4_src``; a 32-bit value rendered as a dotted-quad IPv4 address."""

    type_len = 81412

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tunnel_ipv4_src()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 81412)
        obj.value = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("tunnel_ipv4_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_ipv4(self.value))
            q.breakable()
        q.text('}')

oxm.subtypes[81412] = tunnel_ipv4_src
class tunnel_ipv4_src_masked(oxm):
    """Masked OXM TLV for ``tunnel_ipv4_src``; 32-bit value and mask rendered as IPv4 addresses."""

    type_len = 81672

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!L", self.value))
        packed.append(struct.pack("!L", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = tunnel_ipv4_src_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 81672)
        obj.value = reader.read("!L")[0]
        obj.value_mask = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("tunnel_ipv4_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text(util.pretty_ipv4(self.value))
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text(util.pretty_ipv4(self.value_mask))
            q.breakable()
        q.text('}')

oxm.subtypes[81672] = tunnel_ipv4_src_masked
class udp_dst(oxm):
    """OXM TLV for ``udp_dst``; the value packs as a big-endian "!H"."""

    type_len = 2147491842

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = udp_dst()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147491842)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("udp_dst {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147491842] = udp_dst
class udp_dst_masked(oxm):
    """OXM TLV for ``udp_dst`` with an explicit bit mask; value and mask pack as big-endian "!H"."""

    type_len = 2147492100

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = udp_dst_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147492100)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("udp_dst_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147492100] = udp_dst_masked
class udp_src(oxm):
    """OXM TLV for ``udp_src``; the value packs as a big-endian "!H"."""

    type_len = 2147491330

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = udp_src()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147491330)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("udp_src {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147491330] = udp_src
class udp_src_masked(oxm):
    """OXM TLV for ``udp_src`` with an explicit bit mask; value and mask pack as big-endian "!H"."""

    type_len = 2147491588

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = udp_src_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147491588)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("udp_src_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147491588] = udp_src_masked
class vlan_pcp(oxm):
    """OXM TLV for ``vlan_pcp``; the value packs as a single byte ("!B")."""

    type_len = 2147487233

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!B", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = vlan_pcp()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147487233)
        obj.value = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("vlan_pcp {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147487233] = vlan_pcp
class vlan_pcp_masked(oxm):
    """OXM TLV for ``vlan_pcp`` with an explicit bit mask; value and mask pack as "!B"."""

    type_len = 2147487490

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!B", self.value))
        packed.append(struct.pack("!B", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = vlan_pcp_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147487490)
        obj.value = reader.read("!B")[0]
        obj.value_mask = reader.read("!B")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("vlan_pcp_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147487490] = vlan_pcp_masked
class vlan_vid(oxm):
    """OXM TLV for ``vlan_vid``; the value packs as a big-endian "!H"."""

    type_len = 2147486722

    def __init__(self, value=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = vlan_vid()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147486722)
        obj.value = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        return True

    def pretty_print(self, q):
        q.text("vlan_vid {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
            q.breakable()
        q.text('}')

oxm.subtypes[2147486722] = vlan_vid
class vlan_vid_masked(oxm):
    """OXM TLV for ``vlan_vid`` with an explicit bit mask; value and mask pack as big-endian "!H"."""

    type_len = 2147486980

    def __init__(self, value=None, value_mask=None):
        if value != None:
            self.value = value
        else:
            self.value = 0
        if value_mask != None:
            self.value_mask = value_mask
        else:
            self.value_mask = 0
        return

    def pack(self):
        packed = []
        packed.append(struct.pack("!L", self.type_len))
        packed.append(struct.pack("!H", self.value))
        packed.append(struct.pack("!H", self.value_mask))
        return functools.reduce(lambda x,y: x+y, packed)

    @staticmethod
    def unpack(reader):
        obj = vlan_vid_masked()
        _type_len = reader.read("!L")[0]
        assert(_type_len == 2147486980)
        obj.value = reader.read("!H")[0]
        obj.value_mask = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.value != other.value: return False
        if self.value_mask != other.value_mask: return False
        return True

    def pretty_print(self, q):
        q.text("vlan_vid_masked {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("value = ");
                q.text("%#x" % self.value)
                q.text(","); q.breakable()
                q.text("value_mask = ");
                q.text("%#x" % self.value_mask)
            q.breakable()
        q.text('}')

oxm.subtypes[2147486980] = vlan_vid_masked
| 28.874933 | 97 | 0.537931 |
83a72f7bbb0b240d4a3a8ca36fb888060acc1d89 | 1,512 | py | Python | bitmovin/resources/models/filters/audio_mix_channel.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 44 | 2016-12-12T17:37:23.000Z | 2021-03-03T09:48:48.000Z | bitmovin/resources/models/filters/audio_mix_channel.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 38 | 2017-01-09T14:45:45.000Z | 2022-02-27T18:04:33.000Z | bitmovin/resources/models/filters/audio_mix_channel.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 27 | 2017-02-02T22:49:31.000Z | 2019-11-21T07:04:57.000Z | from bitmovin.errors import InvalidTypeError
from bitmovin.utils import Serializable
from . import AudioMixSourceChannel
class AudioMixChannel(Serializable):
    """One output channel of an audio-mix filter.

    Holds the output ``channelNumber`` and the list of source channels mixed
    into it.  ``source_channels`` accepts ``AudioMixSourceChannel`` enum
    members or already-serialized values (e.g. parsed from JSON).
    """

    def __init__(self, channel_number, source_channels):
        super().__init__()
        self._source_channels = None
        self.channelNumber = channel_number
        self.source_channels = source_channels

    @property
    def source_channels(self):
        return self._source_channels

    @source_channels.setter
    def source_channels(self, new_value):
        # NOTE(review): assigning None keeps the previous value (early return)
        # rather than clearing it - confirm this is intentional.
        if new_value is None:
            return
        if not isinstance(new_value, list):
            raise InvalidTypeError('source_channels has to be a list of AudioMixSourceChannel enums')
        if all(isinstance(output, AudioMixSourceChannel) for output in new_value):
            # Shallow-copies the enum list - presumably to decouple the stored
            # list from the caller's list; verify before simplifying.
            source_channels = []
            for item in new_value:
                source_channels.append(item)
            self._source_channels = source_channels
        else:
            # Non-enum content (e.g. raw values from JSON) is stored as-is.
            self._source_channels = new_value

    def serialize(self):
        serialized = super().serialize()
        serialized['sourceChannels'] = self.source_channels
        return serialized

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build an AudioMixChannel from a decoded JSON dict (missing keys become None)."""
        channel_number = json_object.get('channelNumber')
        source_channels = json_object.get('sourceChannels')
        audio_mix_channel = AudioMixChannel(channel_number=channel_number, source_channels=source_channels)
        return audio_mix_channel
| 33.6 | 107 | 0.696429 |
c83cd140584c07fff0f189b6fafd222d971738e7 | 2,280 | py | Python | graph/cycle.py | gate-lab/SymbiSATtack | cca7ef6b3768715d6c91ecbe621ac4ef6b379f7e | [
"BSD-3-Clause"
] | 10 | 2021-05-21T19:54:25.000Z | 2022-01-20T19:41:37.000Z | graph/cycle.py | gate-lab/SymbiSATtack | cca7ef6b3768715d6c91ecbe621ac4ef6b379f7e | [
"BSD-3-Clause"
] | 2 | 2021-09-07T14:02:00.000Z | 2022-01-27T18:40:47.000Z | graph/cycle.py | gate-lab/SymbiSATtack | cca7ef6b3768715d6c91ecbe621ac4ef6b379f7e | [
"BSD-3-Clause"
] | 6 | 2021-06-23T19:26:10.000Z | 2022-03-17T13:54:00.000Z | from datetime import datetime
import logging
from networkx.utils import *
from graph_tool import Graph, topology
from dateutil.relativedelta import relativedelta
cnt = 0  # NOTE(review): module-level counter; appears unused in this module - confirm before removing
def diff(t_a, t_b):
    """Return ``t_b - t_a`` formatted as '<h>h <m>m <s>s'.

    NOTE(review): only the hours/minutes/seconds components of the
    relativedelta are formatted, so a difference of a day or more silently
    drops the days part - confirm callers never exceed 24h.
    """
    t_diff = relativedelta(t_b, t_a)  # later/end time comes first!
    return '{h}h {m}m {s}s'.format(h=t_diff.hours, m=t_diff.minutes, s=t_diff.seconds)
def get_cyclic_cone(wire_in, fanin_cone):
    """Collect the transitive fan-in cone of `wire_in` into the set `fanin_cone`.

    Primary inputs (wires whose ``type == "inp"``) are excluded and act as
    traversal leaves.  A wire already present in `fanin_cone` is not revisited,
    so the walk terminates even on combinational cycles - the exact structures
    this module is built to analyze.  The original recursed without consulting
    the visited set, which could loop forever on a cyclic netlist.

    :param wire_in: wire object exposing ``type`` and ``operands``.
    :param fanin_cone: set mutated in place with every non-input cone wire.
    """
    if wire_in.type == "inp" or wire_in in fanin_cone:
        return
    fanin_cone.add(wire_in)
    for operand in wire_in.operands:
        get_cyclic_cone(operand, fanin_cone)
def find_cycles(args, wires):
    """Enumerate the simple cycles of the circuit graph (networkx backend).

    Builds a directed graph with an edge from each operand wire to the wire it
    drives, then lists every simple cycle.  When ``args.p > 0`` the cycles are
    also printed one per line.

    Removed from the original: an unused ``lst`` accumulator and its
    commented-out node-priming loop (``add_edges_from`` creates nodes on
    demand), and eager string formatting in the logging calls.

    :param args: parsed CLI options; only ``args.p`` (verbosity) is used.
    :param wires: mapping of wire key -> wire object with ``type``/``operands``.
    :return: list of cycles, each a list of graph nodes.
    """
    G = nx.DiGraph()
    for w in wires:
        if wires[w].type != "inp":
            for operand in wires[w].operands:
                G.add_edge(operand, w)
    cycles = list(nx.simple_cycles(G))
    logging.warning("there are %d cycles", len(cycles))
    if args.p > 0:
        logging.info("list of cycles:")
        for cycle in cycles:
            # Trailing space preserved from the original formatting.
            print("".join(str(c) + " " for c in cycle))
    return cycles
def find_cycles2(args, wires):
    """Enumerate the simple cycles of the circuit graph (graph-tool backend).

    Mirrors :func:`find_cycles` but uses graph-tool; enumeration is capped at
    100,000 cycles.  Bug fix: ``all_circuits`` lives in ``graph_tool.topology``
    (imported here as ``topology``) - the original called the bare name, which
    raised ``NameError`` with this module's imports.

    :param args: parsed CLI options; only ``args.p`` (verbosity) is used.
    :param wires: indexable collection of wire objects exposing ``type``,
        ``operands``, ``index`` and ``name``.
    :return: list of cycles, each a list of vertex indices into ``wires``.
    """
    g = Graph()
    t_a = datetime.now()
    vertices = [g.add_vertex() for _ in range(len(wires))]
    for i in range(len(wires)):
        if wires[i].type != "inp":
            for operand in wires[i].operands:
                g.add_edge(vertices[operand.index], vertices[wires[i].index])
    cycles = []
    for c in topology.all_circuits(g):  # was: all_circuits(g) -> NameError
        if len(cycles) > 100000:
            logging.info("number of cycles is limited.")
            break
        cycles.append(c.tolist())
    t_b = datetime.now()
    logging.info("time of finding cycles: %s", diff(t_a, t_b))
    logging.info("there are %d cycles", len(cycles))
    if args.p:
        logging.info("list of cycles:")
        for cycle in cycles:
            # Trailing space preserved from the original formatting.
            logging.info("".join(wires[i].name + " " for i in cycle))
            print()
    return cycles
| 27.804878 | 86 | 0.57193 |
0a18541f54288fb90b18f0fbea3ca9f020b5351c | 9,932 | py | Python | unsupervised_text_generation/instructor/real_data/trgan_instructor.py | Holmeswww/Improving-GAN-Training-with-Probability-Ratio-Clipping-and-Sample-Reweighting | 09331da364ff19f25155ca5a6f9c7a6517b790f6 | [
"MIT"
] | 25 | 2020-07-01T05:58:27.000Z | 2022-01-19T09:09:21.000Z | unsupervised_text_generation/instructor/real_data/trgan_instructor.py | Holmeswww/Improving-GAN-Training-with-Probability-Ratio-Clipping-and-Sample-Reweighting | 09331da364ff19f25155ca5a6f9c7a6517b790f6 | [
"MIT"
] | 2 | 2021-02-07T08:20:31.000Z | 2021-03-12T12:58:33.000Z | unsupervised_text_generation/instructor/real_data/trgan_instructor.py | Holmeswww/Improving-GAN-Training-with-Probability-Ratio-Clipping-and-Sample-Reweighting | 09331da364ff19f25155ca5a6f9c7a6517b790f6 | [
"MIT"
] | 5 | 2020-06-26T13:15:08.000Z | 2022-03-28T07:39:55.000Z | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : relgan_instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import config as cfg
from instructor.train_data.instructor import BasicInstructor
from models.RelGAN_D import RelGAN_D
from models.RelGAN_G import RelGAN_G
from utils.helpers import get_fixed_temperature, get_losses
from torch import autograd
import os
class TRGANInstructor(BasicInstructor):
    """RelGAN-style instructor with an auxiliary density-ratio discriminator.

    Owns the relational-memory generator, the adversarial discriminator and a
    second discriminator ``dis_D`` trained as a real/fake classifier whose
    output is used to form probability ratios in the generator update.
    """

    def __init__(self, opt):
        super(TRGANInstructor, self).__init__(opt)

        # generator, discriminator
        self.gen = RelGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim,
                            cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
        self.dis = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,
                            gpu=cfg.CUDA)
        # Auxiliary discriminator with the same architecture, trained separately
        # (see adv_train_generator) to estimate the real/fake density ratio.
        self.dis_D = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,
                              gpu=cfg.CUDA)
        self.init_model()

        # Optimizer - distinct learning rates for MLE pre-training (gen_lr),
        # adversarial generator steps (gen_adv_lr) and the two discriminators.
        self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
        self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_adv_lr)
        self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
        self.dis_D_opt = optim.Adam(self.dis_D.parameters(), lr=cfg.dis_D_lr)
    def init_model(self):
        """Load any configured pretrained weights and move all models to the GPU.

        NOTE(review): ``create_oracle`` is not imported in this module and
        ``self.oracle`` is provided by the base instructor; the oracle branch
        looks copied from the synthetic-data variant - confirm it is reachable
        and that the name resolves before relying on ``cfg.oracle_pretrain``.
        """
        if cfg.oracle_pretrain:
            if not os.path.exists(cfg.oracle_state_dict_path):
                create_oracle()
            self.oracle.load_state_dict(torch.load(cfg.oracle_state_dict_path))

        if cfg.dis_pretrain:
            self.log.info(
                'Load pretrained discriminator: {}'.format(cfg.pretrained_dis_path))
            self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
        if cfg.gen_pretrain:
            self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path))
            self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path, map_location='cuda:{}'.format(cfg.device)))

        if cfg.CUDA:
            self.oracle = self.oracle.cuda()
            self.gen = self.gen.cuda()
            self.dis = self.dis.cuda()
            self.dis_D = self.dis_D.cuda()
    def _run(self):
        """Full training pipeline: MLE pre-training, then adversarial training.

        Pre-training is skipped when ``cfg.gen_pretrain`` indicates weights were
        already loaded.  Each adversarial epoch runs generator then
        discriminator steps, anneals the Gumbel temperature, and logs/saves
        every ``cfg.adv_log_step`` epochs.  An external signal
        (``self.sig.adv_sig``) can stop training early.
        """
        # ===PRE-TRAINING (GENERATOR)===
        if not cfg.gen_pretrain:
            self.log.info('Starting Generator MLE Training...')
            self.pretrain_generator(cfg.MLE_train_epoch)
            if cfg.if_save and not cfg.if_test:
                torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
                print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))

        # # ===ADVERSARIAL TRAINING===
        self.log.info('Starting Adversarial Training...')
        progress = tqdm(range(cfg.ADV_train_epoch))

        for adv_epoch in progress:
            self.sig.update()  # poll the external stop-signal file
            if self.sig.adv_sig:
                g_loss = self.adv_train_generator(cfg.ADV_g_step)  # Generator
                d_loss = self.adv_train_discriminator(cfg.ADV_d_step)  # Discriminator
                self.update_temperature(adv_epoch, cfg.ADV_train_epoch)  # update temperature

                progress.set_description(
                    'g_loss: %.4f, d_loss: %.4f, temperature: %.4f' % (g_loss, d_loss, self.gen.temperature))

                # TEST
                if adv_epoch % cfg.adv_log_step == 0:
                    self.log.info('[ADV] epoch %d: g_loss: %.4f, d_loss: %.4f, %s' % (
                        adv_epoch, g_loss, d_loss, self.cal_metrics(fmt_str=True)))
                    if cfg.if_save and not cfg.if_test:
                        self._save('ADV', adv_epoch)
            else:
                self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
                progress.close()
                break
def _test(self):
print('>>> Begin test...')
self._run()
pass
    def pretrain_generator(self, epochs):
        """
        Max Likelihood Pre-training for the generator
        """
        for epoch in range(epochs):
            # Poll the external signal file; allows skipping straight to adversarial training.
            self.sig.update()
            if self.sig.pre_sig:
                # ===Train===
                pre_loss = self.train_gen_epoch(self.gen, self.train_data.loader, self.mle_criterion, self.gen_opt)
                # ===Test===: log metrics periodically and on the final epoch.
                if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
                    self.log.info(
                        '[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
                    if cfg.if_save and not cfg.if_test:
                        self._save('MLE', epoch)
            else:
                self.log.info('>>> Stop by pre signal, skip to adversarial training...')
                break
    def adv_train_generator(self, g_step):
        """Run *g_step* adversarial generator updates using a PPO-style clipped surrogate.

        dis_D is an auxiliary discriminator trained with BCE (real=1, fake=0);
        its sigmoid scores are turned into the ratio (1-D)/D, presumably a
        density-ratio / importance-weight estimate — the clipping below follows
        the PPO clipped-objective pattern. Returns the mean generator loss.
        """
        criterion = nn.BCELoss()
        total_loss = 0
        # Snapshot the "old" ratio P0 from samples of the current generator (no gradients).
        with torch.no_grad():
            gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
            if cfg.CUDA:
                gen_samples = gen_samples.cuda()
            D0 = torch.sigmoid(self.dis_D(gen_samples))
            # clamp avoids division by zero when D0 is ~0
            P0 = (1.-D0)/torch.clamp(D0, min = 1e-7)
        for step in range(g_step):
            # --- update auxiliary discriminator dis_D with real/fake BCE labels ---
            real_samples = self.train_data.random_batch()['target']
            gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
            real_label = torch.full((D0.shape[0],), 1.)
            fake_label = torch.full((D0.shape[0],), 0.)
            if cfg.CUDA:
                real_samples, gen_samples, real_label, fake_label = real_samples.cuda(), gen_samples.cuda(), real_label.cuda(), fake_label.cuda()
            # print(self.dis_D(real_samples).shape, real_label.shape)
            errDD_real = criterion(torch.sigmoid(self.dis_D(real_samples)), real_label)
            # detach() so dis_D's update does not backprop into the generator
            errDD_fake = criterion(torch.sigmoid(self.dis_D(gen_samples.detach())), fake_label)
            self.optimize(self.dis_D_opt, errDD_real+errDD_fake, self.dis_D)
            # --- generator update with clipped ratio on fresh samples ---
            gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True).cuda()
            real_samples = F.one_hot(self.train_data.random_batch()['target'], cfg.vocab_size).float().cuda()
            D1 = torch.sigmoid(self.dis_D(gen_samples))
            P1 = (1.-D1)
            # new/old ratio; clamp guards against division by zero
            ratio = (P1/torch.clamp(D1*P0, min = 1e-7))
            ratio_clipped = torch.clamp(ratio, 1.0 - cfg.clip_param, 1.0 + cfg.clip_param)
            # ===Train===
            d_out_real = self.dis(real_samples)
            d_out_fake = self.dis(gen_samples)
            surr1 = ratio * d_out_fake
            surr2 = ratio_clipped * d_out_fake
            # Pessimistic surrogate: min for positive scores, max for negative ones.
            target = torch.where(d_out_fake>0, torch.min(surr1, surr2), torch.max(surr1, surr2))
            g_loss, _ = get_losses(d_out_real, target, cfg.loss_type)
            # g_loss = -d_out_fake.mean()
            self.optimize(self.gen_adv_opt, g_loss, self.gen)
            total_loss += g_loss.item()
        # Average over the inner steps; 0 when no steps were requested.
        return total_loss / g_step if g_step != 0 else 0
    def calc_gradient_penalty(self, real_data, fake_data):
        """WGAN-GP penalty: mean of (||grad D(x_interp)||_2 - 1)^2 on random real/fake interpolates."""
        BATCH_SIZE = real_data.shape[0]
        # One interpolation coefficient per sample, broadcast over all of its elements.
        alpha = torch.rand(BATCH_SIZE, 1)
        alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(real_data.shape)
        alpha = alpha.cuda()
        interpolates = alpha * real_data + ((1 - alpha) * fake_data)
        interpolates = interpolates.cuda()
        # requires_grad=True so we can differentiate D's output w.r.t. the inputs.
        interpolates = autograd.Variable(interpolates, requires_grad=True)
        # disc_interpolates = netD(interpolates)
        disc_interpolates = self.dis(interpolates)
        # create_graph=True keeps the penalty itself differentiable for the D update.
        gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                  grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
                                  create_graph=True, retain_graph=True, only_inputs=True)[0]
        gradients = gradients.contiguous().view(gradients.size(0), -1)
        gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
        return gradient_penalty
    def adv_train_discriminator(self, d_step):
        """Run *d_step* adversarial updates of self.dis; returns the mean discriminator loss."""
        total_loss = 0
        for step in range(d_step):
            # NOTE(review): real_samples here are raw targets while the generator
            # branch one-hots them — presumably self.dis handles both; confirm.
            real_samples = self.train_data.random_batch()['target']
            gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
            if cfg.CUDA:
                real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()
            # ===Train===
            d_out_real = self.dis(real_samples)
            d_out_fake = self.dis(gen_samples)
            _, d_loss = get_losses(d_out_real, d_out_fake, cfg.loss_type)
            if cfg.GP:
                # Optional WGAN-GP regularization weighted by cfg.LAMBDA;
                # .data detaches the samples so the penalty graph starts fresh.
                gradient_penalty = self.calc_gradient_penalty(real_samples.data, gen_samples.data)
                d_loss = d_loss+cfg.LAMBDA*gradient_penalty
            # print(d_loss.shape)
            self.optimize(self.dis_opt, d_loss, self.dis)
            total_loss += d_loss.item()
        # Average over the steps; 0 when no steps were requested.
        return total_loss / d_step if d_step != 0 else 0
    def update_temperature(self, i, N):
        """Anneal the generator's Gumbel-softmax temperature at epoch *i* of *N* using the cfg.temp_adpt schedule."""
        self.gen.temperature = get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
"""Add clip_grad_norm_"""
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
| 44.738739 | 146 | 0.594543 |
43864f6e5d34382265e7853dc6bfb908b06a9593 | 30,586 | py | Python | baselines/run.py | jbreeding28/baselines | 517dc2e0d0c98b8b70c483f5ad1aaf5984f4c4b7 | [
"MIT"
] | null | null | null | baselines/run.py | jbreeding28/baselines | 517dc2e0d0c98b8b70c483f5ad1aaf5984f4c4b7 | [
"MIT"
] | null | null | null | baselines/run.py | jbreeding28/baselines | 517dc2e0d0c98b8b70c483f5ad1aaf5984f4c4b7 | [
"MIT"
] | null | null | null | import sys
import re
import multiprocessing
import os
import os.path as osp
import gym
import gc
import cloudpickle
from collections import defaultdict
import tensorflow as tf
import numpy as np
import datetime
import matplotlib.pyplot as plt
import time
import random
import cv2
cv2.ocl.setUseOpenCL(False)
from baselines.common.atari_wrappers import *
import matplotlib
import csv
from baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env
from baselines.common.tf_util import get_session
from baselines.common import plot_util as pu
from baselines import logger
from importlib import import_module
try:
from mpi4py import MPI
except ImportError:
MPI = None
try:
import pybullet_envs
except ImportError:
pybullet_envs = None
try:
import roboschool
except ImportError:
roboschool = None
# Registry of known env ids grouped by env type (e.g. "atari" -> {env ids}).
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
    # TODO: solve this with regexes
    # Env type is derived from the entry point module path (last dotted segment).
    env_type = env._entry_point.split(':')[0].split('.')[-1]
    _game_envs[env_type].add(env.id)

# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = {
    'BubbleBobble-Nes',
    'SuperMarioBros-Nes',
    'TwinBee3PokoPokoDaimaou-Nes',
    'SpaceHarrier-Nes',
    'SonicTheHedgehog-Genesis',
    'Vectorman-Genesis',
    'FinalFight-Snes',
    'SpaceInvaders-Snes',
}
def train(args, extra_args):
    """Build the environment and run the chosen algorithm's learn().

    Returns (model_1, model_2, sess_1, sess_2, env); model_2/sess_2 may be
    None when only a single agent is trained (see the modified learn()).
    """
    env_type, env_id = get_env_type(args)
    print('env_type: {}'.format(env_type))
    total_timesteps = int(args.num_timesteps)
    seed = args.seed
    learn = get_learn_function(args.alg)
    alg_kwargs = get_learn_function_defaults(args.alg, env_type)
    # Command-line extras override the per-algorithm defaults.
    alg_kwargs.update(extra_args)
    env = build_env(args)
    if args.save_video_interval != 0:
        env = VecVideoRecorder(env, osp.join(logger.get_dir(), "videos"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)
    if args.network:
        alg_kwargs['network'] = args.network
    else:
        if alg_kwargs.get('network') is None:
            alg_kwargs['network'] = get_default_network(env_type)
    print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs))
    # updated learning function, returning up to two models
    # I have to pass in the multiplayer argument
    model_1, model_2, sess_1, sess_2 = learn(
        env=env,
        seed=seed,
        total_timesteps=total_timesteps,
        print_freq=10,
        multiplayer=args.multiplayer,
        **alg_kwargs
    )
    return model_1, model_2, sess_1, sess_2, env
def build_env(args):
    """Construct the (possibly wrapped/vectorized) gym environment described by *args*."""
    ncpu = multiprocessing.cpu_count()
    # macOS reports hyperthreads; halve to approximate physical cores.
    if sys.platform == 'darwin': ncpu //= 2
    nenv = args.num_env or ncpu
    alg = args.alg
    seed = args.seed
    play = args.play
    mode = args.mode
    multiplayer = args.multiplayer

    env_type, env_id = get_env_type(args)
    isSpaceInvaders = False
    if "SpaceInvaders" in args.env:
        isSpaceInvaders = True
    if env_type in {'atari', 'retro'}:
        # this should be the only algorithm I'll use
        if alg == 'deepq':
            # BEGIN MY CODE
            # clip reward when training
            # don't clip when playing to see actual score
            # add mode in as an environment parameter
            if play:
                # if I'm playing to see how well the network scores, I want to unclip rewards
                env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True, 'clip_rewards': False}, env_kwargs={'game_mode': mode})
            else:
                # otherwise, keep the basic reward used by the base algorithm
                if multiplayer and isSpaceInvaders:
                    # unclip rewards for space invaders multiplayer, I'll do it manually.
                    env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True, 'clip_rewards': False}, env_kwargs={'game_mode': mode})
                else:
                    env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True, 'clip_rewards': True}, env_kwargs={'game_mode': mode})
            # END MY CODE
        elif alg == 'trpo_mpi':
            env = make_env(env_id, env_type, seed=seed)
        else:
            # Other algorithms get a vectorized env with 4 stacked frames.
            frame_stack_size = 4
            env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)
            env = VecFrameStack(env, frame_stack_size)
    else:
        # Non-atari/retro envs: single-threaded TF session with GPU memory growth.
        config = tf.ConfigProto(allow_soft_placement=True,
                               intra_op_parallelism_threads=1,
                               inter_op_parallelism_threads=1)
        config.gpu_options.allow_growth = True
        get_session(config=config)
        flatten_dict_observations = alg not in {'her'}
        env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale, flatten_dict_observations=flatten_dict_observations)
        if env_type == 'mujoco':
            env = VecNormalize(env, use_tf=True)
    return env
def get_env_type(args):
    """Resolve the environment type (e.g. 'atari') and concrete env id for *args*.

    Honors an explicit ``args.env_type`` when given; otherwise re-parses the
    gym registry and looks the id (or type name) up in ``_game_envs``.
    Raises AssertionError when the id cannot be classified.
    """
    env_id = args.env

    if args.env_type is not None:
        return args.env_type, env_id

    # Re-parse the gym registry, since we could have new envs since last time.
    for env in gym.envs.registry.all():
        env_type = env._entry_point.split(':')[0].split('.')[-1]
        _game_envs[env_type].add(env.id)  # This is a set so add is idempotent

    if env_id in _game_envs.keys():
        # The user passed a type name (e.g. "atari"); pick any env of that type.
        env_type = env_id
        env_id = [g for g in _game_envs[env_type]][0]
    else:
        env_type = None
        for g, e in _game_envs.items():
            if env_id in e:
                env_type = g
                break
        if ':' in env_id:
            env_type = re.sub(r':.*', '', env_id)
        # BUGFIX: the original format string passed two arguments but had only
        # one placeholder, so the list of known types never showed up.
        assert env_type is not None, 'env_id {} is not recognized in env types {}'.format(env_id, _game_envs.keys())

    return env_type, env_id
def get_default_network(env_type):
    """Return the default policy network architecture for the given env type."""
    # Pixel-based env families use a CNN; everything else defaults to an MLP.
    return 'cnn' if env_type in {'atari', 'retro'} else 'mlp'
def get_alg_module(alg, submodule=None):
    """Import and return the module implementing *alg* (optionally one of its submodules)."""
    submodule = submodule or alg
    try:
        # first try to import the alg module from baselines
        return import_module('.'.join(['baselines', alg, submodule]))
    except ImportError:
        # then from rl_algs
        return import_module('.'.join(['rl_' + 'algs', alg, submodule]))
def get_learn_function(alg):
    """Look up the ``learn`` entry point exposed by the algorithm's module."""
    alg_module = get_alg_module(alg)
    return alg_module.learn
def get_learn_function_defaults(alg, env_type):
    """Return the default kwargs that *alg* declares for *env_type*, or {} when none exist."""
    try:
        alg_defaults = get_alg_module(alg, 'defaults')
        env_defaults_fn = getattr(alg_defaults, env_type)
        kwargs = env_defaults_fn()
    except (ImportError, AttributeError):
        # No defaults module, or no entry for this env type: fall back to empty.
        kwargs = {}
    return kwargs
def parse_cmdline_kwargs(args):
    '''
    convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
    '''
    def parse(v):
        # NOTE(review): eval() on raw command-line input executes arbitrary code.
        # Acceptable for a trusted local CLI, but never expose this to untrusted
        # input; ast.literal_eval would be safer if name lookup is not required.
        assert isinstance(v, str)
        try:
            return eval(v)
        except (NameError, SyntaxError):
            # Not valid Python (e.g. a bare string like "cnn"): keep it as-is.
            return v

    return {k: parse(v) for k,v in parse_unknown_args(args).items()}
def configure_logger(log_path, **kwargs):
    """Configure the baselines logger, preferring an explicit log path when one is given."""
    if log_path is None:
        logger.configure(**kwargs)
    else:
        logger.configure(log_path)
def main(args):
    """Entry point: train model(s), optionally save/evaluate them, and run the
    extra play / build-state-library / evaluate-states modes from the CLI flags.

    Returns (model_1, model_2); model_2 is None in single-player runs.
    """
    # configure logger, disable logging in child MPI processes (with rank > 0)
    arg_parser = common_arg_parser()
    args, unknown_args = arg_parser.parse_known_args(args)
    extra_args = parse_cmdline_kwargs(unknown_args)
    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        configure_logger(args.log_path)
    else:
        rank = MPI.COMM_WORLD.Get_rank()
        configure_logger(args.log_path, format_strs=[])
    # return two models, two sessions, and the environment type
    # if there's only a single model being trained, model_2 and sess_2 are None
    model_1, model_2, sess_1, sess_2, env = train(args, extra_args)
    # figure out if it's a multiplayer session
    # multiplayer stuff is left entirely up to the user
    multiplayer = args.multiplayer
    if args.save_path is not None and rank == 0:
        if multiplayer:
            save_path_1 = osp.expanduser(args.save_path + "_player1")
            # I needed the sessions to properly save the models here
            # the variables are specifically linked to the sessions
            model_1.save(save_path_1, sess_1)
            save_path_2 = osp.expanduser(args.save_path + "_player2")
            model_2.save(save_path_2, sess_2)
        else:
            save_path = osp.expanduser(args.save_path)
            model_1.save(save_path, sess_1)
    # play a number of games to evaluate the network
    if args.play:
        logger.log("Running trained model")
        obs = env.reset()
        action_path = osp.expanduser(args.log_path + "/actions.csv")
        actions_1 = list()
        actions_2 = list()
        rewards_1 = list()
        rewards_2 = list()
        done_list = list()
        lives_list = list()
        state_1 = model_1.initial_state if hasattr(model_1, 'initial_state') else None
        # copy what the first model is doing if there's multiple models
        if multiplayer:
            state_2 = model_2.initial_state if hasattr(model_2, 'initial_state') else None
        dones = np.zeros((1,))
        # BEGIN MY CODE
        # create a bunch of variables for holding various types of scores
        # episode reward is left over from the original but isn't really used
        episode_rew_1 = 0
        episode_rew_2 = 0
        # these variables hold the score of the current game and score across all games
        game_score = 0
        game_score_1 = 0
        game_score_2 = 0
        total_score = 0
        games_won = 0
        # keep hold of the highest score, initialize to zero
        max_score = 0
        # keep track of how many games are played
        game_count = 0
        game_steps = 0
        # get the number of games that are specified (default 10)
        # dependent on the user to make sure the number is valid
        num_games = args.num_games
        # boolean variable which tells the program whether or not to render the game
        render = args.render or args.render_fast
        # the default display time for one frame
        # due to the stacking of frames, only every fourth frame is displayed
        # and these games run at 60 fps
        # each fourth frame is rendered over the three missing frames
        frame_time = float(1/60)
        # get the render speed (default 3)
        render_speed = args.render_speed
        # constrain the speed to between 1x and 10x
        if render_speed <= 1:
            render_speed = 1
        elif render_speed >= 10:
            render_speed = 10
        # calculate the appropriate frame speed
        frame_time = frame_time/render_speed
        computer_view = args.computer_view
        # need special code to handle Pong
        # create variable to keep track of whether or not I'm playing Pong
        isPong = False
        if "Pong" in args.env:
            isPong = True
        isSpaceInvaders = False
        if "SpaceInvaders" in args.env:
            isSpaceInvaders = True
        # while loop carried over from base code
        # this will play games until so many have been played
        while True:
            # each loop through, get the current time at the start
            start_time = datetime.datetime.now()
            # get the appropriate action based on the observation of the environment
            if state_1 is not None:
                action_1, _, state_1, _ = model_1.step(obs,S=state_1, M=dones)
                # duplicate for a second model
                if multiplayer and state_2 is not None:
                    action_2, _, state_2, _ = model_2.step(obs,S=state_2, M=dones)
            else:
                action_1, _, _, _ = model_1.step(obs)
                # have the second model take an action if appropriate
                if multiplayer:
                    action_2, _, _, _ = model_2.step(obs)
            # take a step forward in the environment, return new observation
            # return any reward and if the environment needs to be reset
            # pass in both actions if there are two models
            # reward in this case is the default reward
            # in competitive multiplayer, this is Player 1's reward
            if multiplayer:
                obs, rew_1, rew_2, done, _ = env.step(action_1, action_2)
            # otherwise, ignore the second
            else:
                obs, rew_1, rew_2, done, _ = env.step(action_1)
            game_steps += 1
            # check to see if either player has died in Space Invaders multiplayer
            # this rewards a player when their opponent dies
            # remove this just to measure the score gained from destroying aliens
            if isSpaceInvaders and multiplayer:
                if rew_1 >= 200:
                    rew_1 = rew_1 - 200
                if rew_2 >= 200:
                    rew_2 = rew_2 - 200
            # get the number of lives remaining, which is relevant in certain games
            # in the multiplayer games I'll look at, the players should share a common life
            #append actions, rewards, and done (converted to 0 or 1) to the lists
            actions_1.append(action_1[0])
            rewards_1.append(rew_1)
            if multiplayer:
                actions_2.append(action_2[0])
                rewards_2.append(rew_2)
            done_list.append(int(done == True))
            lives = env.getLives()
            # append number of lives to the list
            lives_list.append(lives)
            # add reward from previous step to overall score
            episode_rew_1 += rew_1[0] if isinstance(env, VecEnv) else rew_1
            episode_rew_2 += rew_2[0] if isinstance(env, VecEnv) else rew_2
            # render the frame if the user wants it
            if render:
                if computer_view:
                    env.render(frame=obs)
                else:
                    env.render()
            done = done.any() if isinstance(done, np.ndarray) else done
            # done is true whenever a reset is necessary
            # occurs on death or game over
            if done:
                # Pong only uses done on game over, so make the episode reward the game score
                if isPong:
                    game_score = episode_rew_1
                # if it's not Pong, just do what I did before
                else:
                    game_score += episode_rew_1 + episode_rew_2
                    game_score_1 += episode_rew_1
                    game_score_2 += episode_rew_2
                if isPong:
                    total_score += episode_rew_1
                    games_won += (episode_rew_1 > 0)
                else:
                    total_score += episode_rew_1 + episode_rew_2
                # reset for next go around
                episode_rew_1 = 0
                episode_rew_2 = 0
                # reset the environment
                # on game over, this starts a new game
                # otherwise, continues the game but returns player to initial position
                obs = env.reset()
            # can make the games run at a given framerate
            if render and not args.render_fast:
                # just wait until it's time to push a new frame
                while (datetime.datetime.now() - start_time).total_seconds() < frame_time:
                    # pass means just wait and do nothing
                    pass
            # if there are no lives left, the game is over
            # use number of lives to differentiate between losing a life and game over
            # Pong doesn't use lives, and doesn't return "done" until game over
            # use the isPong variable to keep track of this
            if (lives == 0 and not isPong) or (done and isPong):
                # update highest score
                if game_score > max_score:
                    max_score = game_score
                # increment game counter
                game_count += 1
                # after the game is over, log the game number and score
                # game number is just so the person running this understands where they are
                # this method is based off of what I saw in the Deep Q code
                # record the data to the logger
                logger.record_tabular("game", game_count)
                if multiplayer:
                    logger.record_tabular("total score", game_score)
                    logger.record_tabular("player 1 score", game_score_1)
                    logger.record_tabular("player 2 score", game_score_2)
                else:
                    logger.record_tabular("score", game_score)
                logger.record_tabular("steps", game_steps)
                # then dump it to the log file and the terminal
                logger.dump_tabular()
                # game is over, reset the score
                game_score = 0
                game_score_1 = 0
                game_score_2 = 0
                game_steps = 0
                # print out average and max score when number of games is finished
                if game_count == num_games:
                    print(" ")
                    print('average score={}'.format(float(total_score/num_games)))
                    if isPong:
                        print('win percentage={}'.format(float(games_won * 100/num_games)))
                    else:
                        print('win percentage={}'.format(float(100)))
                    # break out of this true loop
                    break
        # END MY CODE
        # create file to save actions to
        # open it for writing
        action_file = open(action_path,'w+')
        with action_file as csv_scores:
            # filewriter object
            filewriter = csv.writer(csv_scores, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            # if it's multiplayer, write values for both players
            if multiplayer:
                # column headers
                filewriter.writerow(['model1_actions', 'model1_rew', 'model2_actions', 'model2_rew', 'lives', 'done'])
                for j in range(0,len(actions_1)):
                    # iterate through arrays and write row by row
                    filewriter.writerow([actions_1[j],rewards_1[j],actions_2[j],rewards_2[j],lives_list[j],done_list[j]])
            else:
                # column headers for single-player
                filewriter.writerow(['model1_actions', 'model1_rew', 'lives', 'done'])
                for j in range(0,len(actions_1)):
                    # write row by row
                    filewriter.writerow([actions_1[j],rewards_1[j],lives_list[j],done_list[j]])
        env.close()
    if args.build_state_library:
        # based off of the library path I specify, specify file locations for the library and list of actions
        library_path = osp.expanduser(args.library_path + "/state_library")
        action_path = osp.expanduser(args.library_path + "/actions.csv")
        # empty list
        state_library = list()
        logger.log("Building state library")
        # initialize environment
        obs = env.reset()
        state_1 = model_1.initial_state if hasattr(model_1, 'initial_state') else None
        # copy what the first model is doing if there's multiple models
        if multiplayer:
            state_2 = model_2.initial_state if hasattr(model_2, 'initial_state') else None
        dones = np.zeros((1,))
        isPong = False
        # Pong needs to be handled differently
        if "Pong" in args.env:
            isPong = True
        model_1_actions = list()
        if multiplayer:
            model_2_actions = list()
        # play one full game, recording every observation and chosen action
        while True:
            state_library.append(StateWrapper(obs))
            if state_1 is not None:
                action_1, _, state_1, _ = model_1.step(obs,S=state_1, M=dones)
                # duplicate for a second model
                if multiplayer and state_2 is not None:
                    action_2, _, state_2, _ = model_2.step(obs,S=state_2, M=dones)
            else:
                action_1, _, _, _ = model_1.step(obs)
                # have the second model take an action if appropriate
                if multiplayer:
                    action_2, _, _, _ = model_2.step(obs)
            # take a step forward in the environment, return new observation
            # return any reward and if the environment needs to be reset
            model_1_actions.append(action_1[0])
            if multiplayer:
                model_2_actions.append(action_2[0])
            # pass in both actions if there are two models
            # reward in this case is the default reward
            # in competitive multiplayer, this is Player 1's reward
            if multiplayer:
                obs, _, _, done, _ = env.step(action_1, action_2)
            # otherwise, ignore the second
            else:
                obs, _, _, done, _ = env.step(action_1)
            lives = env.getLives()
            done = done.any() if isinstance(done, np.ndarray) else done
            # done is true whenever a reset is necessary
            # occurs on death or game over
            if done:
                # Pong only uses done on game over, so make the episode reward the game score
                if isPong:
                    break
                # if it's not Pong, just do what I did before
                elif lives == 0:
                    break
                else:
                    obs = env.reset()
        env.close()
        dirname = os.path.dirname(library_path)
        if any(dirname):
            os.makedirs(dirname, exist_ok=True)
        # pickle the full list of recorded states
        library_file = open(library_path,'w+b')
        cloudpickle.dump(state_library, library_file)
        library_file.close()
        action_file = open(action_path,'w+')
        with action_file as csv_scores:
            filewriter = csv.writer(csv_scores, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            if multiplayer:
                filewriter.writerow(['model1_actions_groundtruth', 'model2_actions_groundtruth'])
                for j in range(0,len(model_1_actions)):
                    filewriter.writerow([model_1_actions[j], model_2_actions[j]])
            else:
                filewriter.writerow(['model1_actions_groundtruth'])
                for j in range(0,len(model_1_actions)):
                    filewriter.writerow([model_1_actions[j]])
        image_path = osp.expanduser(args.library_path + "/state_images/")
        dirname = os.path.dirname(image_path)
        if any(dirname):
            os.makedirs(dirname, exist_ok=True)
        # convert each state to an image
        for i in range(1,len(state_library) + 1):
            image_path = osp.expanduser(args.library_path + "/state_images/state" + str(i) + ".jpg")
            img = state_library[i-1].state
            frame1 = img[0:84,0:84,0]
            frame2 = img[0:84,0:84,1]
            frame3 = img[0:84,0:84,2]
            frame1 = np.reshape(frame1, (84, 84, 1))
            frame2 = np.reshape(frame2, (84, 84, 1))
            frame3 = np.reshape(frame3, (84, 84, 1))
            if "SpaceInvaders" in args.env:
                frames = [frame1, frame2, frame3]
            else:
                frame4 = img[0:84,0:84,3]
                frame4 = np.reshape(frame4, (84, 84, 1))
                frames = [frame1, frame2, frame3, frame4]
            frame = LazyFrames(frames)
            # weight the stacked frames so older frames appear dimmer (motion trail)
            if "SpaceInvaders" in args.env:
                img=np.round(0.25*frame._frames[0])+np.round(0.5*frame._frames[1])+np.round(frame._frames[2])
            else:
                img=np.round(0.125*frame._frames[0])+np.round(0.25*frame._frames[1])+np.round(0.5*frame._frames[2])+np.round(frame._frames[3])
            img = img.astype(np.dtype('u1'))
            img=np.concatenate((img, img, img),axis=2)
            height = np.shape(img)[0]
            width = np.shape(img)[1]
            size = 4
            # resize the screen and return it as the image
            img = cv2.resize(img, (width*size, height*size), interpolation=cv2.INTER_AREA)
            matplotlib.image.imsave(image_path, img)
    if args.evaluate_states:
        library_path = osp.expanduser(args.library_path + "/state_library")
        action_load_path = osp.expanduser(args.library_path + "/actions.csv")
        action_test_path = osp.expanduser(args.eval_path + "/actions.csv")
        loaded_models_path = osp.expanduser(args.eval_path + "/loaded_models.txt")
        dirname = os.path.dirname(loaded_models_path)
        if any(dirname):
            os.makedirs(dirname, exist_ok=True)
        # record which model checkpoints were used for this evaluation
        loaded_models_file = open(loaded_models_path,'w+')
        loaded_models_file.write(str(extra_args))
        loaded_models_file.close()
        library_file = open(library_path, 'rb')
        state_library = cloudpickle.load(library_file)
        library_file.close()
        model_1_actions = list()
        if multiplayer:
            model_2_actions = list()
        for i in range(1,len(state_library) + 1):
            # reconstruct the state
            # get whole matrix
            img = state_library[i-1].state
            # extract each layer
            frame1 = img[0:84,0:84,0]
            frame2 = img[0:84,0:84,1]
            frame3 = img[0:84,0:84,2]
            # reshape each layer into the proper format
            frame1 = np.reshape(frame1, (84, 84, 1))
            frame2 = np.reshape(frame2, (84, 84, 1))
            frame3 = np.reshape(frame3, (84, 84, 1))
            if "SpaceInvaders" in args.env:
                # concatenate
                frames = [frame1, frame2, frame3]
            else:
                # fourth frame only used for Pong, doesn't work for Space Invaders
                frame4 = img[0:84,0:84,3]
                frame4 = np.reshape(frame4, (84, 84, 1))
                # concatenate
                frames = [frame1, frame2, frame3, frame4]
            # create the state by passing in the concatenated frames to the LazyFrames class
            obs = LazyFrames(frames)
            # the observation can now be used with the models
            # get actions and append them to my saved lists
            action_1, _, _, _ = model_1.step(obs)
            model_1_actions.append(action_1[0])
            if multiplayer:
                action_2, _, _, _ = model_2.step(obs)
                model_2_actions.append(action_2[0])
        # load the ground truth actions from the library
        action_load_file = open(action_load_path, 'r')
        reader = csv.reader(action_load_file)
        # open new file for writing
        model_action_file = open(action_test_path,'w+')
        with model_action_file as csv_scores:
            filewriter = csv.writer(csv_scores, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            row_num = 0
            # loop through rows in ground truth file
            for row in reader:
                if row_num == 0:
                    # write headers if it's row 0
                    if multiplayer:
                        write_data = ['model1_actions', 'model2_actions']
                    else:
                        write_data = ['model1_actions']
                else:
                    # otherwise, get actions to write
                    if multiplayer:
                        write_data = [model_1_actions[row_num - 1], model_2_actions[row_num - 1]]
                    else:
                        write_data = [model_1_actions[row_num - 1]]
                # write the row of the ground truth actions plus the taken actions from whatever models I'm testing and write the row
                if len(row) == 1:
                    filewriter.writerow([row[0]] + write_data)
                else:
                    filewriter.writerow([row[0], row[1]] + write_data)
                row_num += 1
        # save the states as images
        image_path = osp.expanduser(args.eval_path + "/state_images/")
        dirname = os.path.dirname(image_path)
        if any(dirname):
            os.makedirs(dirname, exist_ok=True)
        # for each state
        for i in range(1,len(state_library) + 1):
            # reconstruct the state properly
            image_path = osp.expanduser(args.eval_path + "/state_images/state" + str(i) + ".jpg")
            img = state_library[i-1].state
            frame1 = img[0:84,0:84,0]
            frame2 = img[0:84,0:84,1]
            frame3 = img[0:84,0:84,2]
            frame1 = np.reshape(frame1, (84, 84, 1))
            frame2 = np.reshape(frame2, (84, 84, 1))
            frame3 = np.reshape(frame3, (84, 84, 1))
            if "SpaceInvaders" in args.env:
                frames = [frame1, frame2, frame3]
            else:
                frame4 = img[0:84,0:84,3]
                frame4 = np.reshape(frame4, (84, 84, 1))
                frames = [frame1, frame2, frame3, frame4]
            frame = LazyFrames(frames)
            # multiply different layers of the frame to visualize motion
            if "SpaceInvaders" in args.env:
                img=np.round(0.25*frame._frames[0])+np.round(0.5*frame._frames[1])+np.round(frame._frames[2])
            else:
                img=np.round(0.125*frame._frames[0])+np.round(0.25*frame._frames[1])+np.round(0.5*frame._frames[2])+np.round(frame._frames[3])
            # convert to 8 bit unsigned integers
            img = img.astype(np.dtype('u1'))
            # concatenate to get an "RGB" image
            img=np.concatenate((img, img, img),axis=2)
            # upscale by a factor of 4
            height = np.shape(img)[0]
            width = np.shape(img)[1]
            size = 4
            # resize the screen and return it as the image
            img = cv2.resize(img, (width*size, height*size), interpolation=cv2.INTER_AREA)
            # save the image
            matplotlib.image.imsave(image_path, img)
    sess_1.close()
    if multiplayer:
        sess_2.close()
    return model_1, model_2
# small class to easily access state observations in the saved lists I create
class StateWrapper(object):
    """Tiny value holder for a single saved state observation (kept picklable)."""

    def __init__(self, obj):
        self.state = obj
if __name__ == '__main__':
    # Entry point: forward the raw argv (program name included) to main().
    main(sys.argv)
| 41.221024 | 176 | 0.591186 |
df5715a52a38ecdb9cd78a16ce7dd1dde887ebb7 | 3,863 | py | Python | axonius_api_client/examples/example_custom_cb.py | rwils83/axonius_api_client | 1990ed4d1287482a4648dc51edcaa5eb08255f5b | [
"MIT"
] | null | null | null | axonius_api_client/examples/example_custom_cb.py | rwils83/axonius_api_client | 1990ed4d1287482a4648dc51edcaa5eb08255f5b | [
"MIT"
] | 3 | 2021-05-18T14:28:30.000Z | 2021-09-06T20:01:56.000Z | axonius_api_client/examples/example_custom_cb.py | rwils83/axonius_api_client | 1990ed4d1287482a4648dc51edcaa5eb08255f5b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Utilities for this package."""
import json
import os
from datetime import datetime, timedelta
import axonius_api_client as axonapi
from axonius_api_client.constants import ALL_NAME
PAGE_SIZE = 10  # assets fetched per page when paging is enabled
MAX_PAGE_TOOK_SECS = 10  # pages slower than this (seconds) trigger a warning echo
MAX_ROW_KB = 1000  # rows larger than this (KB) get a per-key size breakdown
MAX_KEY_KB = 2000  # individual keys larger than this (KB) are reported as errors
# Template for the slow-page / row-size warning line.
ROW_TMPL = (
    "#{row_num} {axid} KB: {row_kb:0.2f}, KB TOTAL: "
    "{rows_size:0.2f}, page took: {page_took:0.2f}"
)
# Template for the oversized-key report line.
KEY_TMPL = " KEY over {MAX_KEY_KB}: {k}, KB: {vkb:0.2f}"
def _field_wanted(field):
    """Truthy when *field* is a selectable, root-level, non-complex field and
    not the aggregated "all" pseudo-field (ALL_NAME)."""
    # All lookups happen eagerly (matching a strict "name_base" key access),
    # then the checks are combined in one short-circuit chain.
    qual_name = field.get("name_qual")
    root = field.get("is_root")
    selectable = field.get("selectable")
    aggregated_all = field["name_base"] == ALL_NAME
    complex_field = field.get("is_complex")
    return qual_name and root and selectable and not aggregated_all and not complex_field
def calc_row_size(self, row):
    """Custom asset callback: accumulate serialized row sizes on *self* and,
    when the current page was slow, echo a size report (plus a per-key
    breakdown for oversized rows)."""
    # Lazily create the running total the first time the callback fires.
    if not hasattr(self, "_rows_size"):
        self._rows_size = 0

    kb_this_row = len(json.dumps(row)) / 1024
    self._rows_size += kb_this_row

    axon_id = row["internal_axon_id"]
    rows_done = self.STATE.get("rows_processed_total", 0)
    page_secs = self.STATE.get("fetch_seconds_this_page", 0)

    # Fast page: nothing to report.
    if page_secs < float(MAX_PAGE_TOOK_SECS):
        return

    warn_msg = ROW_TMPL.format(
        row_num=rows_done,
        axid=axon_id,
        row_kb=kb_this_row,
        rows_size=self._rows_size,
        page_took=page_secs,
    )
    self.echo(msg=warn_msg, warning=True, abort=False)

    if kb_this_row < float(MAX_ROW_KB):
        return

    # Oversized row: report every key whose serialized value is itself huge.
    for key, value in row.items():
        kb_value = len(json.dumps(value)) / 1024
        if kb_value >= float(MAX_KEY_KB):
            self.echo(
                msg=KEY_TMPL.format(MAX_KEY_KB=MAX_KEY_KB, k=key, vkb=kb_value),
                error=True,
                abort=False,
            )
if __name__ == "__main__":
    # Load AX_* settings from a .env file into the environment.
    axonapi.constants.load_dotenv()

    # Required connection settings; missing vars raise KeyError immediately.
    AX_URL = os.environ["AX_URL"]
    AX_KEY = os.environ["AX_KEY"]
    AX_SECRET = os.environ["AX_SECRET"]
    # Optional client-cert settings; empty strings are normalized to None.
    AX_CLIENT_CERT_BOTH = os.environ.get("AX_CLIENT_CERT_BOTH", None) or None
    AX_CLIENT_CERT_CERT = os.environ.get("AX_CLIENT_CERT_CERT", None) or None
    AX_CLIENT_CERT_KEY = os.environ.get("AX_CLIENT_CERT_KEY", None) or None

    def jdump(obj, **kwargs):
        """JSON dump utility."""
        print(axonapi.tools.json_reload(obj, **kwargs))

    ctx = axonapi.Connect(
        url=AX_URL,
        key=AX_KEY,
        secret=AX_SECRET,
        certwarn=False,
        cert_client_both=AX_CLIENT_CERT_BOTH,
        cert_client_cert=AX_CLIENT_CERT_CERT,
        cert_client_key=AX_CLIENT_CERT_KEY,
        # log_file=True,
        # log_level_package="info",
        # log_level_console="info",
        # log_level_api="info",
        # log_level_http="info",
        # log_console=True,
    )

    ctx.start()
    devices = ctx.devices

    # Build a query for assets fetched within the last 24 hours.
    now = datetime.utcnow()
    this_time = now.isoformat(sep=" ", timespec="seconds")
    last_time = (now - timedelta(days=1)).isoformat(sep=" ", timespec="seconds")
    filters = [
        f'(specific_data.data.fetch_time < date("{this_time}"))',
        f'(specific_data.data.fetch_time >= date("{last_time}"))',
    ]
    query = " and ".join(filters)

    # Select all simple root-level aggregated fields, plus a few extras.
    agg_fields = devices.fields.get().get("agg")
    get_fields = [field.get("name_qual") for field in agg_fields if _field_wanted(field)]
    get_fields.extend(
        ["specific_data", "specific_data.data.network_interfaces.ips", "agent_versions"]
    )

    start_all = datetime.now()
    # count = devices.count(query=query)
    # print(f"About to fetch {count} assets with page size {PAGE_SIZE}")
    # time.sleep(3)
    assets = devices.get(
        query=query,
        fields=get_fields,
        fields_default=False,
        max_rows=1,
        # page_size=PAGE_SIZE,
        # custom_cbs=[calc_row_size],
        # page_progress=None,
        do_echo=True,
        include_details=True,
        # might be better than specific_data!?
    )
| 30.179688 | 89 | 0.63707 |
651cc7a5fbd6216e8067d8732b96c3f0866753a4 | 5,615 | py | Python | p3dpy/io.py | WillRobotics/p3dpy | a607d0840b0871ee3122df7756336316435d0f18 | [
"MIT"
] | 11 | 2021-05-05T16:02:40.000Z | 2022-02-02T03:35:29.000Z | p3dpy/io.py | WillRobotics/p3dpy | a607d0840b0871ee3122df7756336316435d0f18 | [
"MIT"
] | 7 | 2021-03-16T14:05:06.000Z | 2021-06-15T23:08:56.000Z | p3dpy/io.py | WillRobotics/p3dpy | a607d0840b0871ee3122df7756336316435d0f18 | [
"MIT"
] | 1 | 2021-03-29T07:27:15.000Z | 2021-03-29T07:27:15.000Z | import struct
from typing import IO, Optional, Tuple, Union
import lzf
import numpy as np
import stl
from plyfile import PlyData
from . import pointcloud
# Maps a PCD header TYPE+SIZE pair (e.g. "F4" = float, 4 bytes) to the
# corresponding struct format character used when unpacking binary data.
_field_dict = {
    "I1": "b",
    "I2": "h",
    "I4": "i",
    "U1": "B",
    "U2": "H",
    "U4": "I",
    "F4": "f",
    "F8": "d",
}
# Maps the same TYPE+SIZE pair to the Python constructor used when parsing
# ASCII PCD data tokens.
_type_dict = {
    "I1": int,
    "I2": int,
    "I4": int,
    "U1": int,
    "U2": int,
    "U4": int,
    "F4": float,
    "F8": float,
}
def _parse_pcd_header(lines: list) -> Tuple[dict, str]:
config = {}
data_type = "ascii"
for c in lines:
c = c.split()
if len(c) == 0:
continue
if c[0] == "FIELDS" or c[0] == "SIZE" or c[0] == "TYPE" or c[0] == "COUNT":
config[c[0]] = c[1:]
elif c[0] == "WIDTH" or c[0] == "POINTS":
config[c[0]] = int(c[1])
elif c[0] == "DATA":
data_type = c[1]
break
else:
continue
return config, data_type
def load_pcd(fd: Union[IO, str]) -> pointcloud.PointCloud:
    """Load PCD file format
    Parameters
    ----------
    fd: BinaryIO, TextIO or str
        Input file name or StringIO data type.
    """
    if isinstance(fd, str):
        fd = open(fd, "rb")
    # Read header lines up to and including the DATA line.
    lines = []
    while True:
        ln = fd.readline().strip().decode()
        lines.append(ln)
        if ln.startswith("DATA"):
            break
    config, data_type = _parse_pcd_header(lines)
    # Pick the point-cloud field layout from which channels the header declares.
    has_point = False
    has_color = False
    has_normal = False
    if "x" in config["FIELDS"] and "y" in config["FIELDS"] and "z" in config["FIELDS"]:
        has_point = True
    if "rgb" in config["FIELDS"]:
        has_color = True
    if "normal_x" in config["FIELDS"] and "normal_y" in config["FIELDS"] and "normal_z" in config["FIELDS"]:
        has_normal = True
    field: Optional[pointcloud.FieldBase] = None
    if has_point and has_color and has_normal:
        field = pointcloud.PointXYZRGBNormalField()
    elif has_point and has_color:
        field = pointcloud.PointXYZRGBField()
    elif has_point and has_normal:
        field = pointcloud.PointXYZNormalField()
    elif has_point:
        field = pointcloud.PointXYZField()
    else:
        raise ValueError("Unsupport field type.")
    pc = pointcloud.PointCloud(data=[], field=field)
    # Build the struct format string for one record from TYPE/SIZE/COUNT.
    # NOTE(review): assumes the header includes a COUNT line; a PCD header
    # without COUNT would raise KeyError here -- confirm inputs always have it.
    fmt = ""
    for i in range(len(config["FIELDS"])):
        fmt += config["COUNT"][i] if int(config["COUNT"][i]) > 1 else ""
        fmt += _field_dict[config["TYPE"][i] + config["SIZE"][i]]
    loaddata = []
    if data_type == "ascii":
        # Whitespace-separated tokens; multi-count fields become sub-lists.
        data_lines = fd.read().splitlines()
        for d in data_lines:
            d = d.split()
            cnt = 0
            data = []
            for i in range(len(config["FIELDS"])):
                fcnt = int(config["COUNT"][i])
                tp_s = config["TYPE"][i] + config["SIZE"][i]
                if fcnt == 1:
                    data.append(_type_dict[tp_s](d[cnt]))
                else:
                    data.append([_type_dict[tp_s](d[cnt + j]) for j in range(fcnt)])
                cnt += fcnt
            loaddata.append(data)
    elif data_type == "binary":
        # Fixed-size records unpacked back-to-back.
        bytedata = fd.read()
        size = struct.calcsize(fmt)
        for i in range(len(bytedata) // size):
            loaddata.append(list(struct.unpack(fmt, bytedata[(i * size) : ((i + 1) * size)])))
    elif data_type == "binary_compressed":
        # Payload is LZF-compressed; an 8-byte prefix carries the
        # compressed and uncompressed sizes.
        compressed_size, uncompressed_size = struct.unpack("II", fd.read(8))
        compressed_data = fd.read(compressed_size)
        buf = lzf.decompress(compressed_data, uncompressed_size)
        size = struct.calcsize(fmt)
        for i in range(len(buf) // size):
            loaddata.append(list(struct.unpack(fmt, buf[(i * size) : ((i + 1) * size)])))
    else:
        raise ValueError(f"Unsupported data type {data_type}.")
    # Scatter parsed values into the field layout; packed RGB ints are split
    # into normalized [0, 1] channel floats.
    for data in loaddata:
        pc.data.append(np.zeros(pc.field.size()))
        for f, d in zip(config["FIELDS"], data):
            if f == "x":
                pc.data[-1][pc.field.X] = d
            elif f == "y":
                pc.data[-1][pc.field.Y] = d
            elif f == "z":
                pc.data[-1][pc.field.Z] = d
            elif f == "rgb":
                d = int(d)
                pc.data[-1][pc.field.R] = float((d >> 16) & 0x000FF) / 255.0
                pc.data[-1][pc.field.G] = float((d >> 8) & 0x000FF) / 255.0
                pc.data[-1][pc.field.B] = float((d) & 0x000FF) / 255.0
            elif f == "normal_x":
                pc.data[-1][pc.field.NX] = d
            elif f == "normal_y":
                pc.data[-1][pc.field.NY] = d
            elif f == "normal_z":
                pc.data[-1][pc.field.NZ] = d
    pc.finalize()
    return pc
def load_stl(fd: Union[IO, str], scale: float = 1.0) -> pointcloud.PointCloud:
    """Load an STL mesh and return its triangle vertices as an XYZ point cloud.

    Parameters
    ----------
    fd: BinaryIO, TextIO or str
        Input file name or StringIO data type.
    scale: float
        Uniform factor applied to every vertex coordinate.
    """
    handle = open(fd, "rb") if isinstance(fd, str) else fd
    mesh = stl.mesh.Mesh.from_file("", fh=handle)
    vertices = mesh.points.reshape((-1, 3)) * scale
    return pointcloud.PointCloud(data=vertices, field=pointcloud.PointXYZField())
def load_ply(fd: Union[IO, str]) -> pointcloud.PointCloud:
    """Load a PLY file and return its vertex positions as an XYZ point cloud.

    Parameters
    ----------
    fd: BinaryIO, TextIO or str
        Input file name or StringIO data type.
    """
    handle = open(fd, "rb") if isinstance(fd, str) else fd
    plydata = PlyData.read(handle)
    vertices = plydata["vertex"][["x", "y", "z"]]
    coords = vertices.view("<f4").reshape(vertices.shape + (-1,))
    return pointcloud.PointCloud(data=coords, field=pointcloud.PointXYZField())
| 30.68306 | 109 | 0.530543 |
def85ffae829c5503d6b7a6563822bed234464b1 | 1,061 | py | Python | surveys/migrations/0002_auto_20200602_1145.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | 2 | 2022-01-24T23:30:18.000Z | 2022-01-26T00:21:22.000Z | surveys/migrations/0002_auto_20200602_1145.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | surveys/migrations/0002_auto_20200602_1145.py | Revibe-Music/core-services | 6b11cf16ad2c35d948f3a5c0e7a161e5b7cfc1b2 | [
"MIT"
] | null | null | null | # Generated by Django 3.0 on 2020-06-02 16:45
from django.db import migrations, models
import surveys.utils.models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds created/changed audit timestamps
    # and a staff-notes text field to ArtistOfTheWeek, and switches the
    # picture field to a custom upload-path builder.
    dependencies = [
        ('surveys', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='artistoftheweek',
            name='date_created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AddField(
            model_name='artistoftheweek',
            name='last_changed',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name='artistoftheweek',
            name='staff_notes',
            field=models.TextField(blank=True, null=True, verbose_name='staff notes'),
        ),
        migrations.AlterField(
            model_name='artistoftheweek',
            name='picture',
            field=models.FileField(blank=True, null=True, upload_to=surveys.utils.models.custom_file_upload, verbose_name='picture'),
        ),
    ]
cbf348898e47ec57b95d89d6adff9818bf9e8507 | 8,482 | py | Python | app.py | alexoh554/Trig-Solver | ecebb68242c8141f3f1cd78244659c306cd6dd13 | [
"MIT"
] | null | null | null | app.py | alexoh554/Trig-Solver | ecebb68242c8141f3f1cd78244659c306cd6dd13 | [
"MIT"
] | null | null | null | app.py | alexoh554/Trig-Solver | ecebb68242c8141f3f1cd78244659c306cd6dd13 | [
"MIT"
] | null | null | null | from flask import Flask, redirect, render_template, request, url_for, session
from flask_session import Session
import requests
from tempfile import mkdtemp
from helpers import checkInput, checkAngles, findThirdAngle, countList, sinePossible, sineLawAngle, sineLawSide, ambiguousCalculate, ambiguousOrder, sineLawAmb, cosineAngle, cosineSide
app = Flask(__name__)
# Reload templates automatically when they change (development convenience).
app.config["TEMPLATES_AUTO_RELOAD"] = True
@app.after_request
def after_request(response):
    """Stamp no-cache headers onto every outgoing response."""
    no_cache = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Expires": 0,
        "Pragma": "no-cache",
    }
    for header, value in no_cache.items():
        response.headers[header] = value
    return response
# Store server-side sessions as files in a fresh temporary directory,
# non-permanent (cleared when the browser session ends).
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
@app.route("/", methods=["GET", "POST"])
def trig():
    """Triangle-solver form.

    GET renders the input form. POST validates the submitted angles/sides,
    solves the triangle via sine or cosine law (including the ambiguous
    case), stores the rounded results in the session, and redirects to
    /solution, or to /error with a message on invalid input.

    Fix applied: `== None` / `!= None` comparisons replaced with the
    idiomatic `is None` / `is not None`; `ambiguousCase == True` replaced
    with a plain truthiness test. Comparisons like `checkAngles(...) == True`
    are intentionally left as equality because checkAngles can also return
    the string "Error".
    """
    if request.method == "POST":
        # Collect raw form values for the three angles (A, B, C) and
        # three sides (a, b, c); empty strings mean "unknown".
        tmpAngles = []
        tmpAngles.append(request.form.get("A"))
        tmpAngles.append(request.form.get("B"))
        tmpAngles.append(request.form.get("C"))
        tmpSides = []
        tmpSides.append(request.form.get("a"))
        tmpSides.append(request.form.get("b"))
        tmpSides.append(request.form.get("c"))
        ambOld = []  # Angles that were given directly by the user
        # Cast values to float. If error occurs redirect to error page
        angles = []
        for angle in tmpAngles:
            if angle == "":
                angles.append(None)
            else:
                try:
                    angles.append(float(angle))
                    ambOld.append(float(angle))
                except ValueError:
                    session['error'] = "Invalid input"
                    return redirect("/error")
        sides = []
        for side in tmpSides:
            if side == "":
                sides.append(None)
            else:
                try:
                    sides.append(float(side))
                except ValueError:
                    session['error'] = "Invalid input"
                    return redirect("/error")
        # Check for correct input (range/validity checks live in helpers.py)
        correctSides = checkInput(sides)
        correctAngles = checkInput(angles)
        if correctSides == False or correctAngles == False:
            session['error'] = "Invalid input"
            return redirect("/error")
        # If two angles are known, derive the third (angles sum to 180).
        if checkAngles(angles) == True:
            thirdAngle = findThirdAngle(angles)
            for i in range(len(angles)):
                if angles[i] is None:
                    angles[i] = thirdAngle
        if checkAngles(angles) == "Error":
            session['error'] = "Triangles must have a total angle of 180 degrees"
            return redirect("/error")
        knownAngles = countList(angles)
        knownSides = countList(sides)
        if knownSides < 1:
            session['error'] = "You must provide at least 1 side to be able to solve"
            return redirect("/error")
        ambiguousCase = False  # True if ambiguous case is possible
        ambNew = []  # Angles newly derived via sine law
        # First check if sine law is possible
        sineValue = sinePossible(angles, sides)
        if sineValue is not None:
            # Iterate sine law until every angle and side is known.
            while(True):
                for i in range(3):
                    if angles[i] is None:
                        if sides[i] is None:
                            continue
                        else:
                            angles[i] = sineLawAngle(angles[i], sides[i], sineValue)
                            if angles[i] is None:
                                session['error'] = "No solution"
                                return redirect("/error")
                            ambNew.append(angles[i])
                    if sides[i] is None:
                        if angles[i] is None:
                            continue
                        else:
                            sides[i] = sineLawSide(sides[i], angles[i], sineValue)
                if checkAngles(angles) == True:
                    thirdAngle = findThirdAngle(angles)
                    for i in range(len(angles)):
                        if angles[i] is None:
                            angles[i] = thirdAngle
                if None in sides:
                    continue
                else:
                    break
            # Ambiguous case only arises with exactly one given and one
            # derived angle (SSA configuration).
            if len(ambOld) == 1 and len(ambNew) == 1:
                ambiguousAngles = ambiguousCalculate(ambOld, ambNew)
                if ambiguousAngles is not None:
                    ambiguousCase = True
                    findAmb = ambiguousAngles[2]
                    ambiguousAngles = ambiguousOrder(ambiguousAngles, angles)
                    # Use sine law to find ambiguous side
                    ambiguousSides = sides.copy()
                    ambIndex = 0
                    for i in range(3):
                        if ambiguousAngles[i] == findAmb:
                            ambIndex = i
                    ambiguousSides[ambIndex] = sineLawAmb(ambiguousAngles, ambiguousSides, ambIndex)
                    for i in range(3):
                        ambiguousSides[i] = round(ambiguousSides[i], 2)
        else:
            while(True):  # Calculate with cosine law
                if knownSides == 3:  # If all sides are known, calculate all angles
                    for i in range(3):
                        angles[i] = cosineAngle(sides, i)
                        if angles[i] is None:
                            session['error'] = "No solution"
                            return redirect("/error")
                else:  # Else find all sides and repeat loop until all values known
                    for j in range(3):
                        if sides[j] is None:
                            if angles[j] is not None:
                                sides[j] = cosineSide(sides, angles[j], j)
                                knownSides = 3
                if None in sides or None in angles:
                    continue
                else:
                    break
        # Round values to 2 decimal places and store in session
        for i in range(3):
            angles[i] = round(angles[i], 2)
        session['angles'] = angles
        for j in range(3):
            sides[j] = round(sides[j], 2)
        session['sides'] = sides
        if ambiguousCase:
            session['ambTrue'] = True
            for k in range(3):
                ambiguousAngles[k] = round(ambiguousAngles[k], 2)
            session['ambAngles'] = ambiguousAngles
            session['ambSides'] = ambiguousSides
        else:
            session['ambTrue'] = False
            session['ambAngles'] = None
        session['inputted'] = True
        return redirect('/solution')
    else:
        return render_template("unsolved.html")
@app.route("/solution", methods=["GET", "POST"])
def solution():
    """Show the solved triangle; POST clears the session and restarts."""
    if request.method == "POST":
        # Wipe stored results and send the user back to the input form.
        session.clear()
        return redirect("/")
    # GET: only show results if an answer was actually submitted.
    if session.get('inputted') != True:
        return redirect("/")
    if session['ambTrue'] == True:
        # Ambiguous case: render both possible triangles.
        return render_template("solved.html", ambiguousCase=session['ambTrue'], angles=session['angles'], sides=session['sides'], ambAngles=session['ambAngles'], ambSides=session['ambSides'])
    # Single solution.
    return render_template("solved.html", angles=session['angles'], sides=session['sides'])
@app.route("/error", methods=["GET", "POST"])
def error():
    """Display the stored error message; POST resets and returns home."""
    if request.method == "POST":
        session.clear()
        return redirect("/")
    return render_template("error.html", error_message=session['error'])
if __name__ == '__main__':
    # NOTE(review): the secret key is hard-coded in source; move it to an
    # environment variable or config file before deploying publicly.
    app.secret_key = '\xe6\x0c\xa7\x0f\x8b\xf4u\xcbd\xb1\x17\xe1\xc54O!R\n\x01B\xb5S\x11X'
    app.run()
| 41.174757 | 196 | 0.495048 |
b56070e366a056d400e509b912cbbade268151b1 | 36,516 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_vpn_gateways_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_vpn_gateways_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_vpn_gateways_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations(object):
"""VpnGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the shared pipeline client, serializers and client configuration."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VpnGateway"
        """Retrieves the details of a virtual wan vpn gateway.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VpnGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_07_01.models.VpnGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        # Base status->exception mapping; callers may extend it via error_map.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Any status other than 200 is mapped to a specific exception where
        # possible, then raised as HttpResponseError with the parsed Error body.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        vpn_gateway_parameters, # type: "_models.VpnGateway"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VpnGateway"
        # Initial PUT for the create-or-update long-running operation; the
        # public entry point is begin_create_or_update.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry a VpnGateway body.
        if response.status_code == 200:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        vpn_gateway_parameters, # type: "_models.VpnGateway"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.VpnGateway"]
        """Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
         gateway.
        :type vpn_gateway_parameters: ~azure.mgmt.network.v2019_07_01.models.VpnGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                vpn_gateway_parameters=vpn_gateway_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into a VpnGateway model.
            deserialized = self._deserialize('VpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def _update_tags_initial(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        vpn_gateway_parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.VpnGateway"
        # Initial PATCH for the update-tags long-running operation; the
        # public entry point is begin_update_tags.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Both accepted statuses carry the updated VpnGateway body.
        if response.status_code == 200:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def begin_update_tags(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        vpn_gateway_parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.VpnGateway"]
        """Updates virtual wan vpn gateway tags.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
        :type vpn_gateway_parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                vpn_gateway_parameters=vpn_gateway_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into a VpnGateway model.
            deserialized = self._deserialize('VpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def _delete_initial(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        # Initial DELETE for the delete long-running operation; the public
        # entry point is begin_delete. Returns None on success.
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all valid delete outcomes; anything else raises.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        gateway_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a virtual wan vpn gateway.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete yields no body; only invoke the custom callback if given.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'} # type: ignore
    def _reset_initial(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.VpnGateway"]
        """Issue the raw ``POST .../reset`` call backing :meth:`begin_reset`.

        Returns the deserialized ``VpnGateway`` on 200, or ``None`` when the
        service answers 202 (operation accepted, still running).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VpnGateway"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied mappings override the defaults above.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        # Construct URL
        url = self._reset_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        # A 202 carries no body yet; only a 200 holds the gateway resource.
        if response.status_code == 200:
            deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'}  # type: ignore
    def begin_reset(
        self,
        resource_group_name,  # type: str
        gateway_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.VpnGateway"]
        """Resets the primary of the vpn gateway in the specified resource group.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VpnGateway or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.VpnGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial POST .../reset call.  The
            # identity ``cls`` lambda preserves the raw pipeline response for
            # the poller.
            raw_result = self._reset_initial(
                resource_group_name=resource_group_name,
                gateway_name=gateway_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # The final response carries the updated gateway resource.
            deserialized = self._deserialize('VpnGateway', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # 'final-state-via: location' => poll the Location header until done.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVpnGatewaysResult"]
        """Lists all the VpnGateways in a resource group.

        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.ListVpnGatewaysResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVpnGatewaysResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the templated URL for the first page, or
            # the service-provided next_link verbatim for later pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its full query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Turn one page into the (next_link, item iterator) pair ItemPaged
            # expects.
            deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, raising a typed error on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'}  # type: ignore
    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVpnGatewaysResult"]
        """Lists all the VpnGateways in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.ListVpnGatewaysResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVpnGatewaysResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the templated URL for the first page, or
            # the service-provided next_link verbatim for later pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its full query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Turn one page into the (next_link, item iterator) pair ItemPaged
            # expects.
            deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, raising a typed error on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'}  # type: ignore
| 49.279352 | 188 | 0.660204 |
8cf426ca96b6d77bb61d239d21eaee2d5d2e0b29 | 2,781 | py | Python | instrumentation/opentelemetry-instrumentation-logging/setup.py | willarmiros/opentelemetry-python-contrib | 0d34ef26b75f9a3bc275bf828b5a806d39ba1a40 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-07-18T07:59:09.000Z | 2021-07-18T07:59:09.000Z | instrumentation/opentelemetry-instrumentation-logging/setup.py | willarmiros/opentelemetry-python-contrib | 0d34ef26b75f9a3bc275bf828b5a806d39ba1a40 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2020-12-30T17:37:13.000Z | 2021-06-06T01:02:30.000Z | instrumentation/opentelemetry-instrumentation-logging/setup.py | willarmiros/opentelemetry-python-contrib | 0d34ef26b75f9a3bc275bf828b5a806d39ba1a40 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-11-20T06:31:17.000Z | 2021-11-20T06:31:17.000Z | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM templates/instrumentation_setup.py.txt.
# RUN `python scripts/generate_setup.py` TO REGENERATE.
import distutils.cmd
import json
import os
from configparser import ConfigParser
import setuptools
# Load declarative metadata from setup.cfg so extras declared there survive
# the explicit extras_require argument passed to setuptools.setup() below.
config = ConfigParser()
config.read("setup.cfg")
# We provide the extras_require parameter to setuptools.setup later, which
# overwrites the extras_require section from setup.cfg. To support the
# extras_require section in setup.cfg, we load it here and merge it with the
# extras_require param.
extras_require = {}
if "options.extras_require" in config:
    for key, value in config["options.extras_require"].items():
        # setup.cfg lists one requirement per line; drop blank lines.
        extras_require[key] = [v for v in value.split("\n") if v.strip()]
BASE_DIR = os.path.dirname(__file__)
PACKAGE_INFO = {}
VERSION_FILENAME = os.path.join(
    BASE_DIR,
    "src",
    "opentelemetry",
    "instrumentation",
    "logging",
    "version.py",
)
# Execute version.py / package.py to collect __version__ and _instruments
# without importing the (possibly not-yet-installed) package itself.
with open(VERSION_FILENAME) as f:
    exec(f.read(), PACKAGE_INFO)
PACKAGE_FILENAME = os.path.join(
    BASE_DIR,
    "src",
    "opentelemetry",
    "instrumentation",
    "logging",
    "package.py",
)
with open(PACKAGE_FILENAME) as f:
    exec(f.read(), PACKAGE_INFO)
# Mark any instruments/runtime dependencies as test dependencies as well.
extras_require["instruments"] = PACKAGE_INFO["_instruments"]
test_deps = extras_require.get("test", [])
for dep in extras_require["instruments"]:
    test_deps.append(dep)
extras_require["test"] = test_deps
class JSONMetadataCommand(distutils.cmd.Command):
    """``python setup.py meta``: print this package's metadata as JSON.

    Used by OpenTelemetry dev scripts to auto-generate code in other places.
    ``run`` reads the module-level ``config`` (setup.cfg) and ``PACKAGE_INFO``
    (version.py / package.py) computed above.
    """

    # Fix: the original had a trailing comma after the first string, which
    # made ``description`` a *tuple* of two strings instead of the single
    # string distutils expects (e.g. for ``--help-commands`` output).
    # Adjacent string literals concatenate implicitly.
    description = (
        "print out package metadata as JSON. This is used by OpenTelemetry dev scripts to "
        "auto-generate code in other places"
    )
    user_options = []

    def initialize_options(self):
        """No user options to initialize."""

    def finalize_options(self):
        """No user options to finalize."""

    def run(self):
        # Emit name/version/instruments on stdout for consumption by scripts.
        metadata = {
            "name": config["metadata"]["name"],
            "version": PACKAGE_INFO["__version__"],
            "instruments": PACKAGE_INFO["_instruments"],
        }
        print(json.dumps(metadata))
# Static metadata (name, packages, classifiers, ...) lives in setup.cfg; only
# the computed values and the custom `meta` command are supplied here.
setuptools.setup(
    cmdclass={"meta": JSONMetadataCommand},
    version=PACKAGE_INFO["__version__"],
    extras_require=extras_require,
)
| 27.81 | 92 | 0.709457 |
0f06d2378f649661aaa521cdedf1d8ec9eaedbaa | 25,672 | py | Python | tests/urls_tests.py | fga-eps-mds/2021.2-SysArq-Archives | fb2565dd827f24921a78e832a847e62306ff6974 | [
"MIT"
] | 1 | 2022-02-15T04:11:47.000Z | 2022-02-15T04:11:47.000Z | tests/urls_tests.py | fga-eps-mds/2021.2-SysArq-Archives | fb2565dd827f24921a78e832a847e62306ff6974 | [
"MIT"
] | 8 | 2022-03-15T18:48:39.000Z | 2022-03-29T14:30:39.000Z | tests/urls_tests.py | fga-eps-mds/2021.2-SysArq-Archives | fb2565dd827f24921a78e832a847e62306ff6974 | [
"MIT"
] | 3 | 2022-03-17T17:32:02.000Z | 2022-03-18T01:17:34.000Z | import pytest
from rest_framework.test import APIClient
from django.test import override_settings
from django.conf import settings
# Middleware stack for these tests: the project's token-validation middleware
# is stripped so endpoints can be exercised without an auth token.
TESTS_MIDDLEWARE = [mc for mc in settings.MIDDLEWARE
                    if mc != 'archives_app.middleware.IsTokenValidMiddleware']
@pytest.mark.django_db(transaction=False)
class TestBoxAbreviationsEndpoints:
    """CRUD smoke tests for the /box-abbreviation/ endpoints.

    NOTE(review): ``APIClient.post``/``put`` have no ``header`` keyword; the
    value lands in the WSGI environ and is effectively ignored, so requests go
    out with the client's default encoding.  Presumably ``format='json'`` was
    intended -- confirm before changing, since the endpoints currently accept
    the default.
    NOTE(review): the hard-coded pks in the retrieve/update URLs ('2', '3')
    assume a specific id sequence across this class's tests -- verify against
    fixtures before reordering tests.
    """
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_create(self):
        # POSTing a minimal payload must create a box abbreviation (201).
        data = {
            "number": 8,
            "abbreviation": "",
            "name": "",
            "year": 2020
        }
        api_client = APIClient()
        response = api_client.post(
            '/box-abbreviation/', data=data,
            header={"Content-Type": "application/json"})
        assert response.status_code == 201
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_list(self):
        # Listing must succeed even with no prior data.
        api_client = APIClient()
        response = api_client.get('/box-abbreviation/')
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_retrieve(self):
        # Create a record, then fetch it by its expected pk.
        data2 = {
            "number": 8,
            "abbreviation": "",
            "name": "",
            "year": 2020
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/box-abbreviation/', data=data2,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.get('/box-abbreviation/2/')
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_update(self):
        # Create a record, then replace it via PUT on its expected pk.
        data3 = {
            "number": 8,
            "abbreviation": "",
            "name": "",
            "year": 2020
        }
        data4 = {
            "number": 9,
            "abbreviation": "",
            "name": "",
            "year": 2020
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/box-abbreviation/', data=data3,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.put(
            '/box-abbreviation/3/', data=data4,
            header={"Content-Type": "application/json"})
        assert response.status_code == 200
@pytest.mark.django_db(transaction=False)
class TestDocumentNameEndpoints:
    """CRUD smoke tests for the /document-name/ endpoints.

    NOTE(review): ``APIClient`` has no ``header`` keyword; the argument is
    effectively ignored (see TestBoxAbreviationsEndpoints).  The hard-coded
    pks ('2', '3', '4') assume a specific id sequence -- confirm before
    reordering tests.
    """
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_create(self):
        # POSTing a minimal payload must create a document name (201).
        data = {
            "subject_name": "",
            "temporality": "",
            "isPerma": "",
        }
        api_client = APIClient()
        response = api_client.post(
            '/document-name/', data=data,
            header={"Content-Type": "application/json"})
        assert response.status_code == 201
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_list(self):
        # Listing must succeed even with no prior data.
        api_client = APIClient()
        response = api_client.get('/document-name/')
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_retrieve(self):
        # Create a record, then fetch it by its expected pk.
        data2 = {
            "subject_name": "1",
            "temporality": "",
            "isPerma": "",
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/document-name/', data=data2,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.get('/document-name/2/')
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_update(self):
        # Create a record, then replace it via PUT on its expected pk.
        data3 = {
            "subject_name": "2",
            "temporality": "",
            "isPerma": "true"
        }
        data4 = {
            "subject_name": "3",
            "temporality": "",
            "isPerma": "false",
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/document-name/', data=data3,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.put(
            '/document-name/3/', data=data4,
            header={"Content-Type": "application/json"})
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_destroy(self):
        # Create a record, then delete it by its expected pk (204).
        data5 = {
            "subject_name": "4",
            "temporality": "",
            "isPerma": "false"
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/document-name/', data=data5,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.delete('/document-name/4/')
        assert response.status_code == 204
@pytest.mark.django_db(transaction=False)
class TestUnityEndpoints:
    """CRUD smoke tests for the /unity/ endpoints.

    NOTE(review): ``APIClient`` has no ``header`` keyword; the argument is
    effectively ignored (see TestBoxAbreviationsEndpoints).  The hard-coded
    pks ('2', '3', '4') assume a specific id sequence -- confirm before
    reordering tests.
    """
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_create(self):
        # POSTing a minimal payload must create a unity (201).
        data = {
            "name_of_unity": "1",
            "unity_abbreviation": "",
            "administrative_bond": "",
            "bond_abbreviation": "",
            "type_of_unity": "",
            "municipality": "",
            "telephone_number": "",
            "notes": ""
        }
        api_client = APIClient()
        response = api_client.post(
            '/unity/', data=data,
            header={"Content-Type": "application/json"})
        assert response.status_code == 201
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_list(self):
        # Listing must succeed even with no prior data.
        api_client = APIClient()
        response = api_client.get('/unity/')
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_retrieve(self):
        # Create a record, then fetch it by its expected pk.
        data2 = {
            "name_of_unity": "2",
            "unity_abbreviation": "",
            "administrative_bond": "",
            "bond_abbreviation": "",
            "type_of_unity": "",
            "municipality": "",
            "telephone_number": "",
            "notes": ""
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/unity/', data=data2,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.get('/unity/2/')
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_update(self):
        # Create a record, then replace it via PUT on its expected pk.
        data3 = {
            "name_of_unity": "3",
            "unity_abbreviation": "",
            "administrative_bond": "",
            "bond_abbreviation": "",
            "type_of_unity": "",
            "municipality": "",
            "telephone_number": "",
            "notes": ""
        }
        data4 = {
            "name_of_unity": "4",
            "unity_abbreviation": "",
            "administrative_bond": "",
            "bond_abbreviation": "",
            "type_of_unity": "",
            "municipality": "",
            "telephone_number": "",
            "notes": ""
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/unity/', data=data3,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.put(
            '/unity/3/', data=data4,
            header={"Content-Type": "application/json"})
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_destroy(self):
        # Create a record, then delete it by its expected pk (204).
        data5 = {
            "name_of_unity": "5",
            "unity_abbreviation": "",
            "administrative_bond": "",
            "bond_abbreviation": "",
            "type_of_unity": "",
            "municipality": "",
            "telephone_number": "",
            "notes": ""
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/unity/', data=data5,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.delete('/unity/4/')
        assert response.status_code == 204
@pytest.mark.django_db(transaction=False)
class TestshelfEndpoints:
    """CRUD smoke tests for the /shelf/ endpoints.

    NOTE(review): ``APIClient`` has no ``header`` keyword; the argument is
    effectively ignored (see TestBoxAbreviationsEndpoints).  The hard-coded
    pks ('2', '3', '4') assume a specific id sequence -- confirm before
    reordering tests.
    """
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_create(self):
        # POSTing a minimal payload must create a shelf (201).
        data = {
            "number": 0,
        }
        api_client = APIClient()
        response = api_client.post(
            '/shelf/', data=data,
            header={"Content-Type": "application/json"})
        assert response.status_code == 201
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_list(self):
        # Listing must succeed even with no prior data.
        api_client = APIClient()
        response = api_client.get('/shelf/')
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_retrieve(self):
        # Create a record, then fetch it by its expected pk.
        data = {
            "number": 0,
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/shelf/', data=data,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.get('/shelf/2/')
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_update(self):
        # Create a record, then replace it via PUT on its expected pk.
        data = {
            "number": 0,
        }
        data_2 = {
            "number": 0,
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/shelf/', data=data,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.put(
            '/shelf/3/', data=data_2,
            header={"Content-Type": "application/json"})
        assert response.status_code == 200
    @override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
    def test_destroy(self):
        # Create a record, then delete it by its expected pk (204).
        data = {
            "number": 0,
        }
        api_client = APIClient()
        intermediary = api_client.post(
            '/shelf/', data=data,
            header={"Content-Type": "application/json"})
        assert intermediary.status_code == 201
        response = api_client.delete('/shelf/4/')
        assert response.status_code == 204
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_box_archiving_relation_get():
    """Listing box archivings must succeed even when none exist yet."""
    client = APIClient()
    assert client.get('/box-archiving/').status_code == 200
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def box_archiving():
    """Build a valid box-archiving payload, creating its prerequisites.

    Helper, not a test: POSTs the unity, document name, shelf, rack and
    file-location records a box archiving depends on, then returns a payload
    dict wired to the created ids.  Callers must override ``process_number``
    to keep it unique, since the endpoint rejects duplicates with 400.
    """
    api_client = APIClient()
    data_sender = {
        "telephone_number": "",
        "note": "",
        "unity_name": "",
        "unity_abbreviation": "",
        "administrative_bond": "",
        "bond_abbreviation": "",
        "type_of_unity": "",
        "municipality": ""
    }
    response_sender = api_client.post(
        '/unity/', data=data_sender,
        header={"Content-Type": "application/json"})
    assert response_sender.status_code == 201
    data_name = {
        "document_name": "teste",
        "temporality": "1"
    }
    # NOTE(review): "Content-name" looks like a typo for "Content-Type";
    # harmless in practice because APIClient ignores this keyword entirely.
    response_name = api_client.post(
        '/document-name/', data=data_name,
        header={"Content-name": "application/json"})
    assert response_name.status_code == 201
    data_shelf = {
        "number": 555,
    }
    response_shelf = api_client.post(
        '/shelf/', data=data_shelf,
        header={"Content-Type": "application/json"})
    assert response_shelf.status_code == 201
    data_rack = {
        "number": 555,
    }
    response_rack = api_client.post(
        '/rack/', data=data_rack,
        header={"Content-Type": "application/json"})
    assert response_rack.status_code == 201
    data_file_location = {
        "file": "local",
    }
    response_file_location = api_client.post(
        '/file-location/', data=data_file_location,
        header={"Content-Type": "application/json"})
    assert response_file_location.status_code == 201
    # Payload referencing every record created above by its returned id.
    data = {
        "origin_boxes": [
            {
                "number": "1",
                "year": 2020,
                "rack_id": response_rack.data['id'],
                "shelf_id": response_shelf.data['id'],
                "file_location_id": response_file_location.data['id'],
                "box_notes": "",
                "subjects_list": [
                    {
                        "document_name_id": response_name.data['id'],
                        "year": ["2020"],
                        "month": ["11"],
                    }
                ]
            },
        ],
        "process_number": "1",
        "sender_unity": response_sender.data['id'],
        "notes": "1",
        "received_date": "2020-11-11",
        "document_url": "https://www.t.com/",
        "cover_sheet": "1",
        "filer_user": "1",
        "is_filed": "",
        "is_eliminated": "",
        "send_date": "2020-11-11",
        "box_process_number": "1",
        "unity_id": response_sender.data['id']
    }
    return data
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_box_archiving_relation_get_pk():
    """Create a box archiving, retrieve it by pk, and reject a duplicate."""
    api_client = APIClient()
    data = box_archiving()
    data['process_number'] = "2"
    response_box_archiving = api_client.post(
        '/box-archiving/', data=data,
        format='json')
    assert response_box_archiving.status_code == 201
    response_box_archiving_get = api_client.get(
        '/box-archiving/')
    assert response_box_archiving_get.status_code == 200
    # Retrieve by the id reported in the list response (no trailing slash
    # on this route).
    response = api_client.get('/box-archiving/{}'.format(
        response_box_archiving_get.data[0]['id']))
    assert response.status_code == 200
    # Re-posting the same process_number must be rejected as a duplicate.
    response_box_archiving = api_client.post(
        '/box-archiving/', data=data,
        format='json')
    assert response_box_archiving.status_code == 400
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_box_archiving_relation_get_pk_except():
    """Fetching a non-existent box archiving id must return 404."""
    missing_pk = 4000
    client = APIClient()
    response = client.get('/box-archiving/{}'.format(missing_pk))
    assert response.status_code == 404
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_box_archiving_relation_post():
    """A valid JSON payload posted to /box-archiving/ must be created."""
    payload = box_archiving()
    payload['process_number'] = "3"
    client = APIClient()
    created = client.post('/box-archiving/', data=payload, format='json')
    assert created.status_code == 201
# Report endpoint tests
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_report_get():
    """The general report endpoint must answer 200 for a filtered query."""
    url = ('/report/?document_name_id=1&initial_date=2022-10-04'
           '&final_date=2022-11-04&only_permanents=true')
    assert APIClient().get(url).status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_status_report_get():
    """The status report must answer 200 for every supported status."""
    client = APIClient()
    # Same three requests, issued in the same order as before.
    for status in ("desarquivado", "eliminado", "arquivado"):
        response = client.get('/status-report/?status={}'.format(status))
        assert response.status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_administrative_process_report_get():
    """Administrative-process report filtered by unity/dates must answer 200."""
    url = ('/administrative-process-report/'
           '?sender_unity=1&initial_date=2000-05-10&final_date=2022-05-04')
    assert APIClient().get(url).status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_frequency_sheet_report_get():
    """Frequency-sheet report filtered by CPF must answer 200."""
    client = APIClient()
    report = client.get('/frequency-sheet-report/?cpf=12345678910')
    assert report.status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_frequency_relation_report_get():
    """Frequency-relation report filtered by unity/period must answer 200."""
    url = '/frequency-relation-report/?sender_unity=1&reference_period=05-2022'
    assert APIClient().get(url).status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_box_archiving_report_get():
    """Box-archiving report filtered by sender unity must answer 200."""
    report = APIClient().get('/box-archiving-report/?sender_unity=1')
    assert report.status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_delete_box_archiving_relation():
    """Create a box archiving, then delete it by the id the API reports."""
    api_client = APIClient()
    data = box_archiving()
    data['process_number'] = "3"
    response_box_archiving = api_client.post(
        '/box-archiving/', data=data,
        format='json')
    assert response_box_archiving.status_code == 201
    response_box_archiving_get = api_client.get(
        '/box-archiving/')
    assert response_box_archiving_get.status_code == 200
    # Delete the first listed record rather than assuming a hard-coded pk.
    response = api_client.delete('/box-archiving/{}'.format(
        response_box_archiving_get.data[0]['id']))
    assert response.status_code == 204
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_delete_box_archiving_relation_except():
    """Deleting a non-existent box archiving id must return 404."""
    client = APIClient()
    assert client.delete('/box-archiving/10000000000').status_code == 404
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_search():
    """/search/ must accept a JSON-encoded filter in the querystring."""
    api_client = APIClient()
    data = box_archiving()
    data['process_number'] = "4"
    response_box_archiving = api_client.post(
        '/box-archiving/', data=data,
        format='json')
    assert response_box_archiving.status_code == 201
    # The filter value is a JSON object serialized into the querystring.
    response = api_client.get('/search/?filter={"process_number":"1"}')
    assert response.status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_search_without_specific_fields_from_box_archiving():
    """/search/ must resolve indirect box-archiving fields.

    For each related field (shelf number, rack number, abbreviation, sender
    unity name) a prerequisite record is created, wired into a fresh box
    archiving, and the search endpoint is queried by the human-readable value.
    """
    api_client = APIClient()
    data = box_archiving()
    # Searchable shelf number.
    data_shelf = {
        "number": 123,
    }
    response_shelf = api_client.post(
        '/shelf/', data=data_shelf,
        header={"Content-Type": "application/json"})
    assert response_shelf.status_code == 201
    data['shelf_id'] = response_shelf.data['id']
    data["process_number"] = "5"
    response_box_archiving = api_client.post(
        '/box-archiving/', data=data,
        format='json')
    assert response_box_archiving.status_code == 201
    response = api_client.get('/search/?filter={"shelf_id":123}')
    assert response.status_code == 200
    # Searchable rack number.
    data_rack = {
        "number": 123,
    }
    response_rack = api_client.post(
        '/rack/', data=data_rack,
        header={"Content-Type": "application/json"})
    assert response_rack.status_code == 201
    data['rack_id'] = response_rack.data['id']
    data["process_number"] = "6"
    response_box_archiving = api_client.post(
        '/box-archiving/', data=data,
        format='json')
    assert response_box_archiving.status_code == 201
    response = api_client.get('/search/?filter={"rack_id":123}')
    assert response.status_code == 200
    # Searchable box abbreviation.
    data_abbreviation = {
        "number": "123",
        "abbreviation": "a",
        "name": "a",
        "year": 2020
    }
    data["process_number"] = "7"
    response_abbreviation = api_client.post(
        '/box-abbreviation/', data=data_abbreviation,
        header={"Content-Type": "application/json"})
    # Fix: the original asserted response_rack.status_code here, silently
    # skipping verification of the abbreviation POST it just made.
    assert response_abbreviation.status_code == 201
    data['abbreviation_id'] = response_abbreviation.data['id']
    data["process_number"] = "8"
    response_box_archiving = api_client.post(
        '/box-archiving/', data=data,
        format='json')
    assert response_box_archiving.status_code == 201
    response = api_client.get('/search/?filter={"abbreviation_id":"a"}')
    assert response.status_code == 200
    # Searchable sender unity name.
    data_unity = {
        "unity_name": "unity1",
        "unity_abbreviation": "u1",
        "administrative_bond": "a",
        "bond_abbreviation": "a",
        "municipality": "test",
        "telephone_number": "a",
        "notes": "1"
    }
    response_unity = api_client.post(
        '/unity/', data=data_unity,
        header={"Content-Type": "application/json"})
    assert response_unity.status_code == 201
    data['sender_unity'] = response_unity.data['id']
    data["process_number"] = "9"
    response_box_archiving = api_client.post(
        '/box-archiving/', data=data,
        format='json')
    assert response_box_archiving.status_code == 201
    response = api_client.get('/search/?filter={"sender_unity":"unity1"}')
    assert response.status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_search_without_specific_fields_from_admin_process():
    """Create the records an administrative process depends on (a document
    name and a sender unity), then POST the process itself and expect 201."""
    api_client = APIClient()

    data_document_name = {
        "document_name": "name",
        "temporality": 2020
    }
    response_document_name = api_client.post(
        '/document-name/', data=data_document_name,
        header={"Content-Type": "application/json"})
    assert response_document_name.status_code == 201

    data_unity = {
        "unity_name": "unity1",
        "unity_abbreviation": "u1",
        "administrative_bond": "a",
        "bond_abbreviation": "a",
        "municipality": "test",
        "telephone_number": "a",
        "notes": "1"
    }
    response_unity = api_client.post(
        '/unity/', data=data_unity,
        header={"Content-Type": "application/json"})
    assert response_unity.status_code == 201

    data = {
        "process_number": "12345",
        "notes": "1",
        "filer_user": "1",
        "notice_date": "2020-11-11",
        "interested": "1",
        "reference_month_year": "2020-11-11",
        "sender_user": None,
        "archiving_date": "2020-11-11",
        "is_filed": False,
        "is_eliminated": False,
        "temporality_date": 2021,
        "send_date": "2021-11-11",
        "administrative_process_number": "1",
        "document_name_id": None,
        "sender_unity": None
    }
    # Wire the freshly created foreign keys into the process payload.
    data['document_name_id'] = response_document_name.data['id']
    data['sender_unity'] = response_unity.data['id']
    response_admin = api_client.post(
        '/administrative-process/', data=data,
        format='json')
    assert response_admin.status_code == 201
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_search_without_specific_fields_from_frequency_sheet():
    """Create the records a frequency sheet references (unity, document
    name, public worker), then POST the sheet itself and expect 201."""
    api_client = APIClient()

    data_unity = {
        "unity_name": "unity1",
        "unity_abbreviation": "u1",
        "administrative_bond": "a",
        "bond_abbreviation": "a",
        "municipality": "test",
        "telephone_number": "a",
        "notes": "1"
    }
    data_name = {
        "document_name": "name",
        "temporality": 2020
    }
    response_name = api_client.post(
        '/document-name/', data=data_name,
        header={"Content-Type": "application/json"})
    assert response_name.status_code == 201
    response_unity = api_client.post(
        '/unity/', data=data_unity,
        header={"Content-Type": "application/json"})
    assert response_unity.status_code == 201

    data_pw = {
        "name": "person1",
        "cpf": "1111111111",
    }
    response_pw = api_client.post(
        '/public-worker/', data=data_pw,
        header={"Content-Type": "application/json"})
    assert response_pw.status_code == 201

    data = {
        "person_id": None,
        "cpf": "1",
        "role": "1",
        "category": "1",
        "workplace": None,
        "municipal_area": "1",
        "reference_period": "2020-11-11",
        "notes": "1",
        "process_number": "1",
        "document_name_id": None,
        "temporality_date": 2021
    }
    # Wire the freshly created foreign keys into the sheet payload.
    data['workplace'] = response_unity.data['id']
    data['document_name_id'] = response_name.data['id']
    data['person_id'] = response_pw.data['id']
    response_sheet = api_client.post(
        '/frequency-sheet/', data=data,
        format='json')
    assert response_sheet.status_code == 201
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_get_year_by_abbreviation():
    """Year lookup by abbreviation: 204 when none exists, 200 after one is created."""
    client = APIClient()

    # Nothing has been created yet, so the lookup yields No Content.
    missing = client.get('/year-by-abbreviation/a')
    assert missing.status_code == 204

    box_payload = {
        "number": 1,
        "abbreviation": 'a',
        "name": "abc",
        "year": 2020
    }
    created = client.post(
        '/box-abbreviation/',
        data=box_payload,
        header={"Content-Type": "application/json"})
    assert created.status_code == 201

    # The same lookup now resolves.
    found = client.get('/year-by-abbreviation/a')
    assert found.status_code == 200
@pytest.mark.django_db(transaction=False)
@override_settings(MIDDLEWARE=TESTS_MIDDLEWARE)
def test_get_number_by_year_and_abbreviation():
    """Number lookup by (abbreviation, year): 204 before creation, 200 after."""
    client = APIClient()

    # Nothing has been created yet, so the lookup yields No Content.
    missing = client.get('/number-by-year-abbrevation/a/2021')
    assert missing.status_code == 204

    box_payload = {
        "number": 1,
        "abbreviation": 'a',
        "name": "abc",
        "year": 2021
    }
    created = client.post(
        '/box-abbreviation/',
        data=box_payload,
        header={"Content-Type": "application/json"})
    assert created.status_code == 201

    # The same lookup now resolves.
    found = client.get('/number-by-year-abbrevation/a/2021')
    assert found.status_code == 200
| 29.339429 | 105 | 0.61125 |
c06ae4f95baba104deb821fb1dc81f64e290af3c | 247 | py | Python | fastapi/tutorial_user_guide/testing/test_main.py | erictapia/devstacklab | 2997a620c3f4d29c3a526d561ec0cfb4ba0cd6b4 | [
"MIT"
] | null | null | null | fastapi/tutorial_user_guide/testing/test_main.py | erictapia/devstacklab | 2997a620c3f4d29c3a526d561ec0cfb4ba0cd6b4 | [
"MIT"
] | null | null | null | fastapi/tutorial_user_guide/testing/test_main.py | erictapia/devstacklab | 2997a620c3f4d29c3a526d561ec0cfb4ba0cd6b4 | [
"MIT"
] | null | null | null | from fastapi.testclient import TestClient
from .testing_main import app
client = TestClient(app)
def test_read_main():
    """GET / must answer 200 with the canonical hello payload."""
    result = client.get("/")
    assert result.status_code == 200
    assert result.json() == {"msg": "Hello World"}
| 19 | 52 | 0.704453 |
c21652805060ee804b719e5bd852d4d24e68da0e | 618 | py | Python | walbot.py | skonnov/walbot | 5b9347bf04d65f099e18ff343b80527f6849daca | [
"MIT"
] | null | null | null | walbot.py | skonnov/walbot | 5b9347bf04d65f099e18ff343b80527f6849daca | [
"MIT"
] | 33 | 2021-01-22T06:05:26.000Z | 2022-03-18T11:03:04.000Z | walbot.py | skonnov/walbot | 5b9347bf04d65f099e18ff343b80527f6849daca | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
WalBot
Check out `python walbot.py -h` for list of available options
"""
import importlib
import sys
def main():
    """WalBot launcher entrypoint"""
    version = sys.version_info
    # Only CPython 3.6 through 3.8 is supported.
    if not (version.major == 3 and 6 <= version.minor <= 8):
        print("Python {}.{}.{} is not supported. You need Python 3.6 - 3.8".format(
            version.major, version.minor, version.micro))
        sys.exit(1)
    importlib.import_module("src.launcher").Launcher()


if __name__ == "__main__":
    main()
| 26.869565 | 84 | 0.650485 |
1e63947c5923a88910e7b0826d75b8b81caa0d50 | 994 | py | Python | crawling/get_content.py | buttercrab/naver-kin-chatbot | e7f8b53ca8a4671134215ff8ead344fa9b66f269 | [
"MIT"
] | 15 | 2019-11-08T07:54:26.000Z | 2021-03-14T02:39:16.000Z | crawling/get_content.py | jshyunbin/naver-kin-chatbot | e7f8b53ca8a4671134215ff8ead344fa9b66f269 | [
"MIT"
] | 1 | 2020-01-02T08:26:13.000Z | 2020-01-02T08:26:13.000Z | crawling/get_content.py | jshyunbin/naver-kin-chatbot | e7f8b53ca8a4671134215ff8ead344fa9b66f269 | [
"MIT"
] | 2 | 2019-11-08T10:55:19.000Z | 2020-02-16T06:33:19.000Z | import requests
from bs4 import BeautifulSoup
def _content_to_str(s):
a = s.find_all('p')
if len(a) == 0:
return [str(s.text).strip()]
else:
return [str(i.text).strip() for i in a]
def get_content(url):
    """
    get content from kin.naver.com

    :param url: domain must be kin.naver.com
    :return: question and answers
        -question: title and array of lines
        -answers: array of answer that contain array of lines
    :raises ConnectionError: if the page could not be fetched
    """
    r = requests.get(url)
    if not r.ok:
        # Bug fix: the original `return ConnectionError` handed the exception
        # *class* back to the caller instead of signalling the failure.
        raise ConnectionError(
            'failed to fetch {} (HTTP {})'.format(url, r.status_code))

    html = r.text
    soup = BeautifulSoup(html, 'html.parser')

    question = {
        'title': _content_to_str(soup.find_all(class_='title')[0]),
        'content': _content_to_str(soup.find_all(class_='c-heading__content')[0])
    }

    answer = [_content_to_str(ans) for ans in soup.find_all(class_='_endContentsText c-heading-answer__content-user')]

    return question, answer
if __name__ == '__main__':
    # Smoke test: fetch one known question page and print the parsed output.
    a, b = get_content('https://kin.naver.com/qna/detail.nhn?d1id=13&dirId=13020103&docId=178920712')
    print(a)
    print(b)
| 23.116279 | 115 | 0.709256 |
b57d4511a44d558b7b64c80575e25a7a1f26bdd7 | 2,867 | py | Python | migrations/versions/0151_refactor_letter_rates.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | migrations/versions/0151_refactor_letter_rates.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | migrations/versions/0151_refactor_letter_rates.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | """
Revision ID: 0151_refactor_letter_rates
Revises: 0150_another_letter_org
Create Date: 2017-12-05 10:24:41.232128
"""
import uuid
from datetime import datetime
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
revision = "0151_refactor_letter_rates"
down_revision = "0150_another_letter_org"
def upgrade():
    """Replace the old letter_rates/letter_rate_details pair with a single
    flat letter_rates table and seed it with the 2016 second-class rates."""
    # Drop the child table first to satisfy its FK on letter_rates.
    op.drop_table("letter_rate_details")
    op.drop_table("letter_rates")
    op.create_table(
        "letter_rates",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("start_date", sa.DateTime(), nullable=False),
        sa.Column("end_date", sa.DateTime(), nullable=True),
        sa.Column("sheet_count", sa.Integer(), nullable=False),
        sa.Column("rate", sa.Numeric(), nullable=False),
        sa.Column("crown", sa.Boolean(), nullable=False),
        sa.Column("post_class", sa.String(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    start_date = datetime(2016, 3, 31, 23, 00, 00)
    # Column order: id, start_date, end_date (open-ended), sheet_count,
    # rate, crown, post_class.
    op.execute("insert into letter_rates values('{}', '{}', null, 1, 0.30, True, 'second')".format(str(uuid.uuid4()), start_date))
    op.execute("insert into letter_rates values('{}', '{}', null, 2, 0.33, True, 'second')".format(str(uuid.uuid4()), start_date))
    op.execute("insert into letter_rates values('{}', '{}', null, 3, 0.36, True, 'second')".format(str(uuid.uuid4()), start_date))
    op.execute(
        "insert into letter_rates values('{}', '{}', null, 1, 0.33, False, 'second')".format(str(uuid.uuid4()), start_date)
    )
    op.execute(
        "insert into letter_rates values('{}', '{}', null, 2, 0.39, False, 'second')".format(str(uuid.uuid4()), start_date)
    )
    op.execute(
        "insert into letter_rates values('{}', '{}', null, 3, 0.45, False, 'second')".format(str(uuid.uuid4()), start_date)
    )
def downgrade():
    """Restore the previous two-table schema (rates + per-page details).

    Note: the seeded rate rows are not recreated; only the structure is.
    """
    op.drop_table("letter_rates")
    op.create_table(
        "letter_rates",
        sa.Column("id", postgresql.UUID(), autoincrement=False, nullable=False),
        sa.Column("valid_from", postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint("id", name="letter_rates_pkey"),
        postgresql_ignore_search_path=False,
    )
    op.create_table(
        "letter_rate_details",
        sa.Column("id", postgresql.UUID(), autoincrement=False, nullable=False),
        sa.Column("letter_rate_id", postgresql.UUID(), autoincrement=False, nullable=False),
        sa.Column("page_total", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("rate", sa.NUMERIC(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(
            ["letter_rate_id"],
            ["letter_rates.id"],
            name="letter_rate_details_letter_rate_id_fkey",
        ),
        sa.PrimaryKeyConstraint("id", name="letter_rate_details_pkey"),
    )
927663a08effbdb3a6503f90ccbf207003a98988 | 8,557 | py | Python | kernelmethods/tests/test_numeric_kernels.py | vishalbelsare/kernelmethods | f49eae7057e6223fe1bae52ca4f308af807fe347 | [
"Apache-2.0"
] | 43 | 2019-08-07T21:23:31.000Z | 2022-01-21T18:12:13.000Z | kernelmethods/tests/test_numeric_kernels.py | eecheonwu/kernelmethods | a52c96faf2bc24d0daae6eefa5314db87d203136 | [
"Apache-2.0"
] | 6 | 2020-05-28T16:08:13.000Z | 2020-07-15T20:36:36.000Z | kernelmethods/tests/test_numeric_kernels.py | eecheonwu/kernelmethods | a52c96faf2bc24d0daae6eefa5314db87d203136 | [
"Apache-2.0"
] | 10 | 2019-08-28T23:28:27.000Z | 2021-09-05T06:53:20.000Z |
from numbers import Number
import numpy as np
from hypothesis import (HealthCheck, given, settings as hyp_settings, strategies)
from pytest import raises
from kernelmethods.base import KernelMatrix
from kernelmethods.numeric_kernels import (Chi2Kernel, DEFINED_KERNEL_FUNCS,
GaussianKernel, LaplacianKernel,
LinearKernel, PolyKernel, SigmoidKernel,
HadamardKernel)
from kernelmethods.operations import is_positive_semidefinite
from kernelmethods.utils import check_callable
# Ranges from which hypothesis draws the test dimensions below.
default_feature_dim = 10
range_feature_dim = [10, 50]
range_num_samples = [50, 100]
range_polynomial_degree = [2, 10]  # degree=1 is tested in LinearKernel()

# Fixed seed keeps the randomly generated samples reproducible across runs.
np.random.seed(42)

# choosing skip_input_checks=False will speed up test runs

# default values for parameters
num_tests_psd_kernel = 3
def gen_random_array(dim):
    """Return a 1-D array of `dim` uniform floats in [0, 1).

    Kept as a helper to better control precision and type of floats.
    """
    # TODO input sparse arrays for test
    sample = np.random.rand(dim)
    return sample
def gen_random_sample(num_samples, sample_dim):
    """Return a (num_samples, sample_dim) matrix of uniform floats in [0, 1).

    Kept as a helper to better control precision and type of floats.
    """
    # TODO input sparse arrays for test
    matrix = np.random.rand(num_samples, sample_dim)
    return matrix
def _test_for_all_kernels(kernel, sample_dim, check_PSDness=True):
    """Common tests that all kernels must pass.

    Checks that the kernel evaluates to a number on random inputs, is
    symmetric in its two arguments, and (optionally) that it declares
    itself PSD via `kernel.is_psd()`.
    """

    x = gen_random_array(sample_dim)
    y = gen_random_array(sample_dim)

    try:
        result = kernel(x, y)
    except Exception as exc:
        # Chain the original exception so the root cause stays visible,
        # and fix the missing space before the y value in the message.
        raise RuntimeError('{} unable to calculate!\n'
                           ' on x {}\n y {}'.format(kernel, x, y)) from exc

    if not isinstance(result, Number):
        raise ValueError('result {} of type {} is not a number!\n'
                         'x={}\ny={}\nkernel={}\n'
                         ''.format(result, type(result), x, y, kernel))

    # A kernel function must be symmetric: k(x, y) == k(y, x).
    if kernel(y, x) != result:
        raise ValueError('{} is not symmetric!'
                         'x={}\n y={}\n kernel={}\n'
                         ''.format(kernel.name, x, y, kernel))

    if check_PSDness:
        # ensuring it produces a PSD KM
        kernel.is_psd()
def test_kernel_design():
    """
    Every kernel must be
    1. must have a name defined
    2. must be callable with two samples
    3. returns a number
    """

    for kernel in DEFINED_KERNEL_FUNCS:
        # must be callable with 2 args
        check_callable(kernel, min_num_args=2)

        if not hasattr(kernel, 'name'):
            raise TypeError('{} does not have name attribute!'.format(kernel))

        # only numeric data is accepted and other dtypes must raise an error
        for non_num in ['string',
                        [object, object] ]:
            with raises(ValueError):
                _ = kernel(non_num, non_num)
def _test_func_is_valid_kernel(kernel, sample_dim, num_samples):
    """A func is a valid kernel if the kernel matrix generated by it is PSD.

    Not including this in tests for all kernels to allow for non-PSD kernels in the future
    """

    km = KernelMatrix(kernel, name='TestKM')
    km.attach_to(gen_random_sample(num_samples, sample_dim))
    if not is_positive_semidefinite(km.full, verbose=True):
        raise ValueError('{} is not PSD'.format(str(km)))
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              suppress_health_check=HealthCheck.all())
@given(strategies.integers(range_feature_dim[0], range_feature_dim[1]),
       strategies.integers(range_num_samples[0], range_num_samples[1]),
       strategies.integers(range_polynomial_degree[0], range_polynomial_degree[1]),
       strategies.floats(min_value=0, max_value=1e3,
                         allow_nan=False, allow_infinity=False))
def test_polynomial_kernel(sample_dim, num_samples,
                           poly_degree, poly_intercept):
    """Tests specific for Polynomial kernel."""

    # Check the common kernel contract and that the resulting matrix is PSD.
    poly = PolyKernel(degree=poly_degree, b=poly_intercept, skip_input_checks=False)
    _test_for_all_kernels(poly, sample_dim)
    _test_func_is_valid_kernel(poly, sample_dim, num_samples)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              suppress_health_check=HealthCheck.all())
@given(strategies.integers(range_feature_dim[0], range_feature_dim[1]),
       strategies.integers(range_num_samples[0], range_num_samples[1]),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False))
def test_gaussian_kernel(sample_dim, num_samples, sigma):
    """Tests specific for Gaussian kernel."""

    # Check the common kernel contract and that the resulting matrix is PSD.
    gaussian = GaussianKernel(sigma=sigma, skip_input_checks=False)
    _test_for_all_kernels(gaussian, sample_dim)
    _test_func_is_valid_kernel(gaussian, sample_dim, num_samples)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              suppress_health_check=HealthCheck.all())
@given(strategies.integers(range_feature_dim[0], range_feature_dim[1]),
       strategies.integers(range_num_samples[0], range_num_samples[1]))
def test_linear_kernel(sample_dim, num_samples):
    """Tests specific for Linear kernel."""

    # Check the common kernel contract and that the resulting matrix is PSD.
    linear = LinearKernel(skip_input_checks=False)
    _test_for_all_kernels(linear, sample_dim)
    _test_func_is_valid_kernel(linear, sample_dim, num_samples)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              suppress_health_check=HealthCheck.all())
@given(strategies.integers(range_feature_dim[0], range_feature_dim[1]),
       strategies.integers(range_num_samples[0], range_num_samples[1]),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False))
def test_laplacian_kernel(sample_dim, num_samples, gamma):
    """Tests specific for Laplacian kernel."""

    # Check the common kernel contract and that the resulting matrix is PSD.
    laplacian = LaplacianKernel(gamma=gamma, skip_input_checks=False)
    _test_for_all_kernels(laplacian, sample_dim)
    _test_func_is_valid_kernel(laplacian, sample_dim, num_samples)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              suppress_health_check=HealthCheck.all())
@given(strategies.integers(range_feature_dim[0], range_feature_dim[1]),
       strategies.integers(range_num_samples[0], range_num_samples[1]),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False)
       )
def test_sigmoid_kernel(sample_dim, num_samples, gamma, offset):
    """Tests specific for sigmoid kernel.

    Only the common contract is checked; PSD-ness is deliberately skipped.
    """

    sigmoid = SigmoidKernel(gamma=gamma, offset=offset, skip_input_checks=False)
    # sigmoid is not always PSD
    _test_for_all_kernels(sigmoid, sample_dim, check_PSDness=False)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              suppress_health_check=HealthCheck.all())
@given(strategies.integers(range_feature_dim[0], range_feature_dim[1]),
       strategies.integers(range_num_samples[0], range_num_samples[1]),
       strategies.floats(min_value=0, max_value=1e6,
                         allow_nan=False, allow_infinity=False))
def test_chi2_kernel(sample_dim, num_samples, gamma):
    """Tests specific for the chi-squared kernel."""

    # Check the common kernel contract and that the resulting matrix is PSD.
    chi2 = Chi2Kernel(gamma=gamma, skip_input_checks=False)
    _test_for_all_kernels(chi2, sample_dim)
    _test_func_is_valid_kernel(chi2, sample_dim, num_samples)
def test_chi2_kernel_misc():
    """Chi-squared kernel must reject inputs with negative values."""

    chi2 = Chi2Kernel()
    x = gen_random_array(10)
    y = gen_random_array(10)
    # Centering guarantees at least one strictly negative entry.
    neg_x = x - x.mean()  # some values would be negative
    pos_y = np.abs(y)

    from kernelmethods.config import Chi2NegativeValuesException
    # Negative values must be rejected regardless of argument position.
    with raises(Chi2NegativeValuesException):
        chi2(neg_x, pos_y)

    with raises(Chi2NegativeValuesException):
        chi2(pos_y, neg_x)
@hyp_settings(max_examples=num_tests_psd_kernel, deadline=None,
              suppress_health_check=HealthCheck.all())
@given(strategies.integers(range_feature_dim[0], range_feature_dim[1]),
       strategies.floats(min_value=1, max_value=1e6,
                         allow_nan=False, allow_infinity=False))
def test_Hadamard_kernel(sample_dim, alpha):
    """Tests specific for Hadamard kernel.

    Only the common contract is checked; PSD-ness is deliberately skipped.
    """

    had = HadamardKernel(alpha=alpha, skip_input_checks=False)
    _test_for_all_kernels(had, sample_dim, check_PSDness=False)
def test_Hadamard_kernel_misc():
    """Constructing a Hadamard kernel with alpha=0 must be rejected."""
    with raises(ValueError):
        HadamardKernel(alpha=0)
| 37.696035 | 90 | 0.698142 |
b66d92f85551b595953727af4b904458253399f7 | 2,655 | py | Python | api/environments/identities/traits/serializers.py | ekampf/flagsmith | 35d1944de9763f02de5d5d1793d5b29b7fe28993 | [
"BSD-3-Clause"
] | null | null | null | api/environments/identities/traits/serializers.py | ekampf/flagsmith | 35d1944de9763f02de5d5d1793d5b29b7fe28993 | [
"BSD-3-Clause"
] | null | null | null | api/environments/identities/traits/serializers.py | ekampf/flagsmith | 35d1944de9763f02de5d5d1793d5b29b7fe28993 | [
"BSD-3-Clause"
] | null | null | null | from core.constants import INTEGER
from rest_framework import exceptions, serializers
from environments.identities.models import Identity
from environments.identities.serializers import IdentitySerializer
from environments.identities.traits.fields import TraitValueField
from environments.identities.traits.models import Trait
class TraitSerializerFull(serializers.ModelSerializer):
    """Read serializer exposing all Trait fields plus the nested identity."""

    identity = IdentitySerializer()
    # Computed through get_trait_value() below rather than a model column.
    trait_value = serializers.SerializerMethodField()

    class Meta:
        model = Trait
        fields = "__all__"

    @staticmethod
    def get_trait_value(obj):
        # Delegates to the model's own get_trait_value().
        return obj.get_trait_value()
class TraitSerializerBasic(serializers.ModelSerializer):
    """Minimal Trait serializer: id, key and (writable) typed value only."""

    trait_value = TraitValueField()

    class Meta:
        model = Trait
        fields = ("id", "trait_key", "trait_value")
        read_only_fields = ("id",)
class IncrementTraitValueSerializer(serializers.Serializer):
    """Increments an integer trait for an identity, creating both the
    identity and the trait (starting at 0) when they do not exist yet."""

    trait_key = serializers.CharField()
    increment_by = serializers.IntegerField(write_only=True)
    identifier = serializers.CharField()
    trait_value = serializers.IntegerField(read_only=True)

    def to_representation(self, instance):
        # `instance` is a Trait; echo the post-increment value back.
        return {
            "trait_key": instance.trait_key,
            "trait_value": instance.integer_value,
            "identifier": instance.identity.identifier,
        }

    def create(self, validated_data):
        trait, _ = Trait.objects.get_or_create(
            **self._build_query_data(validated_data),
            defaults=self._build_default_data(),
        )

        # Only integer-typed traits can be incremented.
        if trait.value_type != INTEGER:
            raise exceptions.ValidationError("Trait is not an integer.")

        trait.integer_value += validated_data.get("increment_by")
        trait.save()
        return trait

    def _build_query_data(self, validated_data):
        # Resolve (or create) the identity in the request's environment.
        identity_data = {
            "identifier": validated_data.get("identifier"),
            "environment": self.context.get("request").environment,
        }
        identity, _ = Identity.objects.get_or_create(**identity_data)
        return {"trait_key": validated_data.get("trait_key"), "identity": identity}

    def _build_default_data(self):
        # New traits start as integer 0 so the increment applies cleanly.
        return {"value_type": INTEGER, "integer_value": 0}
class TraitKeysSerializer(serializers.Serializer):
    """Response shape for listing trait keys: {"keys": [..]}."""

    keys = serializers.ListSerializer(child=serializers.CharField())
class DeleteAllTraitKeysSerializer(serializers.Serializer):
    """Deletes every trait with the given key across an environment."""

    key = serializers.CharField()

    def delete(self):
        # Environment is supplied via the serializer context by the view.
        environment = self.context.get("environment")
        Trait.objects.filter(
            identity__environment=environment, trait_key=self.validated_data.get("key")
        ).delete()
| 31.987952 | 87 | 0.700942 |
acaf2ce935c1b6b081563af7c15472454b7956a0 | 3,605 | py | Python | src/meltano/cli/ui.py | Bjenhamin4Alfredolvchenki/meltano | b8d1d812f4051b6334986fc6b447d23c4d0d5043 | [
"MIT"
] | 8 | 2020-06-16T22:29:54.000Z | 2021-06-04T11:57:57.000Z | src/meltano/cli/ui.py | aroder/meltano | b8d1d812f4051b6334986fc6b447d23c4d0d5043 | [
"MIT"
] | 13 | 2021-03-10T19:44:58.000Z | 2022-02-27T05:31:12.000Z | src/meltano/cli/ui.py | aroder/meltano | b8d1d812f4051b6334986fc6b447d23c4d0d5043 | [
"MIT"
] | 2 | 2020-06-16T22:29:59.000Z | 2020-11-04T05:47:50.000Z | import asyncio
import click
import logging
import os
import secrets
import signal
import subprocess
from click_default_group import DefaultGroup
from . import cli
from .params import project
from meltano.core.config_service import ConfigService
from meltano.core.plugin.error import PluginMissingError
from meltano.core.db import project_engine
from meltano.core.tracking import GoogleAnalyticsTracker
from meltano.core.utils import truthy
from meltano.core.migration_service import MigrationService
from meltano.api.workers import (
MeltanoCompilerWorker,
AirflowWorker,
APIWorker,
UIAvailableWorker,
)
logger = logging.getLogger(__name__)
def start_workers(workers):
    """Start every worker and return a callback that stops them all."""

    def stop_all():
        logger.info("Stopping all background workers...")
        for background_worker in workers:
            background_worker.stop()

    for background_worker in workers:
        background_worker.start()

    return stop_all
@cli.group(cls=DefaultGroup, default="start", default_if_no_args=True)
@project(migrate=True)
@click.pass_context
def ui(ctx, project):
    # `meltano ui` with no subcommand runs `start` (DefaultGroup above).
    # NOTE: deliberately no docstring -- click would surface it as help text.
    # Stash the project on the click context so subcommands can retrieve it.
    ctx.obj["project"] = project
@ui.command()
@click.option("--reload", is_flag=True, default=False)
@click.option(
    "--bind-port",
    default=5000,
    help="Port to run webserver on",
    envvar="MELTANO_API_PORT",
    type=int,
)
@click.option(
    "--bind",
    default="0.0.0.0",
    help="The hostname (or IP address) to bind on",
    envvar="MELTANO_API_HOSTNAME",
)
@click.pass_context
def start(ctx, reload, bind_port, bind):
    # NOTE: deliberately no docstring -- click would surface it as help text.
    project = ctx.obj["project"]

    tracker = GoogleAnalyticsTracker(project)
    tracker.track_meltano_ui()

    workers = []

    if not truthy(os.getenv("MELTANO_DISABLE_AIRFLOW", False)):
        try:
            config_service = ConfigService(project)
            config_service.find_plugin("airflow")
            workers.append(AirflowWorker(project))
        except PluginMissingError:
            # Airflow is not installed in this project; skip its worker.
            pass

    try:
        compiler_worker = MeltanoCompilerWorker(project)
        compiler_worker.compiler.compile()
        workers.append(compiler_worker)
    except Exception as e:
        logger.error(f"Initial compilation failed: {e}")

    # Bug fix: this literal was missing the `f` prefix, so the UI banner
    # printed the text "{bind_port}" instead of the actual port number.
    workers.append(UIAvailableWorker(f"http://localhost:{bind_port}"))
    workers.append(
        APIWorker(
            project,
            f"{bind}:{bind_port}",
            reload=reload or os.getenv("FLASK_ENV") == "development",
        )
    )

    cleanup = start_workers(workers)

    def handle_terminate(signal, frame):
        cleanup()

    signal.signal(signal.SIGTERM, handle_terminate)
    logger.info("All workers started.")
@ui.command()
@click.argument("server_name")
@click.option("--bits", default=256)
@click.pass_context
def setup(ctx, server_name, **flags):
    """
    Generates the `ui.cfg` file to keep the server secrets keys.
    """
    project = ctx.obj["project"]

    ui_file_path = project.root_dir("ui.cfg")

    # Refuse to overwrite existing secrets.
    if ui_file_path.exists():
        logging.critical(
            f"Found secrets in file `{ui_file_path}`, please delete this file to regenerate the secrets."
        )
        raise click.Abort()

    def generate_secret():
        # PEP 8 (E731): a named `def` instead of an assigned lambda.
        # `--bits` is a bit count; token_hex takes a byte count.
        return secrets.token_hex(int(flags["bits"] / 8))

    config = {
        "SERVER_NAME": server_name,
        "SECRET_KEY": generate_secret(),
        "SECURITY_PASSWORD_SALT": generate_secret(),
    }

    # Flask doesn't support `configparser` or any other configuration format
    # than plain Python files.
    #
    # Luckily the format is trivial to generate
    with ui_file_path.open("w") as f:
        for k, v in config.items():
            f.write(f'{k} = "{v}"\n')
| 26.123188 | 105 | 0.674064 |
7b806f038e25402cd4b43c832df3eab85d332fdf | 746 | py | Python | speechtotext.py | DeViL3998/Voice | 1f87f3d917b487635af1c8e808bd2fcc1f975e4d | [
"MIT"
] | null | null | null | speechtotext.py | DeViL3998/Voice | 1f87f3d917b487635af1c8e808bd2fcc1f975e4d | [
"MIT"
] | 1 | 2019-12-11T16:18:47.000Z | 2019-12-11T16:18:47.000Z | speechtotext.py | DeViL3998/Voice | 1f87f3d917b487635af1c8e808bd2fcc1f975e4d | [
"MIT"
] | 1 | 2019-11-28T19:01:06.000Z | 2019-11-28T19:01:06.000Z | import speech_recognition as sr
import re
import math
from comparison import compare
from random_generator import generate_random_words, select_paragraph
### Recognizes the audio to generate list of words and call compare function to calculate success percentage ###
r = sr.Recognizer()
num = 10
# case 0: practise against `num` random words; otherwise against a paragraph.
case = 1

if case == 0:
    given_input = generate_random_words(num)
else:
    # Raw string for the regex; strip non-word chars and split into words.
    given_input = re.sub(r"[^\w]", " ", select_paragraph().lower()).split()
    num = len(given_input)

with sr.Microphone() as source:
    print("Speak Now")
    audio = r.listen(source)
    print("Time over, thanks")

try:
    user_input = r.recognize_google(audio)
    print(user_input)
    print("SUCCESS PERCENTAGE: ")
    print(math.floor(compare(user_input, given_input, num)))
except Exception as error:
    # Bug fix: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt and hid the actual failure reason.
    print("ERROR")
    print(error)
a3a409a40c9fc0b7b5003594bcff9b0bf4d4ddd6 | 3,707 | py | Python | tests/models/validators/v3_1_0/jsd_b8104a50fc565ae9a756d6d0152e0e5b.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 36 | 2021-05-18T16:24:19.000Z | 2022-03-05T13:44:41.000Z | tests/models/validators/v3_1_1/jsd_b8104a50fc565ae9a756d6d0152e0e5b.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 15 | 2021-06-08T19:03:37.000Z | 2022-02-25T14:47:33.000Z | tests/models/validators/v3_1_0/jsd_b8104a50fc565ae9a756d6d0152e0e5b.py | CiscoISE/ciscoisesdk | 860b0fc7cc15d0c2a39c64608195a7ab3d5f4885 | [
"MIT"
] | 6 | 2021-06-10T09:32:01.000Z | 2022-01-12T08:34:39.000Z | # -*- coding: utf-8 -*-
"""Identity Services Engine getCSRById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorB8104A50Fc565Ae9A756D6D0152E0E5B(object):
    """getCSRById request schema definition."""

    def __init__(self):
        super(JSONSchemaValidatorB8104A50Fc565Ae9A756D6D0152E0E5B, self).__init__()
        # The schema is expressed directly as the dict that json.loads()
        # produced in the original embedded JSON document, then pre-compiled
        # by fastjsonschema for fast repeated validation.
        link_schema = {
            "properties": {
                "href": {"type": "string"},
                "rel": {
                    "enum": ["next", "previous", "self", "status"],
                    "type": "string",
                },
                "type": {"type": "string"},
            },
            "type": "object",
        }
        response_schema = {
            "properties": {
                "csrContents": {"type": "string"},
                "friendlyName": {"type": "string"},
                "groupTag": {"type": "string"},
                "hostName": {"type": "string"},
                "id": {"type": "string"},
                "keySize": {"type": "string"},
                "link": link_schema,
                "signatureAlgorithm": {"type": "string"},
                "subject": {"type": "string"},
                "timeStamp": {"type": "string"},
                "usedFor": {"type": "string"},
            },
            "type": "object",
        }
        self._validator = fastjsonschema.compile({
            "$schema": "http://json-schema.org/draft-04/schema#",
            "properties": {
                "response": response_schema,
                "version": {"type": "string"},
            },
            "type": "object",
        })

    def validate(self, request):
        """Validate *request* against the schema; raise MalformedRequest on failure."""
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| 31.415254 | 83 | 0.499056 |
3c2a14f40336b0316fcc86eafba211adefe037ef | 707 | py | Python | tools/scripts/checkLicense.py | nicramage/Catch2 | 6f21a3609cea360846a0ca93be55877cca14c86d | [
"BSL-1.0"
] | 9,861 | 2017-11-03T13:11:42.000Z | 2022-03-31T23:50:03.000Z | tools/scripts/checkLicense.py | nicramage/Catch2 | 6f21a3609cea360846a0ca93be55877cca14c86d | [
"BSL-1.0"
] | 1,409 | 2017-11-03T13:42:48.000Z | 2022-03-31T14:46:42.000Z | tools/scripts/checkLicense.py | nicramage/Catch2 | 6f21a3609cea360846a0ca93be55877cca14c86d | [
"BSL-1.0"
] | 2,442 | 2017-11-03T14:48:53.000Z | 2022-03-31T23:07:09.000Z | #!/usr/bin/env python3
import os
import sys
def get_license():
    """Return the expected license header: the first 7 lines of catch_all.hpp."""
    # `encoding` pinned so the check does not depend on the platform default;
    # the local no longer shadows the `license` builtin.
    with open("src/catch2/catch_all.hpp", "r", encoding="utf-8") as f:
        header = f.readlines()[0:7]
    return header
def check_license(license):
    """Walk src/catch2/ and report files whose first 7 lines differ from *license*.

    Returns 0 when every file carries the header, 1 otherwise.
    """
    failed = 0
    base_dir = "src/catch2/"
    # The _ represents the list of directories in base_dir
    for root, _, files in os.walk(base_dir):
        for name in files:
            # os.path.join instead of `root + "/" + name`: avoids the doubled
            # separator and stays portable; encoding pinned for reproducibility.
            with open(os.path.join(root, name), "r", encoding="utf-8") as f:
                file_license = f.readlines()[0:7]

            if file_license != license:
                print("File %s does not have license" % name)
                failed = 1

    return failed
# Exit code mirrors the result: 0 = all files licensed, 1 = violations found.
license = get_license()
status = check_license(license)
sys.exit(status)
| 21.424242 | 61 | 0.592645 |
247bbd0996a839623a003cc95640de6a5c0b54ce | 234 | py | Python | SlidingPuzzle/sample/python/SlidingPuzzle.py | ksomemo/Typical-MM | c15004eaa25e0abe316e15378a455b258b90aee7 | [
"MIT"
] | 5 | 2020-08-03T05:29:50.000Z | 2021-12-15T22:53:20.000Z | SlidingPuzzle/sample/python/SlidingPuzzle.py | ksomemo/Typical-MM | c15004eaa25e0abe316e15378a455b258b90aee7 | [
"MIT"
] | null | null | null | SlidingPuzzle/sample/python/SlidingPuzzle.py | ksomemo/Typical-MM | c15004eaa25e0abe316e15378a455b258b90aee7 | [
"MIT"
] | null | null | null | import sys
# NOTE(review): Python 2 syntax (print statement, raw_input) -- run with python2.
# Read the board dimensions: N rows, M columns.
N,M = map(int, raw_input().split())
Board = []
# Read the N board rows (Board is read in but not consulted by this sample).
for r in range(N):
    p = map(int, raw_input().split())
    Board.append(p);
# Emit a trivial answer: N*M moves, one per cell coordinate.
print N * M
for r in range(N):
    for c in range(M):
        print str(r) + " " + str(c)
# Flush so the judging harness sees the output immediately.
sys.stdout.flush()
| 14.625 | 35 | 0.594017 |
fb9ccbf5d6a84099c5f55782b860a6d30d1e5199 | 9,516 | py | Python | tests/utils_tests/test_module_loading.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 3 | 2015-09-26T13:33:07.000Z | 2020-03-08T07:34:38.000Z | tests/utils_tests/test_module_loading.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/utils_tests/test_module_loading.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 15 | 2016-01-08T14:28:41.000Z | 2019-04-19T08:33:31.000Z | import imp
import os
import sys
import unittest
from importlib import import_module
from zipimport import zipimporter
from django.test import SimpleTestCase, modify_settings
from django.test.utils import extend_sys_path
from django.utils import six
from django.utils._os import upath
from django.utils.module_loading import (
autodiscover_modules, import_string, module_has_submodule,
)
class DefaultLoader(unittest.TestCase):
    """Tests for module_has_submodule() against the regular import machinery."""
    def setUp(self):
        # Install a ProxyFinder so finder fd-caching behaviour is exercised.
        sys.meta_path.insert(0, ProxyFinder())
    def tearDown(self):
        # Remove the finder installed in setUp().
        sys.meta_path.pop(0)
    def test_loader(self):
        "Normal module existence can be tested"
        test_module = import_module('utils_tests.test_module')
        test_no_submodule = import_module(
            'utils_tests.test_no_submodule')
        # An importable child
        self.assertTrue(module_has_submodule(test_module, 'good_module'))
        mod = import_module('utils_tests.test_module.good_module')
        self.assertEqual(mod.content, 'Good Module')
        # A child that exists, but will generate an import error if loaded
        self.assertTrue(module_has_submodule(test_module, 'bad_module'))
        self.assertRaises(ImportError, import_module, 'utils_tests.test_module.bad_module')
        # A child that doesn't exist
        self.assertFalse(module_has_submodule(test_module, 'no_such_module'))
        self.assertRaises(ImportError, import_module, 'utils_tests.test_module.no_such_module')
        # A child that doesn't exist, but is the name of a package on the path
        self.assertFalse(module_has_submodule(test_module, 'django'))
        self.assertRaises(ImportError, import_module, 'utils_tests.test_module.django')
        # Don't be confused by caching of import misses
        import types # NOQA: causes attempted import of utils_tests.types
        self.assertFalse(module_has_submodule(sys.modules['utils_tests'], 'types'))
        # A module which doesn't have a __path__ (so no submodules)
        self.assertFalse(module_has_submodule(test_no_submodule, 'anything'))
        self.assertRaises(ImportError, import_module,
                          'utils_tests.test_no_submodule.anything')
class EggLoader(unittest.TestCase):
    """Tests module_has_submodule() for modules packaged inside .egg files."""
    def setUp(self):
        self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
    def tearDown(self):
        # Clear importer caches and drop any egg modules a test imported so
        # every test starts from a clean import state.
        sys.path_importer_cache.clear()
        sys.modules.pop('egg_module.sub1.sub2.bad_module', None)
        sys.modules.pop('egg_module.sub1.sub2.good_module', None)
        sys.modules.pop('egg_module.sub1.sub2', None)
        sys.modules.pop('egg_module.sub1', None)
        sys.modules.pop('egg_module.bad_module', None)
        sys.modules.pop('egg_module.good_module', None)
        sys.modules.pop('egg_module', None)
    def test_shallow_loader(self):
        "Module existence can be tested inside eggs"
        egg_name = '%s/test_egg.egg' % self.egg_dir
        with extend_sys_path(egg_name):
            egg_module = import_module('egg_module')
            # An importable child
            self.assertTrue(module_has_submodule(egg_module, 'good_module'))
            mod = import_module('egg_module.good_module')
            self.assertEqual(mod.content, 'Good Module')
            # A child that exists, but will generate an import error if loaded
            self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
            self.assertRaises(ImportError, import_module, 'egg_module.bad_module')
            # A child that doesn't exist
            self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
            self.assertRaises(ImportError, import_module, 'egg_module.no_such_module')
    def test_deep_loader(self):
        "Modules deep inside an egg can still be tested for existence"
        egg_name = '%s/test_egg.egg' % self.egg_dir
        with extend_sys_path(egg_name):
            egg_module = import_module('egg_module.sub1.sub2')
            # An importable child
            self.assertTrue(module_has_submodule(egg_module, 'good_module'))
            mod = import_module('egg_module.sub1.sub2.good_module')
            self.assertEqual(mod.content, 'Deep Good Module')
            # A child that exists, but will generate an import error if loaded
            self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
            self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.bad_module')
            # A child that doesn't exist
            self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
            self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.no_such_module')
class ModuleImportTestCase(unittest.TestCase):
    """Tests for django.utils.module_loading.import_string."""
    def test_import_string(self):
        # import_string must resolve its own dotted path back to itself.
        imported = import_string('django.utils.module_loading.import_string')
        self.assertEqual(imported, import_string)
        # A dotless path cannot name a module attribute.
        self.assertRaises(ImportError, import_string, 'no_dots_in_path')
        # A missing attribute raises ImportError with a descriptive message.
        expected = 'Module "utils_tests" does not define a "unexistent" attribute'
        with six.assertRaisesRegex(self, ImportError, expected):
            import_string('utils_tests.unexistent')
@modify_settings(INSTALLED_APPS={'append': 'utils_tests.test_module'})
class AutodiscoverModulesTestCase(SimpleTestCase):
    """Tests for autodiscover_modules(), including registry rollback."""
    def tearDown(self):
        # Undo any imports autodiscover performed so tests stay isolated.
        sys.path_importer_cache.clear()
        sys.modules.pop('utils_tests.test_module.another_bad_module', None)
        sys.modules.pop('utils_tests.test_module.another_good_module', None)
        sys.modules.pop('utils_tests.test_module.bad_module', None)
        sys.modules.pop('utils_tests.test_module.good_module', None)
        sys.modules.pop('utils_tests.test_module', None)
    def test_autodiscover_modules_found(self):
        autodiscover_modules('good_module')
    def test_autodiscover_modules_not_found(self):
        # Missing modules are skipped without error.
        autodiscover_modules('missing_module')
    def test_autodiscover_modules_found_but_bad_module(self):
        # A present module whose import fails must propagate the ImportError.
        with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"):
            autodiscover_modules('bad_module')
    def test_autodiscover_modules_several_one_bad_module(self):
        with six.assertRaisesRegex(self, ImportError, "No module named '?a_package_name_that_does_not_exist'?"):
            autodiscover_modules('good_module', 'bad_module')
    def test_autodiscover_modules_several_found(self):
        autodiscover_modules('good_module', 'another_good_module')
    def test_autodiscover_modules_several_found_with_registry(self):
        from .test_module import site
        autodiscover_modules('good_module', 'another_good_module', register_to=site)
        self.assertEqual(site._registry, {'lorem': 'ipsum'})
    def test_validate_registry_keeps_intact(self):
        # A failing module must leave the registry untouched.
        from .test_module import site
        with six.assertRaisesRegex(self, Exception, "Some random exception."):
            autodiscover_modules('another_bad_module', register_to=site)
        self.assertEqual(site._registry, {})
    def test_validate_registry_resets_after_erroneous_module(self):
        # Registrations made before the failing module are kept; only the
        # failing module's changes are rolled back.
        from .test_module import site
        with six.assertRaisesRegex(self, Exception, "Some random exception."):
            autodiscover_modules('another_good_module', 'another_bad_module', register_to=site)
        self.assertEqual(site._registry, {'lorem': 'ipsum'})
    def test_validate_registry_resets_after_missing_module(self):
        # Missing modules surrounding a good one do not disturb the registry.
        from .test_module import site
        autodiscover_modules('does_not_exist', 'another_good_module', 'does_not_exist2', register_to=site)
        self.assertEqual(site._registry, {'lorem': 'ipsum'})
class ProxyFinder(object):
    """Meta-path finder proxying imp.find_module, caching open file handles."""
    def __init__(self):
        self._cache = {}
    def find_module(self, fullname, path=None):
        leaf = fullname.rsplit('.', 1)[-1]
        try:
            found = imp.find_module(leaf, path)
        except ImportError:
            return None
        stale = self._cache.get(fullname)
        if stale and stale[0]:
            # Close the file left open by a previous lookup of this name.
            stale[0].close()
        self._cache[fullname] = found
        return self  # this object acts as the loader as well
    def load_module(self, fullname):
        if fullname in sys.modules:
            return sys.modules[fullname]
        fd, fn, info = self._cache[fullname]
        try:
            return imp.load_module(fullname, fd, fn, info)
        finally:
            # Always release the file handle opened by find_module.
            if fd:
                fd.close()
class TestFinder(object):
    """Path-hook finder that delegates to zipimporter and wraps its loader."""
    def __init__(self, *args, **kwargs):
        self.importer = zipimporter(*args, **kwargs)
    def find_module(self, path):
        delegate = self.importer.find_module(path)
        if delegate is None:
            return
        # Wrap the zip loader so loaded modules get a custom __loader__.
        return TestLoader(delegate)
class TestLoader(object):
    """Loader wrapper stamping each loaded module with itself as __loader__."""
    def __init__(self, importer):
        self.importer = importer
    def load_module(self, name):
        module = self.importer.load_module(name)
        module.__loader__ = self
        return module
class CustomLoader(EggLoader):
    """The Custom Loader test is exactly the same as the EggLoader, but
    it uses a custom defined Loader and Finder that is intentionally
    split into two classes. Although the EggLoader combines both functions
    into one class, this isn't required.
    """
    def setUp(self):
        super(CustomLoader, self).setUp()
        # Install the custom path hook ahead of the builtin zip importer and
        # clear the importer cache so the new hook is actually consulted.
        sys.path_hooks.insert(0, TestFinder)
        sys.path_importer_cache.clear()
    def tearDown(self):
        super(CustomLoader, self).tearDown()
        # Remove the path hook installed in setUp().
        sys.path_hooks.pop(0)
| 39.8159 | 112 | 0.690206 |
bb4bc72ce5af829333f1546d951404afd551291b | 1,258 | py | Python | python_modules/libraries/dagster-azure/dagster_azure/blob/utils.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 4,606 | 2018-06-21T17:45:20.000Z | 2022-03-31T23:39:42.000Z | python_modules/libraries/dagster-azure/dagster_azure/blob/utils.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 6,221 | 2018-06-12T04:36:01.000Z | 2022-03-31T21:43:05.000Z | python_modules/libraries/dagster-azure/dagster_azure/blob/utils.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 619 | 2018-08-22T22:43:09.000Z | 2022-03-31T22:48:06.000Z | import warnings
try:
    # Centralise Azure imports here so we only need to warn in one place
    from azure.core.exceptions import ResourceNotFoundError
    from azure.storage.blob import (
        generate_blob_sas,
        BlobServiceClient,
    )
except ImportError:
    # azure-storage-blob < 12 lays out its API differently, so these names
    # are missing; explain the likely version conflict before re-raising.
    msg = (
        "Could not import required Azure objects. This probably means you have an old version "
        "of azure-storage-blob installed. dagster-azure requires azure-storage-blob~=12.0.0; "
        "this conflicts with dagster-snowflake which requires azure-storage-blob<12.0.0 and is "
        "incompatible. Please uninstall dagster-snowflake and reinstall dagster-azure to fix "
        "this error."
    )
    warnings.warn(msg)
    raise
def _create_url(storage_account, subdomain):
return "https://{}.{}.core.windows.net/".format(storage_account, subdomain)
def create_blob_client(storage_account, credential):
    """
    Create a Blob Storage client.
    """
    # Shared-key credential objects expose the raw key via .account_key;
    # BlobServiceClient accepts the raw key (or a SAS token / credential).
    if hasattr(credential, "account_key"):
        credential = credential.account_key
    endpoint = _create_url(storage_account, "blob")
    return BlobServiceClient(endpoint, credential)
__all__ = ["create_blob_client", "generate_blob_sas", "BlobServiceClient", "ResourceNotFoundError"]
| 34 | 99 | 0.714626 |
b4dbe54826caea908d02ad7b22944695f27ff280 | 112,213 | py | Python | quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py | hyunsun/quantum | 40c4ec9495d05620a3cced4cc2bb98d7f6e52bbb | [
"Apache-2.0"
] | null | null | null | quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py | hyunsun/quantum | 40c4ec9495d05620a3cced4cc2bb98d7f6e52bbb | [
"Apache-2.0"
] | null | null | null | quantum/plugins/nicira/nicira_nvp_plugin/QuantumPlugin.py | hyunsun/quantum | 40c4ec9495d05620a3cced4cc2bb98d7f6e52bbb | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
import hashlib
import logging
from oslo.config import cfg
from sqlalchemy.orm import exc as sa_exc
import webob.exc
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import constants
from quantum import context as q_context
from quantum.common import exceptions as q_exc
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.db import agents_db
from quantum.db import agentschedulers_db
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import l3_db
from quantum.db import models_v2
from quantum.db import portsecurity_db
# NOTE: quota_db cannot be removed, it is for db model
from quantum.db import quota_db
from quantum.db import securitygroups_db
from quantum.extensions import l3
from quantum.extensions import portsecurity as psec
from quantum.extensions import providernet as pnet
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import importutils
from quantum.openstack.common import rpc
from quantum.plugins.nicira.nicira_nvp_plugin.common import (metadata_access
as nvp_meta)
from quantum.plugins.nicira.nicira_nvp_plugin.common import (securitygroups
as nvp_sec)
from quantum import policy
from quantum.plugins.nicira.nicira_nvp_plugin.common import config
from quantum.plugins.nicira.nicira_nvp_plugin.common import (exceptions
as nvp_exc)
from quantum.plugins.nicira.nicira_nvp_plugin.extensions import (nvp_networkgw
as networkgw)
from quantum.plugins.nicira.nicira_nvp_plugin.extensions import (nvp_qos
as ext_qos)
from quantum.plugins.nicira.nicira_nvp_plugin import nicira_db
from quantum.plugins.nicira.nicira_nvp_plugin import (nicira_networkgw_db
as networkgw_db)
from quantum.plugins.nicira.nicira_nvp_plugin import nicira_qos_db as qos_db
from quantum.plugins.nicira.nicira_nvp_plugin import nvp_cluster
from quantum.plugins.nicira.nicira_nvp_plugin.nvp_plugin_version import (
PLUGIN_VERSION)
from quantum.plugins.nicira.nicira_nvp_plugin import NvpApiClient
from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
LOG = logging.getLogger("QuantumPlugin")
NVP_NOSNAT_RULES_ORDER = 10
NVP_FLOATINGIP_NAT_RULES_ORDER = 200
NVP_EXTGW_NAT_RULES_ORDER = 255
# Provider network extension - allowed network types for the NVP Plugin
class NetworkTypes:
    """ Allowed provider network types for the NVP Plugin """
    # 'l3_ext' identifies external (L3 gateway) networks; the remaining
    # values select the transport type for tenant networks.
    L3_EXT = 'l3_ext'
    STT = 'stt'
    GRE = 'gre'
    FLAT = 'flat'
    VLAN = 'vlan'
def parse_config():
    """Parse the NVP plugin configuration from the global cfg.CONF object.

    :returns: a tuple (nvp_options, clusters_options): ``nvp_options`` is the
        ``cfg.CONF.NVP`` option group; ``clusters_options`` is a list of
        dicts, one per configured [CLUSTER:<name>] section, carrying that
        cluster's connection strings and default service UUIDs.
    """
    # Warn if metadata_dhcp_host_route option is specified
    if cfg.CONF.metadata_dhcp_host_route:
        LOG.warning(_("The metadata_dhcp_host_route is now obsolete, and "
                      "will have no effect. Instead, please set the "
                      "enable_isolated_metadata option in dhcp_agent.ini"))
    nvp_conf = config.ClusterConfigOptions(cfg.CONF)
    cluster_names = config.register_cluster_groups(nvp_conf)
    nvp_conf.log_opt_values(LOG, logging.DEBUG)
    clusters_options = []
    for cluster_name in cluster_names:
        clusters_options.append(
            {'name': cluster_name,
             'default_tz_uuid':
             nvp_conf[cluster_name].default_tz_uuid,
             'nvp_cluster_uuid':
             nvp_conf[cluster_name].nvp_cluster_uuid,
             'nova_zone_id':
             nvp_conf[cluster_name].nova_zone_id,
             'nvp_controller_connection':
             nvp_conf[cluster_name].nvp_controller_connection,
             'default_l3_gw_service_uuid':
             nvp_conf[cluster_name].default_l3_gw_service_uuid,
             'default_l2_gw_service_uuid':
             nvp_conf[cluster_name].default_l2_gw_service_uuid,
             'default_interface_name':
             nvp_conf[cluster_name].default_interface_name})
    LOG.debug(_("Cluster options:%s"), clusters_options)
    # If no api_extensions_path is provided set the following
    if not cfg.CONF.api_extensions_path:
        cfg.CONF.set_override(
            'api_extensions_path',
            'quantum/plugins/nicira/nicira_nvp_plugin/extensions')
    if (cfg.CONF.NVP.enable_metadata_access_network and
        not cfg.CONF.allow_overlapping_ips):
        LOG.warn(_("Overlapping IPs must be enabled in order to setup "
                   "the metadata access network. Metadata access in "
                   "routed mode will not work with this configuration"))
    return cfg.CONF.NVP, clusters_options
def parse_clusters_opts(clusters_opts, concurrent_connections,
                        nvp_gen_timeout, default_cluster_name):
    """Build NVPCluster objects (with API clients) from parsed options.

    :param clusters_opts: list of per-cluster option dicts from parse_config.
    :param concurrent_connections: max concurrent connections for each
        cluster's API client.
    :param nvp_gen_timeout: generation timeout passed to each API client.
    :param default_cluster_name: name of the cluster to use as default; when
        absent or unknown, the first configured cluster is used instead.
    :returns: a tuple (clusters, default_cluster) where ``clusters`` maps
        cluster name to NVPCluster.
    :raises nvp_exc.NvpInvalidConnection: for a malformed controller
        connection string.
    :raises nvp_exc.NvpPluginException: when a cluster section has no
        'nvp_controller_connection' value.
    """
    # Will store the first cluster in case is needed for default
    # cluster assignment
    clusters = {}
    first_cluster = None
    for c_opts in clusters_opts:
        # Password is guaranteed to be the same across all controllers
        # in the same NVP cluster.
        cluster = nvp_cluster.NVPCluster(c_opts['name'])
        try:
            for ctrl_conn in c_opts['nvp_controller_connection']:
                # ctrl_conn is a ':'-separated connection string; the
                # cluster-wide defaults are appended before registration.
                args = ctrl_conn.split(':')
                try:
                    args.extend([c_opts['default_tz_uuid'],
                                 c_opts['nvp_cluster_uuid'],
                                 c_opts['nova_zone_id'],
                                 c_opts['default_l3_gw_service_uuid'],
                                 c_opts['default_l2_gw_service_uuid'],
                                 c_opts['default_interface_name']])
                    cluster.add_controller(*args)
                except Exception:
                    LOG.exception(_("Invalid connection parameters for "
                                    "controller %(ctrl)s in "
                                    "cluster %(cluster)s"),
                                  {'ctrl': ctrl_conn,
                                   'cluster': c_opts['name']})
                    raise nvp_exc.NvpInvalidConnection(
                        conn_params=ctrl_conn)
        except TypeError:
            # Iterating None (option unset) raises TypeError.
            msg = _("No controller connection specified in cluster "
                    "configuration. Please ensure at least a value for "
                    "'nvp_controller_connection' is specified in the "
                    "[CLUSTER:%s] section") % c_opts['name']
            LOG.exception(msg)
            raise nvp_exc.NvpPluginException(err_msg=msg)
        api_providers = [(x['ip'], x['port'], True)
                         for x in cluster.controllers]
        cluster.api_client = NvpApiClient.NVPApiHelper(
            api_providers, cluster.user, cluster.password,
            request_timeout=cluster.request_timeout,
            http_timeout=cluster.http_timeout,
            retries=cluster.retries,
            redirects=cluster.redirects,
            concurrent_connections=concurrent_connections,
            nvp_gen_timeout=nvp_gen_timeout)
        if not clusters:
            first_cluster = cluster
        clusters[c_opts['name']] = cluster
    if default_cluster_name and default_cluster_name in clusters:
        default_cluster = clusters[default_cluster_name]
    else:
        default_cluster = first_cluster
    return (clusters, default_cluster)
class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
    """RPC callbacks exposed by the NVP plugin to the DHCP agent."""
    # Set RPC API version to 1.0 by default.
    RPC_API_VERSION = '1.0'
    def create_rpc_dispatcher(self):
        """Get the rpc dispatcher for this manager.

        If a manager would like to set an rpc API version, or support more
        than one class as the target of rpc messages, override this method.
        """
        callbacks = [self, agents_db.AgentExtRpcCallback()]
        return q_rpc.PluginRpcDispatcher(callbacks)
class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
l3_db.L3_NAT_db_mixin,
portsecurity_db.PortSecurityDbMixin,
securitygroups_db.SecurityGroupDbMixin,
networkgw_db.NetworkGatewayMixin,
qos_db.NVPQoSDbMixin,
nvp_sec.NVPSecurityGroups,
nvp_meta.NvpMetadataAccess,
agentschedulers_db.AgentSchedulerDbMixin):
"""
NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
functionality using NVP.
"""
supported_extension_aliases = ["provider", "quotas", "port-security",
"router", "security-group", "nvp-qos",
"network-gateway"]
__native_bulk_support = True
# Map nova zones to cluster for easy retrieval
novazone_cluster_map = {}
# Default controller cluster (to be used when nova zone id is unspecified)
default_cluster = None
provider_network_view = "extension:provider_network:view"
provider_network_set = "extension:provider_network:set"
port_security_enabled_create = "create_port:port_security_enabled"
port_security_enabled_update = "update_port:port_security_enabled"
def __init__(self, loglevel=None):
if loglevel:
logging.basicConfig(level=loglevel)
nvplib.LOG.setLevel(loglevel)
NvpApiClient.LOG.setLevel(loglevel)
# Routines for managing logical ports in NVP
self._port_drivers = {
'create': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nvp_create_ext_gw_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_create_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nvp_create_fip_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_create_router_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nvp_create_l2_gw_port,
'default': self._nvp_create_port},
'delete': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nvp_delete_ext_gw_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_delete_router_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nvp_delete_fip_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_delete_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nvp_delete_port,
'default': self._nvp_delete_port}
}
self.nvp_opts, self.clusters_opts = parse_config()
if not self.clusters_opts:
msg = _("No cluster specified in NVP plugin configuration. "
"Unable to start. Please ensure at least a "
"[CLUSTER:<cluster_name>] section is specified in "
"the NVP Plugin configuration file.")
LOG.error(msg)
raise nvp_exc.NvpPluginException(err_msg=msg)
self.clusters, self.default_cluster = parse_clusters_opts(
self.clusters_opts, self.nvp_opts.concurrent_connections,
self.nvp_opts.nvp_gen_timeout, self.nvp_opts.default_cluster_name)
db.configure_db()
# Extend the fault map
self._extend_fault_map()
# Set up RPC interface for DHCP agent
self.setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver)
# TODO(salvatore-orlando): Handle default gateways in multiple clusters
self._ensure_default_network_gateway()
    def _ensure_default_network_gateway(self):
        """Ensure the default cluster's L2 gateway service exists in the DB.

        Unsets any previous default gateway, creates a DB record for the
        configured default L2 gateway service if missing (DB only -- no NVP
        call), and marks it as the default. Any failure is fatal and aborts
        plugin startup.
        """
        # Add the gw in the db as default, and unset any previous default
        def_l2_gw_uuid = self.default_cluster.default_l2_gw_service_uuid
        try:
            ctx = q_context.get_admin_context()
            self._unset_default_network_gateways(ctx)
            if not def_l2_gw_uuid:
                return
            try:
                def_network_gw = self._get_network_gateway(ctx,
                                                           def_l2_gw_uuid)
            except sa_exc.NoResultFound:
                # Create in DB only - don't go on NVP
                def_gw_data = {'id': def_l2_gw_uuid,
                               'name': 'default L2 gateway service',
                               'devices': []}
                gw_res_name = networkgw.RESOURCE_NAME.replace('-', '_')
                def_network_gw = super(
                    NvpPluginV2, self).create_network_gateway(
                        ctx, {gw_res_name: def_gw_data})
            # In any case set is as default
            self._set_default_network_gateway(ctx, def_network_gw['id'])
        except Exception:
            # This is fatal - abort startup
            LOG.exception(_("Unable to process default l2 gw service:%s"),
                          def_l2_gw_uuid)
            raise
def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
""" Build ip_addresses data structure for logical router port
No need to perform validation on IPs - this has already been
done in the l3_db mixin class
"""
ip_addresses = []
for ip in fixed_ips:
if not subnet_ids or (ip['subnet_id'] in subnet_ids):
subnet = self._get_subnet(context, ip['subnet_id'])
ip_prefix = '%s/%s' % (ip['ip_address'],
subnet['cidr'].split('/')[1])
ip_addresses.append(ip_prefix)
return ip_addresses
    def _create_and_attach_router_port(self, cluster, context,
                                       router_id, port_data,
                                       attachment_type, attachment,
                                       attachment_vlan=None,
                                       subnet_ids=None):
        """Create an NVP logical router port and plug an attachment into it.

        :returns: the created NVP logical router port (dict).
        :raises nvp_exc.NvpPluginException: when port creation or the
            attachment operation fails on the NVP backend.
        """
        # Use a fake IP address if gateway port is not 'real'
        ip_addresses = (port_data.get('fake_ext_gw') and
                        ['0.0.0.0/31'] or
                        self._build_ip_address_list(context,
                                                    port_data['fixed_ips'],
                                                    subnet_ids))
        try:
            lrouter_port = nvplib.create_router_lport(
                cluster, router_id, port_data.get('tenant_id', 'fake'),
                port_data.get('id', 'fake'), port_data.get('name', 'fake'),
                port_data.get('admin_state_up', True), ip_addresses)
            LOG.debug(_("Created NVP router port:%s"), lrouter_port['uuid'])
        except NvpApiClient.NvpApiException:
            LOG.exception(_("Unable to create port on NVP logical router %s"),
                          router_id)
            raise nvp_exc.NvpPluginException(
                err_msg=_("Unable to create logical router port for quantum "
                          "port id %(port_id)s on router %(router_id)s") %
                {'port_id': port_data.get('id'), 'router_id': router_id})
        # Plug the requested attachment into the port just created; this
        # helper cleans up the NVP port itself on failure.
        self._update_router_port_attachment(cluster, context, router_id,
                                            port_data, attachment_type,
                                            attachment, attachment_vlan,
                                            lrouter_port['uuid'])
        return lrouter_port
    def _update_router_port_attachment(self, cluster, context,
                                       router_id, port_data,
                                       attachment_type, attachment,
                                       attachment_vlan=None,
                                       nvp_router_port_id=None):
        """Plug an attachment into an NVP logical router port.

        When nvp_router_port_id is not given, the router's gateway port is
        looked up instead. On failure the NVP logical port is deleted before
        raising, to avoid leaving an orphaned port behind.
        """
        if not nvp_router_port_id:
            nvp_router_port_id = self._find_router_gw_port(context, port_data)
        try:
            nvplib.plug_router_port_attachment(cluster, router_id,
                                               nvp_router_port_id,
                                               attachment,
                                               attachment_type,
                                               attachment_vlan)
            LOG.debug(_("Attached %(att)s to NVP router port %(port)s"),
                      {'att': attachment, 'port': nvp_router_port_id})
        except NvpApiClient.NvpApiException:
            # Must remove NVP logical port
            nvplib.delete_router_lport(cluster, router_id,
                                       nvp_router_port_id)
            LOG.exception(_("Unable to plug attachment in NVP logical "
                            "router port %(r_port_id)s, associated with "
                            "Quantum %(q_port_id)s"),
                          {'r_port_id': nvp_router_port_id,
                           'q_port_id': port_data.get('id')})
            raise nvp_exc.NvpPluginException(
                err_msg=(_("Unable to plug attachment in router port "
                           "%(r_port_id)s for quantum port id %(q_port_id)s "
                           "on router %(router_id)s") %
                         {'r_port_id': nvp_router_port_id,
                          'q_port_id': port_data.get('id'),
                          'router_id': router_id}))
def _get_port_by_device_id(self, context, device_id, device_owner):
""" Retrieve ports associated with a specific device id.
Used for retrieving all quantum ports attached to a given router.
"""
port_qry = context.session.query(models_v2.Port)
return port_qry.filter_by(
device_id=device_id,
device_owner=device_owner,).all()
def _find_router_subnets_cidrs(self, context, router_id):
""" Retrieve subnets attached to the specified router """
ports = self._get_port_by_device_id(context, router_id,
l3_db.DEVICE_OWNER_ROUTER_INTF)
# No need to check for overlapping CIDRs
cidrs = []
for port in ports:
for ip in port.get('fixed_ips', []):
cidrs.append(self._get_subnet(context,
ip.subnet_id).cidr)
return cidrs
    def _nvp_find_lswitch_for_port(self, context, port_data):
        """Select the NVP logical switch a new port should be created on.

        Bridged (flat/vlan) networks use a different max-port limit than
        overlay networks and allow extra logical switches to be created.
        """
        network = self._get_network(context, port_data['network_id'])
        network_binding = nicira_db.get_network_binding(
            context.session, port_data['network_id'])
        max_ports = self.nvp_opts.max_lp_per_overlay_ls
        allow_extra_lswitches = False
        if (network_binding and
            network_binding.binding_type in (NetworkTypes.FLAT,
                                             NetworkTypes.VLAN)):
            max_ports = self.nvp_opts.max_lp_per_bridged_ls
            allow_extra_lswitches = True
        try:
            cluster = self._find_target_cluster(port_data)
            return self._handle_lswitch_selection(
                cluster, network, network_binding, max_ports,
                allow_extra_lswitches)
        except NvpApiClient.NvpApiException:
            err_desc = _(("An exception occured while selecting logical "
                          "switch for the port"))
            LOG.exception(err_desc)
            raise nvp_exc.NvpPluginException(err_msg=err_desc)
    def _nvp_create_port_helper(self, cluster, ls_uuid, port_data,
                                do_port_security=True):
        # Thin wrapper translating quantum port data into an NVP
        # create-logical-port call.
        # NOTE(review): do_port_security is never read in this body; port
        # security settings from port_data are always forwarded. Confirm
        # whether the flag should gate them before relying on it.
        return nvplib.create_lport(cluster, ls_uuid, port_data['tenant_id'],
                                   port_data['id'], port_data['name'],
                                   port_data['device_id'],
                                   port_data['admin_state_up'],
                                   port_data['mac_address'],
                                   port_data['fixed_ips'],
                                   port_data[psec.PORTSECURITY],
                                   port_data[ext_sg.SECURITYGROUPS],
                                   port_data[ext_qos.QUEUE])
    def _nvp_create_port(self, context, port_data):
        """ Driver for creating a logical switch port on NVP platform """
        # FIXME(salvatore-orlando): On the NVP platform we do not really have
        # external networks. So if as user tries and create a "regular" VIF
        # port on an external network we are unable to actually create.
        # However, in order to not break unit tests, we need to still create
        # the DB object and return success
        if self._network_is_external(context, port_data['network_id']):
            LOG.error(_("NVP plugin does not support regular VIF ports on "
                        "external networks. Port %s will be down."),
                      port_data['network_id'])
            # No need to actually update the DB state - the default is down
            return port_data
        try:
            cluster = self._find_target_cluster(port_data)
            selected_lswitch = self._nvp_find_lswitch_for_port(context,
                                                               port_data)
            lport = self._nvp_create_port_helper(cluster,
                                                 selected_lswitch['uuid'],
                                                 port_data,
                                                 True)
            # Persist the quantum-port -> NVP-port id mapping.
            nicira_db.add_quantum_nvp_port_mapping(
                context.session, port_data['id'], lport['uuid'])
            # Router-facing ports are plugged by their dedicated drivers,
            # not via a VifAttachment here.
            if (not port_data['device_owner'] in
                (l3_db.DEVICE_OWNER_ROUTER_GW,
                 l3_db.DEVICE_OWNER_ROUTER_INTF)):
                nvplib.plug_interface(cluster, selected_lswitch['uuid'],
                                      lport['uuid'], "VifAttachment",
                                      port_data['id'])
            LOG.debug(_("_nvp_create_port completed for port %(name)s "
                        "on network %(network_id)s. The new port id is "
                        "%(id)s."), port_data)
        except NvpApiClient.NvpApiException:
            msg = (_("An exception occured while plugging the interface "
                     "into network:%s") % port_data['network_id'])
            LOG.exception(msg)
            raise q_exc.QuantumException(message=msg)
def _nvp_delete_port(self, context, port_data):
# FIXME(salvatore-orlando): On the NVP platform we do not really have
# external networks. So deleting regular ports from external networks
# does not make sense. However we cannot raise as this would break
# unit tests.
if self._network_is_external(context, port_data['network_id']):
LOG.error(_("NVP plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
return
nvp_port_id = self._nvp_get_port_id(context, self.default_cluster,
port_data)
if not nvp_port_id:
LOG.debug(_("Port '%s' was already deleted on NVP platform"), id)
return
# TODO(bgh): if this is a bridged network and the lswitch we just got
# back will have zero ports after the delete we should garbage collect
# the lswitch.
try:
nvplib.delete_port(self.default_cluster,
port_data['network_id'],
nvp_port_id)
LOG.debug(_("_nvp_delete_port completed for port %(port_id)s "
"on network %(net_id)s"),
{'port_id': port_data['id'],
'net_id': port_data['network_id']})
except q_exc.NotFound:
LOG.warning(_("port %s not found in NVP"), port_data['id'])
    def _nvp_delete_router_port(self, context, port_data):
        """Delete a router-interface port: NVP peer router port, then lport.

        The peer router port removal is best-effort; backend errors are
        swallowed since the router may already be gone.
        """
        # Delete logical router port
        lrouter_id = port_data['device_id']
        nvp_port_id = self._nvp_get_port_id(context, self.default_cluster,
                                            port_data)
        if not nvp_port_id:
            raise q_exc.PortNotFound(port_id=port_data['id'])
        try:
            nvplib.delete_peer_router_lport(self.default_cluster,
                                            lrouter_id,
                                            port_data['network_id'],
                                            nvp_port_id)
        except (NvpApiClient.NvpApiException, NvpApiClient.ResourceNotFound):
            # Do not raise because the issue might as well be that the
            # router has already been deleted, so there would be nothing
            # to do here
            LOG.exception(_("Ignoring exception as this means the peer "
                            "for port '%s' has already been deleted."),
                          nvp_port_id)
        # Delete logical switch port
        self._nvp_delete_port(context, port_data)
    def _nvp_create_router_port(self, context, port_data):
        """ Driver for creating a switch port to be connected to a router """
        # No router ports on external networks!
        if self._network_is_external(context, port_data['network_id']):
            raise nvp_exc.NvpPluginException(
                err_msg=(_("It is not allowed to create router interface "
                           "ports on external networks as '%s'") %
                         port_data['network_id']))
        try:
            selected_lswitch = self._nvp_find_lswitch_for_port(context,
                                                               port_data)
            cluster = self._find_target_cluster(port_data)
            # Do not apply port security here!
            lport = self._nvp_create_port_helper(cluster,
                                                 selected_lswitch['uuid'],
                                                 port_data,
                                                 False)
            # Persist the quantum-port -> NVP-port id mapping.
            nicira_db.add_quantum_nvp_port_mapping(
                context.session, port_data['id'], lport['uuid'])
            LOG.debug(_("_nvp_create_port completed for port %(name)s on "
                        "network %(network_id)s. The new port id is %(id)s."),
                      port_data)
        except Exception:
            # failed to create port in NVP delete port from quantum_db
            LOG.exception(_("An exception occured while plugging "
                            "the interface"))
            super(NvpPluginV2, self).delete_port(context, port_data["id"])
            raise
def _find_router_gw_port(self, context, port_data):
router_id = port_data['device_id']
cluster = self._find_target_cluster(port_data)
if not router_id:
raise q_exc.BadRequest(_("device_id field must be populated in "
"order to create an external gateway "
"port for network %s"),
port_data['network_id'])
lr_port = nvplib.find_router_gw_port(context, cluster, router_id)
if not lr_port:
raise nvp_exc.NvpPluginException(
err_msg=(_("The gateway port for the router %s "
"was not found on the NVP backend")
% router_id))
return lr_port
    def _nvp_create_ext_gw_port(self, context, port_data):
        """ Driver for creating an external gateway port on NVP platform

        Updates the pre-existing NVP gateway logical port (created along
        with the logical router) with the gateway IP configuration, plugs
        the L3 gateway attachment when the external network is an 'l3_ext'
        provider network, and installs one SNAT rule per router subnet
        using the first gateway IP address.
        """
        # TODO(salvatore-orlando): Handle NVP resource
        # rollback when something goes not quite as expected
        lr_port = self._find_router_gw_port(context, port_data)
        ip_addresses = self._build_ip_address_list(context,
                                                   port_data['fixed_ips'])
        # This operation actually always updates a NVP logical port
        # instead of creating one. This is because the gateway port
        # is created at the same time as the NVP logical router, otherwise
        # the fabric status of the NVP router will be down.
        # admin_status should always be up for the gateway port
        # regardless of what the user specifies in quantum
        cluster = self._find_target_cluster(port_data)
        router_id = port_data['device_id']
        # 7th positional argument hard-codes admin_status to True
        nvplib.update_router_lport(cluster,
                                   router_id,
                                   lr_port['uuid'],
                                   port_data['tenant_id'],
                                   port_data['id'],
                                   port_data['name'],
                                   True,
                                   ip_addresses)
        ext_network = self.get_network(context, port_data['network_id'])
        if ext_network.get(pnet.NETWORK_TYPE) == NetworkTypes.L3_EXT:
            # Update attachment
            self._update_router_port_attachment(
                cluster, context, router_id, port_data,
                "L3GatewayAttachment",
                ext_network[pnet.PHYSICAL_NETWORK],
                ext_network[pnet.SEGMENTATION_ID],
                lr_port['uuid'])
        # Set the SNAT rule for each subnet (only first IP)
        for cidr in self._find_router_subnets_cidrs(context, router_id):
            nvplib.create_lrouter_snat_rule(
                cluster, router_id,
                ip_addresses[0].split('/')[0],
                ip_addresses[0].split('/')[0],
                order=NVP_EXTGW_NAT_RULES_ORDER,
                match_criteria={'source_ip_addresses': cidr})
        LOG.debug(_("_nvp_create_ext_gw_port completed on external network "
                    "%(ext_net_id)s, attached to router:%(router_id)s. "
                    "NVP port id is %(nvp_port_id)s"),
                  {'ext_net_id': port_data['network_id'],
                   'router_id': router_id,
                   'nvp_port_id': lr_port['uuid']})
def _nvp_delete_ext_gw_port(self, context, port_data):
lr_port = self._find_router_gw_port(context, port_data)
# TODO(salvatore-orlando): Handle NVP resource
# rollback when something goes not quite as expected
try:
# Delete is actually never a real delete, otherwise the NVP
# logical router will stop working
cluster = self._find_target_cluster(port_data)
router_id = port_data['device_id']
nvplib.update_router_lport(cluster,
router_id,
lr_port['uuid'],
port_data['tenant_id'],
port_data['id'],
port_data['name'],
True,
['0.0.0.0/31'])
# Delete the SNAT rule for each subnet
for cidr in self._find_router_subnets_cidrs(context, router_id):
nvplib.delete_nat_rules_by_match(
cluster, router_id, "SourceNatRule",
max_num_expected=1, min_num_expected=1,
source_ip_addresses=cidr)
# Reset attachment
self._update_router_port_attachment(
cluster, context, router_id, port_data,
"L3GatewayAttachment",
self.default_cluster.default_l3_gw_service_uuid,
nvp_router_port_id=lr_port['uuid'])
except NvpApiClient.ResourceNotFound:
raise nvp_exc.NvpPluginException(
err_msg=_("Logical router resource %s not found "
"on NVP platform") % router_id)
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=_("Unable to update logical router"
"on NVP Platform"))
LOG.debug(_("_nvp_delete_ext_gw_port completed on external network "
"%(ext_net_id)s, attached to router:%(router_id)s"),
{'ext_net_id': port_data['network_id'],
'router_id': router_id})
def _nvp_create_l2_gw_port(self, context, port_data):
""" Create a switch port, and attach it to a L2 gateway attachment """
# FIXME(salvatore-orlando): On the NVP platform we do not really have
# external networks. So if as user tries and create a "regular" VIF
# port on an external network we are unable to actually create.
# However, in order to not break unit tests, we need to still create
# the DB object and return success
if self._network_is_external(context, port_data['network_id']):
LOG.error(_("NVP plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
# No need to actually update the DB state - the default is down
return port_data
try:
cluster = self._find_target_cluster(port_data)
selected_lswitch = self._nvp_find_lswitch_for_port(context,
port_data)
lport = self._nvp_create_port_helper(cluster,
selected_lswitch['uuid'],
port_data,
True)
nicira_db.add_quantum_nvp_port_mapping(
context.session, port_data['id'], lport['uuid'])
nvplib.plug_l2_gw_service(
cluster,
port_data['network_id'],
lport['uuid'],
port_data['device_id'],
int(port_data.get('gw:segmentation_id') or 0))
LOG.debug(_("_nvp_create_port completed for port %(name)s "
"on network %(network_id)s. The new port id "
"is %(id)s."), port_data)
except NvpApiClient.NvpApiException:
# failed to create port in NVP delete port from quantum_db
msg = (_("An exception occured while plugging the gateway "
"interface into network:%s") % port_data['network_id'])
LOG.exception(msg)
super(NvpPluginV2, self).delete_port(context, port_data["id"])
raise q_exc.QuantumException(message=msg)
def _nvp_create_fip_port(self, context, port_data):
# As we do not create ports for floating IPs in NVP,
# this is a no-op driver
pass
def _nvp_delete_fip_port(self, context, port_data):
# As we do not create ports for floating IPs in NVP,
# this is a no-op driver
pass
def _nvp_get_port_id(self, context, cluster, quantum_port):
""" Return the NVP port uuid for a given quantum port.
First, look up the Quantum database. If not found, execute
a query on NVP platform as the mapping might be missing because
the port was created before upgrading to grizzly. """
nvp_port_id = nicira_db.get_nvp_port_id(context.session,
quantum_port['id'])
if nvp_port_id:
return nvp_port_id
# Perform a query to NVP and then update the DB
try:
nvp_port = nvplib.get_port_by_quantum_tag(
cluster,
quantum_port['network_id'],
quantum_port['id'])
if nvp_port:
nicira_db.add_quantum_nvp_port_mapping(
context.session,
quantum_port['id'],
nvp_port['uuid'])
return nvp_port['uuid']
except:
LOG.exception(_("Unable to find NVP uuid for Quantum port %s"),
quantum_port['id'])
def _extend_fault_map(self):
""" Extends the Quantum Fault Map
Exceptions specific to the NVP Plugin are mapped to standard
HTTP Exceptions
"""
base.FAULT_MAP.update({nvp_exc.NvpInvalidNovaZone:
webob.exc.HTTPBadRequest,
nvp_exc.NvpNoMorePortsException:
webob.exc.HTTPBadRequest})
def _novazone_to_cluster(self, novazone_id):
if novazone_id in self.novazone_cluster_map:
return self.novazone_cluster_map[novazone_id]
LOG.debug(_("Looking for nova zone: %s"), novazone_id)
for x in self.clusters:
LOG.debug(_("Looking for nova zone %(novazone_id)s in "
"cluster: %(x)s"), locals())
if x.zone == str(novazone_id):
self.novazone_cluster_map[x.zone] = x
return x
LOG.error(_("Unable to find cluster config entry for nova zone: %s"),
novazone_id)
raise nvp_exc.NvpInvalidNovaZone(nova_zone=novazone_id)
def _find_target_cluster(self, resource):
""" Return cluster where configuration should be applied
If the resource being configured has a paremeter expressing
the zone id (nova_id), then select corresponding cluster,
otherwise return default cluster.
"""
if 'nova_id' in resource:
return self._novazone_to_cluster(resource['nova_id'])
else:
return self.default_cluster
def _check_view_auth(self, context, resource, action):
return policy.check(context, action, resource)
def _enforce_set_auth(self, context, resource, action):
return policy.enforce(context, action, resource)
    def _handle_provider_create(self, context, attrs):
        """Validate provider-network attributes at network creation.

        No-op when none of network_type / physical_network /
        segmentation_id is set. Otherwise enforces the set-auth policy
        and validates the attribute combination.

        :raises q_exc.InvalidInput: for an invalid attribute combination
        :raises q_exc.VlanIdInUse: when the vlan segment is allocated
        """
        # NOTE(salvatore-orlando): This method has been borrowed from
        # the OpenvSwitch plugin, although changed to match NVP specifics.
        network_type = attrs.get(pnet.NETWORK_TYPE)
        physical_network = attrs.get(pnet.PHYSICAL_NETWORK)
        segmentation_id = attrs.get(pnet.SEGMENTATION_ID)
        network_type_set = attr.is_attr_set(network_type)
        physical_network_set = attr.is_attr_set(physical_network)
        segmentation_id_set = attr.is_attr_set(segmentation_id)
        if not (network_type_set or physical_network_set or
                segmentation_id_set):
            return
        # Authorize before exposing plugin details to client
        self._enforce_set_auth(context, attrs, self.provider_network_set)
        err_msg = None
        if not network_type_set:
            err_msg = _("%s required") % pnet.NETWORK_TYPE
        elif network_type in (NetworkTypes.GRE, NetworkTypes.STT,
                              NetworkTypes.FLAT):
            if segmentation_id_set:
                # NOTE(review): message mentions only 'flat' although this
                # branch covers gre and stt as well
                err_msg = _("Segmentation ID cannot be specified with "
                            "flat network type")
        elif network_type == NetworkTypes.VLAN:
            if not segmentation_id_set:
                err_msg = _("Segmentation ID must be specified with "
                            "vlan network type")
            elif (segmentation_id_set and
                  (segmentation_id < 1 or segmentation_id > 4094)):
                err_msg = _("%s out of range (1 to 4094)") % segmentation_id
            else:
                # Verify segment is not already allocated
                binding = nicira_db.get_network_binding_by_vlanid(
                    context.session, segmentation_id)
                if binding:
                    raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
                                            physical_network=physical_network)
        elif network_type == NetworkTypes.L3_EXT:
            if (segmentation_id_set and
                (segmentation_id < 1 or segmentation_id > 4094)):
                err_msg = _("%s out of range (1 to 4094)") % segmentation_id
        else:
            err_msg = _("%(net_type_param)s %(net_type_value)s not "
                        "supported") % {'net_type_param': pnet.NETWORK_TYPE,
                                        'net_type_value': network_type}
        if err_msg:
            raise q_exc.InvalidInput(error_message=err_msg)
        # TODO(salvatore-orlando): Validate tranport zone uuid
        # which should be specified in physical_network
def _extend_network_dict_provider(self, context, network, binding=None):
if self._check_view_auth(context, network, self.provider_network_view):
if not binding:
binding = nicira_db.get_network_binding(context.session,
network['id'])
# With NVP plugin 'normal' overlay networks will have no binding
# TODO(salvatore-orlando) make sure users can specify a distinct
# phy_uuid as 'provider network' for STT net type
if binding:
network[pnet.NETWORK_TYPE] = binding.binding_type
network[pnet.PHYSICAL_NETWORK] = binding.phy_uuid
network[pnet.SEGMENTATION_ID] = binding.vlan_id
    def _handle_lswitch_selection(self, cluster, network,
                                  network_binding, max_ports,
                                  allow_extra_lswitches):
        """Select an NVP lswitch for the network with a free port slot.

        Returns the first lswitch whose lport_count is below max_ports.
        When all switches are full and allow_extra_lswitches is True,
        the 'main' lswitch (uuid == network id) is tagged
        'multi_lswitch' (once) and a new extension lswitch is created
        with the same binding and returned.

        :raises nvp_exc.NvpNoMorePortsException: all switches full and
            extra lswitches not allowed
        """
        lswitches = nvplib.get_lswitches(cluster, network.id)
        try:
            # TODO find main_ls too!
            return [ls for ls in lswitches
                    if (ls['_relations']['LogicalSwitchStatus']
                        ['lport_count'] < max_ports)].pop(0)
        except IndexError:
            # Too bad, no switch available
            LOG.debug(_("No switch has available ports (%d checked)"),
                      len(lswitches))
        if allow_extra_lswitches:
            main_ls = [ls for ls in lswitches if ls['uuid'] == network.id]
            tag_dict = dict((x['scope'], x['tag']) for x in main_ls[0]['tags'])
            # Tag the main switch 'multi_lswitch' the first time an
            # extension switch becomes necessary
            if 'multi_lswitch' not in tag_dict:
                tags = main_ls[0]['tags']
                tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
                nvplib.update_lswitch(cluster,
                                      main_ls[0]['uuid'],
                                      main_ls[0]['display_name'],
                                      network['tenant_id'],
                                      tags=tags)
            selected_lswitch = nvplib.create_lswitch(
                cluster, network.tenant_id,
                "%s-ext-%s" % (network.name, len(lswitches)),
                network_binding.binding_type,
                network_binding.phy_uuid,
                network_binding.vlan_id,
                network.id)
            return selected_lswitch
        else:
            LOG.error(_("Maximum number of logical ports reached for "
                        "logical network %s"), network.id)
            raise nvp_exc.NvpNoMorePortsException(network=network.id)
    def setup_rpc(self):
        """Set up RPC support for the DHCP agent.

        Creates the plugin RPC connection, registers the NVP callback
        dispatcher on the plugin topic, instantiates the DHCP agent
        notifier, and starts consuming in a separate thread.
        """
        # RPC support for dhcp
        self.topic = topics.PLUGIN
        self.conn = rpc.create_connection(new=True)
        self.dispatcher = NVPRpcCallbacks().create_rpc_dispatcher()
        self.conn.create_consumer(self.topic, self.dispatcher,
                                  fanout=False)
        self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()
    def get_all_networks(self, tenant_id, **kwargs):
        """Return the tenant's networks aggregated across all clusters."""
        networks = []
        for c in self.clusters:
            # NOTE(review): the accumulated 'networks' list is also passed
            # to nvplib.get_all_networks - presumably for de-duplication;
            # verify against nvplib's signature
            networks.extend(nvplib.get_all_networks(c, tenant_id, networks))
        LOG.debug(_("get_all_networks() completed for tenant "
                    "%(tenant_id)s: %(networks)s"), locals())
        return networks
    def create_network(self, context, network):
        """Create a quantum network backed by an NVP logical switch.

        External networks are DB-only. For all other networks an NVP
        lswitch is created first and its uuid is reused as the quantum
        network id. Provider, port-security, L3 and QoS-queue extension
        attributes are processed within a single DB transaction.
        """
        net_data = network['network']
        tenant_id = self._get_tenant_id_for_create(context, net_data)
        self._ensure_default_security_group(context, tenant_id)
        # Process the provider network extension
        self._handle_provider_create(context, net_data)
        # Replace ATTR_NOT_SPECIFIED with None before sending to NVP
        for key, value in network['network'].iteritems():
            if value is attr.ATTR_NOT_SPECIFIED:
                net_data[key] = None
        # FIXME(arosen) implement admin_state_up = False in NVP
        if net_data['admin_state_up'] is False:
            LOG.warning(_("Network with admin_state_up=False are not yet "
                          "supported by this plugin. Ignoring setting for "
                          "network %s"), net_data.get('name', '<unknown>'))
        target_cluster = self._find_target_cluster(net_data)
        external = net_data.get(l3.EXTERNAL)
        # Only non-external networks get a backing NVP lswitch
        if (not attr.is_attr_set(external) or
            attr.is_attr_set(external) and not external):
            nvp_binding_type = net_data.get(pnet.NETWORK_TYPE)
            # 'flat' and 'vlan' provider networks map to NVP 'bridge'
            if nvp_binding_type in ('flat', 'vlan'):
                nvp_binding_type = 'bridge'
            lswitch = nvplib.create_lswitch(
                target_cluster, tenant_id, net_data.get('name'),
                nvp_binding_type,
                net_data.get(pnet.PHYSICAL_NETWORK),
                net_data.get(pnet.SEGMENTATION_ID),
                shared=net_data.get(attr.SHARED))
            # Reuse the NVP switch uuid as the quantum network id
            net_data['id'] = lswitch['uuid']
        with context.session.begin(subtransactions=True):
            new_net = super(NvpPluginV2, self).create_network(context,
                                                              network)
            # Ensure there's an id in net_data
            net_data['id'] = new_net['id']
            # Process port security extension
            self._process_network_create_port_security(context, net_data)
            # DB Operations for setting the network as external
            self._process_l3_create(context, net_data, new_net['id'])
            # Process QoS queue extension
            if network['network'].get(ext_qos.QUEUE):
                new_net[ext_qos.QUEUE] = network['network'][ext_qos.QUEUE]
                # Raises if not found
                self.get_qos_queue(context, new_net[ext_qos.QUEUE])
                self._process_network_queue_mapping(context, new_net)
                self._extend_network_qos_queue(context, new_net)
            if net_data.get(pnet.NETWORK_TYPE):
                net_binding = nicira_db.add_network_binding(
                    context.session, new_net['id'],
                    net_data.get(pnet.NETWORK_TYPE),
                    net_data.get(pnet.PHYSICAL_NETWORK),
                    net_data.get(pnet.SEGMENTATION_ID, 0))
                self._extend_network_dict_provider(context, new_net,
                                                   net_binding)
            self._extend_network_port_security_dict(context, new_net)
            self._extend_network_dict_l3(context, new_net)
        self.schedule_network(context, new_net)
        return new_net
def delete_network(self, context, id):
external = self._network_is_external(context, id)
# Before deleting ports, ensure the peer of a NVP logical
# port with a patch attachment is removed too
port_filter = {'network_id': [id],
'device_owner': ['network:router_interface']}
router_iface_ports = self.get_ports(context, filters=port_filter)
for port in router_iface_ports:
nvp_port_id = self._nvp_get_port_id(
context, self.default_cluster, port)
if nvp_port_id:
port['nvp_port_id'] = nvp_port_id
else:
LOG.warning(_("A nvp lport identifier was not found for "
"quantum port '%s'"), port['id'])
super(NvpPluginV2, self).delete_network(context, id)
# clean up network owned ports
for port in router_iface_ports:
try:
if 'nvp_port_id' in port:
nvplib.delete_peer_router_lport(self.default_cluster,
port['device_id'],
port['network_id'],
port['nvp_port_id'])
except (TypeError, KeyError,
NvpApiClient.NvpApiException,
NvpApiClient.ResourceNotFound):
# Do not raise because the issue might as well be that the
# router has already been deleted, so there would be nothing
# to do here
LOG.warning(_("Ignoring exception as this means the peer for "
"port '%s' has already been deleted."),
nvp_port_id)
# Do not go to NVP for external networks
if not external:
try:
# FIXME(salvatore-orlando): Failures here might lead NVP
# and quantum state to diverge
pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
for (cluster, switches) in pairs:
nvplib.delete_networks(cluster, id, switches)
LOG.debug(_("delete_network completed for tenant: %s"),
context.tenant_id)
except q_exc.NotFound:
LOG.warning(_("Did not found lswitch %s in NVP"), id)
def _get_lswitch_cluster_pairs(self, netw_id, tenant_id):
"""Figure out the set of lswitches on each cluster that maps to this
network id"""
pairs = []
for c in self.clusters.itervalues():
lswitches = []
try:
results = nvplib.get_lswitches(c, netw_id)
lswitches.extend([ls['uuid'] for ls in results])
except q_exc.NetworkNotFound:
continue
pairs.append((c, lswitches))
if len(pairs) == 0:
raise q_exc.NetworkNotFound(net_id=netw_id)
LOG.debug(_("Returning pairs for network: %s"), pairs)
return pairs
    def get_network(self, context, id, fields=None):
        """Return a network, refreshing its status from NVP.

        For non-external networks the fabric status of the backing
        logical switch(es) is read from NVP: any switch with fabric
        down sets the network status to DOWN, and a missing switch sets
        it to ERROR. Extension attributes are added before field
        selection.
        """
        with context.session.begin(subtransactions=True):
            # goto to the plugin DB and fetch the network
            network = self._get_network(context, id)
            # if the network is external, do not go to NVP
            if not self._network_is_external(context, id):
                # verify the fabric status of the corresponding
                # logical switch(es) in nvp
                try:
                    # FIXME(salvatore-orlando): This is not going to work
                    # unless we store the nova_id in the database once we'll
                    # enable multiple clusters
                    cluster = self._find_target_cluster(network)
                    lswitches = nvplib.get_lswitches(cluster, id)
                    nvp_net_status = constants.NET_STATUS_ACTIVE
                    quantum_status = network.status
                    for lswitch in lswitches:
                        relations = lswitch.get('_relations')
                        if relations:
                            lswitch_status = relations.get(
                                'LogicalSwitchStatus')
                            # FIXME(salvatore-orlando): Being unable to fetch
                            # logical switch status should be an exception.
                            if (lswitch_status and
                                not lswitch_status.get('fabric_status',
                                                       None)):
                                nvp_net_status = constants.NET_STATUS_DOWN
                                break
                    LOG.debug(_("Current network status:%(nvp_net_status)s; "
                                "Status in Quantum DB:%(quantum_status)s"),
                              locals())
                    if nvp_net_status != network.status:
                        # update the network status
                        network.status = nvp_net_status
                except q_exc.NotFound:
                    network.status = constants.NET_STATUS_ERROR
                except Exception:
                    err_msg = _("Unable to get logical switches")
                    LOG.exception(err_msg)
                    raise nvp_exc.NvpPluginException(err_msg=err_msg)
            # Don't do field selection here otherwise we won't be able
            # to add provider networks fields
            net_result = self._make_network_dict(network, None)
            self._extend_network_dict_provider(context, net_result)
            self._extend_network_port_security_dict(context, net_result)
            self._extend_network_dict_l3(context, net_result)
            self._extend_network_qos_queue(context, net_result)
        return self._fields(net_result, fields)
    def get_networks(self, context, filters=None, fields=None):
        """Return networks, cross-checking their status against NVP.

        Two NVP queries are issued per cluster: one scoped by tenant tag
        and one for shared switches (NVP tags cannot be or-ed in a
        single query). Networks in quantum but missing on NVP are marked
        ERROR; extra NVP switches only produce a warning.
        """
        nvp_lswitches = {}
        filters = filters or {}
        with context.session.begin(subtransactions=True):
            quantum_lswitches = (
                super(NvpPluginV2, self).get_networks(context, filters))
            for net in quantum_lswitches:
                self._extend_network_dict_provider(context, net)
                self._extend_network_port_security_dict(context, net)
                self._extend_network_dict_l3(context, net)
                self._extend_network_qos_queue(context, net)

        # Non-admins (or explicit tenant filters) restrict the NVP query
        # by the os_tid tag
        tenant_ids = filters and filters.get('tenant_id') or None
        filter_fmt = "&tag=%s&tag_scope=os_tid"
        if context.is_admin and not tenant_ids:
            tenant_filter = ""
        else:
            tenant_ids = tenant_ids or [context.tenant_id]
            tenant_filter = ''.join(filter_fmt % tid for tid in tenant_ids)

        lswitch_filters = "uuid,display_name,fabric_status,tags"
        lswitch_url_path_1 = (
            "/ws.v1/lswitch?fields=%s&relations=LogicalSwitchStatus%s"
            % (lswitch_filters, tenant_filter))
        lswitch_url_path_2 = nvplib._build_uri_path(
            nvplib.LSWITCH_RESOURCE,
            fields=lswitch_filters,
            relations='LogicalSwitchStatus',
            filters={'tag': 'true', 'tag_scope': 'shared'})
        try:
            for c in self.clusters.itervalues():
                res = nvplib.get_all_query_pages(
                    lswitch_url_path_1, c)
                nvp_lswitches.update(dict(
                    (ls['uuid'], ls) for ls in res))
                # Issue a second query for fetching shared networks.
                # We cannot unfortunately use just a single query because tags
                # cannot be or-ed
                res_shared = nvplib.get_all_query_pages(
                    lswitch_url_path_2, c)
                nvp_lswitches.update(dict(
                    (ls['uuid'], ls) for ls in res_shared))
        except Exception:
            err_msg = _("Unable to get logical switches")
            LOG.exception(err_msg)
            raise nvp_exc.NvpPluginException(err_msg=err_msg)

        if filters.get('id'):
            nvp_lswitches = dict(
                (uuid, ls) for (uuid, ls) in nvp_lswitches.iteritems()
                if uuid in set(filters['id']))

        for quantum_lswitch in quantum_lswitches:
            # Skip external networks as they do not exist in NVP
            if quantum_lswitch[l3.EXTERNAL]:
                continue
            elif quantum_lswitch['id'] not in nvp_lswitches:
                LOG.warning(_("Logical Switch %s found in quantum database "
                              "but not in NVP."), quantum_lswitch["id"])
                quantum_lswitch["status"] = constants.NET_STATUS_ERROR
            else:
                # TODO(salvatore-orlando): be careful about "extended"
                # logical switches
                ls = nvp_lswitches.pop(quantum_lswitch['id'])
                if (ls["_relations"]["LogicalSwitchStatus"]["fabric_status"]):
                    quantum_lswitch["status"] = constants.NET_STATUS_ACTIVE
                else:
                    quantum_lswitch["status"] = constants.NET_STATUS_DOWN
        # do not make the case in which switches are found in NVP
        # but not in Quantum catastrophic.
        if len(nvp_lswitches):
            LOG.warning(_("Found %s logical switches not bound "
                          "to Quantum networks. Quantum and NVP are "
                          "potentially out of sync"), len(nvp_lswitches))

        LOG.debug(_("get_networks() completed for tenant %s"),
                  context.tenant_id)

        if fields:
            ret_fields = []
            for quantum_lswitch in quantum_lswitches:
                row = {}
                for field in fields:
                    row[field] = quantum_lswitch[field]
                ret_fields.append(row)
            return ret_fields
        return quantum_lswitches
def update_network(self, context, id, network):
if network["network"].get("admin_state_up"):
if network['network']["admin_state_up"] is False:
raise q_exc.NotImplementedError(_("admin_state_up=False "
"networks are not "
"supported."))
with context.session.begin(subtransactions=True):
net = super(NvpPluginV2, self).update_network(context, id, network)
if psec.PORTSECURITY in network['network']:
self._update_network_security_binding(
context, id, network['network'][psec.PORTSECURITY])
if network['network'].get(ext_qos.QUEUE):
net[ext_qos.QUEUE] = network['network'][ext_qos.QUEUE]
self._delete_network_queue_mapping(context, id)
self._process_network_queue_mapping(context, net)
self._extend_network_port_security_dict(context, net)
self._process_l3_update(context, network['network'], id)
self._extend_network_dict_provider(context, net)
self._extend_network_dict_l3(context, net)
self._extend_network_qos_queue(context, net)
return net
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
quantum_lports = super(NvpPluginV2, self).get_ports(
context, filters)
for quantum_lport in quantum_lports:
self._extend_port_port_security_dict(context, quantum_lport)
if (filters.get('network_id') and len(filters.get('network_id')) and
self._network_is_external(context, filters['network_id'][0])):
# Do not perform check on NVP platform
return quantum_lports
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Quantum checks to see if
# the network has any ports.
if filters.get("network_id"):
# FIXME (Aaron) If we get more than one network_id this won't work
lswitch = filters["network_id"][0]
else:
lswitch = "*"
if filters.get("device_id"):
for vm_id in filters.get("device_id"):
vm_filter = ("%stag_scope=vm_id&tag=%s&" % (vm_filter,
hashlib.sha1(vm_id).hexdigest()))
else:
vm_id = ""
if filters.get("tenant_id"):
for tenant in filters.get("tenant_id"):
tenant_filter = ("%stag_scope=os_tid&tag=%s&" %
(tenant_filter, tenant))
nvp_lports = {}
lport_fields_str = ("tags,admin_status_enabled,display_name,"
"fabric_status_up")
try:
for c in self.clusters.itervalues():
lport_query_path = (
"/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
"&relations=LogicalPortStatus" %
(lswitch, lport_fields_str, vm_filter, tenant_filter))
try:
ports = nvplib.get_all_query_pages(lport_query_path, c)
except q_exc.NotFound:
LOG.warn(_("Lswitch %s not found in NVP"), lswitch)
ports = None
if ports:
for port in ports:
for tag in port["tags"]:
if tag["scope"] == "q_port_id":
nvp_lports[tag["tag"]] = port
except Exception:
err_msg = _("Unable to get ports")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
lports = []
for quantum_lport in quantum_lports:
# if a quantum port is not found in NVP, this migth be because
# such port is not mapped to a logical switch - ie: floating ip
if quantum_lport['device_owner'] in (l3_db.DEVICE_OWNER_FLOATINGIP,
l3_db.DEVICE_OWNER_ROUTER_GW):
lports.append(quantum_lport)
continue
try:
quantum_lport["admin_state_up"] = (
nvp_lports[quantum_lport["id"]]["admin_status_enabled"])
if (nvp_lports[quantum_lport["id"]]
["_relations"]
["LogicalPortStatus"]
["fabric_status_up"]):
quantum_lport["status"] = constants.PORT_STATUS_ACTIVE
else:
quantum_lport["status"] = constants.PORT_STATUS_DOWN
del nvp_lports[quantum_lport["id"]]
except KeyError:
quantum_lport["status"] = constants.PORT_STATUS_ERROR
LOG.debug(_("Quantum logical port %s was not found on NVP"),
quantum_lport['id'])
lports.append(quantum_lport)
# do not make the case in which ports are found in NVP
# but not in Quantum catastrophic.
if len(nvp_lports):
LOG.warning(_("Found %s logical ports not bound "
"to Quantum ports. Quantum and NVP are "
"potentially out of sync"), len(nvp_lports))
if fields:
ret_fields = []
for lport in lports:
row = {}
for field in fields:
row[field] = lport[field]
ret_fields.append(row)
return ret_fields
return lports
    def create_port(self, context, port):
        """Create a port in the quantum DB and on the NVP backend.

        Port security, security group, and QoS queue extensions are
        processed inside the DB transaction; the NVP-side operation is
        delegated to the per-device-owner driver looked up in
        self._port_drivers['create'].
        """
        # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
        # then we pass the port to the policy engine. The reason why we don't
        # pass the value to the policy engine when the port is
        # ATTR_NOT_SPECIFIED is for the case where a port is created on a
        # shared network that is not owned by the tenant.
        # TODO(arosen) fix policy engine to do this for us automatically.
        if attr.is_attr_set(port['port'].get(psec.PORTSECURITY)):
            self._enforce_set_auth(context, port,
                                   self.port_security_enabled_create)
        port_data = port['port']
        with context.session.begin(subtransactions=True):
            # First we allocate port in quantum database
            quantum_db = super(NvpPluginV2, self).create_port(context, port)
            # Update fields obtained from quantum db (eg: MAC address)
            port["port"].update(quantum_db)
            # port security extension checks
            (port_security, has_ip) = self._determine_port_security_and_has_ip(
                context, port_data)
            port_data[psec.PORTSECURITY] = port_security
            self._process_port_security_create(context, port_data)
            # security group extension checks
            if port_security and has_ip:
                self._ensure_default_security_group_on_port(context, port)
            elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)):
                raise psec.PortSecurityAndIPRequiredForSecurityGroups()
            port_data[ext_sg.SECURITYGROUPS] = (
                self._get_security_groups_on_port(context, port))
            self._process_port_create_security_group(
                context, port_data, port_data[ext_sg.SECURITYGROUPS])
            # QoS extension checks
            port_data[ext_qos.QUEUE] = self._check_for_queue_and_create(
                context, port_data)
            self._process_port_queue_mapping(context, port_data)
            # provider networking extension checks
            # Fetch the network and network binding from Quantum db
            try:
                port_data = port['port'].copy()
                # Dispatch to the NVP driver matching the device_owner
                port_create_func = self._port_drivers['create'].get(
                    port_data['device_owner'],
                    self._port_drivers['create']['default'])
                port_create_func(context, port_data)
            except Exception as e:
                # FIXME (arosen) or the plugin_interface call failed in which
                # case we need to garbage collect the left over port in nvp.
                err_msg = _("Unable to create port or set port attachment "
                            "in NVP.")
                LOG.exception(err_msg)
                raise e
            LOG.debug(_("create_port completed on NVP for tenant "
                        "%(tenant_id)s: (%(id)s)"), port_data)
            # remove since it will be added in extend based on policy
            del port_data[ext_qos.QUEUE]
            self._extend_port_port_security_dict(context, port_data)
            self._extend_port_qos_queue(context, port_data)
        net = self.get_network(context, port_data['network_id'])
        self.schedule_network(context, net)
        return port_data
    def update_port(self, context, id, port):
        """Update a port in the quantum DB and on the NVP logical port.

        Re-validates the port-security / security-group combination,
        refreshes the QoS queue mapping, then updates the NVP logical
        port and reads its status back. If the NVP port id cannot be
        resolved, or the NVP update fails, the port status is set to
        ERROR.
        """
        if attr.is_attr_set(port['port'].get(psec.PORTSECURITY)):
            self._enforce_set_auth(context, port,
                                   self.port_security_enabled_update)
        delete_security_groups = self._check_update_deletes_security_groups(
            port)
        has_security_groups = self._check_update_has_security_groups(port)
        with context.session.begin(subtransactions=True):
            ret_port = super(NvpPluginV2, self).update_port(
                context, id, port)
            # copy values over - except fixed_ips as
            # they've alreaby been processed
            port['port'].pop('fixed_ips', None)
            ret_port.update(port['port'])
            tenant_id = self._get_tenant_id_for_create(context, ret_port)
            # populate port_security setting
            if psec.PORTSECURITY not in port['port']:
                ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
                    context, id)
            has_ip = self._ip_on_port(ret_port)
            # checks if security groups were updated adding/modifying
            # security groups, port security is set and port has ip
            if not (has_ip and ret_port[psec.PORTSECURITY]):
                if has_security_groups:
                    raise psec.PortSecurityAndIPRequiredForSecurityGroups()
                # Update did not have security groups passed in. Check
                # that port does not have any security groups already on it.
                filters = {'port_id': [id]}
                security_groups = (
                    super(NvpPluginV2, self)._get_port_security_group_bindings(
                        context, filters)
                )
                if security_groups and not delete_security_groups:
                    raise psec.PortSecurityPortHasSecurityGroup()
            if (delete_security_groups or has_security_groups):
                # delete the port binding and read it with the new rules.
                self._delete_port_security_group_bindings(context, id)
                sgids = self._get_security_groups_on_port(context, port)
                self._process_port_create_security_group(context, ret_port,
                                                         sgids)
            if psec.PORTSECURITY in port['port']:
                self._update_port_security_binding(
                    context, id, ret_port[psec.PORTSECURITY])
            ret_port[ext_qos.QUEUE] = self._check_for_queue_and_create(
                context, ret_port)
            self._delete_port_queue_mapping(context, ret_port['id'])
            self._process_port_queue_mapping(context, ret_port)
            self._extend_port_port_security_dict(context, ret_port)
            LOG.warn(_("Update port request: %s"), port)
            nvp_port_id = self._nvp_get_port_id(
                context, self.default_cluster, ret_port)
            if nvp_port_id:
                try:
                    nvplib.update_port(self.default_cluster,
                                       ret_port['network_id'],
                                       nvp_port_id, id, tenant_id,
                                       ret_port['name'], ret_port['device_id'],
                                       ret_port['admin_state_up'],
                                       ret_port['mac_address'],
                                       ret_port['fixed_ips'],
                                       ret_port[psec.PORTSECURITY],
                                       ret_port[ext_sg.SECURITYGROUPS],
                                       ret_port[ext_qos.QUEUE])
                    # Update the port status from nvp. If we fail here hide it
                    # since the port was successfully updated but we were not
                    # able to retrieve the status.
                    ret_port['status'] = nvplib.get_port_status(
                        self.default_cluster, ret_port['network_id'],
                        nvp_port_id)
                # FIXME(arosen) improve exception handling.
                except Exception:
                    ret_port['status'] = constants.PORT_STATUS_ERROR
                    LOG.exception(_("Unable to update port id: %s."),
                                  nvp_port_id)
            # If nvp_port_id is not in database or in nvp put in error state.
            else:
                ret_port['status'] = constants.PORT_STATUS_ERROR
            # remove since it will be added in extend based on policy
            del ret_port[ext_qos.QUEUE]
            self._extend_port_qos_queue(context, ret_port)
        return ret_port
    def delete_port(self, context, id, l3_port_check=True,
                    nw_gw_port_check=True):
        """
        Deletes a port on a specified Virtual Network,
        if the port contains a remote interface attachment,
        the remote interface is first un-plugged and then the port
        is deleted.

        :param l3_port_check: when True, refuse deletion of ports owned
            by an L3 router
        :param nw_gw_port_check: when True, refuse deletion of ports
            owned by a layer-2 network gateway
        :returns: None
        :raises: exception.PortInUse
        :raises: exception.PortNotFound
        :raises: exception.NetworkNotFound
        """
        # if needed, check to see if this is a port owned by
        # a l3 router. If so, we should prevent deletion here
        if l3_port_check:
            self.prevent_l3_port_deletion(context, id)
        quantum_db_port = self._get_port(context, id)
        # Perform the same check for ports owned by layer-2 gateways
        if nw_gw_port_check:
            self.prevent_network_gateway_port_deletion(context,
                                                       quantum_db_port)
        # Dispatch NVP-side deletion to the driver matching device_owner
        port_delete_func = self._port_drivers['delete'].get(
            quantum_db_port.device_owner,
            self._port_drivers['delete']['default'])

        port_delete_func(context, quantum_db_port)
        self.disassociate_floatingips(context, id)
        with context.session.begin(subtransactions=True):
            queue = self._get_port_queue_bindings(context, {'port_id': [id]})
            # Remove the metadata host route added for DHCP ports when
            # the metadata_dhcp_host_route option is enabled
            if (cfg.CONF.metadata_dhcp_host_route and
                quantum_db_port.device_owner == constants.DEVICE_OWNER_DHCP):
                    self._ensure_metadata_host_route(
                        context, quantum_db_port.fixed_ips[0], is_delete=True)
            super(NvpPluginV2, self).delete_port(context, id)
            # Delete qos queue if possible
            if queue:
                self.delete_qos_queue(context, queue[0]['queue_id'], False)
    def get_port(self, context, id, fields=None):
        """Return a port, refreshing its status from the NVP backend.

        The operational status stored in the Quantum DB is overridden
        with the live status of the corresponding NVP logical port.
        Ports on external networks are returned as-is; ports without an
        NVP mapping are reported in ERROR state.
        """
        with context.session.begin(subtransactions=True):
            quantum_db_port = super(NvpPluginV2, self).get_port(context,
                                                                id, fields)
            self._extend_port_port_security_dict(context, quantum_db_port)
            self._extend_port_qos_queue(context, quantum_db_port)

            # External-network ports have no NVP logical port to query.
            if self._network_is_external(context,
                                         quantum_db_port['network_id']):
                return quantum_db_port

            nvp_id = self._nvp_get_port_id(context, self.default_cluster,
                                           quantum_db_port)
            # If there's no nvp IP do not bother going to NVP and put
            # the port in error state
            if nvp_id:
                # TODO: pass the appropriate cluster here
                try:
                    port = nvplib.get_logical_port_status(
                        self.default_cluster, quantum_db_port['network_id'],
                        nvp_id)
                    quantum_db_port["admin_state_up"] = (
                        port["admin_status_enabled"])
                    if port["fabric_status_up"]:
                        quantum_db_port["status"] = (
                            constants.PORT_STATUS_ACTIVE)
                    else:
                        quantum_db_port["status"] = constants.PORT_STATUS_DOWN
                except q_exc.NotFound:
                    # Port vanished on the backend: surface an ERROR state.
                    quantum_db_port["status"] = constants.PORT_STATUS_ERROR
            else:
                quantum_db_port["status"] = constants.PORT_STATUS_ERROR

            return quantum_db_port
    def create_router(self, context, router):
        """Create a router on the NVP backend and in the Quantum DB.

        The NVP logical router is created first and its UUID is reused
        as the Quantum router id, keeping the two resources in sync.
        Any external gateway info in the request is applied after the DB
        record exists, via _update_router_gw_info.
        """
        # NOTE(salvatore-orlando): We completely override this method in
        # order to be able to use the NVP ID as Quantum ID
        # TODO(salvatore-orlando): Propose upstream patch for allowing
        # 3rd parties to specify IDs as we do with l2 plugin
        r = router['router']
        has_gw_info = False
        tenant_id = self._get_tenant_id_for_create(context, r)
        # default value to set - nvp wants it (even if we don't have it)
        nexthop = '1.1.1.1'
        try:
            # if external gateway info are set, then configure nexthop to
            # default external gateway
            if 'external_gateway_info' in r and r.get('external_gateway_info'):
                has_gw_info = True
                gw_info = r['external_gateway_info']
                # Pop gateway info from the payload; it is re-applied later
                # through _update_router_gw_info once the DB row exists.
                del r['external_gateway_info']
                # The following DB read will be performed again when updating
                # gateway info. This is not great, but still better than
                # creating NVP router here and updating it later
                network_id = (gw_info.get('network_id', None) if gw_info
                              else None)
                if network_id:
                    ext_net = self._get_network(context, network_id)
                    if not self._network_is_external(context, network_id):
                        msg = (_("Network '%s' is not a valid external "
                                 "network") % network_id)
                        raise q_exc.BadRequest(resource='router', msg=msg)
                    if len(ext_net.subnets):
                        ext_subnet = ext_net.subnets[0]
                        nexthop = ext_subnet.gateway_ip
            cluster = self._find_target_cluster(router)
            lrouter = nvplib.create_lrouter(cluster, tenant_id,
                                            router['router']['name'],
                                            nexthop)
            # Use NVP identifier for Quantum resource
            router['router']['id'] = lrouter['uuid']
        except NvpApiClient.NvpApiException:
            raise nvp_exc.NvpPluginException(
                err_msg=_("Unable to create logical router on NVP Platform"))
        # Create the port here - and update it later if we have gw_info
        self._create_and_attach_router_port(cluster,
                                            context,
                                            lrouter['uuid'],
                                            {'fake_ext_gw': True},
                                            "L3GatewayAttachment",
                                            cluster.default_l3_gw_service_uuid)

        with context.session.begin(subtransactions=True):
            router_db = l3_db.Router(id=lrouter['uuid'],
                                     tenant_id=tenant_id,
                                     name=r['name'],
                                     admin_state_up=r['admin_state_up'],
                                     status="ACTIVE")
            context.session.add(router_db)
            if has_gw_info:
                self._update_router_gw_info(context, router_db['id'], gw_info)
        return self._make_router_dict(router_db)
    def update_router(self, context, id, router):
        """Update a router on the NVP backend and in the Quantum DB.

        If the request carries new external gateway info, the NVP
        router's default nexthop is recomputed from the gateway of the
        external network's first subnet; otherwise nexthop stays None
        (presumably nvplib treats None as "leave unchanged" -- verify).
        """
        try:
            # Either nexthop is updated or should be kept as it was before
            r = router['router']
            nexthop = None
            if 'external_gateway_info' in r and r.get('external_gateway_info'):
                gw_info = r['external_gateway_info']
                # The following DB read will be performed again when updating
                # gateway info. This is not great, but still better than
                # creating NVP router here and updating it later
                network_id = (gw_info.get('network_id', None) if gw_info
                              else None)
                if network_id:
                    ext_net = self._get_network(context, network_id)
                    if not self._network_is_external(context, network_id):
                        msg = (_("Network '%s' is not a valid external "
                                 "network") % network_id)
                        raise q_exc.BadRequest(resource='router', msg=msg)
                    if len(ext_net.subnets):
                        ext_subnet = ext_net.subnets[0]
                        nexthop = ext_subnet.gateway_ip
            cluster = self._find_target_cluster(router)
            nvplib.update_lrouter(cluster, id,
                                  router['router'].get('name'), nexthop)
        except NvpApiClient.ResourceNotFound:
            raise nvp_exc.NvpPluginException(
                err_msg=_("Logical router %s not found on NVP Platform") % id)
        except NvpApiClient.NvpApiException:
            raise nvp_exc.NvpPluginException(
                err_msg=_("Unable to update logical router on NVP Platform"))
        return super(NvpPluginV2, self).update_router(context, id, router)
def delete_router(self, context, id):
with context.session.begin(subtransactions=True):
# Ensure metadata access network is detached and destroyed
# This will also destroy relevant objects on NVP platform.
# NOTE(salvatore-orlando): A failure in this operation will
# cause the router delete operation to fail too.
self._handle_metadata_access_network(context, id, do_create=False)
super(NvpPluginV2, self).delete_router(context, id)
# If removal is successful in Quantum it should be so on
# the NVP platform too - otherwise the transaction should
# be automatically aborted
# TODO(salvatore-orlando): Extend the object models in order to
# allow an extra field for storing the cluster information
# together with the resource
try:
nvplib.delete_lrouter(self.default_cluster, id)
except q_exc.NotFound:
LOG.warning(_("Logical router '%s' not found "
"on NVP Platform") % id)
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=(_("Unable to delete logical router"
"on NVP Platform")))
    def get_router(self, context, id, fields=None):
        """Return a router, refreshing its status from the NVP backend.

        The operational status is derived from the LogicalRouterStatus
        relation of the NVP logical router; if it differs from the value
        stored in the Quantum DB, the DB record is updated.
        """
        router = self._get_router(context, id)
        try:
            # FIXME(salvatore-orlando): We need to
            # find the appropriate cluster!
            cluster = self.default_cluster
            try:
                lrouter = nvplib.get_lrouter(cluster, id)
            except q_exc.NotFound:
                # Router missing on the backend: report ERROR status.
                lrouter = {}
                router_op_status = constants.NET_STATUS_ERROR
            relations = lrouter.get('_relations')
            if relations:
                lrouter_status = relations.get('LogicalRouterStatus')
                # FIXME(salvatore-orlando): Being unable to fetch the
                # logical router status should be an exception.
                if lrouter_status:
                    router_op_status = (lrouter_status.get('fabric_status')
                                        and constants.NET_STATUS_ACTIVE or
                                        constants.NET_STATUS_DOWN)
            # NOTE(review): if the backend returns a router without
            # '_relations', router_op_status may be unbound at this point
            # -- presumably get_lrouter always includes it; verify.
            if router_op_status != router.status:
                LOG.debug(_("Current router status:%(router_status)s;"
                            "Status in Quantum DB:%(db_router_status)s"),
                          {'router_status': router_op_status,
                           'db_router_status': router.status})
                # update the router status
                with context.session.begin(subtransactions=True):
                    router.status = router_op_status
        except NvpApiClient.NvpApiException:
            err_msg = _("Unable to get logical router")
            LOG.exception(err_msg)
            raise nvp_exc.NvpPluginException(err_msg=err_msg)
        return self._make_router_dict(router, fields)
def get_routers(self, context, filters=None, fields=None):
router_query = self._apply_filters_to_query(
self._model_query(context, l3_db.Router),
l3_db.Router, filters)
routers = router_query.all()
# Query routers on NVP for updating operational status
if context.is_admin and not filters.get("tenant_id"):
tenant_id = None
elif 'tenant_id' in filters:
tenant_id = filters.get('tenant_id')[0]
del filters['tenant_id']
else:
tenant_id = context.tenant_id
try:
nvp_lrouters = nvplib.get_lrouters(self.default_cluster,
tenant_id,
fields)
except NvpApiClient.NvpApiException:
err_msg = _("Unable to get logical routers from NVP controller")
LOG.exception(err_msg)
raise nvp_exc.NvpPluginException(err_msg=err_msg)
nvp_lrouters_dict = {}
for nvp_lrouter in nvp_lrouters:
nvp_lrouters_dict[nvp_lrouter['uuid']] = nvp_lrouter
for router in routers:
nvp_lrouter = nvp_lrouters_dict.get(router['id'])
if nvp_lrouter:
if (nvp_lrouter["_relations"]["LogicalRouterStatus"]
["fabric_status"]):
router.status = constants.NET_STATUS_ACTIVE
else:
router.status = constants.NET_STATUS_DOWN
nvp_lrouters.remove(nvp_lrouter)
else:
router.status = constants.NET_STATUS_ERROR
# do not make the case in which routers are found in NVP
# but not in Quantum catastrophic.
if len(nvp_lrouters):
LOG.warning(_("Found %s logical routers not bound "
"to Quantum routers. Quantum and NVP are "
"potentially out of sync"), len(nvp_lrouters))
return [self._make_router_dict(router, fields)
for router in routers]
    def add_router_interface(self, context, router_id, interface_info):
        """Attach a subnet or port to a router, mirroring it on NVP.

        After the base class creates the Quantum-side interface port, the
        matching NVP logical switch port is located and patched into the
        NVP logical router; SNAT/no-SNAT rules are configured when the
        router has an external gateway.
        """
        router_iface_info = super(NvpPluginV2, self).add_router_interface(
            context, router_id, interface_info)
        # If the above operation succeeded interface_info contains a
        # reference to a logical switch port
        port_id = router_iface_info['port_id']
        subnet_id = router_iface_info['subnet_id']
        # Add port to the logical router as well
        # TODO(salvatore-orlando): Identify the appropriate cluster, instead
        # of always defaulting to self.default_cluster
        cluster = self.default_cluster
        # The owner of the router port is always the same as the owner of the
        # router. Use tenant_id from the port instead of fetching more records
        # from the Quantum database
        port = self._get_port(context, port_id)
        # Find the NVP port corresponding to quantum port_id
        results = nvplib.query_lswitch_lports(
            cluster, '*',
            filters={'tag': port_id, 'tag_scope': 'q_port_id'})
        if len(results):
            ls_port = results[0]
        else:
            raise nvp_exc.NvpPluginException(
                err_msg=(_("The port %(port_id)s, connected to the router "
                           "%(router_id)s was not found on the NVP backend.")
                         % locals()))
        # Create logical router port and patch attachment
        self._create_and_attach_router_port(
            cluster, context, router_id, port,
            "PatchAttachment", ls_port['uuid'],
            subnet_ids=[subnet_id])
        subnet = self._get_subnet(context, subnet_id)
        # If there is an external gateway we need to configure the SNAT rule.
        # Fetch router from DB
        router = self._get_router(context, router_id)
        gw_port = router.gw_port
        if gw_port:
            # There is a chance gw_port might have multiple IPs
            # In that case we will consider only the first one
            if gw_port.get('fixed_ips'):
                snat_ip = gw_port['fixed_ips'][0]['ip_address']
                nvplib.create_lrouter_snat_rule(
                    cluster, router_id, snat_ip, snat_ip,
                    order=NVP_EXTGW_NAT_RULES_ORDER,
                    match_criteria={'source_ip_addresses': subnet['cidr']})
            # Traffic destined to the subnet itself must not be SNATed.
            nvplib.create_lrouter_nosnat_rule(
                cluster, router_id,
                order=NVP_NOSNAT_RULES_ORDER,
                match_criteria={'destination_ip_addresses': subnet['cidr']})

        # Ensure the NVP logical router has a connection to a 'metadata access'
        # network (with a proxy listening on its DHCP port), by creating it
        # if needed.
        self._handle_metadata_access_network(context, router_id)
        LOG.debug(_("Add_router_interface completed for subnet:%(subnet_id)s "
                    "and router:%(router_id)s"),
                  {'subnet_id': subnet_id, 'router_id': router_id})
        return router_iface_info
def remove_router_interface(self, context, router_id, interface_info):
# TODO(salvatore-orlando): Usual thing about cluster selection
cluster = self.default_cluster
# The code below is duplicated from base class, but comes handy
# as we need to retrieve the router port id before removing the port
subnet = None
subnet_id = None
if 'port_id' in interface_info:
port_id = interface_info['port_id']
# find subnet_id - it is need for removing the SNAT rule
port = self._get_port(context, port_id)
if port.get('fixed_ips'):
subnet_id = port['fixed_ips'][0]['subnet_id']
if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and
port['device_id'] == router_id):
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
elif 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
subnet = self._get_subnet(context, subnet_id)
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id']).all()
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
port_id = p['id']
break
else:
raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
subnet_id=subnet_id)
results = nvplib.query_lswitch_lports(
cluster, '*', relations="LogicalPortAttachment",
filters={'tag': port_id, 'tag_scope': 'q_port_id'})
lrouter_port_id = None
if len(results):
lport = results[0]
attachment_data = lport['_relations'].get('LogicalPortAttachment')
lrouter_port_id = (attachment_data and
attachment_data.get('peer_port_uuid'))
else:
LOG.warning(_("The port %(port_id)s, connected to the router "
"%(router_id)s was not found on the NVP backend"),
locals())
# Finally remove the data from the Quantum DB
# This will also destroy the port on the logical switch
super(NvpPluginV2, self).remove_router_interface(context,
router_id,
interface_info)
# Destroy router port (no need to unplug the attachment)
# FIXME(salvatore-orlando): In case of failures in the Quantum plugin
# this migth leave a dangling port. We perform the operation here
# to leverage validation performed in the base class
if not lrouter_port_id:
LOG.warning(_("Unable to find NVP logical router port for "
"Quantum port id:%s. Was this port ever paired "
"with a logical router?"), port_id)
return
# Ensure the connection to the 'metadata access network'
# is removed (with the network) if this the last subnet
# on the router
self._handle_metadata_access_network(context, router_id)
try:
if not subnet:
subnet = self._get_subnet(context, subnet_id)
router = self._get_router(context, router_id)
# Remove SNAT rule if external gateway is configured
if router.gw_port:
nvplib.delete_nat_rules_by_match(
cluster, router_id, "SourceNatRule",
max_num_expected=1, min_num_expected=1,
source_ip_addresses=subnet['cidr'])
# Relax the minimum expected number as the nosnat rules
# do not exist in 2.x deployments
nvplib.delete_nat_rules_by_match(
cluster, router_id, "NoSourceNatRule",
max_num_expected=1, min_num_expected=0,
destination_ip_addresses=subnet['cidr'])
nvplib.delete_router_lport(cluster, router_id, lrouter_port_id)
except NvpApiClient.ResourceNotFound:
raise nvp_exc.NvpPluginException(
err_msg=(_("Logical router port resource %s not found "
"on NVP platform"), lrouter_port_id))
except NvpApiClient.NvpApiException:
raise nvp_exc.NvpPluginException(
err_msg=(_("Unable to update logical router"
"on NVP Platform")))
    def _retrieve_and_delete_nat_rules(self, floating_ip_address,
                                       internal_ip, router_id,
                                       min_num_rules_expected=0):
        """Remove the DNAT/SNAT rule pair for a floating IP association.

        :param min_num_rules_expected: pass 1 when the rules must exist
            (e.g. on disassociation).  A rule-count mismatch surfaces as
            NvpNatRuleMismatch and is only logged, never re-raised.
        :raises NvpApiClient.NvpApiException: backend errors other than a
            rule-count mismatch are re-raised after logging.
        """
        # TODO(salvatore-orlando): Multiple cluster support
        cluster = self.default_cluster
        try:
            nvplib.delete_nat_rules_by_match(
                cluster, router_id, "DestinationNatRule",
                max_num_expected=1,
                min_num_expected=min_num_rules_expected,
                destination_ip_addresses=floating_ip_address)

            # Remove SNAT rule associated with the single fixed_ip
            # to floating ip
            nvplib.delete_nat_rules_by_match(
                cluster, router_id, "SourceNatRule",
                max_num_expected=1,
                min_num_expected=min_num_rules_expected,
                source_ip_addresses=internal_ip)
        except NvpApiClient.NvpApiException:
            LOG.exception(_("An error occurred while removing NAT rules "
                            "on the NVP platform for floating ip:%s"),
                          floating_ip_address)
            raise
        except nvp_exc.NvpNatRuleMismatch:
            # Do not surface to the user
            LOG.warning(_("An incorrect number of matching NAT rules "
                          "was found on the NVP platform"))
    def _remove_floatingip_address(self, context, fip_db):
        """Remove a floating IP address from its router's gateway port.

        The addresses currently configured on the floating IP's external
        port are removed from the NVP logical router gateway port.
        """
        # Remove floating IP address from logical router port
        # Fetch logical port of router's external gateway
        router_id = fip_db.router_id
        nvp_gw_port_id = nvplib.find_router_gw_port(
            context, self.default_cluster, router_id)['uuid']
        # Elevated context: the external port may belong to another tenant.
        ext_quantum_port_db = self._get_port(context.elevated(),
                                             fip_db.floating_port_id)
        nvp_floating_ips = self._build_ip_address_list(
            context.elevated(), ext_quantum_port_db['fixed_ips'])
        nvplib.update_lrouter_port_ips(self.default_cluster,
                                       router_id,
                                       nvp_gw_port_id,
                                       ips_to_add=[],
                                       ips_to_remove=nvp_floating_ips)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
""" Update floating IP association data.
Overrides method from base class.
The method is augmented for creating NAT rules in the process.
"""
if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and
not ('port_id' in fip and fip['port_id'])):
msg = _("fixed_ip_address cannot be specified without a port_id")
raise q_exc.BadRequest(resource='floatingip', msg=msg)
port_id = internal_ip = router_id = None
if 'port_id' in fip and fip['port_id']:
port_qry = context.session.query(l3_db.FloatingIP)
try:
port_qry.filter_by(fixed_port_id=fip['port_id']).one()
raise l3.FloatingIPPortAlreadyAssociated(
port_id=fip['port_id'],
fip_id=floatingip_db['id'],
floating_ip_address=floatingip_db['floating_ip_address'],
fixed_ip=floatingip_db['fixed_ip_address'],
net_id=floatingip_db['floating_network_id'])
except sa_exc.NoResultFound:
pass
port_id, internal_ip, router_id = self.get_assoc_data(
context,
fip,
floatingip_db['floating_network_id'])
cluster = self._find_target_cluster(fip)
floating_ip = floatingip_db['floating_ip_address']
# Retrieve and delete existing NAT rules, if any
if not router_id and floatingip_db.get('fixed_port_id'):
# This happens if we're disassociating. Need to explicitly
# find the router serving this floating IP
tmp_fip = fip.copy()
tmp_fip['port_id'] = floatingip_db['fixed_port_id']
_pid, internal_ip, router_id = self.get_assoc_data(
context, tmp_fip, floatingip_db['floating_network_id'])
# If there's no association router_id will be None
if router_id:
self._retrieve_and_delete_nat_rules(floating_ip,
internal_ip,
router_id)
# Fetch logical port of router's external gateway
nvp_gw_port_id = nvplib.find_router_gw_port(
context, self.default_cluster, router_id)['uuid']
nvp_floating_ips = self._build_ip_address_list(
context.elevated(), external_port['fixed_ips'])
LOG.debug(_("Address list for NVP logical router "
"port:%s"), nvp_floating_ips)
# Re-create NAT rules only if a port id is specified
if 'port_id' in fip and fip['port_id']:
try:
# Create new NAT rules
nvplib.create_lrouter_dnat_rule(
cluster, router_id, internal_ip,
order=NVP_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'destination_ip_addresses':
floating_ip})
# setup snat rule such that src ip of a IP packet when
# using floating is the floating ip itself.
nvplib.create_lrouter_snat_rule(
cluster, router_id, floating_ip, floating_ip,
order=NVP_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'source_ip_addresses': internal_ip})
# Add Floating IP address to router_port
nvplib.update_lrouter_port_ips(cluster,
router_id,
nvp_gw_port_id,
ips_to_add=nvp_floating_ips,
ips_to_remove=[])
except NvpApiClient.NvpApiException:
LOG.exception(_("An error occurred while creating NAT "
"rules on the NVP platform for floating "
"ip:%(floating_ip)s mapped to "
"internal ip:%(internal_ip)s"),
{'floating_ip': floating_ip,
'internal_ip': internal_ip})
raise nvp_exc.NvpPluginException(err_msg=msg)
elif floatingip_db['fixed_port_id']:
# This is a disassociation.
# Remove floating IP address from logical router port
nvplib.update_lrouter_port_ips(cluster,
router_id,
nvp_gw_port_id,
ips_to_add=[],
ips_to_remove=nvp_floating_ips)
floatingip_db.update({'fixed_ip_address': internal_ip,
'fixed_port_id': port_id,
'router_id': router_id})
def delete_floatingip(self, context, id):
fip_db = self._get_floatingip(context, id)
# Check whether the floating ip is associated or not
if fip_db.fixed_port_id:
self._retrieve_and_delete_nat_rules(fip_db.floating_ip_address,
fip_db.fixed_ip_address,
fip_db.router_id,
min_num_rules_expected=1)
# Remove floating IP address from logical router port
self._remove_floatingip_address(context, fip_db)
return super(NvpPluginV2, self).delete_floatingip(context, id)
def disassociate_floatingips(self, context, port_id):
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_db = fip_qry.filter_by(fixed_port_id=port_id).one()
self._retrieve_and_delete_nat_rules(fip_db.floating_ip_address,
fip_db.fixed_ip_address,
fip_db.router_id,
min_num_rules_expected=1)
self._remove_floatingip_address(context, fip_db)
except sa_exc.NoResultFound:
LOG.debug(_("The port '%s' is not associated with floating IPs"),
port_id)
except q_exc.NotFound:
LOG.warning(_("Nat rules not found in nvp for port: %s"), id)
super(NvpPluginV2, self).disassociate_floatingips(context, port_id)
def create_network_gateway(self, context, network_gateway):
""" Create a layer-2 network gateway
Create the gateway service on NVP platform and corresponding data
structures in Quantum datase
"""
# Need to re-do authZ checks here in order to avoid creation on NVP
gw_data = network_gateway[networkgw.RESOURCE_NAME.replace('-', '_')]
tenant_id = self._get_tenant_id_for_create(context, gw_data)
cluster = self._find_target_cluster(gw_data)
devices = gw_data['devices']
# Populate default physical network where not specified
for device in devices:
if not device.get('interface_name'):
device['interface_name'] = cluster.default_interface_name
try:
nvp_res = nvplib.create_l2_gw_service(cluster, tenant_id,
gw_data['name'],
devices)
nvp_uuid = nvp_res.get('uuid')
except Exception:
raise nvp_exc.NvpPluginException(
err_msg=_("Create_l2_gw_service did not "
"return an uuid for the newly "
"created resource:%s") % nvp_res)
gw_data['id'] = nvp_uuid
return super(NvpPluginV2, self).create_network_gateway(context,
network_gateway)
    def delete_network_gateway(self, context, id):
        """Remove a layer-2 network gateway.

        Remove the gateway service from the NVP platform and the
        corresponding data structures in the Quantum database.
        """
        with context.session.begin(subtransactions=True):
            try:
                super(NvpPluginV2, self).delete_network_gateway(context, id)
                nvplib.delete_l2_gw_service(self.default_cluster, id)
            except NvpApiClient.ResourceNotFound:
                # Do not cause a 500 to be returned to the user if
                # the corresponding NVP resource does not exist
                LOG.exception(_("Unable to remove gateway service from "
                                "NVP plaform - the resource was not found"))
def _ensure_tenant_on_net_gateway(self, context, net_gateway):
if not net_gateway['tenant_id']:
net_gateway['tenant_id'] = context.tenant_id
return net_gateway
def get_network_gateway(self, context, id, fields=None):
# Ensure the tenant_id attribute is populated on the returned gateway
#return self._ensure_tenant_on_net_gateway(
# context, super(NvpPluginV2, self).get_network_gateway(
# context, id, fields))
return super(NvpPluginV2, self).get_network_gateway(context,
id, fields)
def get_network_gateways(self, context, filters=None, fields=None):
# Ensure the tenant_id attribute is populated on returned gateways
net_gateways = super(NvpPluginV2,
self).get_network_gateways(context,
filters,
fields)
return net_gateways
    def get_plugin_version(self):
        """Return the plugin version string (module-level PLUGIN_VERSION)."""
        return PLUGIN_VERSION
    def create_security_group(self, context, security_group, default_sg=False):
        """Create a security group on both NVP and the Quantum DB.

        If default_sg is true it means we are creating the tenant's
        default security group, so there is no need to check whether one
        already exists.
        """
        s = security_group.get('security_group')
        tenant_id = self._get_tenant_id_for_create(context, s)
        if not default_sg:
            # Make sure the tenant's default group exists before adding a
            # custom one.
            self._ensure_default_security_group(context, tenant_id)
        # Create the NVP security profile first and reuse its UUID as the
        # Quantum security group id.
        nvp_secgroup = nvplib.create_security_profile(self.default_cluster,
                                                      tenant_id, s)
        security_group['security_group']['id'] = nvp_secgroup['uuid']
        return super(NvpPluginV2, self).create_security_group(
            context, security_group, default_sg)
    def delete_security_group(self, context, security_group_id):
        """Delete a security group from both the Quantum DB and NVP.

        :param security_group_id: id of the security group to remove.
        :raises ext_sg.SecurityGroupNotFound: unknown id.
        :raises ext_sg.SecurityGroupCannotRemoveDefault: a non-admin
            attempted to delete the 'default' group.
        :raises ext_sg.SecurityGroupInUse: ports are still bound to it.
        """
        with context.session.begin(subtransactions=True):
            security_group = super(NvpPluginV2, self).get_security_group(
                context, security_group_id)
            if not security_group:
                raise ext_sg.SecurityGroupNotFound(id=security_group_id)

            if security_group['name'] == 'default' and not context.is_admin:
                raise ext_sg.SecurityGroupCannotRemoveDefault()

            filters = {'security_group_id': [security_group['id']]}
            if super(NvpPluginV2, self)._get_port_security_group_bindings(
                context, filters):
                raise ext_sg.SecurityGroupInUse(id=security_group['id'])
            # Remove the NVP security profile within the same transaction
            # as the DB delete below.
            nvplib.delete_security_profile(self.default_cluster,
                                           security_group['id'])

            return super(NvpPluginV2, self).delete_security_group(
                context, security_group_id)
def create_security_group_rule(self, context, security_group_rule):
"""create a single security group rule"""
bulk_rule = {'security_group_rules': [security_group_rule]}
return self.create_security_group_rule_bulk(context, bulk_rule)[0]
    def create_security_group_rule_bulk(self, context, security_group_rule):
        """Create security group rules.

        :param security_group_rule: payload whose 'security_group_rules'
            key holds the list of rules to create.
        """
        s = security_group_rule.get('security_group_rules')
        tenant_id = self._get_tenant_id_for_create(context, s)

        # TODO(arosen) is there anyway we could avoid having the update of
        # the security group rules in nvp outside of this transaction?
        with context.session.begin(subtransactions=True):
            self._ensure_default_security_group(context, tenant_id)
            security_group_id = self._validate_security_group_rules(
                context, security_group_rule)

            # Check to make sure security group exists
            security_group = super(NvpPluginV2, self).get_security_group(
                context, security_group_id)

            if not security_group:
                raise ext_sg.SecurityGroupNotFound(id=security_group_id)
            # Check for duplicate rules
            self._check_for_duplicate_rules(context, s)
            # gather all the existing security group rules since we need all
            # of them to PUT to NVP.
            combined_rules = self._merge_security_group_rules_with_current(
                context, s, security_group['id'])
            # NVP expects the complete rule set on update, not a delta.
            nvplib.update_security_group_rules(self.default_cluster,
                                               security_group['id'],
                                               combined_rules)
            return super(
                NvpPluginV2, self).create_security_group_rule_bulk_native(
                    context, security_group_rule)
    def delete_security_group_rule(self, context, sgrid):
        """Delete a single security group rule.

        :param sgrid: id of the security group *rule* to remove.
        :raises ext_sg.SecurityGroupRuleNotFound: unknown rule id.
        """
        with context.session.begin(subtransactions=True):
            # determine security profile id
            security_group_rule = (
                super(NvpPluginV2, self).get_security_group_rule(
                    context, sgrid))
            if not security_group_rule:
                raise ext_sg.SecurityGroupRuleNotFound(id=sgrid)

            sgid = security_group_rule['security_group_id']
            # NVP takes the full remaining rule set on update, so rebuild
            # the current set without the deleted rule and PUT it back.
            current_rules = self._get_security_group_rules_nvp_format(
                context, sgid, True)

            self._remove_security_group_with_id_and_id_field(
                current_rules, sgrid)
            nvplib.update_security_group_rules(
                self.default_cluster, sgid, current_rules)
            return super(NvpPluginV2, self).delete_security_group_rule(context,
                                                                       sgrid)
    def create_qos_queue(self, context, qos_queue, check_policy=True):
        """Create a qos queue on NVP and mirror it in the Quantum DB.

        The NVP logical queue UUID is reused as the Quantum queue id.
        :param check_policy: when False, the set-auth policy enforcement
            is skipped.
        """
        q = qos_queue.get('qos_queue')
        if check_policy:
            self._enforce_set_auth(context, q, ext_qos.qos_queue_create)
        self._validate_qos_queue(context, q)
        q['id'] = nvplib.create_lqueue(self.default_cluster,
                                       self._nvp_lqueue(q))
        return super(NvpPluginV2, self).create_qos_queue(context, qos_queue)
def delete_qos_queue(self, context, id, raise_in_use=True):
filters = {'queue_id': [id]}
queues = self._get_port_queue_bindings(context, filters)
if queues:
if raise_in_use:
raise ext_qos.QueueInUseByPort()
else:
return
nvplib.delete_lqueue(self.default_cluster, id)
return super(NvpPluginV2, self).delete_qos_queue(context, id)
    def get_qos_queue(self, context, id, fields=None):
        """Return a qos queue, hiding its existence when policy denies it."""
        if not self._check_view_auth(context, {}, ext_qos.qos_queue_get):
            # don't want the user to find out that they guessed the right id
            # so we raise not found if the policy.json file doesn't allow them
            raise ext_qos.QueueNotFound(id=id)
        return super(NvpPluginV2, self).get_qos_queue(context, id, fields)
def get_qos_queues(self, context, filters=None, fields=None):
if not self._check_view_auth(context, {'qos_queue': []},
ext_qos.qos_queue_list):
return []
return super(NvpPluginV2, self).get_qos_queues(context, filters,
fields)
| 49.65177 | 79 | 0.571886 |
c37193dbca0fcd27871b83de94648ce8c2523575 | 4,369 | py | Python | example/rgbd/rgbd_simple_server.py | yuki-inaho/zense_grpc_pywrapper | 43de6233117722b4a35abd165a0e83ff2238dd28 | [
"Apache-2.0"
] | null | null | null | example/rgbd/rgbd_simple_server.py | yuki-inaho/zense_grpc_pywrapper | 43de6233117722b4a35abd165a0e83ff2238dd28 | [
"Apache-2.0"
] | null | null | null | example/rgbd/rgbd_simple_server.py | yuki-inaho/zense_grpc_pywrapper | 43de6233117722b4a35abd165a0e83ff2238dd28 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import time
from datetime import datetime
import os
import sys
WORKING_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(WORKING_DIR, '../..'))
from zense_grpc_pywrapper import PicoZenseGRPCServerImpl
sys.path.append(os.path.join(WORKING_DIR, '../../scripts'))
import zense_pb2_grpc
import zense_pb2 as Image
import zense_pb2
import grpc
from utils.convert_pb_ndarray import ndarray_to_bytes
from concurrent import futures
# Sleep interval used by the serve() loop below.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
import pdb  # NOTE(review): leftover debugging import; consider removing.
# Camera configuration (TOML) path and camera key, passed to the native
# wrapper as bytes.
CFG_PARAM_PATH = "{}/../../cfg/camera.toml".format(WORKING_DIR).encode('utf-8')
CAM_KEY = "Camera0".encode('utf-8')
# It is necessary that this object is unique: a single camera wrapper is
# created at module level and shared by all servicer instances below.
zense = PicoZenseGRPCServerImpl(CFG_PARAM_PATH, CAM_KEY, 0)
class ZenseServiceServicerRGBDIR(zense_pb2_grpc.ZenseServiceServicer):
    """gRPC servicer streaming RGB / depth / IR frames from a Pico Zense.

    Frames are grabbed from the module-global 'zense' camera wrapper,
    which must stay unique (see the note at its creation above).
    """

    def __init__(self):
        global zense
        # Cache the capability flags exposed by the camera wrapper.
        self.is_rgb = zense.is_rgb
        self.is_ir = zense.is_ir
        self.is_wdr = zense.is_wdr

    # TODO : check that the update() polling below cannot loop forever
    # TODO : implement exception handling for camera failures
    def SendRGBDImage(self, request, context):
        """Handle the RGBD RPC: return one RGB + depth frame pair.

        Returns an empty ImageRGBDReply when the camera configuration is
        not RGB-enabled.
        """
        global zense
        if not self.is_rgb:
            print("Current Configuration is not RGB enabled")
            return zense_pb2.ImageRGBDReply()
        # Busy-wait until the wrapper reports a fresh frame set.
        while not zense.update():
            pass
        # .copy() decouples the returned arrays from the wrapper object.
        self.rgb_image = zense.rgb_image.copy()
        self.depth_image = zense.depth_image_range1.copy()
        self.depth_range = zense.get_depth_range
        timestamp = self.get_timestamp_microseconds()
        rgb_img_pb = zense_pb2.Image(
            height=self.rgb_image.shape[0],
            width=self.rgb_image.shape[1],
            timestamp=timestamp,
            channel=3,
            data=ndarray_to_bytes(self.rgb_image)
        )
        depth_img_pb = zense_pb2.Image(
            height=self.depth_image.shape[0],
            width=self.depth_image.shape[1],
            timestamp=timestamp,
            channel=1,
            depth_range=self.depth_range,
            data=ndarray_to_bytes(self.depth_image)
        )
        return zense_pb2.ImageRGBDReply(image_rgb=rgb_img_pb,
                                        image_depth=depth_img_pb)

    def SendRGBDIRImage(self, request, context):
        """Handle the RGBD+IR RPC: return RGB, IR and depth frames."""
        global zense
        if not self.is_rgb:
            print("Current Configuration is not RGB enabled")
            return zense_pb2.ImageRGBDReply()
        while not zense.update():
            pass
        self.rgb_image = zense.rgb_image.copy()
        self.ir_image = zense.ir_image.copy()
        self.depth_image = zense.depth_image_range1.copy()
        self.depth_range = zense.get_depth_range
        timestamp = self.get_timestamp_microseconds()
        rgb_img_pb = zense_pb2.Image(
            height=self.rgb_image.shape[0],
            width=self.rgb_image.shape[1],
            timestamp=timestamp,
            channel=3,
            data=ndarray_to_bytes(self.rgb_image)
        )
        ir_img_pb = zense_pb2.Image(
            height=self.ir_image.shape[0],
            width=self.ir_image.shape[1],
            timestamp=timestamp,
            channel=1,
            data=ndarray_to_bytes(self.ir_image)
        )
        depth_img_pb = zense_pb2.Image(
            height=self.depth_image.shape[0],
            width=self.depth_image.shape[1],
            timestamp=timestamp,
            channel=1,
            depth_range=self.depth_range,
            data=ndarray_to_bytes(self.depth_image)
        )
        return zense_pb2.ImageRGBDIRReply(image_rgb=rgb_img_pb,
                                          image_ir=ir_img_pb,
                                          image_depth=depth_img_pb)

    def get_timestamp_microseconds(self):
        """Return the current time as microseconds since the Unix epoch."""
        return int((datetime.now() -
                    datetime.utcfromtimestamp(0)).total_seconds() * 1e6)

    def from_microseconds_to_timestamp(self, msec):
        """Inverse of get_timestamp_microseconds (not used in this file)."""
        return datetime.utcfromtimestamp(msec * 1e-6)

    def serve(self):
        """Run a blocking gRPC server on localhost:50051 until Ctrl-C.

        NOTE(review): a *new* servicer instance is registered here rather
        than 'self' -- presumably intentional; verify.
        """
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        zense_pb2_grpc.add_ZenseServiceServicer_to_server(
            ZenseServiceServicerRGBDIR(), server)
        server.add_insecure_port('localhost:50051')
        server.start()
        print('Zense Server Launched')
        try:
            # server.start() does not block; keep the main thread alive.
            while True:
                time.sleep(_ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            server.stop(0)
def main():
    """Entry point: build the servicer and run the blocking gRPC server."""
    ZenseServiceServicerRGBDIR().serve()
if __name__ == '__main__':
    # Run the gRPC server when executed as a script.
    main()
680f9b7be8c72dea79bbe9b89823af8e6f3772c7 | 320 | py | Python | src/test_tower.py | cullinap/eldorado | 6dfae6e87fc0cd8a01c27957b72ea68862fa61c9 | [
"MIT"
] | null | null | null | src/test_tower.py | cullinap/eldorado | 6dfae6e87fc0cd8a01c27957b72ea68862fa61c9 | [
"MIT"
] | 2 | 2021-07-24T16:12:07.000Z | 2021-07-29T13:46:16.000Z | src/test_tower.py | cullinap/powr | 6dfae6e87fc0cd8a01c27957b72ea68862fa61c9 | [
"MIT"
] | null | null | null | import unittest
from tower import TowerBuilder, Tangent, DeadEnd
class Test_Tower_Build(unittest.TestCase):
    """Smoke test: TowerBuilder assigns the requested tower number."""

    def test_tower_build(self):
        built = TowerBuilder.createTower(Tangent, 1, 'short')
        self.assertEqual(built.tower_number, 1)
# Allow running this test module directly: `python test_tower.py`.
if __name__ == '__main__':
    unittest.main()
| 24.615385 | 57 | 0.709375 |
cc40c40cd8d61c16e47b8294878119c66edc9d09 | 13,382 | py | Python | docs/_ext/djangodocs.py | zou-zhicheng/django-source | b3da003539987dcb2f1b43c32c09160063afde07 | [
"PSF-2.0",
"BSD-3-Clause"
] | 4 | 2021-02-01T10:28:11.000Z | 2021-02-01T10:34:40.000Z | docs/_ext/djangodocs.py | zou-zhicheng/django-source | b3da003539987dcb2f1b43c32c09160063afde07 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | docs/_ext/djangodocs.py | zou-zhicheng/django-source | b3da003539987dcb2f1b43c32c09160063afde07 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | """
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.directives.code import CodeBlock
from sphinx.domains.std import Cmdoption
from sphinx.errors import ExtensionError
from sphinx.util import logging
from sphinx.util.console import bold
from sphinx.writers.html import HTMLTranslator
# Module-level logger, namespaced to this extension.
logger = logging.getLogger(__name__)

# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
    r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
    """Sphinx extension entry point.

    Registers Django's custom cross-reference types, the ``django-admin``
    object type, the version-annotation directives, the Django HTML
    builder/translator pair, and the two-tab ``console`` directive.
    Returns extension metadata telling Sphinx parallel reads are safe.
    """
    # Cross-reference types: each call adds a directive/role pair plus an
    # index entry template.
    app.add_crossref_type(
        directivename="setting",
        rolename="setting",
        indextemplate="pair: %s; setting",
    )
    app.add_crossref_type(
        directivename="templatetag",
        rolename="ttag",
        indextemplate="pair: %s; template tag"
    )
    app.add_crossref_type(
        directivename="templatefilter",
        rolename="tfilter",
        indextemplate="pair: %s; template filter"
    )
    app.add_crossref_type(
        directivename="fieldlookup",
        rolename="lookup",
        indextemplate="pair: %s; field lookup type",
    )
    # django-admin commands get a custom signature parser.
    app.add_object_type(
        directivename="django-admin",
        rolename="djadmin",
        indextemplate="pair: %s; django-admin command",
        parse_node=parse_django_admin_node,
    )
    app.add_directive('django-admin-option', Cmdoption)
    app.add_config_value('django_next_version', '0.0', True)
    app.add_directive('versionadded', VersionDirective)
    app.add_directive('versionchanged', VersionDirective)
    app.add_builder(DjangoStandaloneHTMLBuilder)
    app.set_translator('djangohtml', DjangoHTMLTranslator)
    app.set_translator('json', DjangoHTMLTranslator)
    # ConsoleNode renders two-tab HTML; every other output format falls back
    # to plain literal-block handling via the dummy visitors.
    app.add_node(
        ConsoleNode,
        html=(visit_console_html, None),
        latex=(visit_console_dummy, depart_console_dummy),
        man=(visit_console_dummy, depart_console_dummy),
        text=(visit_console_dummy, depart_console_dummy),
        texinfo=(visit_console_dummy, depart_console_dummy),
    )
    app.add_directive('console', ConsoleDirective)
    app.connect('html-page-context', html_page_context_hook)
    return {'parallel_read_safe': True}
class VersionDirective(Directive):
    """Implements ``versionadded``/``versionchanged``.

    Builds an ``addnodes.versionmodified`` node carrying the version string
    and the directive name, then records the change with Sphinx's changeset
    domain (with a fallback for Sphinx < 1.8).
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {}

    def run(self):
        # Exactly one version argument is allowed; prose goes in the body.
        if len(self.arguments) > 1:
            msg = """Only one argument accepted for directive '{directive_name}::'.
            Comments should be provided as content,
            not as an extra argument.""".format(directive_name=self.name)
            raise self.error(msg)

        env = self.state.document.settings.env
        ret = []
        node = addnodes.versionmodified()
        ret.append(node)
        # The configured "next" version is displayed as the in-development one.
        if self.arguments[0] == env.config.django_next_version:
            node['version'] = "Development version"
        else:
            node['version'] = self.arguments[0]
        node['type'] = self.name
        if self.content:
            self.state.nested_parse(self.content, self.content_offset, node)
        try:
            env.get_domain('changeset').note_changeset(node)
        except ExtensionError:
            # Sphinx < 1.8: Domain 'changeset' is not registered
            env.note_versionchange(node['type'], node['version'], node, self.lineno)
        return ret
class DjangoHTMLTranslator(HTMLTranslator):
    """
    Django-specific reST to HTML tweaks.
    """

    # Don't use border=1, which docutils does by default.
    def visit_table(self, node):
        self.context.append(self.compact_p)
        self.compact_p = True
        self._table_row_index = 0  # Needed by Sphinx
        self.body.append(self.starttag(node, 'table', CLASS='docutils'))

    def depart_table(self, node):
        # Restore the compact_p flag pushed in visit_table.
        self.compact_p = self.context.pop()
        self.body.append('</table>\n')

    def visit_desc_parameterlist(self, node):
        self.body.append('(')  # by default sphinx puts <big> around the "("
        # State consumed by the parent translator while rendering parameters.
        self.first_param = 1
        self.optional_param_level = 0
        self.param_separator = node.child_text_separator
        self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children)

    def depart_desc_parameterlist(self, node):
        self.body.append(')')

    #
    # Turn the "new in version" stuff (versionadded/versionchanged) into a
    # better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious that I'd like.
    #
    # FIXME: these messages are all hardcoded in English. We need to change
    # that to accommodate other language docs, but I can't work out how to make
    # that work.
    #
    version_text = {
        'versionchanged': 'Changed in Django %s',
        'versionadded': 'New in Django %s',
    }

    def visit_versionmodified(self, node):
        self.body.append(
            self.starttag(node, 'div', CLASS=node['type'])
        )
        version_text = self.version_text.get(node['type'])
        if version_text:
            # End with ":" when the node has body content, "." otherwise.
            title = "%s%s" % (
                version_text % node['version'],
                ":" if len(node) else "."
            )
            self.body.append('<span class="title">%s</span> ' % title)

    def depart_versionmodified(self, node):
        self.body.append("</div>\n")

    # Give each section a unique ID -- nice for custom CSS hooks
    def visit_section(self, node):
        # Temporarily prefix ids with "s-" for rendering, then restore them.
        old_ids = node.get('ids', [])
        node['ids'] = ['s-' + i for i in old_ids]
        node['ids'].extend(old_ids)
        super().visit_section(node)
        node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
    """Parse a ``django-admin`` signature; return the bare command name.

    The first whitespace-separated token is the command; it is stored as
    the current std-domain program and the full signature is rendered as
    ``django-admin <sig>``.
    """
    command = sig.partition(' ')[0]
    env.ref_context['std:program'] = command
    title = "django-admin %s" % sig
    signode += addnodes.desc_name(title, title)
    return command
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
    """
    Subclass to add some extra things we need.
    """
    name = 'djangohtml'

    def finish(self):
        """After the normal HTML build, emit templatebuiltins.js.

        The file assigns a JS object listing every template tag and filter
        documented in ref/templates/builtins, collected from the std
        domain's cross-reference data.
        """
        super().finish()
        logger.info(bold("writing templatebuiltins.js..."))
        xrefs = self.env.domaindata["std"]["objects"]
        # xrefs maps (type, name) -> (docname, anchor); keep only names
        # documented on the builtins reference page.
        templatebuiltins = {
            "ttags": [
                n for ((t, n), (k, a)) in xrefs.items()
                if t == "templatetag" and k == "ref/templates/builtins"
            ],
            "tfilters": [
                n for ((t, n), (k, a)) in xrefs.items()
                if t == "templatefilter" and k == "ref/templates/builtins"
            ],
        }
        outfilename = os.path.join(self.outdir, "templatebuiltins.js")
        with open(outfilename, 'w') as fp:
            fp.write('var django_template_builtins = ')
            json.dump(templatebuiltins, fp)
            fp.write(';\n')
class ConsoleNode(nodes.literal_block):
    """
    Custom node to override the visit/depart event handlers at registration
    time. Wrap a literal_block object and defer to it.
    """
    tagname = 'ConsoleNode'

    def __init__(self, litblk_obj):
        self.wrapped = litblk_obj

    def __getattr__(self, attr):
        # Guard against infinite recursion when 'wrapped' itself is missing
        # (e.g. during copy/unpickle, before __init__ has run). The original
        # returned ``self.__dict__.wrapped`` -- attribute access on a plain
        # dict, which always raised AttributeError *on the dict*; raise the
        # conventional AttributeError for this node instead.
        if attr == 'wrapped':
            raise AttributeError(attr)
        return getattr(self.wrapped, attr)
def visit_console_dummy(self, node):
    """Defer to the corresponding parent's handler."""
    # Non-HTML builders render ConsoleNode exactly like a literal block.
    self.visit_literal_block(node)


def depart_console_dummy(self, node):
    """Defer to the corresponding parent's handler."""
    self.depart_literal_block(node)
def visit_console_html(self, node):
    """Generate HTML for the console directive.

    For the Django HTML builders, when the node carries a Windows variant,
    emit a two-tab widget (Unix tab = normal literal block, Windows tab =
    'doscon'-highlighted text) and skip default rendering via SkipNode.
    Otherwise fall back to plain literal-block output.
    """
    if self.builder.name in ('djangohtml', 'json') and node['win_console_text']:
        # Put a mark on the document object signaling the fact the directive
        # has been used on it.
        self.document._console_directive_used_flag = True
        uid = node['uid']
        self.body.append('''\
<div class="console-block" id="console-block-%(id)s">
<input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" checked>
<label for="c-tab-%(id)s-unix" title="Linux/macOS">&#xf17c;/&#xf179;</label>
<input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s">
<label for="c-tab-%(id)s-win" title="Windows">&#xf17a;</label>
<section class="c-content-unix" id="c-content-%(id)s-unix">\n''' % {'id': uid})
        # Render the Unix tab with the normal literal-block machinery; it
        # raises SkipNode on success, which we swallow so we can keep writing.
        try:
            self.visit_literal_block(node)
        except nodes.SkipNode:
            pass
        self.body.append('</section>\n')

        self.body.append('<section class="c-content-win" id="c-content-%(id)s-win">\n' % {'id': uid})
        win_text = node['win_console_text']
        highlight_args = {'force': True}
        linenos = node.get('linenos', False)

        def warner(msg):
            # NOTE(review): Builder.warn was deprecated/removed in newer
            # Sphinx releases -- confirm against the pinned Sphinx version.
            self.builder.warn(msg, (self.builder.current_docname, node.line))

        highlighted = self.highlighter.highlight_block(
            win_text, 'doscon', warn=warner, linenos=linenos, **highlight_args
        )
        self.body.append(highlighted)
        self.body.append('</section>\n')
        self.body.append('</div>\n')
        # Prevent the default translator from rendering this node again.
        raise nodes.SkipNode
    else:
        self.visit_literal_block(node)
class ConsoleDirective(CodeBlock):
    """
    A reStructuredText directive which renders a two-tab code block in which
    the second tab shows a Windows command line equivalent of the usual
    Unix-oriented examples.
    """
    required_arguments = 0
    # The 'doscon' Pygments formatter needs a prompt like this. '>' alone
    # won't do it because then it simply paints the whole command line as a
    # grey comment with no highlighting at all.
    WIN_PROMPT = r'...\> '

    def run(self):

        def args_to_win(cmdline):
            # Translate one Unix command's arguments to Windows spelling:
            # drop './', map '~/' to %HOMEPATH%\, 'make' to make.bat, and
            # flip path separators (but not inside URLs or git commands).
            changed = False
            out = []
            for token in cmdline.split():
                if token[:2] == './':
                    token = token[2:]
                    changed = True
                elif token[:2] == '~/':
                    token = '%HOMEPATH%\\' + token[2:]
                    changed = True
                elif token == 'make':
                    token = 'make.bat'
                    changed = True
                if '://' not in token and 'git' not in cmdline:
                    out.append(token.replace('/', '\\'))
                    changed = True
                else:
                    out.append(token)
            if changed:
                return ' '.join(out)
            return cmdline

        def cmdline_to_win(line):
            # Map a full prompt line to its Windows form; None means the
            # line is not a recognized command and should pass through.
            if line.startswith('# '):
                return 'REM ' + args_to_win(line[2:])
            if line.startswith('$ # '):
                return 'REM ' + args_to_win(line[4:])
            if line.startswith('$ ./manage.py'):
                return 'manage.py ' + args_to_win(line[13:])
            if line.startswith('$ manage.py'):
                return 'manage.py ' + args_to_win(line[11:])
            if line.startswith('$ ./runtests.py'):
                return 'runtests.py ' + args_to_win(line[15:])
            if line.startswith('$ ./'):
                return args_to_win(line[4:])
            if line.startswith('$ python3'):
                return 'py ' + args_to_win(line[9:])
            if line.startswith('$ python'):
                return 'py ' + args_to_win(line[8:])
            if line.startswith('$ '):
                return args_to_win(line[2:])
            return None

        def code_block_to_win(content):
            # Rewrite the whole block; return None when nothing changed so
            # the caller can skip the second tab entirely.
            bchanged = False
            lines = []
            for line in content:
                modline = cmdline_to_win(line)
                if modline is None:
                    lines.append(line)
                else:
                    lines.append(self.WIN_PROMPT + modline)
                    bchanged = True
            if bchanged:
                return ViewList(lines)
            return None

        env = self.state.document.settings.env
        self.arguments = ['console']
        lit_blk_obj = super().run()[0]

        # Only do work when the djangohtml HTML Sphinx builder is being used,
        # invoke the default behavior for the rest.
        if env.app.builder.name not in ('djangohtml', 'json'):
            return [lit_blk_obj]

        lit_blk_obj['uid'] = '%s' % env.new_serialno('console')
        # Only add the tabbed UI if there is actually a Windows-specific
        # version of the CLI example.
        win_content = code_block_to_win(self.content)
        if win_content is None:
            lit_blk_obj['win_console_text'] = None
        else:
            self.content = win_content
            lit_blk_obj['win_console_text'] = super().run()[0].rawsource

        # Replace the literal_node object returned by Sphinx's CodeBlock with
        # the ConsoleNode wrapper.
        return [ConsoleNode(lit_blk_obj)]
def html_page_context_hook(app, pagename, templatename, context, doctree):
    """Expose whether this page used the console directive to its template.

    Templates check ``include_console_assets`` to decide whether to include
    console-tabs.css and activate the tab-switching JavaScript, so the
    assets only ship on HTML pages rendered from reST files where
    ConsoleDirective was actually used.
    """
    used_console = getattr(doctree, '_console_directive_used_flag', False)
    context['include_console_assets'] = used_console
| 35.780749 | 102 | 0.606785 |
e1dea52ea99963cf28bfb5433b939ab67c896f83 | 15,371 | py | Python | api/team_directory/users/views.py | Hipo/team-directory | dfc999a6b464e88c020cfebe3b569b960b5d7e3d | [
"MIT"
] | null | null | null | api/team_directory/users/views.py | Hipo/team-directory | dfc999a6b464e88c020cfebe3b569b960b5d7e3d | [
"MIT"
] | 2 | 2020-06-05T23:54:21.000Z | 2020-09-30T12:50:16.000Z | api/team_directory/users/views.py | Hipo/team-directory | dfc999a6b464e88c020cfebe3b569b960b5d7e3d | [
"MIT"
] | null | null | null | import json
from datetime import timedelta
from django.db import transaction
from django.utils import timezone
import requests
from django.conf import settings
from rest_framework.authtoken.models import Token
from rest_framework.generics import RetrieveUpdateAPIView, GenericAPIView, RetrieveAPIView, ListAPIView, UpdateAPIView
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from slack import WebClient
from team_directory.projects.models import Project
from team_directory.questions.models import Answer
from .serializers import UserSerializer, UserMeSerializer, UserDetailSerializer, OneLinerSerializer
from .models import User, TEAM_CHOICES, OneLiner
# Onboarding/help text the Slack bot DMs to users. Module-level f-string:
# settings.WEB_APP_PROFILE_URL is interpolated once, at import time.
AGORA_HELP_TEXT = f"""
From now on, every few days, I’ll be asking a quirky ice-breaker question about you. Your answers will be added to your Agora profile. The aim here is to get to know you in a way that regular social media cannot capture, and share it only with your coworkers.
If you want to keep answering these questions without waiting a few days, just message me with *question* and I’ll send a new one for you. If you don’t like a question, just type *skip* and you’ll see a new one.
From your <{settings.WEB_APP_PROFILE_URL}|Agora Profile> you can also add your past and current projects you’re working on at Hipo. This allows you to find help on your project by contacting a teammate who has worked on that project before. Alternatively, on Slack, simply write *add project* to add a project you’re working on. You can also write *remove project* to remove a current active project.
Your Agora Profile also has a space for some one-liners about you. One-liners are little tidbits about you that appear in your Agora profile. Think of it as a way of expressing little things that make you, well... you =)
To add a one-liner, simply write *add oneliner <a sentence about you>*. So a sample would be:
*add oneliner I own 15 cats*
To remove a one-liner, simply write *remove oneliner* and you’ll get a list of your oneliners to remove
You can write *cancel* at any time
To get this message again and learn your Slack commands, simply write *help*
"""
class UsersView(ListAPIView):
    """List users, filterable by ``team``, ``project`` and ``search``
    (case-insensitive first-name substring) query parameters."""
    permission_classes = (IsAuthenticated,)
    serializer_class = UserSerializer

    def get_queryset(self):
        queryset = User.objects.all()
        # Read each query parameter once instead of twice per filter; empty
        # strings are treated the same as absent parameters, as before.
        team = self.request.GET.get("team")
        if team:
            queryset = queryset.filter(team=team)
        project = self.request.GET.get("project")
        if project:
            queryset = queryset.filter(project=project)
        search = self.request.GET.get("search")
        if search:
            queryset = queryset.filter(first_name__icontains=search)
        return queryset
class UserDetailView(RetrieveAPIView):
    """Retrieve any single user (authenticated callers only)."""
    permission_classes = [IsAuthenticated]
    serializer_class = UserDetailSerializer
    queryset = User.objects.all()
class UserMeView(RetrieveUpdateAPIView):
    """Retrieve or update the authenticated user's own profile."""
    permission_classes = [IsAuthenticated]
    serializer_class = UserMeSerializer

    def get_object(self, queryset=None):
        # Always operate on the requesting user.
        return self.request.user
class OneLinerDetailView(UpdateAPIView):
    """Update a one-liner; restricted to the requesting user's own entries."""
    permission_classes = (IsAuthenticated,)
    serializer_class = OneLinerSerializer

    def get_queryset(self):
        return OneLiner.objects.filter(user=self.request.user)
class AuthenticationView(GenericAPIView):
    """Slack OAuth sign-in.

    Exchanges the OAuth ``code`` for a user token, creates or refreshes the
    local ``User`` from Slack's profile data, sends the one-time onboarding
    DM, and returns the serialized user plus a DRF auth token.
    """
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        code = request.data["code"]
        params = {
            'client_id': settings.SLACK_CLIENT_ID,
            'client_secret': settings.SLACK_CLIENT_SECRET,
            'code': code,
            'redirect_uri': settings.SLACK_REDIRECT_URL,
        }
        response_data = requests.get("https://slack.com/api/oauth.v2.access", params=params).json()
        # NOTE(review): `assert` statements are stripped under `python -O`;
        # an explicit check raising an error would be safer for this
        # hard-coded app-id validation.
        assert response_data["app_id"] == "ANVQNHT4N"
        access_token = response_data["authed_user"]["access_token"]
        slack_user_id = response_data["authed_user"]["id"]
        user = User.objects.filter(slack_user_id=slack_user_id).first()
        if not user:
            # First sign-in: pull the Slack profile and create a local user.
            user_data = requests.get(
                url="https://slack.com/api/users.info",
                params={
                    'token': access_token,
                    'user': slack_user_id,
                    'include_local': 'true'
                }
            ).json()["user"]
            user = User.objects.create(
                slack_user_id=user_data["id"],
                slack_access_token=access_token,
                slack_username=user_data["name"],
                email=user_data["profile"]["email"],
                first_name=user_data["profile"].get("first_name") or user_data["name"],
                last_name=user_data["profile"].get("last_name") or "",
                image=user_data["profile"].get("image_original") or "",
                timezone=user_data["tz"],
                agora_initialized=True,
            )
        else:
            # Returning user: refresh the token and mark as initialized.
            user.slack_access_token = access_token
            user.agora_initialized = True
            user.save()
        if not user.agora_welcome_message_sent:
            # One-time onboarding DM with a team-picker dropdown attachment.
            message = f"""
Welcome back, and nice to meet you {user.first_name}!
Slack already told me your name and gave me your avatar. If you want to change any of those, you can change them in your <{settings.WEB_APP_PROFILE_URL}|Slack Profile> and it’ll get updated automatically on Agora.
What team are you a part of at Hipo? This helps teammates find you more easily.
            """
            attachments = [
                {
                    "callback_id": "set_team",
                    "fallback": "What team are you a part of at Hipo? This helps teammates find you more easily.",
                    "actions": [
                        {
                            "type": "select",
                            "name": "set_team",
                            "text": "What team are you a part of at Hipo? This helps teammates find you more easily.",
                            "options": [
                                {
                                    "text": c[1],
                                    "value": c[0],
                                }
                                for c in TEAM_CHOICES
                            ]
                        }
                    ]
                }
            ]
            user.send_slack_message(text=message, attachments=attachments)
            user.agora_welcome_message_sent = True
            user.save()
        token, created = Token.objects.get_or_create(user=user)
        data = UserSerializer(user, context=self.get_serializer_context()).data
        data["token"] = token.key
        return Response(data)
class SlackInteractionsView(GenericAPIView):
    """Handles Slack interactive-message callbacks.

    Dispatches on ``callback_id``: team selection (with a one-time welcome DM
    on first pick), project add/remove, and one-liner removal. Responses to
    add/remove actions replace the original Slack message in place.
    """
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        payload = json.loads(request.POST["payload"])
        user = User.objects.get(slack_user_id=payload["user"]["id"])
        # Captured before the update so we can detect the *first* team pick.
        had_team = user.team
        if payload["callback_id"] == "set_team":
            user.team = payload["actions"][0]["selected_options"][0]["value"]
            user.save()
            if not had_team:
                text = f"""
You’re now a proper resident of the Agora! Here’s what your <{settings.WEB_APP_PROFILE_URL}|Agora Profile> looks like.
I’ll also add this link to your Slack bio, so teammates can access it more easily. You’ll also see that everyone’s Slack bio is updated with their Agora profile.
First of all, please visit your <{settings.WEB_APP_PROFILE_URL}|Agora Profile> and add your birthday, phone number etc, so that teammates can find you (and celebrate your birthday!)
{AGORA_HELP_TEXT}
                """
                slack_client = WebClient(settings.SLACK_BOT_USER_ACCESS_TOKEN)
                slack_client.chat_postMessage(channel=user.slack_user_id, text=text, as_user=True)
        elif payload["callback_id"] == "add_project":
            project = Project.objects.get(pk=payload["actions"][0]["selected_options"][0]["value"])
            project.users.add(user)
            requests.post(payload["response_url"], data=json.dumps({
                "text": f"Project `{project.name}` is added.",
                "replace_original": True
            }))
        elif payload["callback_id"] == "remove_project":
            project = Project.objects.get(pk=payload["actions"][0]["selected_options"][0]["value"])
            project.users.remove(user)
            requests.post(payload["response_url"], data=json.dumps({
                "text": f"Project `{project.name}` is removed.",
                "replace_original": True
            }))
        elif payload["callback_id"] == "remove_one_liner":
            one_liner = OneLiner.objects.get(user=user, pk=payload["actions"][0]["selected_options"][0]["value"])
            one_liner.delete()
            requests.post(payload["response_url"], data=json.dumps({
                "text": f"One-liner`{one_liner.body}` is removed.",
                "replace_original": True
            }))
        return Response()
class SlackEventsView(GenericAPIView):
    """Handles the Slack Events API: URL-verification challenge plus the bot's
    text command set (question/skip, cancel, help, add/remove project,
    add/remove oneliner) and free-text answers to the last asked question.
    """
    permission_classes = [AllowAny]

    @transaction.atomic
    def post(self, request, *args, **kwargs):
        if request.data.get("challenge"):
            # Activate events subscription.
            return Response(request.data["challenge"])
        event = request.data["event"]
        # Ignore non-message events and the bot's own messages.
        if event["type"] == "message" and event.get("user") and event["user"] != User.BOT_USER_SLACK_ID:
            user = User.objects.get(slack_user_id=event["user"])
            message = event["text"]
            # Commands are matched whitespace-insensitively and lower-cased.
            command = message.replace(" ", "").lower()
            if command in ["question", "skip"]:
                user.send_next_question()
            elif command in ["cancel"]:
                # Cancel active commands.
                user.clear_last_asked_question()
            elif command in ["help"]:
                user.send_slack_message(text=AGORA_HELP_TEXT)
            elif command in ["selam", "hi"]:
                # Cancel active commands.
                user.clear_last_asked_question()
                user.send_slack_message(text=f"a.s")
            elif command in ["addproject"]:
                user.clear_last_asked_question()
                # Dropdown listing every project; handled by
                # SlackInteractionsView via callback_id "add_project".
                attachments = [
                    {
                        "callback_id": "add_project",
                        "fallback": "Add project.",
                        "actions": [
                            {
                                "type": "select",
                                "name": "add_project",
                                "text": "Add project.",
                                "options": [
                                    {
                                        "text": c[1],
                                        "value": c[0],
                                    }
                                    for c in Project.objects.values_list("id", "name")
                                ]
                            }
                        ]
                    }
                ]
                user.send_slack_message(attachments=attachments)
            elif command in ["removeproject"]:
                user.clear_last_asked_question()
                if not user.projects.exists():
                    user.send_slack_message(text="You don't have any project.")
                else:
                    # Dropdown restricted to the user's own projects.
                    attachments = [
                        {
                            "callback_id": "remove_project",
                            "fallback": "Remove project.",
                            "actions": [
                                {
                                    "type": "select",
                                    "name": "remove_project",
                                    "text": "Remove project.",
                                    "options": [
                                        {
                                            "text": c[1],
                                            "value": c[0],
                                        }
                                        for c in user.projects.values_list("id", "name")
                                    ]
                                }
                            ]
                        }
                    ]
                    user.send_slack_message(attachments=attachments)
            elif command in ["removeoneliner"]:
                user.clear_last_asked_question()
                if not user.one_liners.exists():
                    user.send_slack_message(text="You don't have any one-liner.")
                else:
                    attachments = [
                        {
                            "callback_id": "remove_one_liner",
                            "fallback": "Remove one-liner.",
                            "actions": [
                                {
                                    "type": "select",
                                    "name": "remove_one_liner",
                                    "text": "Remove one-liner.",
                                    "options": [
                                        {
                                            "text": c[1],
                                            "value": c[0],
                                        }
                                        for c in user.one_liners.values_list("id", "body")
                                    ]
                                }
                            ]
                        }
                    ]
                    user.send_slack_message(attachments=attachments)
            elif message.startswith("add oneliner") or message.startswith("Add oneliner"):
                user.clear_last_asked_question()
                one_liner = message.replace("add oneliner", "").replace("Add oneliner", "").strip()
                if not one_liner:
                    user.send_slack_message(text="Please provide one-liner. Sample command: add oneliner Lives in istanbul")
                else:
                    OneLiner.objects.create(user=user, body=one_liner)
                    user.send_slack_message(text=f"One-liner `{one_liner}` is added.")
            else:
                # Free text: treat as an answer only within 5 minutes of the
                # last question being asked; otherwise reply with a hint.
                is_waiting_for_answer = user.last_question_asked_datetime and (user.last_question_asked_datetime > (timezone.now() - timedelta(minutes=5)))
                if is_waiting_for_answer:
                    Answer.objects.create(
                        question=user.last_question_asked,
                        body=message,
                        user=user
                    )
                    user.last_question_asked = None
                    user.last_question_asked_datetime = None
                    user.save()
                    user.send_slack_message(text=f"Good answer! I’ll save that in your <{settings.WEB_APP_PROFILE_URL}|Agora Profile>. If you want to change your answer, you can do it from there.")
                else:
                    user.clear_last_asked_question()
                    message = "Hmm, I’m not sure what to do with that command. If you’re stuck, you can type *help* and I’ll give you a list of commands I can actually understand =)"
                    user.send_slack_message(text=message)
        return Response()
| 44.944444 | 400 | 0.540173 |
8ae65a439aa2cef37955f5c156baf28cb273b5ce | 3,485 | py | Python | bindings/python/ensmallen/datasets/string/shewanellaspucdkl21.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-09-10T18:31:58.000Z | 2022-03-24T04:28:04.000Z | bindings/python/ensmallen/datasets/string/shewanellaspucdkl21.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/shewanellaspucdkl21.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Shewanella sp. UCD-KL21.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ShewanellaSpUcdKl21(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Shewanella sp. UCD-KL21 graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.
        The available versions are:
            - homology.v11.5
            - physical.links.v11.5
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Shewanella sp. UCD-KL21 graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
    title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
    author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
    journal={Nucleic acids research},
    volume={47},
    number={D1},
    pages={D607--D613},
    year={2019},
    publisher={Oxford University Press}
    }
    ```
    """
    # The retriever object is constructed and then immediately called to
    # perform the actual download/build.
    return AutomaticallyRetrievedGraph(
        graph_name="ShewanellaSpUcdKl21",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| 33.190476 | 223 | 0.676327 |
402bf3354cf4006ee8d9ae6c504b0886e0c5b656 | 12,884 | py | Python | fling/flingPretrained.py | fastboardAI/linguisticFeatureExtractor | 6cb2b6e7133e0f42acde1ce6e0344ffcbb578a7a | [
"MIT"
] | 4 | 2020-05-03T10:08:40.000Z | 2020-10-16T17:31:45.000Z | fling/flingPretrained.py | fastboardAI/linguisticFeatureExtractor | 6cb2b6e7133e0f42acde1ce6e0344ffcbb578a7a | [
"MIT"
] | 2 | 2020-09-15T17:30:57.000Z | 2020-09-27T04:25:03.000Z | fling/flingPretrained.py | fastboardAI/linguisticFeatureExtractor | 6cb2b6e7133e0f42acde1ce6e0344ffcbb578a7a | [
"MIT"
] | 2 | 2020-05-03T06:50:38.000Z | 2020-09-27T08:58:30.000Z | import matplotlib as mpl
from imp import reload
from nltk.corpus import stopwords
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import nltk,re,pprint,scipy
import sys,glob,os
import operator, string, argparse, math, random, statistics
import matplotlib.pyplot as plt
from sklearn import metrics
class flingPretrained:
    '''
    Works with pretrained linguistic embeddings over a document collection.

    Loads pretrained linguistic models (GloVe; doc2vec/word2vec/fastText/SDAE
    slots exist but are not implemented here) and computes per-document
    vectors, distances and group characteristics.

    Embeddings referenced (level / preserves word order):
        GloVe                : word / No
        Doc2Vec              : document / Yes
        Word2Vec             : word / No
        TF-IDF               : document / No
        tfIDF-weighted GloVe : document / No
    '''
    def __init__(self,data):
        # data: document collection; indexed like a DataFrame with at least a
        # 'tfMatrix' column (per-document word/tf/tf-idf tables) -- see the
        # methods below for the columns they read and add.
        self.data = data
        # Number of documents in the (training) collection.
        self.nDocs = len(self.data)
        self.nDocsTest = 0
        # Cache of computed pairwise distances (currently unused here).
        self.allDistances = {}
        # Per-embedding aggregate characteristics, filled in lazily.
        self.groupedCharacteristic = {'glove' : None, 'vec_tfidf-doc2vec' : None, 'vec_tfidf-glove' : None, 'doc2vec' : None}
        # Loaded word-vector models keyed by embedding name.
        self.wordVecModel = {'glove':None, 'doc2vec':None}
        print("\nWorking on pretrained word embeddings!\n")
'''
Load pretrained word vectors: gloVe, fastText, doc2vec, word2vec, SDAE
by calling the appropriate load function for the vector type.
'''
    def loadPretrainedWordVectors(self,vecType):
        """Load pretrained vectors of the given type into ``self.wordVecModel``.

        Only ``'glove'`` is implemented; any other ``vecType`` is silently
        ignored (the corresponding model slot stays ``None``).
        """
        if vecType == 'glove':
            self.wordVecModel['glove'] = self.loadGloveModel()
            print("GloVe Vectors Loaded!\n")
'''
Loads the glove model provided a filename.
TASK: edit the function to take a filename instead of hard-coding the location of the GloVe model.
'''
def loadGloveModel(self):
print("Loading Glove Model\n")
try:
f = open('../datasets/glove.6B/glove.6B.50d.txt','r')
except:
f = open('datasets/glove.6B/glove.6B.50d.txt','r')
gloveModel = {}
for line in f:
splitLines = line.split()
word = splitLines[0]
wordEmbedding = np.array([float(value) for value in splitLines[1:]])
gloveModel[word] = wordEmbedding
print(len(gloveModel)," words loaded!\n")
return(gloveModel)
'''
Returns the computed GloVe vector for the document. Note: a document contains multiple words,
and we have word vectors corresponding to every word in Glove
'''
def getDocVector(self,doc_Id):
gvl=self.getGloveVectorList(listx)
glove_dv = np.mean(gvl,axis=0)
return(glove_dv)
'''
Returns a list of GloVe vectors for all words in the document.
'''
def getGloveVectorList(self,listx):
vecList = []
nf = []
presenceBit = []
for w in listx:
try:
vecList.append(self.wordVecModel['glove'][w])
presenceBit.append(1)
except:
presenceBit.append(0)
nf.append(w)
continue
if len(vecList)==0:
return([[0]*50],[])
vecArray = np.stack(vecList, axis=0)
return vecArray,presenceBit
'''
Add two new computed vectors to the data.
a) glove-vector : plain GloVe vectors non-weighted
b) glove-tfidf : GloVe vectors weighted with their tfIDF scores
uses numpy.average(a, axis=None, weights=None, returned=False)[source]
'''
def addDocumentGloveVectors(self):
vecL = []
vecWL = []
for indx in range(self.nDocs):
listWords_1 = set(list(self.data['tfMatrix'][int(indx)]['word']))
tFreqs = np.asarray(list(self.data['tfMatrix'][int(indx)]['tf']))
gvl,prBit = self.getGloveVectorList(listWords_1)
if prBit == []:
vecL.append([0]*50)
vecWL.append([0]*50)
continue;
termFreqs = [a*b for (a,b) in zip(prBit,tFreqs) if a*b!=0] #print("listWords1,termFreqs",listWords_1,termFreqs)
vecL.append(np.nanmean(gvl,axis=0))
vecWL.append(np.average(gvl, axis=0, weights=termFreqs))
self.data['glove-vector'] = vecL
self.getDistanceDistribution(100,'glove-vector')
self.data['glove-tfIDF'] = vecWL
self.getDistanceDistribution(100,'glove-tfIDF')
'''
Distance between two documents using TF-IDF dictionaries.
Method used: Using 'percentage of importance' by using tf-idf score as weights
'''
    def distanceBtnTwoDocs(self, docId_1, docId_2):
        """TF-IDF distance between two documents.

        Each word's tf-idf score is normalized by the document's total
        tf-idf weight ('sumTFIDF'); the distance sums the absolute
        difference of normalized scores over shared words plus the full
        normalized scores of words unique to either document.

        NOTE(review): each per-word score is fetched by filtering the whole
        tfMatrix dataframe, which is O(words) per lookup -- a word->score
        dict per document would be much faster for large vocabularies.
        """
        listWords_1 = set(list(self.data['tfMatrix'][int(docId_1)]['word']))
        listWords_2 = set(list(self.data['tfMatrix'][int(docId_2)]['word']))
        common = listWords_1.intersection(listWords_2)
        diff1_2 = listWords_1.difference(listWords_2)
        diff2_1 = listWords_2.difference(listWords_1)
        sumwt1 = self.data['sumTFIDF'][docId_1]
        sumwt2 = self.data['sumTFIDF'][docId_2]
        score_common, score_doc1, score_doc2 = 0,0,0
        #print(len(common),len(diff1_2),len(diff2_1))
        # Shared words contribute the difference in relative importance.
        for word_c in common:
            score_1 = float(self.data['tfMatrix'][docId_1].loc[self.data['tfMatrix'][docId_1]['word'] == word_c]['tf-idf'])
            score_2 = float(self.data['tfMatrix'][docId_2].loc[self.data['tfMatrix'][docId_2]['word'] == word_c]['tf-idf'])
            score_common += abs(score_1/float(sumwt1) - score_2/float(sumwt2))
        # Words present in only one document contribute their full weight.
        for word_d12 in diff1_2:
            score_1 = float(self.data['tfMatrix'][docId_1].loc[self.data['tfMatrix'][docId_1]['word'] == word_d12]['tf-idf'])
            score_doc1 += score_1/float(sumwt1)
        for word_d21 in diff2_1:
            score_2 = float(self.data['tfMatrix'][docId_2].loc[self.data['tfMatrix'][docId_2]['word'] == word_d21]['tf-idf'])
            score_doc2 += score_2/float(sumwt2)
        score_total = score_common + score_doc1 + score_doc2
        return(score_total)
def getDocVector(self,listx):
    """Average the GloVe vectors of the given words into one document vector.

    BUG FIX: getGloveVectorList() returns a (vectors, presence-bits) pair —
    see the other call site, which unpacks two values — so averaging the
    returned object directly mixed the presence bits into the mean.  Only
    the vector list is averaged now.
    """
    gvl, _ = self.getGloveVectorList(listx)
    glove_dv = np.mean(gvl, axis=0)
    return glove_dv
'''
Returns the distance between two GloVe vectors.
'''
def getGloveDistance(self,docId_1,docId_2,method):
    """Distance between the GloVe vectors of two documents.

    Only ``method == 'average'`` is implemented (Euclidean distance between
    the mean word vectors); any other value returns None, matching the
    original behaviour.
    """
    words_a = set(list(self.data['tfMatrix'].iloc[int(docId_1)]['word']))
    words_b = set(list(self.data['tfMatrix'].iloc[int(docId_2)]['word']))
    if method != 'average':
        return None
    doc_vec_a = self.getDocVector(words_a)
    doc_vec_b = self.getDocVector(words_b)
    return np.linalg.norm(doc_vec_a - doc_vec_b)
def drawProgressBar(self, percent, barLen = 50):
    """Render an in-place console progress bar for ``percent`` in [0, 1]."""
    # Clamp so out-of-range percentages still render a barLen-wide bar.
    filled = max(0, min(barLen, int(barLen * percent)))
    bar = "=" * filled + " " * (barLen - filled)
    sys.stdout.write("\r")
    sys.stdout.write("[ %s ] %.2f%%" % (bar, percent * 100))
    sys.stdout.flush()
def getDistance(self,docId_1,docId_2,vectorName):
    """Euclidean distance between two documents' stored vectors.

    ``vectorName`` selects the embedding: 'glove' reads
    self.data['glove-vector'], 'tfidf' reads self.data['tfidf2vec-tfidf'].

    BUG FIX: the body previously branched on an undefined name ``method``
    (the parameter is ``vectorName``), so every call raised NameError.
    Unknown names now raise ValueError instead of UnboundLocalError.
    """
    if vectorName == 'glove':
        dv_1 = self.data['glove-vector'][int(docId_1)]
        dv_2 = self.data['glove-vector'][int(docId_2)]
    elif vectorName == 'tfidf':
        dv_1 = self.data['tfidf2vec-tfidf'][int(docId_1)]
        dv_2 = self.data['tfidf2vec-tfidf'][int(docId_2)]
    else:
        raise ValueError("unknown vectorName: %r" % (vectorName,))
    dist = np.linalg.norm(dv_1 - dv_2)
    return dist
'''
Get sample distance distribution between numx random documents in the data and plot histogram
'''
def getDistanceDistribution(self,numx,vectorName):
    """Histogram of pairwise distances between randomly sampled documents.

    Samples numx/2 document ids from each half of the corpus and computes
    the Euclidean distance between every cross pair of the corresponding
    vectors in self.data[vectorName].

    BUG FIX: the sampled ids in ``doca``/``docb`` were previously never
    used — the loop indices themselves indexed the data, so the first
    numx/2 documents were always compared.  The sampled ids are used now.

    NOTE(review): the id ranges 1..1026 and 1027..2053 are hard-coded for
    the current dataset split — confirm they stay within self.nDocs.
    """
    numHalf = int(numx/2)
    doca, docb = [], []
    for i in range(numHalf):
        doca.append(random.randint(1, 1026))
        docb.append(random.randint(1027, 2053))
    distanceSample = []
    total = numHalf*numHalf
    for i in range(numHalf):
        for j in range(numHalf):
            dv_1 = self.data[vectorName][doca[i]]
            dv_2 = self.data[vectorName][docb[j]]
            distanceSample.append(np.linalg.norm(dv_1 - dv_2))
            self.drawProgressBar((i*numHalf + j + 1)/total)
    pltx = plt.hist(distanceSample, bins=50)
    return pltx
'''
Returns the gloVe vector for the word from the pre-trained gloVe vectors.
'''
def getGloveScore(self,w):
    """Return the pre-trained GloVe embedding for word ``w``.

    Falls back to the one-element list ``[0]`` when the word is missing
    from the vocabulary (or the model is not loaded).

    NOTE(review): the original fallback literal ``[0*50]`` evaluates to
    ``[0]``, not a 50-dim zero vector; doctfidf2vec() relies on this
    length-1 sentinel to skip out-of-vocabulary words, so the value is
    deliberately kept.  The bare ``except:`` is narrowed so that
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    try:
        return self.wordVecModel['glove'][w]
    except Exception:
        return [0]
'''
Combines document tfIDF dictionary with other document vectors to create combined vectors.
'''
def doctfidf2vec(self,docId,mode):
    """Weighted average of a document's word embeddings.

    Each word's GloVe vector is scaled by its term weight — raw tf when
    ``mode == 'tf-only'``, tf-idf when ``mode == 'tf-idf'`` — and the
    scaled vectors are nan-mean averaged.  Words missing from the GloVe
    vocabulary are skipped: getGloveScore returns a length-1 sentinel for
    them.
    """
    words = list(self.data['tfMatrix'][int(docId)]['word'])
    if mode == "tf-only":
        weights = list(self.data['tfMatrix'][int(docId)]['tf'])
    elif mode == "tf-idf":
        weights = list(self.data['tfMatrix'][int(docId)]['tf-idf'])
    embeddings = [self.getGloveScore(word) for word in words]
    docVecList = []
    for weight, embedding in zip(weights, embeddings):
        # Scale (at most) the first 50 components by the word's weight.
        scaled = [float(weight) * component for component in list(embedding[:50])]
        if len(scaled) == 1:
            # Length-1 sentinel: word not in the GloVe vocabulary.
            continue
        docVecList.append(scaled)
    return(np.nanmean(docVecList, axis=0))
'''
For each group in the specified column, average all the document vectors in the
group to create a group characteristic
TASK: explore more options of averaging the vectors. '''
def createGroupedCharacteristics(self,column):
    """Average every document-vector column per group of ``column``.

    For each vector column, stores a one-column DataFrame of per-group
    averages in self.groupedCharacteristic, keyed by the vector name.
    TASK: explore more options of averaging the vectors.

    BUG FIX: removed a stray ``self.dataTrain.groupby([column])`` whose
    result was discarded — groupby() has no side effects, so the line was
    dead code.
    """
    vecList = ['glove-vector', 'doc2vec', 'vec_tfidf-glove', 'glove-tfIDF']
    print("\nComputing groupCharacteristics for,", column)
    for vec in vecList:
        self.groupedCharacteristic[vec] = self.dataTrain.groupby(column)[vec].apply(np.average).to_frame()
'''
    Function to return the group most similar to the vector, based on the distance computed against every group characteristic.
'''
def getNearestGroup(self,vec,vectorName):
    """Return the group label whose characteristic vector is nearest to ``vec``.

    Distance is the cosine distance against each row of
    self.groupedCharacteristic[vectorName].

    BUG FIX: the all-zero guard used ``not np.all(vec)``, which fires when
    ANY component is zero and silently replaced perfectly good vectors;
    ``not np.any(vec)`` correctly detects the all-zero case only.  The
    guard is also hoisted out of the loop (it is loop-invariant), and the
    redundant np.linalg.norm() around the scalar cosine distance is gone.
    """
    minDist = math.inf
    minGroup = None
    # Cosine distance is undefined for an all-zero vector; substitute a
    # tiny epsilon vector.  NOTE(review): assumes 50-dim embeddings —
    # TODO confirm against the stored vectors.
    if not np.any(vec):
        vec = [0.0001] * 50
    for colx in self.groupedCharacteristic[vectorName].index.values:
        vecy = self.groupedCharacteristic[vectorName].loc[colx].to_numpy(dtype=object)
        distx = scipy.spatial.distance.cosine(vec, vecy)
        if distx < minDist:
            minDist = distx
            minGroup = colx
    return minGroup
'''
Explore options to optimize space using function.
'''
def splitTestTrain(self):
    """Split self.data 70/30 into dataTrain/dataTest and record the test size."""
    split_at = int(self.nDocs * 0.7)
    self.dataTrain = self.data[:split_at]
    self.dataTest = self.data[split_at:]
    self.nDocsTest = len(self.dataTest)
'''
Add computed group as a new column.
'''
def addVectorComputedGroup(self,vectorName,groupName):
    """Add column ``groupName`` holding the nearest group for each test document."""
    self.dataTest[groupName] = [
        self.getNearestGroup(self.dataTest[vectorName].iloc[i], vectorName)
        for i in range(self.nDocsTest)
    ]
'''
Simple percentage count of documents which got the correct labels assigned.
'''
def getAccuracy(self,compareWith,vecName):
    """Print the percentage of test documents where ``vecName`` matches ``compareWith``."""
    matches = sum(
        1
        for i in range(self.nDocsTest)
        if self.dataTest[vecName].iloc[i] == self.dataTest[compareWith].iloc[i]
    )
    print("Accuracy of", vecName, matches/self.nDocsTest*100, "%")
'''
Convert tfIDF dictionary for every document with precomputed word-embeddings
'''
def tfidf2vec(self,mode,method):
    """Build a document-vector column from per-word weighted embeddings.

    ``mode == 'tf-only'`` weights by raw term frequency and fills column
    'vec_tf-<method>'; any other mode weights by tf-idf and fills
    'vec_tfidf-<method>'.
    """
    if mode == 'tf-only':
        columnName = 'vec_tf-' + method
        weighting = 'tf-only'
    else:
        columnName = 'vec_tfidf-' + method
        weighting = 'tf-idf'
    print("\nComputing column:", columnName)
    vecL = []
    for indx in range(self.nDocs):
        vecL.append(self.doctfidf2vec(indx, weighting))
        self.drawProgressBar((indx + 1)/self.nDocs)
    self.data[columnName] = vecL
    # Best-effort: plot the sample distance distribution if it is available.
    try:
        self.getDistanceDistribution(100,'glove-tfIDF')
    except:
        return
ab693a36c18d08c064411f5396a6dfddbc8e163a | 632 | py | Python | manage.py | RacherinTest/pricetracker | 4b9fc5c12301ae35fba7a78b18cf6bfd5b21a481 | [
"Apache-2.0"
] | 3 | 2020-10-03T14:37:40.000Z | 2021-03-28T17:21:44.000Z | manage.py | bhuiyanmobasshir94/price_tracker | e65e4d46d2b4ebda406061a0c18a5e452ed208a6 | [
"MIT"
] | 8 | 2021-02-08T20:41:55.000Z | 2021-09-22T18:36:38.000Z | manage.py | bhuiyanmobasshir94/price_tracker | e65e4d46d2b4ebda406061a0c18a5e452ed208a6 | [
"MIT"
] | 1 | 2020-10-03T14:37:41.000Z | 2020-10-03T14:37:41.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the pricetracker project."""
    # Fall back to the project settings module unless one is already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pricetracker.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint, keeping the original cause chained.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
# Entry point when executed as a script (e.g. ``python manage.py runserver``).
if __name__ == '__main__':
    main()
| 28.727273 | 76 | 0.685127 |
a7c055a14b1786d28f91ec5ada3b180cbc758261 | 3,809 | py | Python | LPES-video/04.01-operacje_na_plikach/04.01.05-napisy.py | opcode-eu-org-website/LPES-wyklady-src | dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0 | [
"MIT"
] | null | null | null | LPES-video/04.01-operacje_na_plikach/04.01.05-napisy.py | opcode-eu-org-website/LPES-wyklady-src | dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0 | [
"MIT"
] | null | null | null | LPES-video/04.01-operacje_na_plikach/04.01.05-napisy.py | opcode-eu-org-website/LPES-wyklady-src | dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0 | [
"MIT"
] | 1 | 2021-07-03T16:43:47.000Z | 2021-07-03T16:43:47.000Z | # Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/)
# Copyright (c) 2020-2021 Robert Ryszard Paciorek <rrp@opcode.eu.org>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Append this clip's definition to the shared clipData list; start a fresh
# list when this fragment is evaluated standalone (clipData not yet defined).
try: clipData
except NameError: clipData = []
clipData += [
{ 'comment': 'napisy' },
{
'console': [
[0.0, eduMovie.runCommandString(r"echo 'Ala ma kota' | sed -e 's#kota#psa#g'")],
["grep", eduMovie.runCommandString(r"echo 'Ala ma bota' | grep bot >/dev/null && echo 'pasuje'")],
["wc", eduMovie.runCommandString(r"echo 'aąbc ćd' | wc -m")],
["wc + 0.5", eduMovie.runCommandString(r"echo 'aąbc ćd' | wc -c")],
["wc + 1.0", eduMovie.runCommandString(r"echo 'aąbc ćd' | wc -w")],
["wc + 1.0", eduMovie.runCommandString(r"wc -l /etc/passwd")],
],
'text' : [
'Manipulując różnymi komendami takimi jak grep, cut, sed <m> możemy operować nie tylko na zawartości plików, <m> ale też na napisach, gdyż możemy na przykład <m>'
"wypisać jakiś napis przy pomocy echo, <m> przekazać go strumieniem do sed'a i uzyskać napis zmodyfikowany." + '<mark name="grep" />'
'Podobnie wysyłając ciąg do polecenia grep i sprawdzając kod <m> powrotu można sprawdzić czy napis ten pasuje do wyrażenia regularnego. <m>'
'Mamy zatem podstawowe narzędzia do modyfikacji <m> i wykonywania innych operacji na napisach. <mark name="wc" />'
'W przypadku zarówno plików jak i napisów przydatna może być także <m> komenda <wc>[Wu Ce], która umożliwia nam liczenie liter (za pomocą opcji <-m>[minus M]), bajtów (opcja <-c>[minus C])'
', słów (opcja <-w>[minus W]) oraz linii (<-l>[minus L]). <m>'
'Warto zauważyć różnicę pomiędzy liczeniem bajtów a znaków, <m> która objawia się dla znaków z poza podstawowego zakresu ASCII. <m>'
'W tym wypadku bajtów mamy o 2 więcej, <m> bo użyliśmy dwóch polskich znaków które w utf-8 są kodowane dwubajtowo. <m>'
]
},
{
'console': [
[0.0, eduMovie.runCommandString(r"basename /etc/network/firewall.sh")],
[2.0, eduMovie.runCommandString(r"basename /etc/network/firewall.sh .sh")],
["dirname", eduMovie.runCommandString(r"dirname /etc/network/firewall.sh")],
],
'text' : [
'Mamy również polecenia związane z przetwarzaniem ścieżek. <m>'
'basename wypisuje nazwę pliku czyli ostatni element ścieżki, <m> może być także użyty do usunięcia wskazanego rozszerzenia. <mark name="dirname" />'
'dirname wypisuje nazwę katalogu, <m> czyli wszystko co nie jest ostatnim elementem ścieżki. <m>'
'Należy pamiętać że komendy te działają w oparciu o przetwarzanie napisów, <m> czyli ich nie interesuje czy dana ścieżka istnieje <m> i czy w rzeczywistości wskazuje katalog czy zwykły plik. <m>'
'Polecenia te przetwarzają napisy, a nie ścieżki jako obiekty w systemie plików. <m>'
]
},
]
| 56.850746 | 198 | 0.719874 |
f9eb1cd79b0c722456ba5f3727a4f59854319593 | 49,392 | py | Python | tests/testapp/test_cache.py | javiplx/django-mysql | 4a0aa54b1cd37374a982e821e819f72016ef2600 | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/test_cache.py | javiplx/django-mysql | 4a0aa54b1cd37374a982e821e819f72016ef2600 | [
"BSD-3-Clause"
] | null | null | null | tests/testapp/test_cache.py | javiplx/django-mysql | 4a0aa54b1cd37374a982e821e819f72016ef2600 | [
"BSD-3-Clause"
] | null | null | null | import imp
import os
import pickle
import time
from decimal import Decimal
from io import StringIO
import django
import pytest
from django.core.cache import CacheKeyWarning, cache, caches
from django.core.management import CommandError, call_command
from django.db import IntegrityError, OperationalError, connection
from django.db.migrations.state import ProjectState
from django.http import HttpResponse
from django.middleware.cache import FetchFromCacheMiddleware, UpdateCacheMiddleware
from django.test import RequestFactory, TestCase, TransactionTestCase
from django.test.utils import override_settings
from parameterized import parameterized
from django_mysql.cache import BIGINT_SIGNED_MAX, BIGINT_SIGNED_MIN, MySQLCache
from tests.testapp.models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
    """Module-level function cached as a value in test_data_types."""
    return 42
class C:
    """Simple class cached as a value in test_data_types.

    Note: ``m`` takes ``n`` rather than ``self`` — this mirrors the fixture
    in Django's own cache test suite, from which these tests were copied.
    """
    def m(n):
        return 24
class Unpickable(object):
    """Object whose pickling always fails, for the *_fail_on_pickleerror tests."""
    def __getstate__(self):
        raise pickle.PickleError()
class MyInt(int):
    """int subclass with an extra method, used as a cache-value fixture."""
    def times2(self):
        return self * 2
def custom_key_func(key, key_prefix, version):
    """A customized cache key function: CUSTOM-<prefix>-<version>-<key>."""
    return 'CUSTOM-{}-{}-{}'.format(key_prefix, version, key)
def reverse_custom_key_func(full_key):
    """The reverse of custom_key_func: split CUSTOM-<prefix>-<version>-<key>.

    Only the first two dashes after the CUSTOM- marker are significant, so
    the key itself may contain dashes.
    """
    remainder = full_key[len('CUSTOM-'):]
    key_prefix, _, rest = remainder.partition('-')
    version_str, _, key = rest.partition('-')
    return key, key_prefix, int(version_str)
# Base configuration for each test cache alias; merged with per-test
# overrides by caches_setting_for_tests().
_caches_setting_base = {
    'default': {},
    # Alias with a per-process key prefix, to test key-space isolation.
    'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
    'v2': {'VERSION': 2},
    # Aliases using a custom (reversible) key function — once as callables,
    # once as dotted-path strings.
    'custom_key': {'KEY_FUNCTION': custom_key_func,
                   'REVERSE_KEY_FUNCTION': reverse_custom_key_func},
    'custom_key2': {
        'KEY_FUNCTION': __name__ + '.custom_key_func',
        'REVERSE_KEY_FUNCTION': __name__ + '.reverse_custom_key_func',
    },
    # Culling behaviour variants: always cull, cull everything, never cull.
    'cull': {'OPTIONS': {'CULL_PROBABILITY': 1,
                         'MAX_ENTRIES': 30}},
    'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0,
                              'CULL_PROBABILITY': 1,
                              'MAX_ENTRIES': 30}},
    'no_cull': {'OPTIONS': {'CULL_FREQUENCY': 2,
                            'CULL_PROBABILITY': 0,
                            'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(options=None, **params):
    """Build a CACHES dict for the test aliases.

    ``params`` are test-specific overrides layered on top of
    ``_caches_setting_base``, giving the search order:
    params -> _caches_setting_base -> base.  When ``options`` is given it
    is merged into each alias's OPTIONS (copied first, so the base config
    is never mutated).
    """
    setting = {}
    for alias, base_config in _caches_setting_base.items():
        merged = dict(base_config)
        merged.update(params)
        if options is not None:
            merged_options = dict(merged.get('OPTIONS', {}))
            merged_options.update(options)
            merged['OPTIONS'] = merged_options
        setting[alias] = merged
    return setting
# Spaces are used in the table name to ensure quoting/escaping is working
def override_cache_settings(BACKEND='django_mysql.cache.MySQLCache', LOCATION='test cache table', **kwargs):
    """Return an override_settings decorator/context configuring every test cache alias."""
    caches_config = caches_setting_for_tests(
        BACKEND=BACKEND,
        LOCATION=LOCATION,
        **kwargs
    )
    return override_settings(CACHES=caches_config)
class MySQLCacheTableMixin(TransactionTestCase):
    """TransactionTestCase mixin that creates/drops the MySQLCache table.

    The classmethods' first parameter is now conventionally named ``cls``
    (it was misleadingly called ``self``); callers invoke them on the
    class, so this rename is safe.
    """

    # Spaces in the name ensure identifier quoting/escaping is exercised.
    table_name = 'test cache table'

    @classmethod
    def create_table(cls):
        """Create the cache table using the backend's DDL template."""
        sql = MySQLCache.create_table_sql.format(table_name=cls.table_name)
        with connection.cursor() as cursor:
            cursor.execute(sql)

    @classmethod
    def drop_table(cls):
        """Drop the cache table created by create_table()."""
        with connection.cursor() as cursor:
            cursor.execute('DROP TABLE `%s`' % cls.table_name)
@override_cache_settings()
class MySQLCacheTests(MySQLCacheTableMixin, TestCase):
@classmethod
def setUpClass(cls):
cls.create_table()
super(MySQLCacheTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(MySQLCacheTests, cls).tearDownClass()
cls.drop_table()
def table_count(self):
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM `%s`" % self.table_name)
return cursor.fetchone()[0]
# These tests were copied from django's tests/cache/tests.py file
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
assert cache.get("key") == "value"
def test_add(self):
# A key can be added to a cache
result = cache.add("addkey1", "value")
assert result
result = cache.add("addkey1", "newvalue")
assert not result
assert cache.get("addkey1") == "value"
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
assert not caches['prefix'].has_key('somekey') # noqa
caches['prefix'].set('somekey', 'value2')
assert cache.get('somekey') == 'value'
assert caches['prefix'].get('somekey') == 'value2'
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
assert cache.get("does_not_exist") is None
assert cache.get("does_not_exist", "bang!") == "bang!"
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
assert cache.get("key1") == "spam"
cache.delete("key1")
assert cache.get("key1") is None
assert cache.get("key2") == "eggs"
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
assert cache.has_key("hello1") # noqa
assert not cache.has_key("goodbye1") # noqa
cache.set("no_expiry", "here", None)
assert cache.has_key("no_expiry") # noqa
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
assert "hello2" in cache
assert "goodbye2" not in cache
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
assert cache.incr('answer') == 42
assert cache.get('answer') == 42
assert cache.incr('answer', 10) == 52
assert cache.get('answer') == 52
assert cache.incr('answer', -10) == 42
with pytest.raises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
assert cache.decr('answer') == 42
assert cache.get('answer') == 42
assert cache.decr('answer', 10) == 32
assert cache.get('answer') == 32
assert cache.decr('answer', -10) == 42
with pytest.raises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
assert hasattr(cache, 'close')
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
assert cache.get("stuff") == stuff
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
assert Poll.objects.count() == 1
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
assert cached_poll.pub_date == pub_date
# We only want the default expensive calculation run once
assert expensive_calculation.num_runs == 1
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache
# write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
assert expensive_calculation.num_runs == 1
defer_qs = Poll.objects.all().defer('question')
assert defer_qs.count() == 1
assert expensive_calculation.num_runs == 1
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
assert expensive_calculation.num_runs == 1
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
assert expensive_calculation.num_runs == 1
defer_qs = Poll.objects.all().defer('question')
assert defer_qs.count() == 1
cache.set('deferred_queryset', defer_qs)
assert expensive_calculation.num_runs == 1
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and
# set
assert expensive_calculation.num_runs == runs_before_cache_read
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1},
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
assert cache.get(key) == value
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
assert cache.get(key) == value
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
assert cache.get(key) == value
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
assert compressed_value == compressed_result
assert value == decompress(compressed_result).decode()
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
assert compressed_value == compressed_result
assert value == decompress(compressed_result).decode()
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
assert compressed_value == compressed_result
assert value == decompress(compressed_result).decode()
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
assert cache.get("key1") is None
assert cache.get("key2") is None
def test_touch_without_timeout(self):
cache.set("key1", "spam", timeout=0.1)
cache.touch("key1", timeout=0.4)
time.sleep(0.2)
assert "key1" in cache
def test_touch_with_timeout(self):
cache.set("key1", "spam", timeout=0.1)
cache.touch("key1")
time.sleep(0.2)
assert "key1" in cache
def test_touch_already_expired(self):
cache.set("key1", "spam", timeout=0.1)
time.sleep(0.2)
cache.touch("key1", timeout=0.4)
assert "key1" not in cache
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
assert cache.get('key1') == 'eggs'
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
assert cache.get('key2') == 'ham'
cache.set_many(
{'key3': 'sausage', 'key4': 'lobster bisque'},
60 * 60 * 24 * 30 + 1
)
assert cache.get('key3') == 'sausage'
assert cache.get('key4') == 'lobster bisque'
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
assert cache.get('key1') == 'eggs'
cache.add('key2', 'ham', None)
assert cache.get('key2') == 'ham'
added = cache.add('key1', 'new eggs', None)
assert not added
assert cache.get('key1') == 'eggs'
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
assert cache.get('key3') == 'sausage'
assert cache.get('key4') == 'lobster bisque'
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
assert cache.get('key1') is None
cache.add('key2', 'ham', 0)
assert cache.get('key2') is None
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
assert cache.get('key3') is None
assert cache.get('key4') is None
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
assert cache.get("key1") == "spam"
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
assert cache.get('answer1') == 42
assert cache.get('answer1', version=1) == 42
assert cache.get('answer1', version=2) is None
assert caches['v2'].get('answer1') is None
assert caches['v2'].get('answer1', version=1) == 42
assert caches['v2'].get('answer1', version=2) is None
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
assert cache.get('answer2') is None
assert cache.get('answer2', version=1) is None
assert cache.get('answer2', version=2) == 42
assert caches['v2'].get('answer2') == 42
assert caches['v2'].get('answer2', version=1) is None
assert caches['v2'].get('answer2', version=2) == 42
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
assert cache.get('answer3') is None
assert cache.get('answer3', version=1) is None
assert cache.get('answer3', version=2) == 42
assert caches['v2'].get('answer3') == 42
assert caches['v2'].get('answer3', version=1) is None
assert caches['v2'].get('answer3', version=2) == 42
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
assert cache.get('answer4') == 42
assert cache.get('answer4', version=1) == 42
assert cache.get('answer4', version=2) is None
assert caches['v2'].get('answer4') is None
assert caches['v2'].get('answer4', version=1) == 42
assert caches['v2'].get('answer4', version=2) is None
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
assert cache.get('answer1', version=1) is None
assert cache.get('answer1', version=2) == 42
cache.add('answer1', 37, version=2)
assert cache.get('answer1', version=1) is None
assert cache.get('answer1', version=2) == 42
cache.add('answer1', 37, version=1)
assert cache.get('answer1', version=1) == 37
assert cache.get('answer1', version=2) == 42
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
assert cache.get('answer2', version=1) is None
assert cache.get('answer2', version=2) == 42
caches['v2'].add('answer2', 37)
assert cache.get('answer2', version=1) is None
assert cache.get('answer2', version=2) == 42
caches['v2'].add('answer2', 37, version=1)
assert cache.get('answer2', version=1) == 37
assert cache.get('answer2', version=2) == 42
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
assert cache.get('answer3', version=1) == 42
assert cache.get('answer3', version=2) is None
caches['v2'].add('answer3', 37, version=1)
assert cache.get('answer3', version=1) == 42
assert cache.get('answer3', version=2) is None
caches['v2'].add('answer3', 37)
assert cache.get('answer3', version=1) == 42
assert cache.get('answer3', version=2) == 37
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
assert cache.has_key('answer1') # noqa
assert cache.has_key('answer1', version=1) # noqa
assert not cache.has_key('answer1', version=2) # noqa
assert not caches['v2'].has_key('answer1') # noqa
assert caches['v2'].has_key('answer1', version=1) # noqa
assert not caches['v2'].has_key('answer1', version=2) # noqa
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
assert cache.get('answer1', version=1) is None
assert cache.get('answer1', version=2) == 42
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
assert cache.get('answer2', version=1) == 37
assert cache.get('answer2', version=2) is None
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
assert cache.get('answer3', version=1) == 37
assert cache.get('answer3', version=2) is None
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
assert cache.get('answer4', version=1) is None
assert cache.get('answer4', version=2) == 42
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
assert cache.get('answer1', version=1) == 38
assert cache.get('answer1', version=2) == 42
cache.decr('answer1')
assert cache.get('answer1', version=1) == 37
assert cache.get('answer1', version=2) == 42
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
assert cache.get('answer2', version=1) == 37
assert cache.get('answer2', version=2) == 43
cache.decr('answer2', version=2)
assert cache.get('answer2', version=1) == 37
assert cache.get('answer2', version=2) == 42
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
assert cache.get('answer3', version=1) == 37
assert cache.get('answer3', version=2) == 43
caches['v2'].decr('answer3')
assert cache.get('answer3', version=1) == 37
assert cache.get('answer3', version=2) == 42
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
assert cache.get('answer4', version=1) == 38
assert cache.get('answer4', version=2) == 42
caches['v2'].decr('answer4', version=1)
assert cache.get('answer4', version=1) == 37
assert cache.get('answer4', version=2) == 42
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
assert cache.get_many(['ford1', 'arthur1']) == {'ford1': 37, 'arthur1': 42}
assert cache.get_many(['ford1', 'arthur1'], version=1) == {'ford1': 37, 'arthur1': 42}
assert cache.get_many(['ford1', 'arthur1'], version=2) == {}
assert caches['v2'].get_many(['ford1', 'arthur1']) == {}
assert caches['v2'].get_many(['ford1', 'arthur1'], version=1) == {'ford1': 37, 'arthur1': 42}
assert caches['v2'].get_many(['ford1', 'arthur1'], version=2) == {}
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
assert cache.get_many(['ford2', 'arthur2']) == {}
assert cache.get_many(['ford2', 'arthur2'], version=1) == {}
assert cache.get_many(['ford2', 'arthur2'], version=2) == {'ford2': 37, 'arthur2': 42}
assert caches['v2'].get_many(['ford2', 'arthur2']) == {'ford2': 37, 'arthur2': 42}
assert caches['v2'].get_many(['ford2', 'arthur2'], version=1) == {}
assert (
caches['v2'].get_many(['ford2', 'arthur2'], version=2)
== {'ford2': 37, 'arthur2': 42}
)
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
assert cache.get_many(['ford3', 'arthur3']) == {}
assert cache.get_many(['ford3', 'arthur3'], version=1) == {}
assert cache.get_many(['ford3', 'arthur3'], version=2) == {'ford3': 37, 'arthur3': 42}
assert caches['v2'].get_many(['ford3', 'arthur3']) == {'ford3': 37, 'arthur3': 42}
assert caches['v2'].get_many(['ford3', 'arthur3'], version=1) == {}
assert caches['v2'].get_many(['ford3', 'arthur3'], version=2) == {'ford3': 37, 'arthur3': 42}
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
assert cache.get_many(['ford4', 'arthur4']) == {'ford4': 37, 'arthur4': 42}
assert cache.get_many(['ford4', 'arthur4'], version=1) == {'ford4': 37, 'arthur4': 42}
assert cache.get_many(['ford4', 'arthur4'], version=2) == {}
assert caches['v2'].get_many(['ford4', 'arthur4']) == {}
assert caches['v2'].get_many(['ford4', 'arthur4'], version=1) == {'ford4': 37, 'arthur4': 42}
assert caches['v2'].get_many(['ford4', 'arthur4'], version=2) == {}
    def test_incr_version(self):
        """incr_version() bumps a key's version; the value is then visible
        only under the new version number."""
        cache.set('answer', 42, version=2)
        assert cache.get('answer') is None
        assert cache.get('answer', version=1) is None
        assert cache.get('answer', version=2) == 42
        assert cache.get('answer', version=3) is None

        # incr_version() returns the new version number and moves the value.
        assert cache.incr_version('answer', version=2) == 3
        assert cache.get('answer') is None
        assert cache.get('answer', version=1) is None
        assert cache.get('answer', version=2) is None
        assert cache.get('answer', version=3) == 42

        # The 'v2' alias evidently defaults to VERSION=2 (its plain set() is
        # readable at version=2), so incr_version() without an explicit
        # version moves 2 -> 3.
        caches['v2'].set('answer2', 42)
        assert caches['v2'].get('answer2') == 42
        assert caches['v2'].get('answer2', version=1) is None
        assert caches['v2'].get('answer2', version=2) == 42
        assert caches['v2'].get('answer2', version=3) is None

        assert caches['v2'].incr_version('answer2') == 3
        assert caches['v2'].get('answer2') is None
        assert caches['v2'].get('answer2', version=1) is None
        assert caches['v2'].get('answer2', version=2) is None
        assert caches['v2'].get('answer2', version=3) == 42

        # Incrementing the version of a missing key is an error.
        with pytest.raises(ValueError):
            cache.incr_version('does_not_exist')

    def test_decr_version(self):
        """decr_version() moves a value down one version number."""
        cache.set('answer', 42, version=2)
        assert cache.get('answer') is None
        assert cache.get('answer', version=1) is None
        assert cache.get('answer', version=2) == 42

        assert cache.decr_version('answer', version=2) == 1
        assert cache.get('answer') == 42
        assert cache.get('answer', version=1) == 42
        assert cache.get('answer', version=2) is None

        # 'v2' defaults to VERSION=2, so the decrement lands on version 1.
        caches['v2'].set('answer2', 42)
        assert caches['v2'].get('answer2') == 42
        assert caches['v2'].get('answer2', version=1) is None
        assert caches['v2'].get('answer2', version=2) == 42

        assert caches['v2'].decr_version('answer2') == 1
        assert caches['v2'].get('answer2') is None
        assert caches['v2'].get('answer2', version=1) == 42
        assert caches['v2'].get('answer2', version=2) is None

        # Decrementing the version of a missing key is an error.
        with pytest.raises(ValueError):
            cache.decr_version('does_not_exist', version=2)

    def test_custom_key_func(self):
        """Caches configured with different KEY_FUNCTIONs are isolated."""
        # Two caches with different key functions aren't visible to each other
        cache.set('answer1', 42)
        assert cache.get('answer1') == 42
        assert caches['custom_key'].get('answer1') is None
        assert caches['custom_key2'].get('answer1') is None

        # 'custom_key' and 'custom_key2' evidently map keys the same way:
        # both see the entry written through either alias.
        caches['custom_key'].set('answer2', 42)
        assert cache.get('answer2') is None
        assert caches['custom_key'].get('answer2') == 42
        assert caches['custom_key2'].get('answer2') == 42
    def test_cache_write_unpickable_object(self):
        """A response carrying cookies survives a round trip through the
        update/fetch cache middleware pair (twice, to cover re-caching a
        previously cached response)."""
        update_middleware = UpdateCacheMiddleware()
        update_middleware.cache = cache
        fetch_middleware = FetchFromCacheMiddleware()
        fetch_middleware.cache = cache

        factory = RequestFactory()
        request = factory.get('/cache/test')
        request._cache_update_cache = True

        # Nothing has been cached for this request yet.
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        assert get_cache_data is None

        response = HttpResponse()
        content = 'Testing cookie serialization.'
        response.content = content
        response.set_cookie('foo', 'bar')

        update_middleware.process_response(request, response)

        get_cache_data = fetch_middleware.process_request(request)
        assert get_cache_data is not None
        assert get_cache_data.content == content.encode('utf-8')
        assert get_cache_data.cookies == response.cookies

        # Re-caching the fetched response must also round-trip cleanly.
        update_middleware.process_response(request, get_cache_data)
        get_cache_data = fetch_middleware.process_request(request)
        assert get_cache_data is not None
        assert get_cache_data.content == content.encode('utf-8')
        assert get_cache_data.cookies == response.cookies

    def test_add_fail_on_pickleerror(self):
        "See https://code.djangoproject.com/ticket/21200"
        # Unpicklable values must raise rather than store garbage.
        with pytest.raises(pickle.PickleError):
            cache.add('unpickable', Unpickable())

    def test_set_fail_on_pickleerror(self):
        "See https://code.djangoproject.com/ticket/21200"
        with pytest.raises(pickle.PickleError):
            cache.set('unpickable', Unpickable())

    def test_get_or_set(self):
        """get_or_set() stores and returns the default for a missing key."""
        assert cache.get('projector') is None
        assert cache.get_or_set('projector', 42) == 42
        assert cache.get('projector') == 42

    def test_get_or_set_callable(self):
        """A callable default is invoked to produce the stored value."""
        def my_callable():
            return 'value'

        assert cache.get_or_set('mykey', my_callable) == 'value'

    def test_get_or_set_version(self):
        """get_or_set() honours the version argument and requires a default."""
        msg_re = r"get_or_set\(\) missing 1 required positional argument: 'default'"

        cache.get_or_set('brian', 1979, version=2)

        with pytest.raises(TypeError, match=msg_re):
            cache.get_or_set('brian')

        with pytest.raises(TypeError, match=msg_re):
            cache.get_or_set('brian', version=1)

        assert cache.get('brian', version=1) is None
        assert cache.get_or_set('brian', 42, version=1) == 42
        # version=2 was populated above, so the original value wins here.
        assert cache.get_or_set('brian', 1979, version=2) == 1979
        assert cache.get('brian', version=3) is None
    # Modified Django tests

    def test_expiration(self):
        """Entries set with a timeout disappear once it elapses."""
        # Cache values can be set to expire
        cache.set('expire1', 'very quickly', 0.1)
        cache.set('expire2', 'very quickly', 0.1)
        cache.set('expire3', 'very quickly', 0.1)

        time.sleep(0.2)
        assert cache.get("expire1") is None

        # add() succeeds on an expired key, replacing the stale row.
        cache.add("expire2", "newvalue")
        assert cache.get("expire2") == "newvalue"
        assert not cache.has_key("expire3")  # noqa

    def test_get_many(self):
        """get_many() fetches all requested keys with a single query."""
        # Multiple cache keys can be returned using get_many
        cache.set('a', 'a')
        cache.set('b', 'b')
        cache.set('c', 'c')
        cache.set('d', 'd')

        with self.assertNumQueries(1):
            value = cache.get_many(['a', 'c', 'd'])
        assert value == {'a': 'a', 'c': 'c', 'd': 'd'}

        with self.assertNumQueries(1):
            value = cache.get_many(['a', 'b', 'e'])
        # Keys that were never set are simply absent from the result.
        assert value == {'a': 'a', 'b': 'b'}

    def test_get_many_with_one_expired(self):
        """Expired entries are filtered out of get_many() results."""
        # Multiple cache keys can be returned using get_many
        the_cache = caches['no_cull']
        the_cache.set('a', 'a', 0.1)
        time.sleep(0.2)

        the_cache.set('b', 'b')
        the_cache.set('c', 'c')
        the_cache.set('d', 'd')

        with self.assertNumQueries(1):
            value = the_cache.get_many(['a', 'c', 'd'])
        assert value == {'c': 'c', 'd': 'd'}

        with self.assertNumQueries(1):
            value = the_cache.get_many(['a', 'b', 'e'])
        assert value == {'b': 'b'}

    def test_set_many(self):
        """set_many() writes one or many keys with a single query."""
        # Single keys can be set using set_many
        # Perform a single query first to avoid spurious on-connect queries
        caches['no_cull'].get('nonexistent')
        with self.assertNumQueries(1):
            result = caches['no_cull'].set_many({"key1": "spam"})
        # set_many() returns the keys that failed to be stored — none here.
        assert result == []

        # Multiple keys can be set using set_many
        with self.assertNumQueries(1):
            result = caches['no_cull'].set_many({
                'key1': 'spam',
                'key2': 'eggs',
            })
        assert result == []
        assert cache.get("key1") == "spam"
        assert cache.get("key2") == "eggs"

    def test_set_many_expiration(self):
        """set_many() applies its timeout argument to every key."""
        # set_many takes a second ``timeout`` parameter
        # Perform a single query first to avoid spurious on-connect queries
        caches['no_cull'].get('nonexistent')
        with self.assertNumQueries(1):
            caches['no_cull'].set_many({"key1": "spam", "key2": "eggs"}, 0.1)

        cache.set("key3", "ham")
        time.sleep(0.2)

        assert cache.get("key1") is None
        assert cache.get("key2") is None
        assert cache.get("key3") == "ham"

        # set_many expired values can be replaced
        with self.assertNumQueries(1):
            caches['no_cull'].set_many(
                {"key1": "spam", "key2": "egg", "key3": "spam", "key4": "ham"},
                1,
            )
        v = cache.get("key1")
        assert v == "spam"
        assert cache.get("key2") == "egg"
        assert cache.get("key3") == "spam"
        assert cache.get("key4") == "ham"
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
with self.assertNumQueries(1):
cache.delete_many(["key1", "key2"])
assert cache.get("key1") is None
assert cache.get("key2") is None
assert cache.get("key3") == "ham"
    def test_invalid_keys(self):
        """Keys memcached would reject warn (whitespace) or error (length)."""
        # mimic custom ``make_key`` method being defined since the default will
        # never show the below warnings
        def func(key, *args):
            return key

        old_func = cache.key_func
        cache.key_func = func
        try:
            with pytest.warns(CacheKeyWarning):
                # memcached does not allow whitespace or control characters in
                # keys
                cache.set('space key', 'value')

            with pytest.raises(ValueError):
                # memcached limits key length to 250
                # We have a 250 character max length on our table
                cache.set('a' * 251, 'value')
        finally:
            # Always restore the original key function for later tests.
            cache.key_func = old_func

    # Original tests

    def test_base_set_bad_value(self):
        """_base_set() rejects an unknown mode with a descriptive ValueError."""
        with pytest.raises(ValueError) as excinfo:
            cache._base_set('foo', 'key', 'value')
        assert "'mode' should be" in str(excinfo.value)

    def test_add_with_expired(self):
        """add() refuses to overwrite a live key but replaces an expired one."""
        cache.add("mykey", "value", 0.3)
        assert cache.get("mykey") == "value"

        result = cache.add("mykey", "newvalue", 0.3)
        assert not result
        assert cache.get("mykey") == "value"

        time.sleep(0.4)

        result = cache.add("mykey", "newvalue", 60)
        assert result
        assert cache.get("mykey") == "newvalue"

    @override_cache_settings(options={'COMPRESS_MIN_LENGTH': 10})
    def test_compressed(self):
        """Values longer than COMPRESS_MIN_LENGTH round-trip intact."""
        cache.set("key", "a" * 11)
        assert cache.get("key") == "a" * 11

    @override_cache_settings(options={'COMPRESS_MIN_LENGTH': 10,
                                      'COMPRESS_LEVEL': 9})
    def test_compress_level(self):
        """COMPRESS_LEVEL is passed through; an invalid level raises."""
        cache.set("key", "a" * 11)
        assert cache.get("key") == "a" * 11

        # Check a bad compression level = zlib error
        with override_cache_settings(options={'COMPRESS_MIN_LENGTH': 10,
                                              'COMPRESS_LEVEL': 123}):
            with pytest.raises(Exception) as excinfo:
                cache.set("key", "a" * 11)
            assert "Bad compression level" in str(excinfo.value)

    @override_cache_settings(options={'COMPRESS_MIN_LENGTH': 10})
    def test_changing_compressed_option_leaves_compressed_data_readable(self):
        """Toggling compression options never makes stored data unreadable."""
        a11 = "a" * 11
        cache.set("key", a11)

        # Turn it off - remains readable and settable
        with override_cache_settings(options={'COMPRESS_MIN_LENGTH': 0}):
            assert cache.get("key") == a11
            cache.set("key", a11)
            assert cache.get("key") == a11

        # Back on, still readable
        assert cache.get("key") == a11
        cache.set("key", a11)
        assert cache.get("key") == a11

    def test_our_options_quacks_like_djangos(self):
        """Our Options mirrors the attributes of Django's db-cache Options."""
        from django.core.cache.backends.db import Options
        from django_mysql.cache import Options as OurOptions
        theirs = Options('something')
        ours = OurOptions('something')
        assert set(ours.__dict__.keys()) == set(theirs.__dict__.keys())
    def test_cull(self):
        """Overflowing the 'cull' cache leaves 30 of 50 entries."""
        self._perform_cull_test(caches['cull'], 50, 30)

    def test_zero_cull(self):
        """Overflowing the 'zero_cull' cache leaves 20 of 50 entries."""
        self._perform_cull_test(caches['zero_cull'], 50, 20)

    def test_no_cull_only_deletes_when_told(self):
        """The 'no_cull' cache never culls implicitly, only via cull()."""
        self._perform_cull_test(caches['no_cull'], 50, 50)
        caches['no_cull'].cull()
        assert self.table_count() == 25

    def test_cull_deletes_expired_first(self):
        """Culling removes expired rows before evicting live ones."""
        cull_cache = caches['cull']
        cull_cache.set("key", "value", 0.3)
        time.sleep(0.4)

        # Add 30 more entries. The expired key should get deleted, leaving the
        # 30 new keys
        self._perform_cull_test(cull_cache, 30, 30)
        assert cull_cache.get('key') is None

    def _perform_cull_test(self, cull_cache, initial_count, final_count):
        """Insert ``initial_count`` keys into ``cull_cache`` and assert that
        exactly ``final_count`` of them survive the resulting cull."""
        # Create initial cache key entries. This will overflow the cache,
        # causing a cull.
        for i in range(1, initial_count + 1):
            cull_cache.set('cull%d' % i, 'value', 1000)
        count = 0
        # Count how many keys are left in the cache.
        for i in range(1, initial_count + 1):
            if cull_cache.has_key('cull%d' % i):  # noqa
                count = count + 1
        assert count == final_count

    def test_incr_range(self):
        """incr() past the BIGINT maximum raises from the database."""
        cache.set('overwhelm', BIGINT_SIGNED_MAX - 1)
        cache.incr('overwhelm')
        # Django 2.0+ surfaces the overflow as IntegrityError; earlier
        # versions raised OperationalError.
        if django.VERSION >= (2, 0):
            expected = IntegrityError
        else:
            expected = OperationalError
        with pytest.raises(expected):
            cache.incr('overwhelm')

    def test_decr_range(self):
        """decr() past the BIGINT minimum raises from the database."""
        cache.set('underwhelm', BIGINT_SIGNED_MIN + 1)
        cache.decr('underwhelm')
        if django.VERSION >= (2, 0):
            # IntegrityError on MySQL 5.7+ and MariaDB,
            # OperationalError on MySQL 5.6...
            expected = (IntegrityError, OperationalError)
        else:
            expected = OperationalError
        with pytest.raises(expected):
            cache.decr('underwhelm')

    def test_cant_incr_decimals(self):
        # Cached values that aren't ints can't be incremented
        cache.set('answer', Decimal('1.1'))
        with pytest.raises(ValueError):
            cache.incr('answer')

    def test_cant_decr_decimals(self):
        # Cached values that aren't ints can't be decremented
        cache.set('answer', Decimal('9.9'))
        with pytest.raises(ValueError):
            cache.decr('answer')

    def test_set_int_subclass(self):
        """int subclasses round-trip as pickles, so incr() rejects them."""
        # Storing an int subclass should return that int subclass
        cache.set('myint', MyInt(2))
        val = cache.get('myint')
        assert val.times2() == 4

        # Can't increment it since it's a pickle object on the table, not an
        # integer
        with pytest.raises(ValueError):
            cache.incr('myint')
    def test_unknown_value_type_errors(self):
        # Unknown value_type values should be errors, since we don't know how
        # to deserialize them. New value_types will probably be introduced by
        # later versions or subclasses of MySQLCache
        cache.set('mykey', 123)
        with connection.cursor() as cursor:
            # Corrupt the row's value_type directly in the table.
            cursor.execute(
                "UPDATE `%s` SET value_type = '?'" % self.table_name,
            )
        with pytest.raises(ValueError):
            cache.get('mykey')

    def test_key_case_sensitivity(self):
        """
        Check that we can store both upper and lowercase keys separately

        At first MySQLCache did not use a binary collation for cache_key, which
        meant it was not case sensitive.
        """
        cache.set('akey', 123)
        cache.set('Akey', 456)
        assert cache.get('akey') == 123
        assert cache.get('Akey') == 456

    def test_value_type_case_sensitivity(self):
        """The value_type column compares case-sensitively ('i' != 'I')."""
        cache.set('akey', 123)
        with connection.cursor() as cursor:
            # Check that value_type is 'i' for integer
            cursor.execute("SELECT value_type FROM `%s`" % self.table_name)
            t = cursor.fetchone()[0]
            assert t == 'i'

            # Should be case-sensitive, so i != I
            cursor.execute(
                """SELECT COUNT(*) FROM `%s`
                WHERE value_type = 'I'""" % self.table_name)
            n = cursor.fetchone()[0]
            assert n == 0

    def test_bad_key_prefix_for_reverse_function(self):
        """A KEY_PREFIX the default reverse function can't parse is rejected
        as soon as the cache alias is accessed."""
        override = override_cache_settings(KEY_PREFIX='a:bad:prefix')
        with override, pytest.raises(ValueError) as excinfo:
            caches['default']
        assert str(excinfo.value).startswith(
            "Cannot use the default KEY_FUNCTION")
    @parameterized.expand(['default', 'prefix', 'custom_key', 'custom_key2'])
    def test_keys_with_prefix(self, cache_name):
        """keys_with_prefix() lists live keys, excluding expired entries."""
        cache = caches[cache_name]
        assert cache.keys_with_prefix('') == set()
        assert cache.keys_with_prefix('K') == set()

        cache.set('A2', True)
        cache.set('K1', True)
        cache.set('K23', True, 1000)
        cache.set('K99', True, 0.1)
        time.sleep(0.2)
        # K99 has expired and must not appear.
        assert cache.keys_with_prefix('') == {'A2', 'K1', 'K23'}
        assert cache.keys_with_prefix('K') == {'K1', 'K23'}

        cache.delete('K1')
        assert cache.keys_with_prefix('K') == {'K23'}

        cache.clear()
        assert cache.keys_with_prefix('') == set()
        assert cache.keys_with_prefix('K') == set()

    @parameterized.expand(['default', 'prefix', 'custom_key', 'custom_key2'])
    def test_keys_with_prefix_version(self, cache_name):
        """keys_with_prefix() only sees keys stored at the given version."""
        cache = caches[cache_name]
        cache.set('V12', True, version=1)
        cache.set('V12', True, version=2)
        cache.set('V2', True, version=2)
        cache.set('V3', True, version=3)
        assert cache.keys_with_prefix('V', version=1) == {'V12'}
        assert cache.keys_with_prefix('V', version=2) == {'V12', 'V2'}
        assert cache.keys_with_prefix('V', version=3) == {'V3'}

    @override_cache_settings(KEY_FUNCTION=custom_key_func)
    def test_keys_with_prefix_with_bad_cache(self):
        """A KEY_FUNCTION with no reverse makes _with_prefix commands error."""
        with pytest.raises(ValueError) as excinfo:
            cache.keys_with_prefix('')
        assert str(excinfo.value).startswith(
            "To use the _with_prefix commands")

    @parameterized.expand(['default', 'prefix', 'custom_key', 'custom_key2'])
    def test_get_with_prefix(self, cache_name):
        """get_with_prefix() maps live matching keys to their values."""
        cache = caches[cache_name]
        assert cache.get_with_prefix('') == {}
        assert cache.get_with_prefix('K') == {}

        cache.set('A2', [True])
        cache.set('K1', "Value1")
        cache.set('K23', 2, 1000)
        cache.set('K99', ["Value", 99], 0.1)
        time.sleep(0.2)
        # K99 has expired and must not appear.
        assert (
            cache.get_with_prefix('')
            == {'A2': [True], 'K1': "Value1", 'K23': 2}
        )
        assert (
            cache.get_with_prefix('K')
            == {'K1': "Value1", 'K23': 2}
        )

        cache.delete('K1')
        assert cache.get_with_prefix('K') == {'K23': 2}

        cache.clear()
        assert cache.get_with_prefix('') == {}
        assert cache.get_with_prefix('K') == {}

    @parameterized.expand(['default', 'prefix', 'custom_key', 'custom_key2'])
    def test_get_with_prefix_version(self, cache_name):
        """get_with_prefix() only returns entries at the given version."""
        cache = caches[cache_name]
        cache.set('V12', ('version1',), version=1)
        cache.set('V12', "str", version=2)
        cache.set('V2', 2, version=2)
        cache.set('V3', object, version=3)

        assert cache.get_with_prefix('V', version=1) == {'V12': ('version1',)}
        assert cache.get_with_prefix('V', version=2) == {'V12': "str", 'V2': 2}
        assert cache.get_with_prefix('V', version=3) == {'V3': object}

    @override_cache_settings(KEY_FUNCTION=custom_key_func)
    def test_get_with_prefix_with_bad_cache(self):
        """A KEY_FUNCTION with no reverse makes get_with_prefix() error."""
        with pytest.raises(ValueError) as excinfo:
            cache.get_with_prefix('')
        assert str(excinfo.value).startswith(
            "To use the _with_prefix commands")

    @parameterized.expand(['default', 'prefix', 'custom_key', 'custom_key2'])
    def test_delete_with_prefix(self, cache_name):
        """delete_with_prefix() removes matches and returns the count."""
        cache = caches[cache_name]
        # Check it runs on an empty cache
        assert cache.delete_with_prefix('') == 0
        assert cache.delete_with_prefix('K') == 0

        cache.set('A1', True)
        cache.set('A2', True)
        cache.set('K2', True)
        cache.set('K44', True)

        assert cache.keys_with_prefix('') == {'A1', 'A2', 'K2', 'K44'}
        assert cache.delete_with_prefix('A') == 2
        assert cache.keys_with_prefix('') == {'K2', 'K44'}
        # Deleting again is a no-op and reports zero rows.
        assert cache.delete_with_prefix('A') == 0
        assert cache.keys_with_prefix('') == {'K2', 'K44'}
        assert cache.delete_with_prefix('K') == 2
        assert cache.keys_with_prefix('K') == set()
        assert cache.keys_with_prefix('') == set()

    @parameterized.expand(['default', 'prefix', 'custom_key', 'custom_key2'])
    def test_delete_with_prefix_version(self, cache_name):
        """delete_with_prefix() only touches entries at the given version."""
        cache = caches[cache_name]
        cache.set('V12', True, version=1)
        cache.set('V12', True, version=2)
        cache.set('V2', True, version=2)
        cache.set('V3', True, version=3)
        has_key = cache.has_key  # avoid lint error

        assert cache.delete_with_prefix('V', version=1) == 1
        assert not has_key('V12', version=1)
        assert has_key('V12', version=2)
        assert has_key('V2', version=2)
        assert has_key('V3', version=3)

        assert cache.delete_with_prefix('V', version=2) == 2
        assert not has_key('V12', version=1)
        assert not has_key('V12', version=2)
        assert not has_key('V2', version=2)
        assert has_key('V3', version=3)

        assert cache.delete_with_prefix('V', version=3) == 1
        assert not has_key('V12', version=1)
        assert not has_key('V12', version=2)
        assert not has_key('V2', version=2)
        assert not has_key('V3', version=3)

    @override_cache_settings(KEY_FUNCTION=custom_key_func)
    def test_delete_with_prefix_with_no_reverse_works(self):
        """delete_with_prefix() works even without a reverse key function,
        unlike keys_with_prefix()/get_with_prefix()."""
        cache.set_many({'K1': 'value', 'K2': 'value2', 'B2': 'Anothervalue'})
        assert cache.delete_with_prefix('K') == 2
        assert cache.get_many(['K1', 'K2', 'B2']) == {'B2': 'Anothervalue'}
    def test_mysql_cache_migration_alias(self):
        """Generating the migration for one alias emits exactly one RunSQL."""
        out = StringIO()
        call_command('mysql_cache_migration', 'default', stdout=out)
        output = out.getvalue()

        # Count RunSQL occurrences by splitting on the marker.
        num_run_sqls = (len(output.split('RunSQL')) - 1)
        assert num_run_sqls == 1

    def test_mysql_cache_migration_non_existent(self):
        """Requesting a migration for an unknown alias is a CommandError."""
        out = StringIO()
        with pytest.raises(CommandError):
            call_command('mysql_cache_migration', 'nonexistent', stdout=out)

    @override_cache_settings(
        BACKEND='django.core.cache.backends.dummy.DummyCache',
    )
    def test_mysql_cache_migration_no_mysql_caches(self):
        """With no MySQLCache configured the command warns on stderr."""
        err = StringIO()
        call_command('mysql_cache_migration', stderr=err)
        assert "No MySQLCache instances in CACHES" in err.getvalue()

    # cull_mysql_caches tests

    @override_cache_settings(options={'MAX_ENTRIES': -1})
    def test_cull_max_entries_minus_one(self):
        # cull with MAX_ENTRIES = -1 should never clear anything that is not
        # expired

        # one expired key
        cache.set('key', 'value', 0.1)
        time.sleep(0.2)

        # 90 non-expired keys
        for n in range(9):
            cache.set_many({
                str(n * 10 + i): True
                for i in range(10)
            })

        cache.cull()
        assert self.table_count() == 90

    def test_cull_mysql_caches_basic(self):
        """The cull_mysql_caches command removes expired rows."""
        cache.set('key', 'value', 0.1)
        time.sleep(0.2)
        assert self.table_count() == 1
        call_command('cull_mysql_caches', verbosity=0)
        assert self.table_count() == 0

    def test_cull_mysql_caches_named_cache(self):
        """Naming a cache culls only that cache and reports the row count."""
        cache.set('key', 'value', 0.1)
        time.sleep(0.2)
        assert self.table_count() == 1

        out = StringIO()
        call_command('cull_mysql_caches', 'default', verbosity=1, stdout=out)
        output = out.getvalue()
        assert (
            output.strip()
            == "Deleting from cache 'default'... 1 entries deleted."
        )
        assert self.table_count() == 0

    def test_cull_mysql_caches_bad_cache_name(self):
        """An unknown cache name is a CommandError with an exact message."""
        with pytest.raises(CommandError) as excinfo:
            call_command('cull_mysql_caches', "NOTACACHE", verbosity=0)
        assert "Cache 'NOTACACHE' does not exist" == str(excinfo.value)
@override_cache_settings()
class MySQLCacheMigrationTests(MySQLCacheTableMixin, TransactionTestCase):
    """End-to-end check of the generated cache-table migration.

    The migration emitted by ``mysql_cache_migration`` must lint cleanly,
    import as a module, and run forwards and backwards.
    """

    @pytest.fixture(autouse=True)
    def flake8dir(self, flake8dir):
        # Capture the pytest flake8dir fixture on the instance so plain
        # test methods can use it.
        self.flake8dir = flake8dir

    def test_mysql_cache_migration(self):
        out = StringIO()
        call_command('mysql_cache_migration', stdout=out)
        output = out.getvalue()

        # Lint it
        self.flake8dir.make_example_py(output)
        result = self.flake8dir.run_flake8()
        assert result.out_lines == []

        # Dynamic import and check. types.ModuleType replaces the formerly
        # used imp.new_module(): the imp module is deprecated since Python
        # 3.4 and removed in 3.12.
        import types
        migration_mod = types.ModuleType('0001_add_cache_tables')
        exec(output, migration_mod.__dict__)
        assert hasattr(migration_mod, 'Migration')
        migration = migration_mod.Migration
        assert hasattr(migration, 'dependencies')
        assert hasattr(migration, 'operations')

        # Since they all have the same table name, there should only be one
        # operation
        assert len(migration.operations) == 1

        # Now run the migration forwards and backwards to check it works
        operation = migration.operations[0]
        assert not self.table_exists(self.table_name)

        state = ProjectState()
        new_state = state.clone()
        with connection.schema_editor() as editor:
            operation.database_forwards("testapp", editor, state, new_state)
        assert self.table_exists(self.table_name)

        new_state = state.clone()
        with connection.schema_editor() as editor:
            operation.database_backwards("testapp", editor, new_state, state)
        assert not self.table_exists(self.table_name)

    def table_exists(self, table_name):
        """Return True if ``table_name`` exists in the current schema."""
        with connection.cursor() as cursor:
            cursor.execute(
                """SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES
                WHERE TABLE_SCHEMA = DATABASE() AND
                TABLE_NAME = %s""",
                (table_name,),
            )
            return bool(cursor.fetchone()[0])
| 37.220799 | 108 | 0.601656 |
dae092b5d56fbbcb6bd55fd1d09cf6c7e45b4e92 | 49 | py | Python | lime/for_biologists/architectures/__init__.py | choderalab/gin | 9082431d8b664699a898c1e2fa490a18737d6e2d | [
"MIT"
] | 24 | 2019-07-20T22:37:09.000Z | 2021-07-07T07:13:56.000Z | lime/for_biologists/architectures/__init__.py | choderalab/gin | 9082431d8b664699a898c1e2fa490a18737d6e2d | [
"MIT"
] | 3 | 2021-05-10T05:29:59.000Z | 2022-02-10T00:15:05.000Z | lime/for_biologists/architectures/__init__.py | kuano-ai/gimlet | 9082431d8b664699a898c1e2fa490a18737d6e2d | [
"MIT"
] | 8 | 2019-08-09T17:30:20.000Z | 2021-12-01T13:27:46.000Z | import lime.for_biologists.architectures.vcharge
| 24.5 | 48 | 0.897959 |
0957a1e80db9fb879a9b2590351b4f4a1cebc965 | 1,065 | py | Python | core/confdb/syntax/protocols/lldp/base.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | core/confdb/syntax/protocols/lldp/base.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | core/confdb/syntax/protocols/lldp/base.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# ConfDB protocols lldp syntax
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from ...defs import DEF
from ...patterns import IF_NAME
# ConfDB grammar subtree for:
#     protocols lldp interface <name> admin-status {rx | tx}
PROTOCOLS_LLDP_SYNTAX = DEF(
    "lldp",
    [
        DEF(
            "interface",
            [
                DEF(
                    IF_NAME,
                    [
                        DEF(
                            "admin-status",
                            [
                                # Per-interface LLDP admin status tokens,
                                # each mapped to its generator function.
                                DEF("rx", gen="make_lldp_admin_status_rx"),
                                DEF("tx", gen="make_lldp_admin_status_tx"),
                            ],
                        )
                    ],
                    # multi=True: the grammar accepts many interface nodes.
                    multi=True,
                    name="interface",
                    gen="make_lldp_interface",
                )
            ],
        )
    ],
)
| 28.783784 | 75 | 0.283568 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.