| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
import os
import flask
from oslo.config import cfg
from dashboard import memory_storage
from stackalytics.openstack.common import log as logging
from stackalytics.processor import runtime_storage
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
def get_vault():
vault = getattr(flask.current_app, 'stackalytics_vault', None)
if not vault:
try:
vault = {}
runtime_storage_inst = runtime_storage.get_runtime_storage(
cfg.CONF.runtime_storage_uri)
vault['runtime_storage'] = runtime_storage_inst
vault['memory_storage'] = memory_storage.get_memory_storage(
memory_storage.MEMORY_STORAGE_CACHED)
init_project_types(vault)
init_releases(vault)
flask.current_app.stackalytics_vault = vault
except Exception as e:
LOG.critical('Failed to initialize application: %s', e)
LOG.exception(e)
flask.abort(500)
if not getattr(flask.request, 'stackalytics_updated', None):
flask.request.stackalytics_updated = True
memory_storage_inst = vault['memory_storage']
have_updates = memory_storage_inst.update(
vault['runtime_storage'].get_update(os.getpid()))
if have_updates:
init_project_types(vault)
init_releases(vault)
init_module_groups(vault)
return vault
def get_memory_storage():
return get_vault()['memory_storage']
def init_releases(vault):
runtime_storage_inst = vault['runtime_storage']
releases = runtime_storage_inst.get_by_key('releases')
if not releases:
raise Exception('Releases are missing in runtime storage')
vault['start_date'] = releases[0]['end_date']
vault['end_date'] = releases[-1]['end_date']
start_date = releases[0]['end_date']
for r in releases[1:]:
r['start_date'] = start_date
start_date = r['end_date']
vault['releases'] = dict((r['release_name'].lower(), r)
for r in releases[1:])
def init_project_types(vault):
runtime_storage_inst = vault['runtime_storage']
project_type_options = {}
project_type_group_index = {'all': set(['unknown'])}
for repo in utils.load_repos(runtime_storage_inst):
project_type = repo['project_type'].lower()
project_group = None
if ('project_group' in repo) and (repo['project_group']):
project_group = repo['project_group'].lower()
if project_type in project_type_options:
if project_group:
project_type_options[project_type].add(project_group)
else:
if project_group:
project_type_options[project_type] = set([project_group])
else:
project_type_options[project_type] = set()
module = repo['module']
if project_type in project_type_group_index:
project_type_group_index[project_type].add(module)
else:
project_type_group_index[project_type] = set([module])
if project_group:
if project_group in project_type_group_index:
project_type_group_index[project_group].add(module)
else:
project_type_group_index[project_group] = set([module])
project_type_group_index['all'].add(module)
vault['project_type_options'] = project_type_options
vault['project_type_group_index'] = project_type_group_index
def init_module_groups(vault):
runtime_storage_inst = vault['runtime_storage']
module_index = {}
module_id_index = {}
module_groups = runtime_storage_inst.get_by_key('module_groups') or []
for module_group in module_groups:
module_group_name = module_group['module_group_name']
module_group_id = module_group_name.lower()
module_id_index[module_group_id] = {
'group': True,
'id': module_group_id,
'text': module_group_name,
'modules': [m.lower() for m in module_group['modules']],
}
modules = module_group['modules']
for module in modules:
if module in module_index:
module_index[module].add(module_group_id)
else:
module_index[module] = set([module_group_id])
memory_storage_inst = vault['memory_storage']
for module in memory_storage_inst.get_modules():
module_id_index[module] = {
'id': module.lower(),
'text': module,
'modules': [module.lower()],
}
vault['module_group_index'] = module_index
vault['module_id_index'] = module_id_index
vault['module_groups'] = module_groups
def get_project_type_options():
return get_vault()['project_type_options']
def get_release_options():
runtime_storage_inst = get_vault()['runtime_storage']
releases = runtime_storage_inst.get_by_key('releases')[1:]
releases.reverse()
return releases
def is_project_type_valid(project_type):
if not project_type:
return False
project_type = project_type.lower()
if project_type == 'all':
return True
project_types = get_project_type_options()
if project_type in project_types:
return True
    for group_names in project_types.values():
        if project_type in group_names:
            return True
return False
def get_user_from_runtime_storage(user_id):
runtime_storage_inst = get_vault()['runtime_storage']
return utils.load_user(runtime_storage_inst, user_id)
def resolve_modules(module_ids):
module_id_index = get_vault()['module_id_index']
modules = set()
for module_id in module_ids:
if module_id in module_id_index:
modules |= set(module_id_index[module_id]['modules'])
return modules
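# Illustrative sketch (uses a hypothetical index; not part of the module
# above): resolve_modules() unions the 'modules' lists of every known id in
# 'module_id_index'. A self-contained rendition of the same lookup:
def _resolve_modules_sketch(module_ids, module_id_index):
    modules = set()
    for module_id in module_ids:
        entry = module_id_index.get(module_id)
        if entry:
            modules |= set(entry['modules'])
    return modules

# A group id expands to its member modules; a plain module maps to itself.
_sample_index = {
    'compute-group': {'id': 'compute-group', 'group': True,
                      'modules': ['nova', 'glance']},
    'nova': {'id': 'nova', 'modules': ['nova']},
}
assert _resolve_modules_sketch(['compute-group'], _sample_index) == {'nova', 'glance'}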
|
{
"content_hash": "a738db67abb5a91decbae8e8e9a73eca",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 74,
"avg_line_length": 32.66480446927374,
"alnum_prop": 0.6153583034034548,
"repo_name": "joshuamckenty/stackalytics",
"id": "afa07b03f8defe03e37f404b56047f979ae3b721",
"size": "6429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/vault.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "48672"
},
{
"name": "JavaScript",
"bytes": "33301"
},
{
"name": "Python",
"bytes": "215950"
}
],
"symlink_target": ""
}
|
import django.template
register = django.template.Library()
@register.filter
def customfilter(stuff):
return "%s!" % stuff
@register.filter
def cat(stuff):
return "MEOW %s" % stuff
django.template.builtins.append(register)
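# Usage sketch (assumes a configured Django settings module): once the
# library is appended to the template builtins, the filters work in any
# template, e.g.:
#   from django.template import Template, Context
#   Template('{{ "hello"|customfilter }}').render(Context())  # -> 'hello!'
#   Template('{{ "dog"|cat }}').render(Context())             # -> 'MEOW dog'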
|
{
"content_hash": "d64fc3e5e998a8cbc8a1a4ddc95883e3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.7257383966244726,
"repo_name": "holli-holzer/python-docx",
"id": "ca076a60e4ac7ddfa37fe3d52c39391a29318594",
"size": "237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docx/engines/filters/django/customfilter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6692"
},
{
"name": "Python",
"bytes": "783349"
}
],
"symlink_target": ""
}
|
"""Unit tests for the Model utilities."""
import json
import logging
from src import basetest
from src.clients import bigquery
from src.csvmatchreplace import transform
class TransformTest(basetest.TestCase):
"""Run all the ModelTests."""
def testTransformRow(self):
row1 = ['true', 'hue', 'blue']
config = {'columns': [{'type': bigquery.ColumnTypes.STRING,
'wanted': True,
'transformations': [
{'match': 'ue', 'replace': 'oo'},
{'match': 'lo', 'replace': 'ol'}]},
{'type': bigquery.ColumnTypes.STRING,
'wanted': False},
{'type': bigquery.ColumnTypes.STRING,
'wanted': True,
'transformations': [
{'match': 'b', 'replace': 'k'}]}]}
# perform transformations that include removing a column
(transformed_row, bad_columns) = transform.TransformRow(row1, config)
self.assertEqual(len(bad_columns), 0)
# transformed_row should only have 2 cells
self.assertEqual(len(transformed_row), 2)
# transformed_row should equal ['troo', 'klue']
self.assertEqual(transformed_row, ['troo', 'klue'])
config['columns'][1]['wanted'] = True
# perform transformations that don't include removing columns
(transformed_row2, bad_columns2) = transform.TransformRow(row1, config)
self.assertEqual(len(bad_columns2), 0)
    # transformed_row should have all 3 cells
self.assertEqual(len(transformed_row2), 3)
# transformed_row should equal ['troo', 'hue', 'klue']
self.assertEqual(transformed_row2, ['troo', 'hue', 'klue'])
def NOPEtestTransformRowWithBadData(self):
# TODO(user) get this test working again.
row1 = ['true', 'hue', 'blue']
t = {} # table.Table()
t.column_types = [bigquery.ColumnTypes.STRING,
bigquery.ColumnTypes.INTEGER,
bigquery.ColumnTypes.INTEGER]
t.transformations = json.dumps([
# col 1 has 2 transformations
[{'match': 'ue', 'replace': 'oo'},
{'match': 'lo', 'replace': 'ol'}],
        # col 2 has no transformations
[],
# col 3 has 1 transformation
[{'match': 'b', 'replace': 'k'}]])
# transformations = t.GetAllTransformations()
t.column_wanted = [True, True, True]
# perform transformations that don't include removing columns
(bad_columns, _) = t.TransformRow(row1)
# there should be two errors in bad columns
self.assertEqual(len(bad_columns), 2)
# index of first error should be 1
self.assertEqual(bad_columns[0].index, 1)
# index of second error should be 2
self.assertEqual(bad_columns[1].index, 2)
class TestNormalizeCellByType(basetest.TestCase):
def testNormalizeCellByType(self):
types = bigquery.ColumnTypes
# string tests
self.assertEqual('', transform.NormalizeCellByType('', 0, types.STRING))
# int tests
self.assertEqual('', transform.NormalizeCellByType('', 1, types.INTEGER))
self.assertEqual('1', transform.NormalizeCellByType('1', 1, types.INTEGER))
self.assertRaises(transform.CellError,
transform.NormalizeCellByType, 'ark', 1, types.INTEGER)
# float tests
self.assertEqual('', transform.NormalizeCellByType('', 2, types.FLOAT))
self.assertEqual('1.1',
transform.NormalizeCellByType('1.1', 2, types.FLOAT))
self.assertRaises(transform.CellError,
transform.NormalizeCellByType, 'ark', 2, types.FLOAT)
# bool tests
self.assertEqual('', transform.NormalizeCellByType('', 3, types.BOOLEAN))
self.assertEqual('True',
transform.NormalizeCellByType('true', 3, types.BOOLEAN))
self.assertEqual('True',
transform.NormalizeCellByType('TRUE', 3, types.BOOLEAN))
self.assertEqual('True',
transform.NormalizeCellByType('1', 3, types.BOOLEAN))
self.assertEqual('False',
transform.NormalizeCellByType('False', 3, types.BOOLEAN))
self.assertRaises(transform.CellError,
transform.NormalizeCellByType, 'ark', 3, types.BOOLEAN)
# timestamp tests
self.assertEqual('', transform.NormalizeCellByType('', 4, types.TIMESTAMP))
self.assertEqual('2013-06-06 00:00:00.000000 ',
transform.NormalizeCellByType('2013-06-06', 4,
types.TIMESTAMP))
self.assertRaises(transform.CellError,
transform.NormalizeCellByType, 'ark', 4, types.TIMESTAMP)
def testCellsAsString(self):
tests = (('', []),
('a', ['a']),
('a,b', ['a', 'b']),
('"a""b",c', ['a"b', 'c']),
('a\'b,c', ['a\'b', 'c']),
('"a,b",c', ['a,b', 'c']),
)
for expected, row in tests:
      self.assertEqual(expected, transform.CellsToCsvString(row))
if __name__ == '__main__':
basetest.main()
|
{
"content_hash": "8c78c899f6e2379f356dc9daf3e1f7ef",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 79,
"avg_line_length": 38.67424242424242,
"alnum_prop": 0.5872673849167482,
"repo_name": "GoogleCloudPlatform/Data-Pipeline",
"id": "8c4bebbd8d753403fa24080dfbf4b1c218b02f37",
"size": "5701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/src/csvmatchreplace/transform_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3751"
},
{
"name": "JavaScript",
"bytes": "50597"
},
{
"name": "Python",
"bytes": "364246"
},
{
"name": "Shell",
"bytes": "14633"
}
],
"symlink_target": ""
}
|
import os
import os.path
import sys
# Django settings for map project.
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DATABASES = {
'default': {
'ENGINE' : 'django.contrib.gis.db.backends.postgis',
'NAME': 'geodatabase',
'USER': '',
'PASSWORD': '',
'PORT': '5433'
}
}
#TODO haitham: is this safe? putting the password here?
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = "587"
EMAIL_HOST_USER = 'oMap.Team@gmail.com'
EMAIL_HOST_PASSWORD = 'oMap2011'
EMAIL_USE_TLS = True
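# A safer alternative (sketch only, not wired in): read the credentials from
# the environment instead of committing them; the variable names below are
# hypothetical.
#   EMAIL_HOST_USER = os.environ.get('OMAP_EMAIL_USER', '')
#   EMAIL_HOST_PASSWORD = os.environ.get('OMAP_EMAIL_PASSWORD', '')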
AUTH_PROFILE_MODULE = "accounts.models.UserProfile"
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Jerusalem'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), "media")
DATA_ROOT = os.path.join(MEDIA_ROOT, 'data')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '26g1(l$zyi5t)ic301cs^$d(q(j8bjbmo!#-ql7y*co5c8$j0_'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'map.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'gunicorn',
'accounts',
'django.contrib.gis',
'piston',
'map_info',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
{
"content_hash": "67c56c8d43c25227cda3b64997d1badb",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 88,
"avg_line_length": 30.805882352941175,
"alnum_prop": 0.6956272675195723,
"repo_name": "haithamk/oMap",
"id": "a28532119bccad05c30577706d41149030d7a2e3",
"size": "5237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/map/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "105884"
},
{
"name": "Python",
"bytes": "51939"
},
{
"name": "Shell",
"bytes": "4511"
}
],
"symlink_target": ""
}
|
""" Flask-User is a customizable user account management extension for Flask.
:copyright: (c) 2013 by Ling Thio
:author: Ling Thio (ling.thio@gmail.com)
:license: Simplified BSD License, see LICENSE.txt for more details."""
from passlib.context import CryptContext
from flask import Blueprint, current_app, url_for
from flask_login import LoginManager, UserMixin as LoginUserMixin, make_secure_token
from flask_user.db_adapters import DBAdapter
from .db_adapters import SQLAlchemyAdapter
from . import emails
from . import forms
from . import passwords
from . import settings
from . import tokens
from . import translations
from . import views
from . import signals
from .translations import get_translations
# Enable the following: from flask.ext.user import current_user
from flask_login import current_user
# Enable the following: from flask.ext.user import login_required, roles_required
from .decorators import *
# Enable the following: from flask.ext.user import user_logged_in
from .signals import *
__version__ = '0.6.6'
def _flask_user_context_processor():
""" Make 'user_manager' available to Jinja2 templates"""
return dict(user_manager=current_app.user_manager)
class UserManager(object):
""" This is the Flask-User object that manages the User management process."""
def __init__(self, db_adapter=None, app=None, **kwargs):
""" Create the UserManager object """
self.db_adapter = db_adapter
self.app = app
if db_adapter is not None and app is not None:
self.init_app(app, db_adapter, **kwargs)
def init_app(self, app, db_adapter=None,
# Forms
add_email_form=forms.AddEmailForm,
change_password_form=forms.ChangePasswordForm,
change_username_form=forms.ChangeUsernameForm,
forgot_password_form=forms.ForgotPasswordForm,
login_form=forms.LoginForm,
register_form=forms.RegisterForm,
resend_confirm_email_form=forms.ResendConfirmEmailForm,
reset_password_form=forms.ResetPasswordForm,
invite_form=forms.InviteForm,
# Validators
username_validator=forms.username_validator,
password_validator=forms.password_validator,
# View functions
change_password_view_function=views.change_password,
change_username_view_function=views.change_username,
confirm_email_view_function=views.confirm_email,
email_action_view_function=views.email_action,
forgot_password_view_function=views.forgot_password,
login_view_function=views.login,
logout_view_function=views.logout,
manage_emails_view_function=views.manage_emails,
register_view_function=views.register,
                 resend_confirm_email_view_function=views.resend_confirm_email,
                 reset_password_view_function=views.reset_password,
                 unconfirmed_email_view_function=views.unconfirmed,
                 unauthenticated_view_function=views.unauthenticated,
                 unauthorized_view_function=views.unauthorized,
                 user_profile_view_function=views.user_profile,
                 invite_view_function=views.invite,
                 # Misc
                 login_manager=LoginManager(),
                 password_crypt_context=None,
                 send_email_function=emails.send_email,
token_manager=tokens.TokenManager(),
legacy_check_password_hash=None
):
""" Initialize the UserManager object """
self.app = app
if db_adapter is not None:
self.db_adapter = db_adapter
# Forms
self.add_email_form = add_email_form
self.change_password_form = change_password_form
self.change_username_form = change_username_form
self.forgot_password_form = forgot_password_form
self.login_form = login_form
self.register_form = register_form
self.resend_confirm_email_form = resend_confirm_email_form
self.reset_password_form = reset_password_form
self.invite_form = invite_form
# Validators
self.username_validator = username_validator
self.password_validator = password_validator
# View functions
self.change_password_view_function = change_password_view_function
self.change_username_view_function = change_username_view_function
self.confirm_email_view_function = confirm_email_view_function
self.email_action_view_function = email_action_view_function
self.forgot_password_view_function = forgot_password_view_function
self.login_view_function = login_view_function
self.logout_view_function = logout_view_function
self.manage_emails_view_function = manage_emails_view_function
self.register_view_function = register_view_function
self.resend_confirm_email_view_function = resend_confirm_email_view_function
self.reset_password_view_function = reset_password_view_function
self.unconfirmed_email_view_function = unconfirmed_email_view_function
self.unauthenticated_view_function = unauthenticated_view_function
self.unauthorized_view_function = unauthorized_view_function
self.user_profile_view_function = user_profile_view_function
self.invite_view_function = invite_view_function
# Misc
self.login_manager = login_manager
self.token_manager = token_manager
self.password_crypt_context = password_crypt_context
self.send_email_function = send_email_function
self.legacy_check_password_hash = legacy_check_password_hash
""" Initialize app.user_manager."""
# Bind Flask-USER to app
app.user_manager = self
# Flask seems to also support the current_app.extensions[] list
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['user'] = self
# Set defaults for undefined settings
settings.set_default_settings(self, app.config)
# Make sure the settings are valid -- raise ConfigurationError if not
settings.check_settings(self)
# Initialize Translations -- Only if Flask-Babel has been installed
if hasattr(app.jinja_env, 'install_gettext_callables'):
app.jinja_env.install_gettext_callables(
lambda x: get_translations().ugettext(x),
lambda s, p, n: get_translations().ungettext(s, p, n),
newstyle=True)
else:
app.jinja_env.add_extension('jinja2.ext.i18n')
app.jinja_env.install_null_translations()
# Create password_crypt_context if needed
if not self.password_crypt_context:
self.password_crypt_context = CryptContext(
schemes=[app.config['USER_PASSWORD_HASH']])
# Setup Flask-Login
self.setup_login_manager(app)
# Setup TokenManager
self.token_manager.setup(app.config.get('SECRET_KEY'))
# Add flask_user/templates directory using a Blueprint
blueprint = Blueprint('flask_user', 'flask_user', template_folder='templates')
app.register_blueprint(blueprint)
# Add URL routes
self.add_url_routes(app)
# Add context processor
app.context_processor(_flask_user_context_processor)
# Prepare for translations
_ = translations.gettext
def setup_login_manager(self, app):
# Flask-Login calls this function to retrieve a User record by user ID.
# Note: user_id is a UNICODE string returned by UserMixin.get_id().
# See https://flask-login.readthedocs.org/en/latest/#how-it-works
@self.login_manager.user_loader
def load_user_by_id(user_unicode_id):
user_id = int(user_unicode_id)
#print('load_user_by_id: user_id=', user_id)
return self.get_user_by_id(user_id)
# Flask-login calls this function to retrieve a User record by user token.
# A token is used to secure the user ID when stored in browser sessions.
# See https://flask-login.readthedocs.org/en/latest/#alternative-tokens
@self.login_manager.token_loader
def load_user_by_token(token):
user_id = self.token_manager.decrypt_id(token)
#print('load_user_by_token: token=', token, 'user_id=', user_id)
return self.get_user_by_id(int(user_id))
self.login_manager.login_view = 'user.login'
self.login_manager.init_app(app)
def add_url_routes(self, app):
""" Add URL Routes"""
app.add_url_rule(self.login_url, 'user.login', self.login_view_function, methods=['GET', 'POST'])
app.add_url_rule(self.logout_url, 'user.logout', self.logout_view_function, methods=['GET', 'POST'])
if self.enable_confirm_email:
app.add_url_rule(self.confirm_email_url, 'user.confirm_email', self.confirm_email_view_function)
app.add_url_rule(self.resend_confirm_email_url, 'user.resend_confirm_email', self.resend_confirm_email_view_function, methods=['GET', 'POST'])
if self.enable_change_password:
app.add_url_rule(self.change_password_url, 'user.change_password', self.change_password_view_function, methods=['GET', 'POST'])
if self.enable_change_username:
app.add_url_rule(self.change_username_url, 'user.change_username', self.change_username_view_function, methods=['GET', 'POST'])
if self.enable_forgot_password:
app.add_url_rule(self.forgot_password_url, 'user.forgot_password', self.forgot_password_view_function, methods=['GET', 'POST'])
app.add_url_rule(self.reset_password_url, 'user.reset_password', self.reset_password_view_function, methods=['GET', 'POST'])
if self.enable_register:
app.add_url_rule(self.register_url, 'user.register', self.register_view_function, methods=['GET', 'POST'])
if self.db_adapter.UserEmailClass:
app.add_url_rule(self.email_action_url, 'user.email_action', self.email_action_view_function)
app.add_url_rule(self.manage_emails_url, 'user.manage_emails', self.manage_emails_view_function, methods=['GET', 'POST'])
app.add_url_rule(self.user_profile_url, 'user.profile', self.user_profile_view_function, methods=['GET', 'POST'])
if self.enable_invitation:
app.add_url_rule(self.invite_url, 'user.invite', self.invite_view_function, methods=['GET', 'POST'])
# Obsoleted function. Replace with hash_password()
def generate_password_hash(self, password):
return passwords.hash_password(self, password)
def hash_password(self, password):
return passwords.hash_password(self, password)
def get_password(self, user):
        use_auth_class = bool(self.db_adapter.UserAuthClass and hasattr(user, 'user_auth'))
# Handle v0.5 backward compatibility
if self.db_adapter.UserProfileClass:
hashed_password = user.password
else:
hashed_password = user.user_auth.password if use_auth_class else user.password
return hashed_password
def update_password(self, user, hashed_password):
        use_auth_class = bool(self.db_adapter.UserAuthClass and hasattr(user, 'user_auth'))
if use_auth_class:
user.user_auth.password = hashed_password
else:
user.password = hashed_password
self.db_adapter.commit()
def verify_password(self, password, user):
"""
Make it backward compatible to legacy password hash.
In addition, if such password were found, update the user's password field.
"""
verified = False
hashed_password = self.get_password(user)
try:
verified = passwords.verify_password(self, password, hashed_password)
except ValueError:
legacy_check = self.legacy_check_password_hash
if legacy_check:
verified = legacy_check(hashed_password, password)
if verified:
# update the hash
new_hash = self.hash_password(password)
self.update_password(user, new_hash)
return verified
def generate_token(self, user_id):
return self.token_manager.generate_token(user_id)
def verify_token(self, token, expiration_in_seconds):
return self.token_manager.verify_token(token, expiration_in_seconds)
def get_user_by_id(self, user_id):
# Handle v0.5 backward compatibility
ObjectClass = self.db_adapter.UserAuthClass if self.db_adapter.UserAuthClass and self.db_adapter.UserProfileClass else self.db_adapter.UserClass
return self.db_adapter.get_object(ObjectClass, user_id)
# NB: This backward compatibility function may be obsoleted in the future
    # Use get_user_by_id() instead.
def find_user_by_id(self, user_id):
        print('Warning: find_user_by_id() will be deprecated in the future. Use get_user_by_id() instead.')
return self.get_user_by_id(user_id)
def get_user_email_by_id(self, user_email_id):
return self.db_adapter.get_object(self.db_adapter.UserEmailClass, user_email_id)
# NB: This backward compatibility function may be obsoleted in the future
    # Use get_user_email_by_id() instead.
def find_user_email_by_id(self, user_email_id):
        print('Warning: find_user_email_by_id() will be deprecated in the future. Use get_user_email_by_id() instead.')
return self.get_user_email_by_id(user_email_id)
def find_user_by_username(self, username):
user_auth = None
# The username field can either be in the UserAuth class or in the User class
if self.db_adapter.UserAuthClass and hasattr(self.db_adapter.UserAuthClass, 'username'):
user_auth = self.db_adapter.ifind_first_object(self.db_adapter.UserAuthClass, username=username)
# Handle v0.5 backward compatibility
if self.db_adapter.UserProfileClass: return user_auth
user = user_auth.user if user_auth else None
else:
user = self.db_adapter.ifind_first_object(self.db_adapter.UserClass, username=username)
return user
def find_user_by_email(self, email):
user_email = None
user_auth = None
if self.db_adapter.UserEmailClass:
user_email = self.db_adapter.ifind_first_object(self.db_adapter.UserEmailClass, email=email)
user = user_email.user if user_email else None
else:
# The email field can either be in the UserAuth class or in the User class
if self.db_adapter.UserAuthClass and hasattr(self.db_adapter.UserAuthClass, 'email'):
user_auth = self.db_adapter.ifind_first_object(self.db_adapter.UserAuthClass, email=email)
# Handle v0.5 backward compatibility
if self.db_adapter.UserProfileClass: return (user_auth, user_email)
user = user_auth.user if user_auth else None
else:
user = self.db_adapter.ifind_first_object(self.db_adapter.UserClass, email=email)
return (user, user_email)
def email_is_available(self, new_email):
""" Return True if new_email does not exist.
Return False otherwise."""
user, user_email = self.find_user_by_email(new_email)
        return user is None
def username_is_available(self, new_username):
""" Return True if new_username does not exist or if new_username equals old_username.
Return False otherwise."""
# Allow user to change username to the current username
if current_user.is_authenticated():
current_username = current_user.user_auth.username if self.db_adapter.UserAuthClass and hasattr(current_user, 'user_auth') else current_user.username
if new_username == current_username:
return True
# See if new_username is available
        return self.find_user_by_username(new_username) is None
def send_reset_password_email(self, email):
# Find user by email
user, user_email = self.find_user_by_email(email)
if user:
# Generate reset password link
token = self.generate_token(int(user.get_id()))
reset_password_link = url_for('user.reset_password', token=token, _external=True)
# Send forgot password email
emails.send_forgot_password_email(user, user_email, reset_password_link)
# Store token
if hasattr(user, 'reset_password_token'):
self.db_adapter.update_object(user, reset_password_token=token)
self.db_adapter.commit()
# Send forgot_password signal
signals.user_forgot_password.send(current_app._get_current_object(), user=user)
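# --- Usage sketch (illustrative; 'app', 'db' and 'User' stand for a Flask
# application, a Flask-SQLAlchemy instance and a user model, none of which
# are defined in this module) ---
#   db_adapter = SQLAlchemyAdapter(db, User)
#   user_manager = UserManager(db_adapter, app)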
class UserMixin(LoginUserMixin):
""" This class adds methods to the User model class required by Flask-Login and Flask-User."""
def is_active(self):
if hasattr(self, 'active'):
return self.active
else:
return self.is_enabled
def set_active(self, active):
if hasattr(self, 'active'):
self.active = active
else:
self.is_enabled = active
def has_role(self, *specified_role_names):
""" Return True if the user has one of the specified roles. Return False otherwise.
            has_role() accepts one or more role name parameters:
                has_role(role_name1, role_name2, role_name3).
            For example:
                has_role('a', 'b')
Translates to:
User has role 'a' OR role 'b'
"""
# Allow developers to attach the Roles to the User or the UserProfile object
if hasattr(self, 'roles'):
roles = self.roles
else:
if hasattr(self, 'user_profile') and hasattr(self.user_profile, 'roles'):
roles = self.user_profile.roles
else:
roles = None
if not roles: return False
# Translates a list of role objects to a list of role_names
user_role_names = [role.name for role in roles]
# Return True if one of the role_names matches
for role_name in specified_role_names:
if role_name in user_role_names:
return True
# Return False if none of the role_names matches
return False
def has_roles(self, *requirements):
""" Return True if the user has all of the specified roles. Return False otherwise.
has_roles() accepts a list of requirements:
has_role(requirement1, requirement2, requirement3).
Each requirement is either a role_name, or a tuple_of_role_names.
role_name example: 'manager'
tuple_of_role_names: ('funny', 'witty', 'hilarious')
A role_name-requirement is accepted when the user has this role.
A tuple_of_role_names-requirement is accepted when the user has ONE of these roles.
has_roles() returns true if ALL of the requirements have been accepted.
For example:
                has_roles('a', ('b', 'c'), 'd')
Translates to:
User has role 'a' AND (role 'b' OR role 'c') AND role 'd'"""
# Allow developers to attach the Roles to the User or the UserProfile object
if hasattr(self, 'roles'):
roles = self.roles
else:
if hasattr(self, 'user_profile') and hasattr(self.user_profile, 'roles'):
roles = self.user_profile.roles
else:
roles = None
if not roles: return False
# Translates a list of role objects to a list of role_names
user_role_names = [role.name for role in roles]
# has_role() accepts a list of requirements
for requirement in requirements:
if isinstance(requirement, (list, tuple)):
# this is a tuple_of_role_names requirement
tuple_of_role_names = requirement
authorized = False
for role_name in tuple_of_role_names:
if role_name in user_role_names:
# tuple_of_role_names requirement was met: break out of loop
authorized = True
break
if not authorized:
return False # tuple_of_role_names requirement failed: return False
else:
# this is a role_name requirement
role_name = requirement
# the user must have this role
                if role_name not in user_role_names:
return False # role_name requirement failed: return False
# All requirements have been met: return True
return True
# Flask-Login is capable of remembering the current user ID in the browser's session.
# This function enables the user ID to be encrypted as a token.
# See https://flask-login.readthedocs.org/en/latest/#remember-me
def get_auth_token(self):
token_manager = current_app.user_manager.token_manager
user_id = int(self.get_id())
token = token_manager.encrypt_id(user_id)
#print('get_auth_token: user_id=', user_id, 'token=', token)
return token
def has_confirmed_email(self):
db_adapter = current_app.user_manager.db_adapter
# Handle multiple emails per user: Find at least one confirmed email
if db_adapter.UserEmailClass:
has_confirmed_email = False
user_emails = db_adapter.find_all_objects(db_adapter.UserEmailClass, user_id=self.id)
for user_email in user_emails:
if user_email.confirmed_at:
has_confirmed_email = True
break
# Handle single email per user
else:
            has_confirmed_email = bool(self.confirmed_at)
return has_confirmed_email
|
{
"content_hash": "9ba713125c1ad244a7ebc63f6765cc80",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 161,
"avg_line_length": 44.74251497005988,
"alnum_prop": 0.6332976445396146,
"repo_name": "DimensionSoftware/Flask-User",
"id": "befa73915af012a9aed14c8a68e528897657eaae",
"size": "22416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_user/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "18322"
},
{
"name": "Python",
"bytes": "196108"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(0, "DiscordBot-PurpleMinion/Tools")
from NamedPipeIPC import NamedPipe
from BotConfigurations import getAuthenticationToken
from RestAPI import RestAPI
from DiscordGateway import gateway
class DiscordRequestAPI:
    def __init__(self, namedPipe):
        self.namedPipe = namedPipe
        self.token = "Bot " + getAuthenticationToken()
        self.discordRest = RestAPI(self.token)
        # Track voice-channel state so leaveChannel() is safe before any join
        self.inVoiceChannel = False
def getBotName(self):
return self.discordRest.getBotName()
def getGuilds(self):
return self.discordRest.getGuildNames()
def getGuildChannels(self, guildName):
return self.discordRest.getGuildChannelNames(guildName)
def sendMessage(self, guildName, channelName, message):
enableTTS = False
return self.discordRest.sendMessage(guildName, channelName, enableTTS, message)
def sendTTSMessage(self, guildName, channelName, message):
enableTTS = True
return self.discordRest.sendMessage(guildName, channelName, enableTTS, message)
def getMessages(self, guildName, channelName):
return self.discordRest.getMessages(guildName, channelName)
def openGateway(self):
gatewayData = self.discordRest.getGatewayData()
gatewayUrl = gatewayData["url"];
gatewayShards = gatewayData["shards"];
self.gateway = gateway(self.token, self.namedPipe, gatewayUrl, gatewayShards)
message = "The gateway has been opened. The bot should be visible online now."
return message
def joinChannel(self, guildName, channelName):
guildId = self.discordRest.getGuildIdFromName(guildName)
channelId = self.discordRest.getGuildChannelIdFromName(guildName, channelName)
self.gateway.joinChannel(guildId, channelId)
self.inVoiceChannel = True
message = "The bot has entered channel " + self.voiceChannelName + "."
return message
def leaveChannel(self):
if self.inVoiceChannel:
self.gateway.leaveChannel()
self.inVoiceChannel = False
message = "The bot has left the channel"
else:
message = "The bot was not in a voice channel"
return message
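# Usage sketch (the pipe name is hypothetical; assumes the imports above
# resolve against the rest of the repository):
#   pipe = NamedPipe('purple-minion')
#   api = DiscordRequestAPI(pipe)
#   print(api.openGateway())
#   print(api.sendMessage('My Guild', 'general', 'Hello from the bot!'))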
|
{
"content_hash": "05160cd47461de06538a3266587d2693",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 81,
"avg_line_length": 33.131147540983605,
"alnum_prop": 0.7803067788223652,
"repo_name": "alanrossx2/DiscordBot-PurpleMinion",
"id": "d5b55e0ff164db820e3b49d210e5f19aeade64fc",
"size": "2021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DiscordRequestAPI/DiscordCommands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27873"
},
{
"name": "Shell",
"bytes": "1372"
}
],
"symlink_target": ""
}
|
from Crypto.PublicKey import RSA
from django.shortcuts import get_object_or_404
from rest_framework import generics
from rest_framework import status
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from extras.api.renderers import FormlessBrowsableAPIRenderer, FreeRADIUSClientsRenderer
from secrets.filters import SecretFilter
from secrets.models import Secret, SecretRole, UserKey
from . import serializers
ERR_USERKEY_MISSING = "No UserKey found for the current user."
ERR_USERKEY_INACTIVE = "UserKey has not been activated for decryption."
ERR_PRIVKEY_INVALID = "Invalid private key."
class SecretRoleListView(generics.ListAPIView):
"""
List all secret roles
"""
queryset = SecretRole.objects.all()
serializer_class = serializers.SecretRoleSerializer
permission_classes = [IsAuthenticated]
class SecretRoleDetailView(generics.RetrieveAPIView):
"""
Retrieve a single secret role
"""
queryset = SecretRole.objects.all()
serializer_class = serializers.SecretRoleSerializer
permission_classes = [IsAuthenticated]
class SecretListView(generics.GenericAPIView):
"""
List secrets (filterable). If a private key is POSTed, attempt to decrypt each Secret.
"""
queryset = Secret.objects.select_related('device__primary_ip4', 'device__primary_ip6', 'role')\
.prefetch_related('role__users', 'role__groups')
serializer_class = serializers.SecretSerializer
filter_class = SecretFilter
renderer_classes = [FormlessBrowsableAPIRenderer, JSONRenderer, FreeRADIUSClientsRenderer]
permission_classes = [IsAuthenticated]
def get(self, request, private_key=None):
queryset = self.filter_queryset(self.get_queryset())
# Attempt to decrypt each Secret if a private key was provided.
if private_key:
try:
uk = UserKey.objects.get(user=request.user)
except UserKey.DoesNotExist:
return Response(
{'error': ERR_USERKEY_MISSING},
status=status.HTTP_400_BAD_REQUEST
)
if not uk.is_active():
return Response(
{'error': ERR_USERKEY_INACTIVE},
status=status.HTTP_400_BAD_REQUEST
)
master_key = uk.get_master_key(private_key)
if master_key is not None:
for s in queryset:
if s.decryptable_by(request.user):
s.decrypt(master_key)
else:
return Response(
{'error': ERR_PRIVKEY_INVALID},
status=status.HTTP_400_BAD_REQUEST
)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
def post(self, request):
return self.get(request, private_key=request.POST.get('private_key'))
class SecretDetailView(generics.GenericAPIView):
"""
Retrieve a single Secret. If a private key is POSTed, attempt to decrypt the Secret.
"""
queryset = Secret.objects.select_related('device__primary_ip4', 'device__primary_ip6', 'role')\
.prefetch_related('role__users', 'role__groups')
serializer_class = serializers.SecretSerializer
renderer_classes = [FormlessBrowsableAPIRenderer, JSONRenderer, FreeRADIUSClientsRenderer]
permission_classes = [IsAuthenticated]
def get(self, request, pk, private_key=None):
secret = get_object_or_404(Secret, pk=pk)
# Attempt to decrypt the Secret if a private key was provided.
if private_key:
try:
uk = UserKey.objects.get(user=request.user)
except UserKey.DoesNotExist:
return Response(
{'error': ERR_USERKEY_MISSING},
status=status.HTTP_400_BAD_REQUEST
)
if not uk.is_active():
return Response(
{'error': ERR_USERKEY_INACTIVE},
status=status.HTTP_400_BAD_REQUEST
)
if not secret.decryptable_by(request.user):
raise PermissionDenied(detail="You do not have permission to decrypt this secret.")
master_key = uk.get_master_key(private_key)
if master_key is None:
return Response(
{'error': ERR_PRIVKEY_INVALID},
status=status.HTTP_400_BAD_REQUEST
)
secret.decrypt(master_key)
serializer = self.get_serializer(secret)
return Response(serializer.data)
def post(self, request, pk):
return self.get(request, pk, private_key=request.POST.get('private_key'))
class RSAKeyGeneratorView(APIView):
"""
Generate a new RSA key pair for a user. Authenticated because it's a ripe avenue for DoS.
"""
permission_classes = [IsAuthenticated]
def get(self, request):
        # Determine what size key to generate (query values arrive as strings)
        try:
            key_size = int(request.GET.get('key_size', 2048))
        except ValueError:
            key_size = 2048
        if key_size not in range(2048, 4097, 256):
            key_size = 2048
# Export RSA private and public keys in PEM format
key = RSA.generate(key_size)
private_key = key.exportKey('PEM')
public_key = key.publickey().exportKey('PEM')
return Response({
'private_key': private_key,
'public_key': public_key,
})
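# Client-side sketch (illustrative; the host, token and secret id are
# hypothetical, and the exact route depends on the project's URL conf):
#   import requests
#   requests.post('https://netbox.example.com/api/secrets/secrets/123/',
#                 headers={'Authorization': 'Token abc123'},
#                 data={'private_key': open('privkey.pem').read()})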
|
{
"content_hash": "27b88bbc3909a526a22131dfbdcf4006",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 99,
"avg_line_length": 36.54545454545455,
"alnum_prop": 0.6336176261549396,
"repo_name": "rfdrake/netbox",
"id": "672165da3cd877deea499a997e8118fd53c8bd15",
"size": "5628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netbox/secrets/api/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "157535"
},
{
"name": "HTML",
"bytes": "328897"
},
{
"name": "JavaScript",
"bytes": "12423"
},
{
"name": "Nginx",
"bytes": "774"
},
{
"name": "Python",
"bytes": "593223"
},
{
"name": "Shell",
"bytes": "3080"
}
],
"symlink_target": ""
}
|
from .binary import BinaryOperation
# Less than: [c = ]a < b
class LessThan(BinaryOperation):
# Constructor
def __init__(self, *args):
# Do all the initializing stuff
super().__init__(*args)
# Set filters
self._filters = {
BinaryOperation.OperatorSide.ASSIGN: self._filter.is_number,
BinaryOperation.OperatorSide.LEFT: self._filter.is_integer,
BinaryOperation.OperatorSide.RIGHT: self._filter.is_integer
}
# Set fallback numbers
self._fallback = {
BinaryOperation.OperatorSide.LEFT: list(range(*self._settings['IDENTIFIER_VALUE_RANGE'])),
BinaryOperation.OperatorSide.RIGHT: list(range(*self._settings['IDENTIFIER_VALUE_RANGE']))
}
# Calculate
def calculate(self, left, right):
return left < right
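# Semantics sketch: calculate() is the whole comparison, so for an instance
# 'op' of LessThan, op.calculate(3, 5) -> True and op.calculate(5, 3) -> False.
# Constructing 'op' requires the surrounding generator framework, hence the
# calls are shown as comments only.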
|
{
"content_hash": "282d4868897f067f11704f47e857cc5c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 102,
"avg_line_length": 32.73076923076923,
"alnum_prop": 0.6239717978848414,
"repo_name": "lgrahl/klausuromat",
"id": "914b95de299487a5a1889a8707b9214f0e838685",
"size": "851",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "klausuromat/operations/lt_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2339"
},
{
"name": "CoffeeScript",
"bytes": "2096"
},
{
"name": "JavaScript",
"bytes": "79447"
},
{
"name": "Python",
"bytes": "120053"
}
],
"symlink_target": ""
}
|
"""Routines to help recognizing sound files.
Function whathdr() recognizes various types of sound file headers.
It understands almost all headers that SOX can decode.
The return tuple contains the following items, in this order:
- file type (as SOX understands it)
- sampling rate (0 if unknown or hard to decode)
- number of channels (0 if unknown or hard to decode)
- number of frames in the file (-1 if unknown or hard to decode)
- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
If the file doesn't have a recognizable type, it returns None.
If the file can't be opened, OSError is raised.
To compute the total time, divide the number of frames by the
sampling rate (a frame contains a sample for each channel).
Function what() calls whathdr(). (It used to also use some
heuristics for raw data, but this doesn't work very well.)
Finally, the function test() is a simple main program that calls
what() for all files mentioned on the argument list. For directory
arguments it calls what() for all files in that directory. Default
argument is "." (testing all files in the current directory). The
option -r tells it to recurse down directories found inside
explicitly given directories.
"""
# The file structure is top-down except that the test program and its
# subroutine come last.
__all__ = ['what', 'whathdr']
from collections import namedtuple
SndHeaders = namedtuple('SndHeaders',
'filetype framerate nchannels nframes sampwidth')
def what(filename):
"""Guess the type of a sound file."""
res = whathdr(filename)
return res
def whathdr(filename):
"""Recognize sound headers."""
with open(filename, 'rb') as f:
h = f.read(512)
for tf in tests:
res = tf(h, f)
if res:
return SndHeaders(*res)
return None
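# Illustrative use (per the module docstring): total playing time is
# nframes / framerate when both are known. 'chime.wav' is a hypothetical file.
#   hdr = whathdr('chime.wav')
#   if hdr and hdr.framerate and hdr.nframes > 0:
#       duration_seconds = hdr.nframes / hdr.framerate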
#-----------------------------------#
# Subroutines per sound header type #
#-----------------------------------#
tests = []
def test_aifc(h, f):
import aifc
if not h.startswith(b'FORM'):
return None
if h[8:12] == b'AIFC':
fmt = 'aifc'
elif h[8:12] == b'AIFF':
fmt = 'aiff'
else:
return None
f.seek(0)
try:
a = aifc.open(f, 'r')
except (EOFError, aifc.Error):
return None
return (fmt, a.getframerate(), a.getnchannels(),
a.getnframes(), 8 * a.getsampwidth())
tests.append(test_aifc)
def test_au(h, f):
if h.startswith(b'.snd'):
func = get_long_be
elif h[:4] in (b'\0ds.', b'dns.'):
func = get_long_le
else:
return None
filetype = 'au'
hdr_size = func(h[4:8])
data_size = func(h[8:12])
encoding = func(h[12:16])
rate = func(h[16:20])
nchannels = func(h[20:24])
sample_size = 1 # default
if encoding == 1:
sample_bits = 'U'
elif encoding == 2:
sample_bits = 8
elif encoding == 3:
sample_bits = 16
sample_size = 2
else:
sample_bits = '?'
frame_size = sample_size * nchannels
if frame_size:
        nframe = data_size // frame_size  # integer frame count
else:
nframe = -1
return filetype, rate, nchannels, nframe, sample_bits
tests.append(test_au)
def test_hcom(h, f):
if h[65:69] != b'FSSD' or h[128:132] != b'HCOM':
return None
divisor = get_long_be(h[144:148])
if divisor:
rate = 22050 / divisor
else:
rate = 0
return 'hcom', rate, 1, -1, 8
tests.append(test_hcom)
def test_voc(h, f):
if not h.startswith(b'Creative Voice File\032'):
return None
sbseek = get_short_le(h[20:22])
rate = 0
if 0 <= sbseek < 500 and h[sbseek] == 1:
ratecode = 256 - h[sbseek+4]
if ratecode:
rate = int(1000000.0 / ratecode)
return 'voc', rate, 1, -1, 8
tests.append(test_voc)
def test_wav(h, f):
import wave
# 'RIFF' <len> 'WAVE' 'fmt ' <len>
if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ':
return None
f.seek(0)
try:
w = wave.openfp(f, 'r')
except (EOFError, wave.Error):
return None
return ('wav', w.getframerate(), w.getnchannels(),
w.getnframes(), 8*w.getsampwidth())
tests.append(test_wav)
def test_8svx(h, f):
if not h.startswith(b'FORM') or h[8:12] != b'8SVX':
return None
# Should decode it to get #channels -- assume always 1
return '8svx', 0, 1, 0, 8
tests.append(test_8svx)
def test_sndt(h, f):
if h.startswith(b'SOUND'):
nsamples = get_long_le(h[8:12])
rate = get_short_le(h[20:22])
return 'sndt', rate, 1, nsamples, 8
tests.append(test_sndt)
def test_sndr(h, f):
if h.startswith(b'\0\0'):
rate = get_short_le(h[2:4])
if 4000 <= rate <= 25000:
return 'sndr', rate, 1, -1, 8
tests.append(test_sndr)
#-------------------------------------------#
# Subroutines to extract numbers from bytes #
#-------------------------------------------#
def get_long_be(b):
return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]
def get_long_le(b):
return (b[3] << 24) | (b[2] << 16) | (b[1] << 8) | b[0]
def get_short_be(b):
return (b[0] << 8) | b[1]
def get_short_le(b):
return (b[1] << 8) | b[0]
#--------------------#
# Small test program #
#--------------------#
def test():
import sys
recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2]
recursive = 1
try:
if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1)
else:
testall(['.'], recursive, 1)
except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n')
sys.exit(1)
def testall(list, recursive, toplevel):
import sys
import os
for filename in list:
if os.path.isdir(filename):
print(filename + '/:', end=' ')
if recursive or toplevel:
print('recursing down:')
import glob
names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0)
else:
print('*** directory (use -r) ***')
else:
print(filename + ':', end=' ')
sys.stdout.flush()
try:
print(what(filename))
except OSError:
print('*** not found ***')
if __name__ == '__main__':
test()
|
{
"content_hash": "525a68501055c416daae49049838679d",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 78,
"avg_line_length": 26.195918367346938,
"alnum_prop": 0.5554689934559053,
"repo_name": "MalloyPower/parsing-python",
"id": "e5901ec58338aaae1ed33d56573b4ca0ecc0cfa1",
"size": "6418",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.5.0/Lib/sndhdr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
"""
Tests for the WebAppTest class.
"""
import os
from unittest import expectedFailure
from bok_choy.web_app_test import WebAppTest
from .pages import ImagePage
class ScreenshotAssertTest(WebAppTest):
"""
Test the integration with needle and its screenshot assertion capability.
"""
def test_needle_screenshot_success(self):
"""
Test the integration with needle to capture and assert on a screenshot of an element.
Note that the baseline_directory is computed in the __init__ method of NeedleTestCase,
so we can monkeypatch it here in the testcase itself.
"""
self.baseline_directory = os.path.realpath(os.path.join(os.getcwd(), 'tests', 'baseline'))
self.page = ImagePage(self.browser).visit()
self.assertScreenshot('#green_check', 'correct-icon')
@expectedFailure
def test_needle_screenshot_failure(self):
"""
Test the integration with needle to capture and assert on a screenshot of an element.
Note that the baseline_directory is computed in the __init__ method of NeedleTestCase,
so we can monkeypatch it here in the testcase itself.
"""
self.baseline_directory = os.path.realpath(os.path.join(os.getcwd(), 'tests', 'baseline'))
self.page = ImagePage(self.browser).visit()
self.assertScreenshot('#green_check', 'incorrect-icon')
|
{
"content_hash": "f831e1146000d4b24df2ee531e2f9b8e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 98,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.6860632183908046,
"repo_name": "drptbl/bok-choy",
"id": "f8ac1eb3599035a631b822061fa13106663d61a5",
"size": "1392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_webapptest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "9708"
},
{
"name": "JavaScript",
"bytes": "184"
},
{
"name": "Python",
"bytes": "135934"
},
{
"name": "Shell",
"bytes": "2543"
}
],
"symlink_target": ""
}
|
import re
import time
import requests
import pandas as pd
import pymysql.cursors
from bs4 import BeautifulSoup
# Runtime start
start = time.perf_counter()
print(start)
# Connect to the database
connection = pymysql.connect(user='root', password='abc123', host='127.0.0.1', db='FUTHEAD', cursorclass=pymysql.cursors.DictCursor, charset='UTF8')
cursor = connection.cursor()
tiers = [
'gold',
'silver',
'bronze'
]
fifa = {
#'10': 'FIFA10',
#'11': 'FIFA11',
#'12': 'FIFA12',
#'13': 'FIFA13',
#'14': 'FIFA14',
#'15': 'FIFA15',
#'16': 'FIFA16',
#'17': 'FIFA17',
#'18': 'FIFA18',
#'19': 'FIFA19',
'20': 'FIFA20'
}
for key, value in fifa.items():
print('Doing FIFA ' + key)
# Truncating table before inserting data into the table
cursor.execute('TRUNCATE TABLE FUTHEAD.{};'.format(value))
# List Intializations
players = []
attributes = []
# Looping through all pages to retrieve players stats and information
for tier in tiers:
FutHead = requests.get('https://www.futhead.com/' + key + '/players/?level=' + tier + '&bin_platform=ps')
bs = BeautifulSoup(FutHead.text, 'html.parser')
        TotalPages = int(re.sub(r'\s +', '', str(bs.find('span', {'class': 'font-12 font-bold margin-l-r-10'}).get_text())).split(' ')[1])
print('Number of pages to be parsed for FIFA ' + key + ' ' + tier + ' level players: ' + str(TotalPages))
for page in range(1, TotalPages + 1):
FutHead = requests.get('http://www.futhead.com/' + key + '/players/?page=' + str(page) + '&level=' + tier + '&bin_platform=ps')
bs = BeautifulSoup(FutHead.text, 'html.parser')
Stats = bs.findAll('span', {'class': 'player-stat stream-col-60 hidden-md hidden-sm'})
Names = bs.findAll('span', {'class': 'player-name'})
Information = bs.findAll('span', {'class': 'player-club-league-name'})
Ratings = bs.findAll('span', {'class': re.compile('revision-gradient shadowed font-12')})
num = len(bs.findAll('li', {'class': 'list-group-item list-group-table-row player-group-item dark-hover'}))
# Parsing all players information
for i in range(num):
p = []
p.append(Names[i].get_text())
strong = Information[i].strong.extract()
try:
                    p.append(re.sub(r'\s +', '', str(Information[i].get_text())).split('| ')[1])
except IndexError:
p.append('')
try:
                    p.append(re.sub(r'\s +', '', str(Information[i].get_text())).split('| ')[2])
except IndexError:
p.append('')
p.append(strong.get_text())
p.append(tier.capitalize())
p.append(Ratings[i].get_text())
players.append(p)
# Parsing all players stats
temp = []
for stat in Stats:
if Stats.index(stat) % 6 == 0:
if len(temp) > 0:
attributes.append(temp)
temp = []
if stat.find('span', {'class': 'value'}) is None:
pass
else:
temp.append(stat.find('span', {'class': 'value'}).get_text())
print('Page ' + str(page) + ' is done!')
# Inserting data into its specific table
for player, attribute in zip(players, attributes):
cursor.execute('''
INSERT INTO FUTHEAD.{} (
NAME,
CLUB,
LEAGUE,
POSITION,
TIER,
RATING,
PACE,
SHOOTING,
PASSING,
DRIBBLING,
DEFENDING,
PHYSICAL
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
'''.format(value), (*player, *attribute))
# Dumping the lines into a csv file
pd.read_sql_query('SELECT * FROM FUTHEAD.{};'.format(value), connection).to_csv(value + '.csv', sep=',', encoding='utf-8', index=False)
# Commit MYSQL statements
connection.commit()
# Closing connection to the DB and closing csv file
connection.close()
# Runtime end
print(time.perf_counter() - start)
|
{
"content_hash": "89f08db9c649c7194b4e385a9910b062",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 148,
"avg_line_length": 36.00826446280992,
"alnum_prop": 0.5182464998852422,
"repo_name": "kafagy95/fifa18-Data",
"id": "cd29f373491ba43926488f01c7c250e8e8441f0b",
"size": "4357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "futhead.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5397"
}
],
"symlink_target": ""
}
|
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import versionutils
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova.db.sqlalchemy import models as main_models
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.objects import base
from nova.objects import fields
KEYPAIR_TYPE_SSH = 'ssh'
KEYPAIR_TYPE_X509 = 'x509'
LOG = logging.getLogger(__name__)
@db_api.api_context_manager.reader
def _get_from_db(context, user_id, name=None, limit=None, marker=None):
query = context.session.query(api_models.KeyPair).\
filter(api_models.KeyPair.user_id == user_id)
if name is not None:
db_keypair = query.filter(api_models.KeyPair.name == name).\
first()
if not db_keypair:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return db_keypair
marker_row = None
if marker is not None:
marker_row = context.session.query(api_models.KeyPair).\
filter(api_models.KeyPair.name == marker).\
filter(api_models.KeyPair.user_id == user_id).first()
if not marker_row:
raise exception.MarkerNotFound(marker=marker)
query = sqlalchemyutils.paginate_query(
query, api_models.KeyPair, limit, ['name'], marker=marker_row)
return query.all()
@db_api.api_context_manager.reader
def _get_count_from_db(context, user_id):
return context.session.query(api_models.KeyPair).\
filter(api_models.KeyPair.user_id == user_id).\
count()
@db_api.api_context_manager.writer
def _create_in_db(context, values):
kp = api_models.KeyPair()
kp.update(values)
try:
kp.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
return kp
@db_api.api_context_manager.writer
def _destroy_in_db(context, user_id, name):
result = context.session.query(api_models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class KeyPair(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Added keypair type
# Version 1.3: Name field is non-null
# Version 1.4: Add localonly flag to get_by_name()
VERSION = '1.4'
fields = {
'id': fields.IntegerField(),
'name': fields.StringField(nullable=False),
'user_id': fields.StringField(nullable=True),
'fingerprint': fields.StringField(nullable=True),
'public_key': fields.StringField(nullable=True),
'type': fields.StringField(nullable=False),
}
def obj_make_compatible(self, primitive, target_version):
super(KeyPair, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 2) and 'type' in primitive:
del primitive['type']
@staticmethod
def _from_db_object(context, keypair, db_keypair):
ignore = {'deleted': False,
'deleted_at': None}
for key in keypair.fields:
if key in ignore and not hasattr(db_keypair, key):
keypair[key] = ignore[key]
else:
keypair[key] = db_keypair[key]
keypair._context = context
keypair.obj_reset_changes()
return keypair
@staticmethod
def _get_from_db(context, user_id, name):
return _get_from_db(context, user_id, name=name)
@staticmethod
def _destroy_in_db(context, user_id, name):
return _destroy_in_db(context, user_id, name)
@staticmethod
def _create_in_db(context, values):
return _create_in_db(context, values)
@base.remotable_classmethod
def get_by_name(cls, context, user_id, name,
localonly=False):
db_keypair = None
if not localonly:
try:
db_keypair = cls._get_from_db(context, user_id, name)
except exception.KeypairNotFound:
pass
if db_keypair is None:
db_keypair = db.key_pair_get(context, user_id, name)
return cls._from_db_object(context, cls(), db_keypair)
@base.remotable_classmethod
def destroy_by_name(cls, context, user_id, name):
try:
cls._destroy_in_db(context, user_id, name)
except exception.KeypairNotFound:
db.key_pair_destroy(context, user_id, name)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
# NOTE(danms): Check to see if it exists in the old DB before
# letting them create in the API DB, since we won't get protection
# from the UC.
try:
db.key_pair_get(self._context, self.user_id, self.name)
raise exception.KeyPairExists(key_name=self.name)
except exception.KeypairNotFound:
pass
self._create()
def _create(self):
updates = self.obj_get_changes()
db_keypair = self._create_in_db(self._context, updates)
self._from_db_object(self._context, self, db_keypair)
@base.remotable
def destroy(self):
try:
self._destroy_in_db(self._context, self.user_id, self.name)
except exception.KeypairNotFound:
db.key_pair_destroy(self._context, self.user_id, self.name)
@base.NovaObjectRegistry.register
class KeyPairList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# KeyPair <= version 1.1
# Version 1.1: KeyPair <= version 1.2
# Version 1.2: KeyPair <= version 1.3
# Version 1.3: Add new parameters 'limit' and 'marker' to get_by_user()
VERSION = '1.3'
fields = {
'objects': fields.ListOfObjectsField('KeyPair'),
}
@staticmethod
def _get_from_db(context, user_id, limit, marker):
return _get_from_db(context, user_id, limit=limit, marker=marker)
@staticmethod
def _get_count_from_db(context, user_id):
return _get_count_from_db(context, user_id)
@base.remotable_classmethod
def get_by_user(cls, context, user_id, limit=None, marker=None):
try:
api_db_keypairs = cls._get_from_db(
context, user_id, limit=limit, marker=marker)
# NOTE(pkholkin): If we were asked for a marker and found it in
# results from the API DB, we must continue our pagination with
# just the limit (if any) to the main DB.
marker = None
except exception.MarkerNotFound:
api_db_keypairs = []
if limit is not None:
limit_more = limit - len(api_db_keypairs)
else:
limit_more = None
if limit_more is None or limit_more > 0:
main_db_keypairs = db.key_pair_get_all_by_user(
context, user_id, limit=limit_more, marker=marker)
else:
main_db_keypairs = []
return base.obj_make_list(context, cls(context), objects.KeyPair,
api_db_keypairs + main_db_keypairs)
@base.remotable_classmethod
def get_count_by_user(cls, context, user_id):
return (cls._get_count_from_db(context, user_id) +
db.key_pair_count_by_user(context, user_id))
@db_api.main_context_manager.reader
def _count_unmigrated_instances(context):
return context.session.query(main_models.InstanceExtra).\
filter_by(keypairs=None).\
filter_by(deleted=0).\
count()
@db_api.main_context_manager.reader
def _get_main_keypairs(context, limit):
return context.session.query(main_models.KeyPair).\
filter_by(deleted=0).\
limit(limit).\
all()
def migrate_keypairs_to_api_db(context, count):
bad_instances = _count_unmigrated_instances(context)
if bad_instances:
LOG.error(_LE('Some instances are still missing keypair '
'information. Unable to run keypair migration '
'at this time.'))
return 0, 0
main_keypairs = _get_main_keypairs(context, count)
done = 0
for db_keypair in main_keypairs:
kp = objects.KeyPair(context=context,
user_id=db_keypair.user_id,
name=db_keypair.name,
fingerprint=db_keypair.fingerprint,
public_key=db_keypair.public_key,
type=db_keypair.type)
try:
kp._create()
except exception.KeyPairExists:
# NOTE(danms): If this got created somehow in the API DB,
# then it's newer and we just continue on to destroy the
# old one in the cell DB.
pass
db_api.key_pair_destroy(context, db_keypair.user_id, db_keypair.name)
done += 1
return len(main_keypairs), done
|
{
"content_hash": "2afb3ee624a3b1dcde6e3e7b3828a852",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 78,
"avg_line_length": 35.018450184501845,
"alnum_prop": 0.6162276080084299,
"repo_name": "alaski/nova",
"id": "7fe9dc518e479ae4fd6c28b3b26c2e77823ca005",
"size": "10095",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/objects/keypair.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16744610"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "351433"
}
],
"symlink_target": ""
}
|
"""Demonstrates how to authenticate to Google Cloud Platform APIs using the
Requests HTTP library."""
import argparse
def implicit():
import google.auth
from google.auth.transport import requests
# Get the credentials and project ID from the environment.
credentials, project = google.auth.default(
scopes=['https://www.googleapis.com/auth/cloud-platform'])
# Create a requests Session object with the credentials.
session = requests.AuthorizedSession(credentials)
# Make an authenticated API request
response = session.get(
        'https://www.googleapis.com/storage/v1/b',
params={'project': project})
response.raise_for_status()
buckets = response.json()
print(buckets)
def explicit(project):
from google.auth.transport import requests
from google.oauth2 import service_account
# Construct service account credentials using the service account key
# file.
credentials = service_account.Credentials.from_service_account_file(
'service_account.json')
credentials = credentials.with_scopes(
['https://www.googleapis.com/auth/cloud-platform'])
# Create a requests Session object with the credentials.
session = requests.AuthorizedSession(credentials)
# Make an authenticated API request
response = session.get(
        'https://www.googleapis.com/storage/v1/b',
params={'project': project})
response.raise_for_status()
buckets = response.json()
print(buckets)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
subparsers.add_parser('implicit', help=implicit.__doc__)
explicit_parser = subparsers.add_parser('explicit', help=explicit.__doc__)
explicit_parser.add_argument('project')
args = parser.parse_args()
if args.command == 'implicit':
implicit()
elif args.command == 'explicit':
explicit(args.project)
|
{
"content_hash": "f95c3ab595d1ddc08b6c51af939d7ce7",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 32.184615384615384,
"alnum_prop": 0.6940726577437859,
"repo_name": "BrandonY/python-docs-samples",
"id": "f8321f077414076c1185271d3f193288266e29fb",
"size": "2690",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "auth/http-client/snippets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2924"
},
{
"name": "HTML",
"bytes": "24309"
},
{
"name": "JavaScript",
"bytes": "11222"
},
{
"name": "Makefile",
"bytes": "881"
},
{
"name": "Protocol Buffer",
"bytes": "10818"
},
{
"name": "Python",
"bytes": "1331765"
},
{
"name": "Shell",
"bytes": "11028"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse
from django.test import RequestFactory
from django.test.utils import override_settings
from csp.contrib.rate_limiting import RateLimitedCSPMiddleware
from csp.tests.utils import response
HEADER = 'Content-Security-Policy'
mw = RateLimitedCSPMiddleware(response())
rf = RequestFactory()
@override_settings(CSP_REPORT_PERCENTAGE=0.1, CSP_REPORT_URI='x')
def test_report_percentage():
times_seen = 0
for _ in range(5000):
request = rf.get('/')
response = HttpResponse()
mw.process_response(request, response)
if 'report-uri' in response[HEADER]:
times_seen += 1
# Roughly 10%
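    # With n=5000 trials at p=0.1 the count is binomial: mean 500, standard
    # deviation ~21, so the 400-600 window is about +/-4.7 sigma wide and
    # the assertion is effectively deterministic.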
assert 400 <= times_seen <= 600
|
{
"content_hash": "71c7baba60dd34ad8b07e09c2a24db95",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 29.458333333333332,
"alnum_prop": 0.7057991513437057,
"repo_name": "mozilla/django-csp",
"id": "98ccdedb7f93125d021c4ff4c7d8bc3c147b8512",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "csp/tests/test_contrib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44772"
}
],
"symlink_target": ""
}
|
"""The Rcon driver."""
from rconsoft.dispatch.dispatcher import Signal
from rconsoft.util.signalsocket import SignalSocket
from rconsoft.config import config
#------------------------------
class RconDriver(object):
#==============================
def __init__(self):
self._init_signals()
#==============================
def _init_signals(self):
pass
#==============================
def run(self):
self.sock = SignalSocket(config['rcon']['local']['host'], int(config['rcon']['local']['port']), 'udp')
self.sock.post_receive.connect(self.on_receive)
self.sock.server()
#==============================
def on_receive(self, **kwargs):
        print(kwargs.get('data'))
|
{
"content_hash": "44b1c3b2d1ef0d33444d311a162f4e76",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 106,
"avg_line_length": 29.48,
"alnum_prop": 0.49796472184531887,
"repo_name": "adrianlee/rcon-cs",
"id": "90d80e65c3567affdead176b8c58dbfda6f03fd9",
"size": "737",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rconsoft/rcon/driver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "1326"
},
{
"name": "Python",
"bytes": "177581"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import migrations, models
import openslides.utils.models
class Migration(migrations.Migration):
dependencies = [("agenda", "0005_auto_20180815_1109")]
operations = [
migrations.AlterField(
model_name="item",
name="parent",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=openslides.utils.models.SET_NULL_AND_AUTOUPDATE,
related_name="children",
to="agenda.Item",
),
),
migrations.AlterField(
model_name="speaker",
name="user",
field=models.ForeignKey(
on_delete=openslides.utils.models.CASCADE_AND_AUTOUPDATE,
to=settings.AUTH_USER_MODEL,
),
),
]
|
{
"content_hash": "e9f5e22b5f12977ebb08f6bac860ef58",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 27.806451612903224,
"alnum_prop": 0.54292343387471,
"repo_name": "jwinzer/OpenSlides",
"id": "c4de2a12f49ed52cba7bfbce4cb9294867419bf2",
"size": "911",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/openslides/agenda/migrations/0006_auto_20190119_1425.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2553"
},
{
"name": "HTML",
"bytes": "504880"
},
{
"name": "JavaScript",
"bytes": "74835"
},
{
"name": "M4",
"bytes": "18419"
},
{
"name": "Python",
"bytes": "1533060"
},
{
"name": "SCSS",
"bytes": "137122"
},
{
"name": "Shell",
"bytes": "9338"
},
{
"name": "Smarty",
"bytes": "7554"
},
{
"name": "TypeScript",
"bytes": "2637192"
}
],
"symlink_target": ""
}
|
"""Test module for cgsdata"""
|
{
"content_hash": "7efec850d59c9ae2c8154515bae9ea13",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.6666666666666666,
"repo_name": "jpoullet2000/cgs-data",
"id": "7b97ff28cd6556173f9f414ec235d4e78c120cab",
"size": "30",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "42413"
},
{
"name": "Shell",
"bytes": "537"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask import g, session, request, url_for, flash
from flask import redirect, render_template
from flask_oauthlib.client import OAuth
app = Flask(__name__)
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
twitter = oauth.remote_app(
'twitter',
consumer_key='xBeXxg9lyElUgwZT6AZ0A',
consumer_secret='aawnSpNTOVuDCjx7HMh6uSXetjNN8zWLpZwCEU4LBrk',
base_url='https://api.twitter.com/1.1/',
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authenticate',
)
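# flask-oauthlib calls this token getter to fetch the stored OAuth token
# whenever it signs an API request on the user's behalf.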
@twitter.tokengetter
def get_twitter_token():
if 'twitter_oauth' in session:
resp = session['twitter_oauth']
return resp['oauth_token'], resp['oauth_token_secret']
@app.before_request
def before_request():
g.user = None
if 'twitter_oauth' in session:
g.user = session['twitter_oauth']
@app.route('/')
def index():
tweets = None
if g.user is not None:
resp = twitter.request('statuses/home_timeline.json')
if resp.status == 200:
tweets = resp.data
else:
flash('Unable to load tweets from Twitter.')
return render_template('index.html', tweets=tweets)
@app.route('/tweet', methods=['POST'])
def tweet():
if g.user is None:
return redirect(url_for('login', next=request.url))
status = request.form['tweet']
if not status:
return redirect(url_for('index'))
resp = twitter.post('statuses/update.json', data={
'status': status
})
if resp.status == 403:
flash("Error: #%d, %s " % (
resp.data.get('errors')[0].get('code'),
resp.data.get('errors')[0].get('message'))
)
elif resp.status == 401:
flash('Authorization error with Twitter.')
else:
flash('Successfully tweeted your tweet (ID: #%s)' % resp.data['id'])
return redirect(url_for('index'))
@app.route('/login')
def login():
callback_url = url_for('oauthorized', next=request.args.get('next'))
return twitter.authorize(callback=callback_url or request.referrer or None)
@app.route('/logout')
def logout():
session.pop('twitter_oauth', None)
return redirect(url_for('index'))
@app.route('/oauthorized')
def oauthorized():
resp = twitter.authorized_response()
if resp is None:
flash('You denied the request to sign in.')
else:
session['twitter_oauth'] = resp
return redirect(url_for('index'))
if __name__ == '__main__':
app.run()
|
{
"content_hash": "f4f079356fe9bbf70dbe0971790a2444",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 27.177083333333332,
"alnum_prop": 0.6374089689536221,
"repo_name": "stianpr/flask-oauthlib",
"id": "0ed6151461704e23ca30c3e04e32e0757089a6eb",
"size": "2626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/twitter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "675"
},
{
"name": "HTML",
"bytes": "2514"
},
{
"name": "Makefile",
"bytes": "749"
},
{
"name": "Python",
"bytes": "234406"
}
],
"symlink_target": ""
}
|
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ConnectionRegistrationRequestDto(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, connection_id=None, connector_id=None, parameters=None, keep_alive=False):
"""
ConnectionRegistrationRequestDto - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'connection_id': 'str',
'connector_id': 'str',
'parameters': 'dict(str, str)',
'keep_alive': 'bool'
}
self.attribute_map = {
'connection_id': 'connectionId',
'connector_id': 'connectorId',
'parameters': 'parameters',
'keep_alive': 'keepAlive'
}
self._connection_id = connection_id
self._connector_id = connector_id
self._parameters = parameters
self._keep_alive = keep_alive
@property
def connection_id(self):
"""
Gets the connection_id of this ConnectionRegistrationRequestDto.
:return: The connection_id of this ConnectionRegistrationRequestDto.
:rtype: str
"""
return self._connection_id
@connection_id.setter
def connection_id(self, connection_id):
"""
Sets the connection_id of this ConnectionRegistrationRequestDto.
:param connection_id: The connection_id of this ConnectionRegistrationRequestDto.
:type: str
"""
self._connection_id = connection_id
@property
def connector_id(self):
"""
Gets the connector_id of this ConnectionRegistrationRequestDto.
:return: The connector_id of this ConnectionRegistrationRequestDto.
:rtype: str
"""
return self._connector_id
@connector_id.setter
def connector_id(self, connector_id):
"""
Sets the connector_id of this ConnectionRegistrationRequestDto.
:param connector_id: The connector_id of this ConnectionRegistrationRequestDto.
:type: str
"""
self._connector_id = connector_id
@property
def parameters(self):
"""
Gets the parameters of this ConnectionRegistrationRequestDto.
:return: The parameters of this ConnectionRegistrationRequestDto.
:rtype: dict(str, str)
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""
Sets the parameters of this ConnectionRegistrationRequestDto.
:param parameters: The parameters of this ConnectionRegistrationRequestDto.
:type: dict(str, str)
"""
self._parameters = parameters
@property
def keep_alive(self):
"""
Gets the keep_alive of this ConnectionRegistrationRequestDto.
:return: The keep_alive of this ConnectionRegistrationRequestDto.
:rtype: bool
"""
return self._keep_alive
@keep_alive.setter
def keep_alive(self, keep_alive):
"""
Sets the keep_alive of this ConnectionRegistrationRequestDto.
:param keep_alive: The keep_alive of this ConnectionRegistrationRequestDto.
:type: bool
"""
self._keep_alive = keep_alive
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
{
"content_hash": "6d0b11143bfba54a862a47b5e366bb60",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 105,
"avg_line_length": 28.507462686567163,
"alnum_prop": 0.5884816753926702,
"repo_name": "melphi/algobox",
"id": "989904a7c2d9e2aba1f275f23b83fe3ee04f2221",
"size": "5747",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/algobox/src/algobox/client/generated/api/models/connection_registration_request_dto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "105814"
},
{
"name": "HTML",
"bytes": "11544"
},
{
"name": "Java",
"bytes": "530584"
},
{
"name": "JavaScript",
"bytes": "2464376"
},
{
"name": "Jupyter Notebook",
"bytes": "8856"
},
{
"name": "Python",
"bytes": "534936"
},
{
"name": "Shell",
"bytes": "1848"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from cryptochallenge import userprofile
class TestProfileMethods(TestCase):
def test_kvparse(self):
teststr = "foo=bar&baz=qux&zap=zazzle"
print(teststr)
exp_obj = {
"foo": "bar",
"baz": "qux",
"zap": "zazzle"
}
print(exp_obj)
ret_obj = userprofile.kvparse(teststr)
print(ret_obj)
self.assertEqual(exp_obj, ret_obj)
def test_profile_for(self):
test_email = "bob@2018_tests.com"
exp_obj = {
"email": test_email,
"uid": "10",
"role": "user"
}
print(exp_obj)
ret_obj = userprofile.profile_for(test_email)
print(ret_obj)
self.assertEqual(exp_obj, ret_obj)
def test_kvbuild(self):
exp_str = "foo=bar&baz=qux&zap=zazzle"
print(exp_str)
test_obj = {
"foo": "bar",
"baz": "qux",
"zap": "zazzle"
}
ret_str = userprofile.kvbuild(test_obj)
print(ret_str)
self.assertEqual(exp_str, ret_str)
def test_profile_encrypt(self):
self.assertEqual("A", "A")
def test_profile_decrypt(self):
self.assertEqual("A", "A")
|
{
"content_hash": "b7fd4e2eeb031b5181f2e166d0426da4",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 53,
"avg_line_length": 24.192307692307693,
"alnum_prop": 0.5230524642289348,
"repo_name": "dctelf/poisonous-mushroom",
"id": "f3cb4db57a2b5ba794dd8ae759c534b642741406",
"size": "1258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2018_tests/test_profilemethods.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69709"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from .models import kinesisvideoarchivedmedia_backends
from ..core.models import base_decorator
kinesisvideoarchivedmedia_backend = kinesisvideoarchivedmedia_backends["us-east-1"]
mock_kinesisvideoarchivedmedia = base_decorator(kinesisvideoarchivedmedia_backends)
|
{
"content_hash": "6510a23b404d1e24604fcd8a7f74388c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 83,
"avg_line_length": 50.833333333333336,
"alnum_prop": 0.8524590163934426,
"repo_name": "william-richard/moto",
"id": "c1676c87143a460272d3b5601d89a727fdf028e4",
"size": "305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/kinesisvideoarchivedmedia/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
}
|
import time
import copy
import uuid
from mitmproxy import controller # noqa
from mitmproxy import stateobject
from mitmproxy import connections
from mitmproxy import version
import typing # noqa
class Error(stateobject.StateObject):
"""
An Error.
    This is distinct from a protocol error response (say, an HTTP code 500),
which is represented by a normal HTTPResponse object. This class is
responsible for indicating errors that fall outside of normal protocol
communications, like interrupted connections, timeouts, protocol errors.
Exposes the following attributes:
msg: Message describing the error
timestamp: Seconds since the epoch
"""
def __init__(self, msg: str, timestamp=None) -> None:
"""
@type msg: str
@type timestamp: float
"""
self.msg = msg
self.timestamp = timestamp or time.time()
_stateobject_attributes = dict(
msg=str,
timestamp=float
)
def __str__(self):
return self.msg
def __repr__(self):
return self.msg
@classmethod
def from_state(cls, state):
# the default implementation assumes an empty constructor. Override
# accordingly.
f = cls(None)
f.set_state(state)
return f
def copy(self):
c = copy.copy(self)
return c
class Flow(stateobject.StateObject):
"""
A Flow is a collection of objects representing a single transaction.
This class is usually subclassed for each protocol, e.g. HTTPFlow.
"""
def __init__(
self,
type: str,
client_conn: connections.ClientConnection,
server_conn: connections.ServerConnection,
live: bool=None
) -> None:
self.type = type
self.id = str(uuid.uuid4())
self.client_conn = client_conn
self.server_conn = server_conn
self.live = live
self.error = None # type: typing.Optional[Error]
self.intercepted = False # type: bool
self._backup = None # type: typing.Optional[Flow]
self.reply = None # type: typing.Optional[controller.Reply]
self.marked = False # type: bool
self.metadata = dict() # type: typing.Dict[str, str]
_stateobject_attributes = dict(
id=str,
error=Error,
client_conn=connections.ClientConnection,
server_conn=connections.ServerConnection,
type=str,
intercepted=bool,
marked=bool,
metadata=dict,
)
def get_state(self):
d = super().get_state()
d.update(version=version.IVERSION)
if self._backup and self._backup != d:
d.update(backup=self._backup)
return d
def set_state(self, state):
state.pop("version")
if "backup" in state:
self._backup = state.pop("backup")
super().set_state(state)
@classmethod
def from_state(cls, state):
f = cls(None, None)
f.set_state(state)
return f
def copy(self):
f = copy.copy(self)
f.id = str(uuid.uuid4())
f.live = False
f.client_conn = self.client_conn.copy()
f.server_conn = self.server_conn.copy()
f.metadata = self.metadata.copy()
if self.error:
f.error = self.error.copy()
return f
def modified(self):
"""
Has this Flow been modified?
"""
if self._backup:
return self._backup != self.get_state()
else:
return False
def backup(self, force=False):
"""
Save a backup of this Flow, which can be reverted to using a
call to .revert().
"""
if not self._backup:
self._backup = self.get_state()
def revert(self):
"""
Revert to the last backed up state.
"""
if self._backup:
self.set_state(self._backup)
self._backup = None
@property
def killable(self):
return self.reply and self.reply.state in {"handled", "taken"}
def kill(self):
"""
Kill this request.
"""
self.error = Error("Connection killed")
self.intercepted = False
# reply.state should only be "handled" or "taken" here.
# if none of this is the case, .take() will raise an exception.
if self.reply.state != "taken":
self.reply.take()
self.reply.kill(force=True)
self.reply.commit()
self.live = False
def intercept(self):
"""
Intercept this Flow. Processing will stop until resume is
called.
"""
if self.intercepted:
return
self.intercepted = True
self.reply.take()
def resume(self):
"""
Continue with the flow - called after an intercept().
"""
if not self.intercepted:
return
self.intercepted = False
self.reply.ack()
self.reply.commit()
|
{
"content_hash": "062d94b497d19b4cc52415fd041f3653",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 80,
"avg_line_length": 26.588541666666668,
"alnum_prop": 0.5657198824681685,
"repo_name": "dwfreed/mitmproxy",
"id": "7034cb4ab7951a094b78cf5713d7b2e93f0390c7",
"size": "5105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mitmproxy/flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "208058"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "2149949"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1378470"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import
import django
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "dddddddddddddddddddddddddddddddddddddddddddddddddd"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"magic_cards",
]
SITE_ID = 1
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
|
{
"content_hash": "dfbe3fd3f3dda4f3aaa2c326188e8860",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 66,
"avg_line_length": 19,
"alnum_prop": 0.6595394736842105,
"repo_name": "pbaranay/django-magic-cards",
"id": "9a5cf11b7548162153b4a73832307111c7c02e63",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2864"
},
{
"name": "Makefile",
"bytes": "1529"
},
{
"name": "Python",
"bytes": "37871"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from dateutil import tz
from math import floor
def utcnow():
"""Returns the current time in a timezone-aware datetime object."""
return datetime.now(tz=tz.tzutc())
def unix_timestamp(dt):
"""Returns a UNIX timestamp representing the given datetime."""
try:
return floor(dt.timestamp())
except AttributeError:
# Handle Python 2.
dt_naive = dt.replace(tzinfo=None) - dt.utcoffset()
return floor((dt_naive - datetime(1970, 1, 1)).total_seconds())
def from_unix_timestamp(unix_ts):
"""Returns a datetime from the given UNIX timestamp."""
return datetime.fromtimestamp(int(unix_ts), tz.tzutc())
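# Illustrative round-trip (not part of the original module): because
# unix_timestamp() floors to whole seconds, the reconstructed datetime is
# never later than the original.
if __name__ == '__main__':
    now = utcnow()
    assert from_unix_timestamp(unix_timestamp(now)) <= now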
|
{
"content_hash": "637ec61de8237cf15e9049e0402e9245",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 29.82608695652174,
"alnum_prop": 0.6807580174927114,
"repo_name": "kyouko-taiga/trexmo",
"id": "6bbc3f8600580905741fb7f05b2407d90b26653a",
"size": "686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trexmo/core/utils/time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "624"
},
{
"name": "CSS",
"bytes": "489"
},
{
"name": "HTML",
"bytes": "8983"
},
{
"name": "JavaScript",
"bytes": "87874"
},
{
"name": "Python",
"bytes": "74429"
},
{
"name": "Shell",
"bytes": "205"
}
],
"symlink_target": ""
}
|
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction, add_witness_commitment
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str
import time
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero) (code 64)"
def trueDummy(tx):
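    # Replace the zero-length CHECKMULTISIG dummy element of the scriptSig
    # with OP_TRUE (0x51), producing a transaction that violates NULLDUMMY.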
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [['-whitelist=127.0.0.1', '-vbparams=segwit:0:999999999999', '-addresstype=legacy', "-deprecatedrpc=addwitnessaddress"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])['address']
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addmultisigaddress(1, [self.address], '', 'p2sh-segwit')['address']
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
        test6txs = [CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
self.block_submit(self.nodes[0], test6txs, True, True)
def block_submit(self, node, txs, witness = False, accept = False):
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(with_witness=True, legacy=False)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
|
{
"content_hash": "813d96741c9f446937527f62a6aeb2a7",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 146,
"avg_line_length": 52.125,
"alnum_prop": 0.686810551558753,
"repo_name": "BTCGPU/BTCGPU",
"id": "1cf5b33e1e40a87ce660853b580d365ca102212f",
"size": "6469",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/feature_nulldummy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "681371"
},
{
"name": "C++",
"bytes": "6149622"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "196038"
},
{
"name": "Makefile",
"bytes": "117750"
},
{
"name": "Objective-C",
"bytes": "16581"
},
{
"name": "Objective-C++",
"bytes": "6592"
},
{
"name": "Python",
"bytes": "1479080"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "79152"
}
],
"symlink_target": ""
}
|
"""
This module implements a simple framework for creating C-like enumerations in
Python using classes. Simply inherit from the #Enumeration class.
```python
>>> class Color(enum.Enumeration):
... red = 0
... green = 1
... blue = 2
>>> print(Color.red)
<Color: red>
>>> print(Color('green'))
<Color: green>
>>> print(Color(2))
<Color: blue>
>>> print(Color('red') is Color.red)
True
>>> print(Color.blue.name)
blue
>>> Color(343)
Traceback (most recent call last):
File "test.py", line 10, in <module>
Color(343)
File "C:\\repositories\\py-nr.utils\\nr\\utils\\enum.py", line 159, in __new__
raise NoSuchEnumerationValue(cls.__name__, value)
nr.utils.enum.NoSuchEnumerationValue: ('Color', 343)
```
If you do not want an invalid enumeration value to raise an error, a
`__fallback__` value can be specified at class level.
```python
>>> class Color(enum.Enumeration):
... red = 0
... green = 1
... blue = 2
... __fallback__ = -1
>>> print(Color(42))
<Color: -invalid->
>>> print(Color(7).value)
-1
>>> print(Color(16).name)
-invalid-
```
You can also iterate over an enumeration class. Note that the items are
yielded sorted by value; the order of declaration plays no role.
```python
>>> class Color(enum.Enumeration):
... red = 0
... green = 1
... blue = 2
... __fallback__ = -1
>>> for color in Color:
...     print(color)
<Color: red>
<Color: green>
<Color: blue>
```
You can add data or actual methods to an enumeration class by wrapping
them with the #Data class.
```python
class Color(enum.Enumeration):
red = 0
green = 1
blue = 2
@enum.Data
@property
def astuple(self):
if self == Color.red:
return (1, 0, 0)
elif self == Color.green:
return (0, 1, 0)
elif self == Color.blue:
return (0, 0, 1)
else:
assert False
print(Color.red.astuple)
# (1, 0, 0)
```
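Lists and tuples in the class body are converted into collections of the
corresponding enumeration values (see #_EnumerationMeta). A short sketch;
the `warm` member is hypothetical:

```python
class Color(enum.Enumeration):
  red = 0
  green = 1
  blue = 2
  warm = [0]

print(Color.warm)
# [<Color: [0] red>]
```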
"""
import ctypes
import six
import sys
class NoSuchEnumerationValue(ValueError):
r""" Raised when an Enumeration object was attempted to be
created from an integer value but there was no enumeration
object for this value.
  Note that you can specify ``__fallback__`` on an
  Enumeration class to not let it raise an exception. """
pass
class Data(object):
"""
Small class that can be used to specify data on an enumeration that should
not be converted and interpreted as an enumeration value.
```python
class Color(enum.Enumeration):
red = 0
green = 1
blue = 2
@enum.Data
@property
def astuple(self):
if self == Color.red:
return (1, 0, 0)
elif self == Color.green:
return (0, 1, 0)
elif self == Color.blue:
return (0, 0, 1)
else:
assert False
print(Color.red.astuple)
# (1, 0, 0)
```
This class can be subclassed to add new sugar to the already very sweet pie.
"""
def __init__(self, value):
super(Data, self).__init__()
self.value = value
def unpack(self):
return self.value
class _EnumerationMeta(type):
"""
This is the meta class for the #Enumeration base class which handles the
automatic conversion of integer values to instances of the #Enumeration
  class. Only int, #Data, list and tuple members are allowed: #Data values
  are unpacked onto the class, and lists/tuples are converted to
  collections of enumeration values.
If a `__fallback__` was defined on class-level as an integer, the
#Enumeration constructor will not raise a #NoSuchEnumerationValue exception
if the passed value did not match the enumeration values, but instead return
that fallback value.
This fallback is not taken into account when attempting to create a new
#Enumeration object by a string.
"""
_values = None
__fallback__ = None
def __new__(cls, name, bases, data):
# Unpack all Data objects and create a dictionary of
# values that will be converted to instances of the
# enumeration class later.
enum_values = {}
collections = {}
for key, value in data.items():
# Unpack Data objects into the class.
if isinstance(value, Data):
data[key] = value.unpack()
# Integers will be enumeration values.
elif isinstance(value, int):
enum_values[key] = value
# Lists and tuples will be converted to
# collections of the Enumeration values.
elif isinstance(value, (list, tuple)):
collections[key] = value
# We don't accept anything else.
elif not key.startswith('_'):
message = 'Enumeration must consist of ints or Data objects ' \
'only, got %s for \'%s\''
raise TypeError(message % (value.__class__.__name__, key))
# Create the new class object and give it the dictionary
# that will map the integral values to the instances.
class_ = type.__new__(cls, name, bases, data)
class_._values = {}
# Iterate over all entries in the data entries and
# convert integral values to instances of the enumeration
# class.
for key, value in six.iteritems(enum_values):
# Create the new object. We must not use the classes'
# __new__() method as it resolves the object from the
# existing values.
obj = object.__new__(class_)
object.__init__(obj)
obj.value = value
obj.name = key
if key == '__fallback__':
obj.name = '-invalid-'
else:
class_._values[value] = obj
setattr(class_, key, obj)
# Convert the collections.
for key, value in six.iteritems(collections):
value = type(value)(class_(v) for v in value)
setattr(class_, key, value)
return class_
def __iter__(self):
" Iterator over value-sorted enumeration values. "
return iter(self.__values__())
def __values__(self):
return sorted(six.itervalues(self._values), key=lambda x: x.value)
def __getattr__(self, name):
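    # Resolve `Data` on the Enumeration base class itself (its only base is
    # object), so `Enumeration.Data` works without a separate import.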
if self.__bases__ == (object,) and name == 'Data':
return Data
raise AttributeError(name)
class Enumeration(six.with_metaclass(_EnumerationMeta)):
"""
This is the base class for listing enumerations. All components of the class
that are integers will be automatically converted to instances of the
#Enumeration class. Creating new instances of the class will only work if the
value is an existing enumeration value.
The hash of an enumeration value is its name, but indexing a container
corresponds to its value.
"""
def __new__(cls, value, _allow_fallback=True):
"""
Creates a new instance of the Enumeration. *value* must be the integral
number of one of the existing enumerations. #NoSuchEnumerationValue is
raised in any other case.
If a fallback was defined, it is returned only if *value* is an integer,
not if it is a string.
"""
# Try to find the actual instance of the Enumeration class
# for the integer value and return it if it is available.
if isinstance(value, six.integer_types):
try:
value = cls._values[value]
except KeyError:
# If a fallback value was specified, use it
# instead of raising an exception.
if _allow_fallback and cls.__fallback__ is not None:
return cls.__fallback__
raise NoSuchEnumerationValue(cls.__name__, value)
# Or by name?
elif isinstance(value, six.string_types):
try:
new_value = getattr(cls, value)
if type(new_value) != cls:
raise AttributeError
except AttributeError:
raise NoSuchEnumerationValue(cls.__name__, value)
value = new_value
# At this point, value must be an object of the Enumeration
# class, otherwise an invalid value was passed.
if type(value) == cls:
return value
raise TypeError('value must be %s or int, got %s' % (cls.__name__, type(value).__name__))
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if type(other) == self.__class__:
return other.value == self.value
elif isinstance(other, six.string_types):
return other == self.name
else:
return self.value == other
def __ne__(self, other):
return not (self == other)
def __int__(self):
return self.value
def __str__(self):
class_name = self.__class__.__name__
return '<%s: %s>' % (class_name, self.name)
def __repr__(self):
class_name = self.__class__.__name__
return '<%s: [%d] %s>' % (class_name, self.value, self.name)
def __index__(self):
return self.value
def __nonzero__(self):
return False
__bool__ = __nonzero__ # Py3
# ctypes support
@property
def _as_parameter_(self):
return ctypes.c_int(self.value)
@Data
@classmethod
def from_param(cls, obj):
if isinstance(obj, (int,) + six.string_types):
obj = cls(obj)
if type(obj) != cls:
c1 = cls.__name__
c2 = obj.__class__.__name__
raise TypeError('can not create %s from %s' % (c1, c2))
return ctypes.c_int(obj.value)
|
{
"content_hash": "329f23ffa46c1caeb7f6cef0d7168a81",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 93,
"avg_line_length": 26.41839762611276,
"alnum_prop": 0.6387734471526452,
"repo_name": "nr-plugins/c4ddev",
"id": "f915e95b1243d3fcc9678b5a81be211cdf51288f",
"size": "10025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/nr.types/src/nr/types/enum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "508"
},
{
"name": "Python",
"bytes": "54455"
}
],
"symlink_target": ""
}
|
from msrest.paging import Paged
class ExpressRouteCircuitAuthorizationPaged(Paged):
"""
    A paging container for iterating over a list of :class:`ExpressRouteCircuitAuthorization <azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitAuthorization>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[ExpressRouteCircuitAuthorization]'}
}
def __init__(self, *args, **kwargs):
super(ExpressRouteCircuitAuthorizationPaged, self).__init__(*args, **kwargs)
|
{
"content_hash": "fc2d29f7d45364ee608dd2dae0f8b227",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 173,
"avg_line_length": 36.125,
"alnum_prop": 0.6799307958477508,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "367fbd109d07d6cc3637b8c8ba5a0e7645bd2786",
"size": "1052",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/express_route_circuit_authorization_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
import unittest
from django.contrib import admin as django_admin
from django.test import client
from .. import admin, models
class ConfiguracaoAdminTestCase(unittest.TestCase):
def setUp(self):
factory = client.RequestFactory()
self.request = factory.get("/admin/configuracao/add")
self.request.user = None
def test_model_Configuracao_deve_estar_registrado_no_admin(self):
self.assertIn(models.Configuracao, django_admin.site._registry)
def test_model_Configuracao_eh_registrado_com_ConfiguracaoAdmin(self):
self.assertIsInstance(django_admin.site._registry[models.Configuracao],
admin.ConfiguracaoAdmin)
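    # Configuracao behaves like a singleton (note objects.get() with no
    # filter below), so the admin should expose neither add nor delete.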
def test_nao_deve_ser_possivel_adicionar_uma_instancia_pelo_admin(self):
cadmin = admin.ConfiguracaoAdmin(models.Configuracao.objects.get(),
None)
self.assertFalse(cadmin.has_add_permission(self.request))
def test_nao_deve_ser_possivel_apagar_uma_instancia_pelo_admin(self):
cadmin = admin.ConfiguracaoAdmin(models.Configuracao.objects.get(),
None)
self.assertFalse(cadmin.has_delete_permission(self.request))
|
{
"content_hash": "2aedf678eddd104e729725d2a5ef5f8d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 39.61290322580645,
"alnum_prop": 0.6767100977198697,
"repo_name": "devincachu/devincachu-2013",
"id": "e51db161934b631ededc3d31726388b9ff2f8e57",
"size": "1416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "devincachu/inscricao/tests/test_admin_configuracao.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "117956"
},
{
"name": "HTML",
"bytes": "58039"
},
{
"name": "JavaScript",
"bytes": "15639"
},
{
"name": "Python",
"bytes": "151979"
},
{
"name": "Shell",
"bytes": "289"
}
],
"symlink_target": ""
}
|
import tempfile
import unittest
import yaml
import config_helper
import print_config
class PrintConfigTest(unittest.TestCase):
def test_load_yaml_file(self):
schema = config_helper.Schema.load_yaml("""
properties:
propertyInt:
type: int
propertyString:
type: string
""")
with tempfile.NamedTemporaryFile('w', encoding='utf-8') as f:
f.write("""
propertyInt: 3
propertyString: abc
""")
f.flush()
values = config_helper.load_values(f.name, '/non/existence/dir', schema)
self.assertEqual({'propertyInt': 3, 'propertyString': 'abc'}, values)
def test_output_shell_vars(self):
self.assertEqual(
'$propertyInt $propertyString',
print_config.output_shell_vars({
'propertyInt': 1,
'propertyString': 'Value'
}))
def test_output_yaml(self):
values = {
'propertyInt': 1,
'propertyString': 'unnested',
'dotted.propertyInt': 2,
'dotted.propertyString': 'nested',
'dotted.dotted.propertyInt': 3,
'dotted.dotted.propertyString': 'double_nested',
'dotted.dotted.dotted.propertyInt': 4,
'dotted.dotted.dotted.propertyString': 'triple_nested',
}
actual = print_config.output_yaml(values)
self.assertEqual(
yaml.safe_load(actual), {
'propertyInt': 1,
'propertyString': 'unnested',
'dotted': {
'propertyInt': 2,
'propertyString': 'nested',
'dotted': {
'propertyInt': 3,
'propertyString': 'double_nested',
'dotted': {
'propertyInt': 4,
'propertyString': 'triple_nested',
},
},
}
})
def test_output_param(self):
values = {'name': 'name-1', 'namespace': 'namespace-1'}
schema = config_helper.Schema.load_yaml("""
properties:
name:
type: string
x-google-marketplace:
type: NAME
namespace:
type: string
x-google-marketplace:
type: NAMESPACE
""")
self.assertEqual('name',
print_config.output_xtype(values, schema, 'NAME', True))
self.assertEqual(
'namespace', print_config.output_xtype(values, schema, 'NAMESPACE',
True))
self.assertEqual('name-1',
print_config.output_xtype(values, schema, 'NAME', False))
self.assertEqual(
'namespace-1',
print_config.output_xtype(values, schema, 'NAMESPACE', False))
def test_output_param_multiple(self):
values = {'property1': 'Value1', 'property2': 'Value2'}
schema = config_helper.Schema.load_yaml("""
properties:
image1:
type: string
default: gcr.io/google/busybox:1.0
x-google-marketplace:
type: IMAGE
image2:
type: string
default: gcr.io/google/busybox:1.0
x-google-marketplace:
type: IMAGE
""")
self.assertRaises(
print_config.InvalidParameter,
lambda: print_config.output_xtype(values, schema, 'IMAGE', True))
|
{
"content_hash": "fdf22a147f897e27bf4179b1b4912391",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 31.03669724770642,
"alnum_prop": 0.5300029559562518,
"repo_name": "GoogleCloudPlatform/marketplace-k8s-app-tools",
"id": "9077476e4f610badb7144d394ee59fe87fdfcb95",
"size": "3959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marketplace/deployer_util/print_config_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7489"
},
{
"name": "Makefile",
"bytes": "22317"
},
{
"name": "Python",
"bytes": "263511"
},
{
"name": "Shell",
"bytes": "51665"
}
],
"symlink_target": ""
}
|
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from wagtail.wagtailcore.models import Page
from content.models import ContentPage
from kehmet.models import KehmetContentPage, KehmetFrontPage
cp_type = ContentType.objects.get_for_model(ContentPage)
k_root = Page.objects.get(url_path='/digietu/kehmet/')
pages = k_root.get_descendants().type(ContentPage)
dummy_page = Page(title="dummy", path="1234", slug="dummy-slug", depth=1)
def convert_page(page, target_model):
try:
page.kehmetcontentpage
return
    except Exception:
pass
kcp_type = ContentType.objects.get_for_model(target_model)
cp_page = page.specific
kcp_page = target_model(body=cp_page.body, page_ptr=page)
for f in kcp_page._meta.fields:
setattr(kcp_page, f.name, getattr(cp_page, f.name))
super(Page, kcp_page).save()
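    # Repoint the old ContentPage instance at the throwaway dummy page so
    # the real Page row (now owned by the new Kehmet page) is untouched by
    # the delete below.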
for f in dummy_page._meta.fields:
setattr(cp_page, f.name, getattr(dummy_page, f.name))
cp_page.page_ptr_id = dummy_page.id
cp_page.save()
print(page)
models.Model.delete(cp_page, keep_parents=True)
page.content_type = kcp_type
page.save(update_fields=['content_type'])
with transaction.atomic():
dummy_page.save()
if not isinstance(k_root, KehmetFrontPage):
convert_page(k_root, KehmetFrontPage)
for page in pages:
convert_page(page, KehmetContentPage)
p = Page.objects.get(id=dummy_page.id)
p.delete()
|
{
"content_hash": "12d8af5e8ed1745fcf8e4998b72e7f8b",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 30.604166666666668,
"alnum_prop": 0.6977535738597685,
"repo_name": "terotic/digihel",
"id": "4c87028ef44c1a513fd0ec317d7c3400c5e92a6c",
"size": "1469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/migrate_kehmet_pages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "139197"
},
{
"name": "CoffeeScript",
"bytes": "2442"
},
{
"name": "HTML",
"bytes": "64627"
},
{
"name": "JavaScript",
"bytes": "48715"
},
{
"name": "Python",
"bytes": "124367"
}
],
"symlink_target": ""
}
|
"""
Utility base TestCase classes for testing manage views.
"""
from datetime import datetime
from . import base
class ListViewTestCase(base.FormViewTestCase, base.ListViewTestCase):
"""Base class for testing manage list views."""
# subclasses should specify these:
perm = None # required management permission codename
def assertActionRequiresPermission(self, action, permission=None):
"""Assert that the given list action requires the given permission."""
if permission is None:
permission = self.perm
o = self.factory.create()
form = self.get_form()
name = "action-{0}".format(action)
# action button not shown to the user
self.assertTrue(name not in form.fields)
# ...but if they cleverly submit it anyway they get a 403...
res = self.post(
{
name: str(o.id),
"csrfmiddlewaretoken":
form.fields.get("csrfmiddlewaretoken")[0].value
},
status=403,
)
# ...with a message about permissions.
res.mustcontain("permission")
def test_delete(self):
"""Can delete objects from list."""
self.add_perm(self.perm)
o = self.factory.create()
self.get_form().submit(
name="action-delete",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"}
)
self.assertTrue(bool(self.refresh(o).deleted_on))
def test_delete_requires_permission(self):
"""Deleting requires appropriate permission."""
self.assertActionRequiresPermission("delete")
def test_create_link(self):
"""With proper perm, create link is there."""
self.add_perm(self.perm)
res = self.get()
self.assertElement(res.html, "a", "create")
def test_create_link_requires_perms(self):
"""Without proper perm, create link is not there."""
res = self.get()
self.assertElement(res.html, "a", "create", count=0)
class MTModelListTests(object):
"""Additional manage list view tests for MTModels."""
def test_clone(self):
"""Can clone objects in list."""
self.add_perm(self.perm)
self.factory.create()
res = self.get_form().submit(
name="action-clone",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertElement(
res.json["html"], "h3", "title", count=2)
def test_clone_requires_permission(self):
"""Cloning requires appropriate permission."""
self.assertActionRequiresPermission("clone")
def test_filter_by_creator(self):
"""Can filter by creator."""
self.factory.create(name="Foo 1", user=self.user)
self.factory.create(name="Foo 2")
res = self.get(params={"filter-creator": self.user.id})
self.assertInList(res, "Foo 1")
self.assertNotInList(res, "Foo 2")
def test_default_sort_by_last_created(self):
"""Default sort is by latest created first."""
self.factory.create(
name="Foo 1", created_on=datetime(2012, 1, 21))
self.factory.create(
name="Foo 2", created_on=datetime(2012, 1, 22))
res = self.get()
self.assertOrderInList(res, "Foo 2", "Foo 1")
class StatusListTests(object):
"""Extra tests for manage lists with activated/deactivate actions."""
def test_activate(self):
"""Can activate objects in list."""
self.add_perm(self.perm)
s = self.factory.create(status="draft")
self.get_form().submit(
name="action-activate",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertEqual(self.refresh(s).status, "active")
def test_activate_requires_permission(self):
"""Activating requires appropriate permission."""
self.assertActionRequiresPermission("activate", self.perm)
def test_draft(self):
"""Can make-draft objects in list."""
self.add_perm(self.perm)
s = self.factory.create(status="active")
self.get_form().submit(
name="action-draft",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertEqual(self.refresh(s).status, "draft")
def test_draft_requires_permission(self):
"""Resetting to draft requires appropriate permission."""
self.assertActionRequiresPermission("draft", self.perm)
def test_deactivate(self):
"""Can deactivate objects in list."""
self.add_perm(self.perm)
s = self.factory.create(status="active")
self.get_form().submit(
name="action-deactivate",
index=0,
headers={"X-Requested-With": "XMLHttpRequest"},
)
self.assertEqual(self.refresh(s).status, "disabled")
def test_deactivate_requires_permission(self):
"""Deactivating requires appropriate permission."""
self.assertActionRequiresPermission("deactivate", self.perm)
|
{
"content_hash": "22e122177fe6edb1b87643f3ee399126",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 78,
"avg_line_length": 27.754010695187166,
"alnum_prop": 0.5947976878612716,
"repo_name": "mccarrmb/moztrap",
"id": "beffd6e767f26b666fcb15b0524a7874180d97d0",
"size": "5190",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/case/view/manage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "713098"
},
{
"name": "HTML",
"bytes": "1056025"
},
{
"name": "JavaScript",
"bytes": "270285"
},
{
"name": "Python",
"bytes": "2090049"
},
{
"name": "Ruby",
"bytes": "464"
},
{
"name": "Shell",
"bytes": "867"
}
],
"symlink_target": ""
}
|
"""Volume manager service, the main entry point that manages volumes."""
from __future__ import absolute_import
import os
import json
import stat
from uuid import UUID, uuid4
from characteristic import attributes
from twisted.python.filepath import FilePath
from twisted.application.service import Service
from twisted.internet.endpoints import ProcessEndpoint, connectProtocol
from twisted.internet import reactor
from twisted.internet.defer import fail
from twisted.internet.task import LoopingCall
# We might want to make these utilities shared, rather than in zfs
# module... but in this case the usage is temporary and should go away as
# part of https://github.com/ClusterHQ/flocker/issues/64
from .filesystems.zfs import _AccumulatingProtocol, CommandFailed
DEFAULT_CONFIG_PATH = FilePath(b"/etc/flocker/volume.json")
WAIT_FOR_VOLUME_INTERVAL = 0.1
class CreateConfigurationError(Exception):
"""Create the configuration file failed."""
class VolumeService(Service):
"""Main service for volume management.
:ivar unicode uuid: A unique identifier for this particular node's
volume manager. Only available once the service has started.
"""
def __init__(self, config_path, pool, reactor):
"""
:param FilePath config_path: Path to the volume manager config file.
:param pool: A `flocker.volume.filesystems.interface.IStoragePool`
provider.
:param reactor: A ``twisted.internet.interface.IReactorTime`` provider.
"""
self._config_path = config_path
self._pool = pool
self._reactor = reactor
def startService(self):
parent = self._config_path.parent()
try:
if not parent.exists():
parent.makedirs()
if not self._config_path.exists():
uuid = unicode(uuid4())
self._config_path.setContent(json.dumps({u"uuid": uuid,
u"version": 1}))
except OSError as e:
raise CreateConfigurationError(e.args[1])
config = json.loads(self._config_path.getContent())
self.uuid = config[u"uuid"]
def create(self, name):
"""Create a new volume.
:param unicode name: The name of the volume.
:return: A ``Deferred`` that fires with a :class:`Volume`.
"""
volume = Volume(uuid=self.uuid, name=name, _pool=self._pool)
d = self._pool.create(volume)
def created(filesystem):
filesystem.get_path().chmod(
# 0o777 the long way:
stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
return volume
d.addCallback(created)
return d
def get(self, name):
"""
Return a locally-owned ``Volume`` with the given name.
Whether or not this volume actually exists is not checked in any
way.
:param unicode name: The name of the volume.
:return: A ``Volume``.
"""
return Volume(uuid=self.uuid, name=name, _pool=self._pool)
def wait_for_volume(self, name):
"""
        Wait for a volume by the given name, owned by this service, to exist.
Polls the storage pool for the specified volume to appear.
:param unicode name: The name of the volume.
:return: A ``Deferred`` that fires with a :class:`Volume`.
"""
volume = Volume(uuid=self.uuid, name=name, _pool=self._pool)
def check_for_volume(volumes):
if volume in volumes:
call.stop()
def loop():
d = self.enumerate()
d.addCallback(check_for_volume)
return d
call = LoopingCall(loop)
call.clock = self._reactor
d = call.start(WAIT_FOR_VOLUME_INTERVAL)
d.addCallback(lambda _: volume)
return d
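    # Illustrative usage (names hypothetical): wait until a volume pushed
    # from another node shows up locally, then hand it to Docker.
    #
    #     d = service.wait_for_volume(u"myapp-data")
    #     d.addCallback(lambda vol: vol.expose_to_docker(FilePath(b"/data")))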
def enumerate(self):
"""Get a listing of all volumes managed by this service.
:return: A ``Deferred`` that fires with an iterator of :class:`Volume`.
"""
enumerating = self._pool.enumerate()
def enumerated(filesystems):
for filesystem in filesystems:
# XXX It so happens that this works but it's kind of a
# fragile way to recover the information:
# https://github.com/ClusterHQ/flocker/issues/78
basename = filesystem.get_path().basename()
try:
uuid, name = basename.split(b".", 1)
uuid = UUID(uuid)
except ValueError:
# If we can't split on `.` and get two parts then it's not
# a filesystem Flocker is managing. Likewise if we can't
# interpret the bit before the `.` as a UUID. Perhaps a
# user created it, who knows. Just ignore it.
continue
# Probably shouldn't yield this volume if the uuid doesn't
# match this service's uuid.
yield Volume(
uuid=unicode(uuid),
name=name.decode('utf8'),
_pool=self._pool)
enumerating.addCallback(enumerated)
return enumerating
def push(self, volume, destination, config_path=DEFAULT_CONFIG_PATH):
"""
Push the latest data in the volume to a remote destination.
This is a blocking API for now.
Only locally owned volumes (i.e. volumes whose ``uuid`` matches
this service's) can be pushed.
:param Volume volume: The volume to push.
:param IRemoteVolumeManager destination: The remote volume manager
to push to.
:param FilePath config_path: Path to configuration file for the
remote ``flocker-volume``.
:raises ValueError: If the uuid of the volume is different than
our own; only locally-owned volumes can be pushed.
"""
if volume.uuid != self.uuid:
raise ValueError()
fs = volume.get_filesystem()
with destination.receive(volume) as receiver:
with fs.reader() as contents:
for chunk in iter(lambda: contents.read(1024 * 1024), b""):
receiver.write(chunk)
def receive(self, volume_uuid, volume_name, input_file):
"""
Process a volume's data that can be read from a file-like object.
This is a blocking API for now.
Only remotely owned volumes (i.e. volumes whose ``uuid`` do not match
this service's) can be received.
:param unicode volume_uuid: The volume's UUID.
:param unicode volume_name: The volume's name.
:param input_file: A file-like object, typically ``sys.stdin``, from
which to read the data.
:raises ValueError: If the uuid of the volume matches our own;
remote nodes can't overwrite locally-owned volumes.
"""
if volume_uuid == self.uuid:
raise ValueError()
volume = Volume(uuid=volume_uuid, name=volume_name, _pool=self._pool)
with volume.get_filesystem().writer() as writer:
for chunk in iter(lambda: input_file.read(1024 * 1024), b""):
writer.write(chunk)
def acquire(self, volume_uuid, volume_name):
"""
Take ownership of a volume.
This is a blocking API for now.
Only remotely owned volumes (i.e. volumes whose ``uuid`` do not match
this service's) can be acquired.
:param unicode volume_uuid: The volume owner's UUID.
:param unicode volume_name: The volume's name.
:return: ``Deferred`` that fires on success, or errbacks with
``ValueError`` If the uuid of the volume matches our own.
"""
if volume_uuid == self.uuid:
return fail(ValueError("Can't acquire already-owned volume"))
volume = Volume(uuid=volume_uuid, name=volume_name, _pool=self._pool)
return volume.change_owner(self.uuid)
def handoff(self, volume, destination):
"""
Handoff a locally owned volume to a remote destination.
The remote destination will be the new owner of the volume.
This is a blocking API for now (but it does return a ``Deferred``
for success/failure).
:param Volume volume: The volume to handoff.
:param IRemoteVolumeManager destination: The remote volume manager
to handoff to.
:return: ``Deferred`` that fires when the handoff has finished, or
            errbacks on error (specifically with a ``ValueError`` if the
volume is not locally owned).
"""
try:
self.push(volume, destination)
except ValueError:
return fail()
remote_uuid = destination.acquire(volume)
return volume.change_owner(remote_uuid)
# Communication with Docker should be done via its API, not with this
# approach, but that depends on unreleased Twisted 14.1:
# https://github.com/ClusterHQ/flocker/issues/64
def _docker_command(reactor, arguments):
"""Run the ``docker`` command-line tool with the given arguments.
:param reactor: A ``IReactorProcess`` provider.
:param arguments: A ``list`` of ``bytes``, command-line arguments to
``docker``.
:return: A :class:`Deferred` firing with the bytes of the result (on
exit code 0), or errbacking with :class:`CommandFailed` or
:class:`BadArguments` depending on the exit code (1 or 2).
"""
endpoint = ProcessEndpoint(reactor, b"docker", [b"docker"] + arguments,
os.environ)
d = connectProtocol(endpoint, _AccumulatingProtocol())
d.addCallback(lambda protocol: protocol._result)
return d
@attributes(["uuid", "name", "_pool"])
class Volume(object):
"""A data volume's identifier.
:ivar unicode uuid: The UUID of the volume manager that owns this volume.
:ivar unicode name: The name of the volume. Since volume names must
match Docker container names, the characters used should be limited to
those that Docker allows for container names.
:ivar _pool: A `flocker.volume.filesystems.interface.IStoragePool`
provider where the volume's filesystem is stored.
"""
def change_owner(self, new_owner_uuid):
"""
Change which volume manager owns this volume.
:param unicode new_owner_uuid: The UUID of the new owner.
:return: ``Deferred`` that fires with a new :class:`Volume`
instance once the ownership has been changed.
"""
new_volume = Volume(uuid=new_owner_uuid, name=self.name,
_pool=self._pool)
d = self._pool.change_owner(self, new_volume)
def filesystem_changed(_):
return new_volume
d.addCallback(filesystem_changed)
return d
def get_filesystem(self):
"""Return the volume's filesystem.
:return: The ``IFilesystem`` provider for the volume.
"""
return self._pool.get(self)
@property
def _container_name(self):
"""Return the corresponding Docker container name.
:return: Container name as ``bytes``.
"""
return b"%s-data" % (self.name.encode("ascii"),)
def expose_to_docker(self, mount_path):
"""
Create a container that will expose the volume to Docker at the given
mount path.
Can be called multiple times. Mount paths from previous calls will
be overridden.
:param FilePath mount_path: The path at which to mount the volume
within the container.
:return: ``Deferred`` firing when the operation is done.
"""
local_path = self.get_filesystem().get_path().path
mount_path = mount_path.path
d = self.remove_from_docker()
d.addCallback(
lambda _:
_docker_command(reactor,
[b"run", b"--name", self._container_name,
b"--volume=%s:%s:rw" % (local_path,
mount_path),
b"busybox", b"/bin/true"]))
return d
def remove_from_docker(self):
"""
Remove the Docker container created for the volume.
If no container exists this will silently do nothing.
:return: ``Deferred`` firing with ``None`` when the operation is
done.
"""
d = _docker_command(reactor, [b"rm", self._container_name])
d.addErrback(lambda failure: failure.trap(CommandFailed))
d.addCallback(lambda _: None)
return d
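# Minimal bootstrap sketch (illustrative only; ``pool`` must be an
# ``IStoragePool`` provider and the service is normally wired up by flocker's
# own startup scripts, not by hand):
#
#     service = VolumeService(DEFAULT_CONFIG_PATH, pool, reactor)
#     service.startService()
#     d = service.create(u"myapp-data")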
|
{
"content_hash": "b53d112059758f80f5ed13b8e8d272fe",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 79,
"avg_line_length": 35.848314606741575,
"alnum_prop": 0.6019432690800814,
"repo_name": "beni55/flocker",
"id": "e147d3b43a22b7542f2f8dfd03ce7b2d0852bcfe",
"size": "12883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flocker/volume/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "540895"
},
{
"name": "Ruby",
"bytes": "797"
},
{
"name": "Shell",
"bytes": "3744"
}
],
"symlink_target": ""
}
|
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class KeystoneProject(resource.Resource):
"""Heat Template Resource for Keystone Project."""
support_status = support.SupportStatus(
version='2015.1',
message=_('Supported versions: keystone v3'))
default_client_name = 'keystone'
entity = 'projects'
PROPERTIES = (
NAME, DOMAIN, DESCRIPTION, ENABLED
) = (
'name', 'domain', 'description', 'enabled'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keystone project.'),
update_allowed=True
),
DOMAIN: properties.Schema(
properties.Schema.STRING,
_('Name or id of keystone domain.'),
default='default',
update_allowed=True,
constraints=[constraints.CustomConstraint('keystone.domain')]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of keystone project.'),
default='',
update_allowed=True
),
ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('This project is enabled or disabled.'),
default=True,
update_allowed=True
)
}
def client(self):
return super(KeystoneProject, self).client().client
def _create_project(self,
project_name,
description,
domain,
enabled):
domain = self.client_plugin().get_domain_id(domain)
return self.client().projects.create(
name=project_name,
domain=domain,
description=description,
enabled=enabled)
def _update_project(self,
project_id,
domain,
new_name=None,
new_description=None,
enabled=None):
values = dict()
if new_name is not None:
values['name'] = new_name
if new_description is not None:
values['description'] = new_description
if enabled is not None:
values['enabled'] = enabled
values['project'] = project_id
domain = self.client_plugin().get_domain_id(domain)
values['domain'] = domain
return self.client().projects.update(**values)
def handle_create(self):
project_name = (self.properties.get(self.NAME) or
self.physical_resource_name())
description = self.properties.get(self.DESCRIPTION)
domain = self.properties.get(self.DOMAIN)
enabled = self.properties.get(self.ENABLED)
project = self._create_project(
project_name=project_name,
description=description,
domain=domain,
enabled=enabled
)
self.resource_id_set(project.id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
name = prop_diff.get(self.NAME) or self.physical_resource_name()
description = prop_diff.get(self.DESCRIPTION)
enabled = prop_diff.get(self.ENABLED)
domain = (prop_diff.get(self.DOMAIN) or
self._stored_properties_data.get(self.DOMAIN))
self._update_project(
project_id=self.resource_id,
domain=domain,
new_name=name,
new_description=description,
enabled=enabled
)
def resource_mapping():
return {
'OS::Keystone::Project': KeystoneProject
}
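# For illustration only (not part of this module): in a HOT template the
# resource above would typically be declared along these lines, using the
# property names from PROPERTIES.
#
#     resources:
#       demo_project:
#         type: OS::Keystone::Project
#         properties:
#           name: demo-project
#           domain: default
#           description: An example project
#           enabled: true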
|
{
"content_hash": "9dda04285b6a7f2d102cdb417d5307fc",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 73,
"avg_line_length": 30.24,
"alnum_prop": 0.5603174603174603,
"repo_name": "takeshineshiro/heat",
"id": "fa7a97b81a8af888c46ab09468a5c6ae8dd23a42",
"size": "4355",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/keystone/project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6735948"
},
{
"name": "Shell",
"bytes": "33316"
}
],
"symlink_target": ""
}
|
import operator
from .. import inspect
from .. import util
from ..sql import and_
from ..sql import operators
class UnevaluatableError(Exception):
pass
class _NoObject(operators.ColumnOperators):
def operate(self, *arg, **kw):
return None
def reverse_operate(self, *arg, **kw):
return None
_NO_OBJECT = _NoObject()
_straight_ops = set(
getattr(operators, op)
for op in (
"add",
"mul",
"sub",
"div",
"mod",
"truediv",
"lt",
"le",
"ne",
"gt",
"ge",
"eq",
)
)
_extended_ops = {
operators.in_op: (lambda a, b: a in b if a is not _NO_OBJECT else None),
operators.not_in_op: (
lambda a, b: a not in b if a is not _NO_OBJECT else None
),
}
_notimplemented_ops = set(
getattr(operators, op)
for op in (
"like_op",
"not_like_op",
"ilike_op",
"not_ilike_op",
"startswith_op",
"between_op",
"endswith_op",
"concat_op",
)
)
class EvaluatorCompiler(object):
def __init__(self, target_cls=None):
self.target_cls = target_cls
def process(self, *clauses):
if len(clauses) > 1:
clause = and_(*clauses)
elif clauses:
clause = clauses[0]
meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
if not meth:
raise UnevaluatableError(
"Cannot evaluate %s" % type(clause).__name__
)
return meth(clause)
def visit_grouping(self, clause):
return self.process(clause.element)
def visit_null(self, clause):
return lambda obj: None
def visit_false(self, clause):
return lambda obj: False
def visit_true(self, clause):
return lambda obj: True
def visit_column(self, clause):
if "parentmapper" in clause._annotations:
parentmapper = clause._annotations["parentmapper"]
if self.target_cls and not issubclass(
self.target_cls, parentmapper.class_
):
raise UnevaluatableError(
"Can't evaluate criteria against alternate class %s"
% parentmapper.class_
)
key = parentmapper._columntoproperty[clause].key
else:
key = clause.key
if (
self.target_cls
and key in inspect(self.target_cls).column_attrs
):
util.warn(
"Evaluating non-mapped column expression '%s' onto "
"ORM instances; this is a deprecated use case. Please "
"make use of the actual mapped columns in ORM-evaluated "
"UPDATE / DELETE expressions." % clause
)
else:
raise UnevaluatableError("Cannot evaluate column: %s" % clause)
get_corresponding_attr = operator.attrgetter(key)
return (
lambda obj: get_corresponding_attr(obj)
if obj is not None
else _NO_OBJECT
)
def visit_tuple(self, clause):
return self.visit_clauselist(clause)
def visit_clauselist(self, clause):
evaluators = list(map(self.process, clause.clauses))
if clause.operator is operators.or_:
def evaluate(obj):
has_null = False
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value:
return True
has_null = has_null or value is None
if has_null:
return None
return False
elif clause.operator is operators.and_:
def evaluate(obj):
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if not value:
if value is None or value is _NO_OBJECT:
return None
return False
return True
elif clause.operator is operators.comma_op:
def evaluate(obj):
values = []
for sub_evaluate in evaluators:
value = sub_evaluate(obj)
if value is None or value is _NO_OBJECT:
return None
values.append(value)
return tuple(values)
else:
raise UnevaluatableError(
"Cannot evaluate clauselist with operator %s" % clause.operator
)
return evaluate
def visit_binary(self, clause):
eval_left, eval_right = list(
map(self.process, [clause.left, clause.right])
)
operator = clause.operator
if operator is operators.is_:
def evaluate(obj):
return eval_left(obj) == eval_right(obj)
elif operator is operators.is_not:
def evaluate(obj):
return eval_left(obj) != eval_right(obj)
elif operator in _extended_ops:
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is None or right_val is None:
return None
return _extended_ops[operator](left_val, right_val)
elif operator in _straight_ops:
def evaluate(obj):
left_val = eval_left(obj)
right_val = eval_right(obj)
if left_val is None or right_val is None:
return None
                return operator(left_val, right_val)
else:
raise UnevaluatableError(
"Cannot evaluate %s with operator %s"
% (type(clause).__name__, clause.operator)
)
return evaluate
def visit_unary(self, clause):
eval_inner = self.process(clause.element)
if clause.operator is operators.inv:
def evaluate(obj):
value = eval_inner(obj)
if value is None:
return None
return not value
return evaluate
raise UnevaluatableError(
"Cannot evaluate %s with operator %s"
% (type(clause).__name__, clause.operator)
)
def visit_bindparam(self, clause):
if clause.callable:
val = clause.callable()
else:
val = clause.value
return lambda obj: val
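# Illustrative sketch (``User`` is a hypothetical mapped class with ``name``
# and ``age`` columns): EvaluatorCompiler turns SQL criteria into a plain
# Python predicate over instances, using SQL-style three-valued logic.
#
#     compiler = EvaluatorCompiler(User)
#     predicate = compiler.process(User.name == "alice", User.age > 30)
#     predicate(some_user)  # -> True, False, or None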
|
{
"content_hash": "573f619a58421b80eb61a263fa563dc6",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 79,
"avg_line_length": 28.264957264957264,
"alnum_prop": 0.511037193831267,
"repo_name": "monetate/sqlalchemy",
"id": "69d80dd8bdbaae82728fec838b21e49c4f5f0e18",
"size": "6852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/orm/evaluator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49142"
},
{
"name": "Python",
"bytes": "11790244"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
try:
from urlparse import urlparse, parse_qs
except ImportError: # Python 3
from urllib.parse import urlparse, parse_qs
import os
from didel.base import DidelEntity
from didel.fileutils import date2timestamp, mkdir_p, file_mtime
from didel.souputils import parse_homemade_dl
def parse_query(url):
return parse_qs(urlparse(url).query)
class CoursePage(DidelEntity):
"""
A common base for Course-related pages
"""
def __init__(self, ref):
super(CoursePage, self).__init__()
if ref.startswith('/'):
self.path = ref
else:
self.path = self.URL_FMT.format(ref=ref)
self.ref = ref
class CourseAssignment(CoursePage):
"""
A course assignment
"""
def __init__(self, path, course_code):
super(CourseAssignment, self).__init__(path)
self.course_code = course_code
def populate(self, soup, session, **kw):
content = soup.select('#courseRightContent')[0]
attrs = parse_homemade_dl(content.select('p small')[0])
self.title = attrs.get('titre')
self.begin = attrs.get('du')
self.end = attrs.get('au')
self.submission_type = attrs.get('type de soumission')
self.work_type = attrs.get('type de travail')
self.visibility = attrs.get(u'visibilit\xe9 de la soumission')
self.assig_id = parse_query(self.path)['assigId'][0]
def submit(self, student, title, datafile, description=''):
"""
Create a new submission for this assignment
- ``student``: a ``Student`` object for the currently connected user
- ``title``: the assignment's title
- ``datafile``: an open file-like object for the attachment
- ``description``: an optional description
"""
authors = '%s %s' % (student.lastname, student.firstname)
data = {
'claroFormId': '42',
'cmd': 'exSubWrk',
'cidReset': 'true',
'cidReq': self.course_code,
'wrkTitle': title,
'wrkAuthor': authors,
'wrkTxt': description,
'submitWrk': 'Ok',
}
files = {
'wrkFile': datafile
}
path_fmt = '/claroline/work/user_work.php?assigId={aid}&authId={uid}'
path = path_fmt.format(aid=self.assig_id, uid=student.auth_id)
resp = self.session.post(path, data=data, files=files)
return resp.ok and title in resp.text
class CourseAssignments(CoursePage, list):
"""
Assignments list for a course
"""
URL_FMT = '/claroline/work/work.php?cidReset=true&cidReq={ref}'
def populate(self, soup, session):
trs = soup.select('#courseRightContent table tbody tr')
path_fmt = '/claroline/work/%s'
for tr in trs:
path = path_fmt % tr.select('a')[0].attrs['href']
self.append(CourseAssignment(path, self.ref))
class Course(CoursePage):
"""
A course. It has the following attributes: ``title``, ``teacher``,
``about`` and the following sub-resources:
- ``assignments``
Additionally, it keeps a reference to its student with ``student``
"""
URL_FMT = '/claroline/course/index.php?cid={ref}&cidReset=true&cidReq={ref}'
def __init__(self, ref, student=None):
"""
Create a new course from a reference, and an optional student
"""
super(Course, self).__init__(ref)
self.student = student
self.add_resource('assignments', CourseAssignments(ref))
def populate(self, soup, session):
header = soup.select('.courseInfos')[0]
self.title = header.select('h2 a')[0].get_text()
self.teacher = header.select('p')[0].get_text().split('\n')[-1].strip()
about = soup.select('#portletAbout')
if about:
self.about = about[0].get_text().strip()
def synchronize_docs(self, path, session):
"""
Synchronize the documents in the given path with the ones from the
courses followed by the student. The path will be created and populated
if it doesn't exist.
"""
d = CourseDocuments(self.ref)
d.fetch(session)
d.synchronize(path)
def enroll(self, key=None):
"""
Enroll the current student in this course. Some courses require a key
to enroll, give it as ``key``.
"""
path = '/claroline/auth/courses.php'
ok_text = u'Vous êtes désormais inscrit'
params = {'cmd': 'exReg', 'course': self.ref}
if not key:
return self.session.get_ensure_text(path, ok_text, params=params)
data = params.copy()
        data['registrationKey'] = key
resp = self.session.post(path, params=params, data=data)
return resp.ok and ok_text in resp.text
def unenroll(self):
"""
Unenroll the current student from this course.
"""
path = '/claroline/auth/courses.php'
text = u'Vous avez été désinscrit'
params = {'cmd': 'exUnreg', 'course': self.ref}
return self.session.get_ensure_text(path, text, params=params)
class CoursesMainPage(DidelEntity, list):
"""
DidEL's student homepage
"""
def __init__(self):
super(CoursesMainPage, self).__init__()
self.path = '/'
def populate(self, soup, *args, **kw):
for ref in soup.select("dt a"):
href = ref.get("href")
if not href:
continue
cid = parse_query(href).get("cid")
if not cid:
continue
self.append(Course(cid[0]))
class CourseDocuments(DidelEntity):
URL_FMT = '/claroline/document/document.php?cidReset=true&cidReq={ref}'
def __init__(self, ref, path=None):
super(CourseDocuments, self).__init__()
self.ressources = {}
self.ref = ref
        if path:
self.path = path
self.ref = ""
else:
self.path = self.URL_FMT.format(ref=ref)
def populate(self, soup, session):
"""
Get all documents and folder from a course.
"""
table = soup.select(".claroTable tbody tr[align=center]")
for line in table:
cols = line.select("td")
item = cols[0].select(".item")[0]
name = item.contents[1].strip()
date = cols[2].select("small")[0].contents[0].strip()
url = cols[0].select("a")[0].attrs["href"].strip()
if item.select("img[src^=/web/img/folder.png]"):
doc = CourseDocuments("", url)
                doc.fetch(session)
else:
doc = CourseDocument(name, url, date)
self.add_resource(name, doc)
def synchronize(self, path):
"""
        Compare the files on DidEL with the files in the local folder and
        download a file only if it does not exist locally or if the remote
        copy is newer.
"""
path = "%s/%s" % (path, self.ref)
mkdir_p(path)
for k, resource in self._resources.items():
if isinstance(resource, CourseDocuments):
resource.synchronize("%s/%s" % (path, k))
else:
no_file = not os.path.exists("%s/%s" % (path, k))
didel_time = date2timestamp(resource.date)
if no_file or didel_time > file_mtime("%s/%s" % (path, k)):
self.download(resource, path)
# TODO move this on the CourseDocument class
def download(self, document, path):
"""
Download a document in a given path, provided that the parent
directories already exist
"""
response = self.session.get(document.url)
document.path = "%s/%s" % (path, document.name)
        # response.content is bytes, so write in binary mode.
        with open(document.path, 'wb') as f:
            f.write(response.content)
class CourseDocument(object):
def __init__(self, name, url, date):
self.name = name
self.url = url
self.date = date
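# Hypothetical end-to-end usage (``session`` is assumed to be an
# authenticated DidEL session object; all names are illustrative):
#
#     course = Course('UE-FOO', student=student)
#     course.fetch(session)
#     course.enroll(key='s3cret')
#     course.synchronize_docs('/home/me/didel-docs', session)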
|
{
"content_hash": "3ea49ff2bce726ffa2290cbd6d6a5e8f",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 80,
"avg_line_length": 31.234615384615385,
"alnum_prop": 0.5707425193941633,
"repo_name": "bfontaine/didelcli",
"id": "b92c4cee039abb541168d9ecf51c30f839ad91f9",
"size": "8151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "didel/courses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "51605"
}
],
"symlink_target": ""
}
|
import re
from typing import List
from gitlint.git import GitCommit
from gitlint.options import StrOption
from gitlint.rules import CommitMessageTitle, LineRule, RuleViolation
# Word list from https://github.com/m1foley/fit-commit
# Copyright (c) 2015 Mike Foley
# License: MIT
# Ref: fit_commit/validators/tense.rb
WORD_SET = {
'adds', 'adding', 'added',
'allows', 'allowing', 'allowed',
'amends', 'amending', 'amended',
'bumps', 'bumping', 'bumped',
'calculates', 'calculating', 'calculated',
'changes', 'changing', 'changed',
'cleans', 'cleaning', 'cleaned',
'commits', 'committing', 'committed',
'corrects', 'correcting', 'corrected',
'creates', 'creating', 'created',
'darkens', 'darkening', 'darkened',
'disables', 'disabling', 'disabled',
'displays', 'displaying', 'displayed',
'documents', 'documenting', 'documented',
'drys', 'drying', 'dryed',
'ends', 'ending', 'ended',
'enforces', 'enforcing', 'enforced',
'enqueues', 'enqueuing', 'enqueued',
'extracts', 'extracting', 'extracted',
'finishes', 'finishing', 'finished',
'fixes', 'fixing', 'fixed',
'formats', 'formatting', 'formatted',
'guards', 'guarding', 'guarded',
'handles', 'handling', 'handled',
'hides', 'hiding', 'hid',
'increases', 'increasing', 'increased',
'ignores', 'ignoring', 'ignored',
'implements', 'implementing', 'implemented',
'improves', 'improving', 'improved',
'keeps', 'keeping', 'kept',
'kills', 'killing', 'killed',
'makes', 'making', 'made',
'merges', 'merging', 'merged',
'moves', 'moving', 'moved',
'permits', 'permitting', 'permitted',
'prevents', 'preventing', 'prevented',
'pushes', 'pushing', 'pushed',
'rebases', 'rebasing', 'rebased',
'refactors', 'refactoring', 'refactored',
'removes', 'removing', 'removed',
'renames', 'renaming', 'renamed',
'reorders', 'reordering', 'reordered',
'replaces', 'replacing', 'replaced',
'requires', 'requiring', 'required',
'restores', 'restoring', 'restored',
'sends', 'sending', 'sent',
'sets', 'setting',
'separates', 'separating', 'separated',
'shows', 'showing', 'showed',
'simplifies', 'simplifying', 'simplified',
'skips', 'skipping', 'skipped',
'sorts', 'sorting',
'speeds', 'speeding', 'sped',
'starts', 'starting', 'started',
'supports', 'supporting', 'supported',
'takes', 'taking', 'took',
    'testing', 'tested',  # 'tests' excluded to reduce false negatives
'truncates', 'truncating', 'truncated',
'updates', 'updating', 'updated',
'uses', 'using', 'used',
}
imperative_forms = [
'add', 'allow', 'amend', 'bump', 'calculate', 'change', 'clean', 'commit',
'correct', 'create', 'darken', 'disable', 'display', 'document', 'dry',
'end', 'enforce', 'enqueue', 'extract', 'finish', 'fix', 'format', 'guard',
'handle', 'hide', 'ignore', 'implement', 'improve', 'increase', 'keep',
'kill', 'make', 'merge', 'move', 'permit', 'prevent', 'push', 'rebase',
'refactor', 'remove', 'rename', 'reorder', 'replace', 'require', 'restore',
'send', 'separate', 'set', 'show', 'simplify', 'skip', 'sort', 'speed',
'start', 'support', 'take', 'test', 'truncate', 'update', 'use',
]
imperative_forms.sort()
def head_binary_search(key: str, words: List[str]) -> str:
""" Find the imperative mood version of `word` by looking at the first
3 characters. """
# Edge case: 'disable' and 'display' have the same 3 starting letters.
if key in ['displays', 'displaying', 'displayed']:
return 'display'
lower = 0
upper = len(words) - 1
while True:
if lower > upper:
# Should not happen
raise Exception(f"Cannot find imperative mood of {key}")
mid = (lower + upper) // 2
imperative_form = words[mid]
if key[:3] == imperative_form[:3]:
return imperative_form
elif key < imperative_form:
upper = mid - 1
elif key > imperative_form:
lower = mid + 1
class ImperativeMood(LineRule):
""" This rule will enforce that the commit message title uses imperative
mood. This is done by checking if the first word is in `WORD_SET`, if so
show the word in the correct mood. """
name = "title-imperative-mood"
id = "Z1"
target = CommitMessageTitle
error_msg = ('The first word in commit title should be in imperative mood '
'("{word}" -> "{imperative}"): "{title}"')
def validate(self, line: str, commit: GitCommit) -> List[RuleViolation]:
violations = []
# Ignore the section tag (ie `<section tag>: <message body>.`)
words = line.split(': ', 1)[-1].split()
first_word = words[0].lower()
if first_word in WORD_SET:
imperative = head_binary_search(first_word, imperative_forms)
violation = RuleViolation(self.id, self.error_msg.format(
word=first_word,
imperative=imperative,
title=commit.message.title,
))
violations.append(violation)
return violations
class TitleMatchRegexAllowException(LineRule):
"""Allows revert commits contrary to the built-in title-match-regex rule"""
name = 'title-match-regex-allow-exception'
id = 'Z2'
target = CommitMessageTitle
options_spec = [StrOption('regex', ".*", "Regex the title should match")]
def validate(self, title: str, commit: GitCommit) -> List[RuleViolation]:
regex = self.options['regex'].value
pattern = re.compile(regex, re.UNICODE)
if not pattern.search(title) and not title.startswith("Revert \""):
violation_msg = f"Title does not match regex ({regex})"
return [RuleViolation(self.id, violation_msg, title)]
return []
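# Illustrative wiring (not part of this file): gitlint loads these rules via
# an ``extra-path`` entry in the repository's gitlint config, e.g.
#
#     [general]
#     extra-path=tools/lib/gitlint-rules.py
#
#     [title-match-regex-allow-exception]
#     regex=^(.+: )?[A-Z].+\.$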
|
{
"content_hash": "c1ddd5c325af2082c974de8351e5cc6b",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 79,
"avg_line_length": 36.061349693251536,
"alnum_prop": 0.6017352841102416,
"repo_name": "showell/zulip",
"id": "da70b63f117646ae08c62de1402d8b3e0be980a3",
"size": "5878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/lib/gitlint-rules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433235"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "634357"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3341135"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79720"
},
{
"name": "Python",
"bytes": "8120030"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "133132"
},
{
"name": "TypeScript",
"bytes": "20603"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
import testtools
from testtools import matchers
from neutron.common import exceptions as exc
from neutron.db import api as db
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_vxlan
from neutron.tests import base
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
INVALID_VXLAN_VNI = 7337
MULTICAST_GROUP = "239.1.1.1"
VXLAN_UDP_PORT_ONE = 9999
VXLAN_UDP_PORT_TWO = 8888
class VxlanTypeTest(base.BaseTestCase):
def setUp(self):
super(VxlanTypeTest, self).setUp()
ml2_db.initialize()
cfg.CONF.set_override('vni_ranges', [TUNNEL_RANGES],
group='ml2_type_vxlan')
cfg.CONF.set_override('vxlan_group', MULTICAST_GROUP,
group='ml2_type_vxlan')
self.driver = type_vxlan.VxlanTypeDriver()
self.driver.vxlan_vni_ranges = TUNNEL_RANGES
self.driver._sync_vxlan_allocations()
self.session = db.get_session()
self.addCleanup(cfg.CONF.reset)
self.addCleanup(db.clear_db)
def test_vxlan_tunnel_type(self):
self.assertEqual(self.driver.get_type(), type_vxlan.TYPE_VXLAN)
def test_validate_provider_segment(self):
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'phys_net',
api.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
segment[api.PHYSICAL_NETWORK] = None
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
def test_sync_tunnel_allocations(self):
self.assertIsNone(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN - 1))
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN + 1)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX - 1)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX)).allocated
)
self.assertIsNone(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX + 1))
)
self.driver.vxlan_vni_ranges = UPDATED_TUNNEL_RANGES
self.driver._sync_vxlan_allocations()
self.assertIsNone(self.driver.
get_vxlan_allocation(self.session,
(TUN_MIN + 5 - 1)))
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MIN + 5)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MIN + 5 + 1)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MAX + 5 - 1)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MAX + 5)).
allocated)
self.assertIsNone(self.driver.
get_vxlan_allocation(self.session,
(TUN_MAX + 5 + 1)))
def test_reserve_provider_segment(self):
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: 101}
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
with testtools.ExpectedException(exc.TunnelIdInUse):
self.driver.reserve_provider_segment(self.session, segment)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertFalse(alloc.allocated)
segment[api.SEGMENTATION_ID] = 1000
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertIsNone(alloc)
def test_allocate_tenant_segment(self):
tunnel_ids = set()
for x in xrange(TUN_MIN, TUN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
segment = self.driver.allocate_tenant_segment(self.session)
        self.assertIsNone(segment)
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.session, segment)
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.session, segment)
def test_vxlan_endpoints(self):
"""Test VXLAN allocation/de-allocation."""
        # Add the first endpoint and verify its IP address and UDP port
vxlan1_endpoint = self.driver.add_endpoint(TUNNEL_IP_ONE,
VXLAN_UDP_PORT_ONE)
self.assertEqual(TUNNEL_IP_ONE, vxlan1_endpoint.ip_address)
self.assertEqual(VXLAN_UDP_PORT_ONE, vxlan1_endpoint.udp_port)
        # Add the second endpoint and verify its IP address and UDP port
vxlan2_endpoint = self.driver.add_endpoint(TUNNEL_IP_TWO,
VXLAN_UDP_PORT_TWO)
self.assertEqual(TUNNEL_IP_TWO, vxlan2_endpoint.ip_address)
self.assertEqual(VXLAN_UDP_PORT_TWO, vxlan2_endpoint.udp_port)
# Get all the endpoints
endpoints = self.driver.get_endpoints()
for endpoint in endpoints:
if endpoint['ip_address'] == TUNNEL_IP_ONE:
self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port'])
elif endpoint['ip_address'] == TUNNEL_IP_TWO:
self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port'])
class VxlanTypeMultiRangeTest(base.BaseTestCase):
TUN_MIN0 = 100
TUN_MAX0 = 101
TUN_MIN1 = 200
TUN_MAX1 = 201
TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]
def setUp(self):
super(VxlanTypeMultiRangeTest, self).setUp()
ml2_db.initialize()
self.driver = type_vxlan.VxlanTypeDriver()
self.driver.vxlan_vni_ranges = self.TUNNEL_MULTI_RANGES
self.driver._sync_vxlan_allocations()
self.session = db.get_session()
self.addCleanup(cfg.CONF.reset)
self.addCleanup(db.clear_db)
def test_release_segment(self):
segments = [self.driver.allocate_tenant_segment(self.session)
for i in range(4)]
# Release them in random order. No special meaning.
for i in (0, 2, 1, 3):
self.driver.release_segment(self.session, segments[i])
for key in (self.TUN_MIN0, self.TUN_MAX0,
self.TUN_MIN1, self.TUN_MAX1):
alloc = self.driver.get_vxlan_allocation(self.session, key)
self.assertFalse(alloc.allocated)
|
{
"content_hash": "e8f72cc29cdbae71093d6b7b2556fdbd",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 79,
"avg_line_length": 41.2877358490566,
"alnum_prop": 0.5782017593967782,
"repo_name": "rickerc/neutron_audit",
"id": "b9f83b7d67ffb41e0fa2fa16c4b90d6c23294528",
"size": "9438",
"binary": false,
"copies": "1",
"ref": "refs/heads/cis-havana-staging",
"path": "neutron/tests/unit/ml2/test_type_vxlan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "7052151"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
from operator import sub
from itertools import combinations
def checkio(cakes):
    # Length of a 2D vector, and its direction normalized to unit length and
    # rounded to 3 decimals so that parallel vectors compare equal as strings.
    distance = lambda v: sum(v[i]**2 for i in range(2))**.5
    normalize = lambda v: ['{:.3f}'.format(v[i]/distance(v)) for i in range(2)]
    # Group every pair of cakes by the direction of the vector between them;
    # pairs sharing a direction and a common point lie on the same line.
    vectors = {}
    for a, b in combinations(sorted(cakes), 2):
        vect = list(map(sub, b, a))
        norm = str(normalize(vect))
        try:
            vectors[norm].append(a)
        except KeyError:
            vectors[norm] = [a]
        vectors[norm].append(b)
    # A direction seen for only one pair (two points) cannot form a row of 3+.
    vectors = {k: v for k, v in vectors.items() if len(v) > 2}
    linked, result = [], 0
    for k, v in vectors.items():
        # Unique points for this direction, in first-seen order.
        s = [p for n, p in enumerate(v) if p not in v[:n]]
        for u in s:
            # Collect u plus every point paired with u along this direction.
            l = [u] + [v[i+1] for i, p in enumerate(v) if p == u and not i % 2]
            if len(l) > 2:
                # Count each maximal line only once.
                if not any(all(e in o for e in l) for o in linked):
                    linked.append(l)
                    result += 1
    return result
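# Illustrative self-check (hypothetical data, not part of the original
# solution): four collinear cakes plus one stray cake form exactly one row.
if __name__ == '__main__':
    assert checkio([(1, 1), (2, 2), (3, 3), (4, 4), (1, 3)]) == 1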
|
{
"content_hash": "cabfbd48b2ce1d2cd6482de3fbe026ae",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 35.48148148148148,
"alnum_prop": 0.5187891440501043,
"repo_name": "Pouf/CodingCompetition",
"id": "f5387e506571617c23b68e8b500c54b9e8ab577f",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CiO/cakes-rows.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7355"
},
{
"name": "Python",
"bytes": "202689"
}
],
"symlink_target": ""
}
|
from django.dispatch import receiver
from django.db.models.signals import m2m_changed
from django.db.models import get_model
from oscar.apps.basket.abstract_models import AbstractBasket
Voucher = get_model('voucher', 'Voucher')
OrderDiscount = get_model('order', 'OrderDiscount')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
@receiver(m2m_changed)
def receive_basket_voucher_change(sender, **kwargs):
if (kwargs['model'] == Voucher and kwargs['action'] == 'post_add'
and isinstance(kwargs['instance'], AbstractBasket)
and kwargs['pk_set']):
voucher_id = list(kwargs['pk_set'])[0]
voucher = Voucher._default_manager.get(pk=voucher_id)
voucher.num_basket_additions += 1
voucher.save()
|
{
"content_hash": "7e967af8eb14a4956b18d08669254a56",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 69,
"avg_line_length": 39.94736842105263,
"alnum_prop": 0.6996047430830039,
"repo_name": "Idematica/django-oscar",
"id": "845858372718e2b10c01b65607ae08b2f7a2d8b4",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/apps/offer/receivers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1099824"
},
{
"name": "JavaScript",
"bytes": "818932"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "4079718"
},
{
"name": "Shell",
"bytes": "5760"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import datashape
from datashape import (dshape, DataShape, Record, isdimension, Option,
discover, Tuple)
from .dispatch import dispatch
from .expr import Expr
from .compatibility import _strtypes
__all__ = []
try:
import pyspark
from pyspark import sql, RDD
from pyspark.sql import (IntegerType, FloatType, StringType, TimestampType,
StructType, StructField, ArrayType, SchemaRDD, SQLContext,
ShortType, DoubleType, BooleanType, LongType)
from pyspark import SparkContext
except ImportError:
pyspark = None
def deoption(ds):
"""
>>> deoption('int32')
ctype("int32")
>>> deoption('?int32')
ctype("int32")
"""
if isinstance(ds, str):
ds = dshape(ds)
if isinstance(ds, DataShape) and not isdimension(ds[0]):
return deoption(ds[0])
if isinstance(ds, Option):
return ds.ty
else:
return ds
if pyspark:
if not issubclass(SQLContext, object):
raise ImportError("This version of SparkSQL uses old-style classes. "
"Please update to newer version of Spark")
types = {datashape.int16: ShortType(),
datashape.int32: IntegerType(),
datashape.int64: IntegerType(),
datashape.float32: FloatType(),
datashape.float64: DoubleType(),
datashape.real: DoubleType(),
datashape.time_: TimestampType(),
datashape.date_: TimestampType(),
datashape.datetime_: TimestampType(),
datashape.bool_: BooleanType(),
datashape.string: StringType()}
rev_types = {IntegerType(): datashape.int64,
ShortType(): datashape.int32,
LongType(): datashape.int64,
FloatType(): datashape.float32,
DoubleType(): datashape.float64,
StringType(): datashape.string,
TimestampType(): datashape.datetime_,
BooleanType(): datashape.bool_}
def sparksql_to_ds(ss):
""" Convert datashape to SparkSQL type system
>>> sparksql_to_ds(IntegerType()) # doctest: +SKIP
ctype("int64")
>>> sparksql_to_ds(ArrayType(IntegerType(), False)) # doctest: +SKIP
dshape("var * int64")
>>> sparksql_to_ds(ArrayType(IntegerType(), True)) # doctest: +SKIP
dshape("var * ?int64")
>>> sparksql_to_ds(StructType([ # doctest: +SKIP
... StructField('name', StringType(), False),
... StructField('amount', IntegerType(), True)]))
dshape("{ name : string, amount : ?int64 }")
"""
if ss in rev_types:
return rev_types[ss]
if isinstance(ss, ArrayType):
elem = sparksql_to_ds(ss.elementType)
if ss.containsNull:
return datashape.var * Option(elem)
else:
return datashape.var * elem
if isinstance(ss, StructType):
return dshape(Record([[field.name, Option(sparksql_to_ds(field.dataType))
if field.nullable
else sparksql_to_ds(field.dataType)]
for field in ss.fields]))
raise NotImplementedError("SparkSQL type not known %s" % ss)
def ds_to_sparksql(ds):
""" Convert datashape to SparkSQL type system
>>> print(ds_to_sparksql('int32')) # doctest: +SKIP
IntegerType
>>> print(ds_to_sparksql('5 * int32')) # doctest: +SKIP
ArrayType(IntegerType,false)
>>> print(ds_to_sparksql('5 * ?int32')) # doctest: +SKIP
ArrayType(IntegerType,true)
>>> print(ds_to_sparksql('{name: string, amount: int32}')) # doctest: +SKIP
StructType(List(StructField(name,StringType,false),StructField(amount,IntegerType,false)))
>>> print(ds_to_sparksql('10 * {name: string, amount: ?int32}')) # doctest: +SKIP
ArrayType(StructType(List(StructField(name,StringType,false),StructField(amount,IntegerType,true))),false)
"""
if isinstance(ds, str):
return ds_to_sparksql(dshape(ds))
if isinstance(ds, Record):
return sql.StructType([
sql.StructField(name,
ds_to_sparksql(deoption(typ)),
isinstance(typ, datashape.Option))
for name, typ in ds.fields])
if isinstance(ds, DataShape):
if isdimension(ds[0]):
elem = ds.subshape[0]
if isinstance(elem, DataShape) and len(elem) == 1:
elem = elem[0]
return sql.ArrayType(ds_to_sparksql(deoption(elem)),
isinstance(elem, Option))
else:
return ds_to_sparksql(ds[0])
if ds in types:
return types[ds]
raise NotImplementedError()
@dispatch(SQLContext, RDD)
def into(sqlContext, rdd, schema=None, columns=None, **kwargs):
""" Convert a normal PySpark RDD to a SparkSQL RDD
Schema inferred by ds_to_sparksql. Can also specify it explicitly with
schema keyword argument.
"""
schema = schema or discover(rdd).subshape[0]
if isinstance(schema[0], Tuple):
columns = columns or list(range(len(schema[0].dshapes)))
types = schema[0].dshapes
schema = dshape(Record(list(zip(columns, types))))
sql_schema = ds_to_sparksql(schema)
return sqlContext.applySchema(rdd, sql_schema)
@dispatch(SQLContext, (Expr, object) + _strtypes)
def into(sqlContext, o, **kwargs):
schema = kwargs.pop('schema', None) or discover(o).subshape[0]
return into(sqlContext, into(sqlContext._sc, o), schema=schema, **kwargs)
@dispatch((tuple, list, set), SchemaRDD)
def into(a, b, **kwargs):
if not isinstance(a, type):
a = type(a)
return a(map(tuple, b.collect()))
@dispatch(SchemaRDD)
def discover(srdd):
return datashape.var * sparksql_to_ds(srdd.schema())
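    # Hypothetical round-trip sketch (requires a live SparkContext; all names
    # below are illustrative): a plain RDD of tuples gains a schema on its way
    # into SparkSQL, and ``discover`` recovers a datashape from the result.
    #
    #     sc = SparkContext('local', 'demo')
    #     sqlContext = SQLContext(sc)
    #     rdd = sc.parallelize([('Alice', 100), ('Bob', 200)])
    #     srdd = into(sqlContext, rdd, columns=['name', 'amount'])
    #     discover(srdd)  # -> something like dshape("var * {name: string, amount: int64}")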
|
{
"content_hash": "c67d830dd7fba441e515c7769dab30e5",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 114,
"avg_line_length": 35.674285714285716,
"alnum_prop": 0.5720006407176037,
"repo_name": "vitan/blaze",
"id": "4fda3f949cedc80cd212a335ba1226dbef8d2a85",
"size": "6243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blaze/sparksql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from verbs.baseforms import forms
class CloseForm(forms.VerbForm):
name = "Close"
slug = "close"
edit_what_remark = forms.CharField()
duration_min_time = forms.IntegerField()
|
{
"content_hash": "bbbd2aec511070ee44da7746fc001677",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 19.5,
"alnum_prop": 0.6871794871794872,
"repo_name": "Bionetbook/bionetbook",
"id": "64f6c64dd34ebd16320b9cb65f2026eeeff4c660",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bnbapp/bionetbook/_old/verbs/forms/close.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1584"
},
{
"name": "CSS",
"bytes": "955489"
},
{
"name": "HTML",
"bytes": "1662331"
},
{
"name": "JavaScript",
"bytes": "8620958"
},
{
"name": "Makefile",
"bytes": "1215"
},
{
"name": "Python",
"bytes": "1238882"
}
],
"symlink_target": ""
}
|
import re
import itertools
import collections
import logging
import inspect
from datetime import datetime, date
from django.conf import settings
from django.utils.http import parse_http_date, http_date
from django.http import Http404, HttpResponse as _HttpResponse
from django.core.handlers.wsgi import STATUS_CODE_TEXT
from nuages.utils import get_matching_mime_types, parse_accept_header
__all__ = ('HttpRequest', 'HttpResponse', 'HttpError', 'NotModifiedError',
'InvalidRequestError', 'UnauthorizedError', 'ForbiddenError',
'MethodNotAllowedError', 'NotAcceptableError', 'ConflictError',
'PreconditionFailedError', 'UnsupportedMediaTypeError',
'RequestRangeNotSatisfiableError', 'Etag', 'Range', 'ContentRange',)
logger = logging.getLogger(__name__)
ISO8601_DATEFORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def datetime_to_timestamp(datetm):
    '''Returns the timestamp representation of a datetime instance.'''
    # Note: strftime('%s') delegates to the platform's C library and is not
    # supported on Windows.
    datestring = datetm.strftime("%s")
if isinstance(datetm, datetime):
datestring += ".%03d" % (datetm.time().microsecond / 1000)
return float(datestring)
def datetime_to_str(datetm):
    '''Returns an RFC 1123 date string for the given datetime instance.'''
return http_date(datetime_to_timestamp(datetm))
def parse_datetime(datestr):
    '''Turns a timestamp, an ISO8601 or an RFC1123 date string
    representation into a Python datetime instance.'''
try:
datestr = float(datestr)
return datetime.fromtimestamp(datestr)
except ValueError:
pass
try:
datestr = str(datestr)
if 'GMT' in datestr:
return datetime.fromtimestamp(parse_http_date(datestr))
return datetime.strptime(datestr, ISO8601_DATEFORMAT)
except Exception, e:
raise ValueError('Unable to parse date \'%s\' (reason: %s)' %
(datestr, repr(e)))
class RequestMeta(collections.MutableMapping):
'''Wrapper around the META dict of a Django HttpRequest instance'''
def __init__(self, request_meta):
self.store = request_meta
def __getitem__(self, key):
key = self.__keytransform__(key)
header, value = key, self.store.get(key)
try:
if header == 'HTTP_AUTHORIZATION':
try:
protocol, token = value.strip(' ').split(' ')
protocol = protocol.upper()
if protocol == 'BASIC':
username, password = token.decode('base64').split(':')
return protocol, username, password
return protocol, token
except ValueError:
return value
if header in ['HTTP_IF_MATCH', 'HTTP_IF_NONE_MATCH']:
return (Etag.parse(value) if ';' not in value
else [map(Etag.parse, value.split(';'))])
if header == 'HTTP_IF_RANGE':
return Etag.parse(value)
if header in ['HTTP_IF_MODIFIED_SINCE',
'HTTP_IF_UNMODIFIED_SINCE']:
return parse_datetime(value)
if header == 'HTTP_RANGE':
return Range.parse(value)
if header == 'HTTP_CONTENT_TYPE':
return value.split(';')[0] #removing potential "; charset=UTF-8"
if header == 'HTTP_ACCEPT':
if not value:
return [settings.DEFAULT_CONTENT_TYPE,]
items = parse_accept_header(value)
return [settings.DEFAULT_CONTENT_TYPE if i == '*/*' else i
for i in items]
return value
except Exception, e:
logging.error(str(e))
return value
def get(self, key, default=None):
try:
if self.__keytransform__(key) in self.store:
return self.__getitem__(key)
except:
pass
return default
def __setitem__(self, key, value):
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
del self.store[self.__keytransform__(key)]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __keytransform__(self, key):
return key.upper().replace('-', '_')
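# Illustrative lookups (hypothetical values): RequestMeta coerces raw META
# entries into richer Python values on access, and normalizes keys.
#
#     meta = RequestMeta({'HTTP_IF_MODIFIED_SINCE':
#                         'Sat, 29 Oct 1994 19:43:31 GMT'})
#     meta['HTTP_IF_MODIFIED_SINCE']      # -> datetime(1994, 10, 29, ...)
#     meta.get('http-if-modified-since')  # same entry; keys are normalized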
class HttpRequest(object):
'''
Wrapper around the DJANGO HttpRequest with improved methods,
and attributes.
'''
def __init__(self, base_request):
self._base_request = base_request
self.META = RequestMeta(base_request.META)
@property
def method(self):
'''Support for the X-HTTP-Method-Override header.
Returns the value of the header if set, falls back to the real HTTP
method if not.'''
return self.META.get('HTTP_X_HTTP_METHOD_OVERRIDE',
self._base_request.method).upper()
def __getattr__(self, name):
'''Allows all the attributes of the base HttpRequest to be mirrored in
the wrapper, unless they've been overridden.'''
return getattr(self._base_request, name)
class HttpResponse(_HttpResponse):
'''A transparent wrapper around the Django HttpResponse class.'''
    def __init__(self, node=None, payload=None, *args, **kwargs):
        self.__node = node
        self.payload = payload
        super(HttpResponse, self).__init__(*args, **kwargs)
    def __getattr__(self, attr):
        # Alias 'status' to Django's 'status_code' on attribute access.
        if attr == 'status':
            return self.status_code
        raise AttributeError(attr)
def __setattr__(self, attr, val):
if attr == 'status':
attr = 'status_code'
return super(HttpResponse, self).__setattr__(attr, val)
def __setitem__(self, header, value):
'''Conversion of types'''
if type(value) in (datetime, date):
value = datetime_to_str(value)
super(HttpResponse, self).__setitem__(header, value)
@property
def content_type(self):
value = self.get('Content-Type', settings.DEFAULT_CONTENT_TYPE)
return value[:max(0, value.find('; charset')) or None].strip(' ')
@property
def node(self):
return self.__node
class HttpError(HttpResponse, Exception):
'''When raised, turns to an HTTP response detailing the error that
occurred.
This class is both an Exception and an HttpResponse, which means it can
be "raised', or be the value returned by a view.'''
def __init__(self, node=None, status=503, description=''):
HttpResponse.__init__(self, status=status)
Exception.__init__(self)
self.message = (STATUS_CODE_TEXT.get(status, 'Error %s' % status) +
'\n' + (description or ''))
self.__node = node
self.payload = {'error': STATUS_CODE_TEXT.get(status),
'description': description}
@property
def node(self):
return self.__node
def __str__(self):
return self.message
class BadRequestError(HttpError):
    '''The request cannot be fulfilled due to bad syntax.'''
def __init__(self, node=None, description='The request cannot be ' \
'fulfilled due to bad syntax.'):
super(BadRequestError, self).__init__(node, 400, description)
class NotFoundError(HttpError):
'''The requested resource could not be found but may be available again
in the future.'''
def __init__(self, node=None):
if node:
description = 'The URL seems well-formed, but the requested ' \
'resource or collection could not be found.'
else:
description = 'The requested resource or collection could not be ' \
'found.'
super(NotFoundError, self).__init__(node, 404, description)
class NotModifiedError(HttpError):
'''"If the client has performed a conditional GET request and access is
allowed, but the document has not been modified, the server SHOULD respond
with this status code. The 304 response MUST NOT contain a message-body,
and thus is always terminated by the first empty line after the header
fields."'''
def __init__(self, node=None):
super(NotModifiedError, self).__init__(node, 304)
class InvalidRequestError(HttpError):
'''"The request could not be understood by the server due to malformed
syntax"'''
def __init__(self, node=None, status=400, description=''):
super(InvalidRequestError, self).__init__(node, status, description)
class UnauthorizedError(HttpError):
'''"The request requires user authentication.
The response MUST include a WWW-Authenticate header field (section 14.47)
containing a challenge applicable to the requested resource."
(http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.2)'''
def __init__(self, node=None):
super(UnauthorizedError, self).__init__(node, 401)
#TODO: Add a WWW-Authenticate header
class ForbiddenError(HttpError):
'''The server understood the request, but is refusing to fulfill it.
Authorization will not help and the request SHOULD NOT be repeated.
If the request method was not HEAD and the server wishes to make public
why the request has not been fulfilled, it SHOULD describe the reason for
the refusal in the entity. If the server does not wish to make this
information available to the client, the status code 404 (Not Found)
can be used instead.'''
def __init__(self, node=None, description=None):
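        # Added note: falling back to a plain 404 when no public reason is
        # given is deliberate, per the class docstring above.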
if not description:
raise Http404
super(ForbiddenError, self).__init__(node, 403, description)
class MethodNotAllowedError(HttpError):
'''"The method specified in the Request-Line is not allowed for the
resource identified by the Request-URI. The response MUST include an
Allow header containing a list of valid methods for the requested
resource."'''
def __init__(self, node=None):
super(MethodNotAllowedError, self).__init__(node, 405)
node = node if inspect.isclass(node) else node.__class__
self['Allow'] = ', '.join(node.get_allowed_methods(implicits=False))
class NotAcceptableError(HttpError):
'''"The resource identified by the request is only capable of generating
response entities which have content characteristics not acceptable
according to the accept headers sent in the request.
Unless it was a HEAD request, the response SHOULD include an entity
containing a list of available entity characteristics and location(s)
from which the user or user agent can choose the one most appropriate."'''
def __init__(self, node):
description = ', '.join(node.outputs)
super(NotAcceptableError, self).__init__(node, 406, description)
class ConflictError(HttpError):
'''"The request could not be completed due to a conflict with the current
state of the resource. This code is only allowed in situations where it is
expected that the user might be able to resolve the conflict and resubmit
the request. The response body SHOULD include enough information for the
user to recognize the source of the conflict. Ideally, the response entity
would include enough information for the user or user agent to fix the
problem; however, that might not be possible and is not required."'''
def __init__(self, node=None, description=None):
super(ConflictError, self).__init__(node, 409, description)
class PreconditionFailedError(HttpError):
'''"The precondition given in one or more of the request-header fields
evaluated to false when it was tested on the server."'''
def __init__(self, node=None, description=''):
super(PreconditionFailedError, self).__init__(node, 412, description)
class UnsupportedMediaTypeError(HttpError):
'''"The server is refusing to service the request because the entity of
the request is in a format not supported by the requested resource for the
requested method."'''
def __init__(self, node=None, description='', required_format=None):
if required_format and not description:
description = ('Data must be sent formatted and sent in a ' \
'request with a Content-Type header set to %s' %
required_format)
super(UnsupportedMediaTypeError, self).__init__(node, 415, description)
class RequestedRangeNotSatisfiableError(HttpError):
'''"A server SHOULD return a response with this status code if a request
included a Range request-header field (section 14.35), and none of the
range-specifier values in this field overlap the current extent of the
selected resource, and the request did not include an If-Range
request-header field. "'''
def __init__(self, node=None, description=''):
super(RequestedRangeNotSatisfiableError, self).__init__(node, 416,
description)
class Etag(object):
'''The ETag response-header field provides the current value of the entity
tag for the requested variant
(http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.19)'''
    def __init__(self, last_modified, id_):
        self.last_modified = last_modified
        self.id_ = id_
        self.timestamp = datetime_to_timestamp(last_modified)
def __eq__(self, instance):
try:
if not instance:
return False
#A WILDCARD ETag is considered equal to any ETag value.
if repr(self) == '*' or repr(instance) == '*':
return True
if self.id_ != instance.id_:
return False
return self.last_modified == instance.last_modified
except:
return False
def __ne__(self, instance):
return not self.__eq__(instance)
def __cmp__(self, instance):
if not instance:
return 1
#A WILDCARD ETag is considered equal to any ETag value.
if repr(self) == '*' or repr(instance) == '*':
return 0
return self.last_modified - instance.last_modified
def __repr__(self):
if not self.timestamp:
return '*'
return '%f-%s' % (self.timestamp, self.id_)
    @classmethod
    def parse(cls, raw_etag):
        try:
            timestamp, id_ = raw_etag.split('-')
            return cls(datetime.fromtimestamp(float(timestamp)), id_)
        except Exception:
            raise ValueError('Invalid \'Etag\' header value')
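# Example (added sketch; datetime_to_timestamp relies on strftime('%s'),
# i.e. Unix platforms): an Etag round-trips through its repr():
#
#   tag = Etag(datetime.fromtimestamp(1325376000), '42')
#   repr(tag)                     # -> '1325376000.000000-42'
#   Etag.parse(repr(tag)) == tag  # -> True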
ETAG_WILDCARD = Etag(datetime.fromtimestamp(0), '0')
class Range(object):
'''Parses the content of a Range header into a simple helper class.
HTTP retrieval requests using conditional or unconditional GET methods
MAY request one or more sub-ranges of the entity, instead of the entire
entity.
(http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.2)'''
    def __init__(self, unit, offset, limit):
        self.unit = unit
        self.offset = offset
        self.limit = limit
@classmethod
def parse(cls, raw_header):
#FIXME: Not all the formats defined by the HTTP RFC are supported
match = re.match(r'^(?P<unit>\w+)=(?P<offset>\d+)-(?P<limit>\d+)$',
raw_header)
if not match:
raise ValueError('Invalid \'Range\' header value')
values = match.groupdict()
return cls(values['unit'],
int(values['offset']),
int(values['limit']))
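# Example (added sketch): only the simple 'unit=first-last' form is accepted
# (see the FIXME above):
#
#   Range.parse('items=0-24')  # -> Range(unit='items', offset=0, limit=24)
#   Range.parse('items=10-')   # -> raises ValueError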
class ContentRange(object):
'''Builds a valid Content-Range header representation as defined in the
HTTP protocol.
The Content-Range entity-header is sent with a partial entity-body to
specify where in the full entity-body the partial body should be applied.
(http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.16)'''
    def __init__(self, unit, first, last, total):
        self.unit = unit
        self.first = first
        self.last = last
        self.total = total
    def __repr__(self):
        return '%(unit)s %(first)s-%(last)s/%(total)s' % vars(self)
|
{
"content_hash": "42764c656e7eae2b09de0a5e5981dc40",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 80,
"avg_line_length": 37.24770642201835,
"alnum_prop": 0.624692118226601,
"repo_name": "mohamedattahri/Nuages",
"id": "d53f4063c746570dfa1fc9d66eb2573ad47e0d38",
"size": "16240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nuages/http/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "51335"
}
],
"symlink_target": ""
}
|
try:
import hashlib
md5er = hashlib.md5
except ImportError, e:
import md5
md5er = md5.new
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
import pickle
import re
import sys
import subprocess
import multiprocessing
from subprocess import PIPE
# Disabled LINT rules and reason.
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
ENABLED_LINT_RULES = """
build/class
build/deprecated
build/endif_comment
build/forward_decl
build/include_order
build/printf_format
build/storage_class
legal/copyright
readability/boost
readability/braces
readability/casting
readability/check
readability/constructors
readability/fn_size
readability/function
readability/multiline_comment
readability/multiline_string
readability/streams
readability/todo
readability/utf8
runtime/arrays
runtime/casting
runtime/deprecated_fn
runtime/explicit
runtime/int
runtime/memset
runtime/mutex
runtime/nonconf
runtime/printf
runtime/printf_format
runtime/references
runtime/rtti
runtime/sizeof
runtime/string
runtime/virtual
runtime/vlog
whitespace/blank_line
whitespace/braces
whitespace/comma
whitespace/comments
whitespace/ending_newline
whitespace/indent
whitespace/labels
whitespace/line_length
whitespace/newline
whitespace/operators
whitespace/parens
whitespace/tab
whitespace/todo
""".split()
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
def CppLintWorker(command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
process.wait()
out_lines = ""
error_count = -1
while True:
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
break
m = LINT_OUTPUT_PATTERN.match(out_line)
if m:
out_lines += out_line
error_count += 1
sys.stderr.write(out_lines)
return error_count
except KeyboardInterrupt:
process.kill()
except:
print('Error running cpplint.py. Please make sure you have depot_tools' +
' in your $PATH. Lint check skipped.')
process.kill()
class FileContentsCache(object):
def __init__(self, sums_file_name):
self.sums = {}
self.sums_file_name = sums_file_name
def Load(self):
try:
sums_file = None
try:
sums_file = open(self.sums_file_name, 'r')
self.sums = pickle.load(sums_file)
except IOError:
# File might not exist, this is OK.
pass
finally:
if sums_file:
sums_file.close()
def Save(self):
try:
sums_file = open(self.sums_file_name, 'w')
pickle.dump(self.sums, sums_file)
finally:
sums_file.close()
def FilterUnchangedFiles(self, files):
changed_or_new = []
for file in files:
try:
handle = open(file, "r")
file_sum = md5er(handle.read()).digest()
if not file in self.sums or self.sums[file] != file_sum:
changed_or_new.append(file)
self.sums[file] = file_sum
finally:
handle.close()
return changed_or_new
def RemoveFile(self, file):
if file in self.sums:
self.sums.pop(file)
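# Typical flow (added sketch), mirroring ProcessFiles below: load the cache,
# lint only files whose contents changed, evict files that still have
# errors, then persist the new checksums:
#
#   cache = FileContentsCache('.cpplint-cache')
#   cache.Load()
#   changed = cache.FilterUnchangedFiles(files)
#   # ... lint `changed`, calling cache.RemoveFile(f) for failures ...
#   cache.Save()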
class SourceFileProcessor(object):
"""
Utility class that can run through a directory structure, find all relevant
files and invoke a custom check on the files.
"""
def Run(self, path):
all_files = []
for file in self.GetPathsToSearch():
all_files += self.FindFilesIn(join(path, file))
if not self.ProcessFiles(all_files, path):
return False
return True
def IgnoreDir(self, name):
return name.startswith('.') or name == 'data' or name == 'sputniktests'
def IgnoreFile(self, name):
return name.startswith('.')
def FindFilesIn(self, path):
result = []
for (root, dirs, files) in os.walk(path):
for ignored in [x for x in dirs if self.IgnoreDir(x)]:
dirs.remove(ignored)
for file in files:
if not self.IgnoreFile(file) and self.IsRelevant(file):
result.append(join(root, file))
return result
class CppLintProcessor(SourceFileProcessor):
"""
Lint files to check that they follow the google code style.
"""
def IsRelevant(self, name):
return name.endswith('.cc') or name.endswith('.h')
def IgnoreDir(self, name):
return (super(CppLintProcessor, self).IgnoreDir(name)
or (name == 'third_party'))
IGNORE_LINT = ['flag-definitions.h']
def IgnoreFile(self, name):
return (super(CppLintProcessor, self).IgnoreFile(name)
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
return ['src', 'preparser', 'include', 'samples', join('test', 'cctest')]
def ProcessFiles(self, files, path):
good_files_cache = FileContentsCache('.cpplint-cache')
good_files_cache.Load()
files = good_files_cache.FilterUnchangedFiles(files)
if len(files) == 0:
print 'No changes in files detected. Skipping cpplint check.'
return True
filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
command = ['cpplint.py', '--filter', filt]
local_cpplint = join(path, "tools", "cpplint.py")
if exists(local_cpplint):
command = ['python', local_cpplint, '--filter', filt]
    commands = [command + [file] for file in files]
count = multiprocessing.cpu_count()
pool = multiprocessing.Pool(count)
try:
results = pool.map_async(CppLintWorker, commands).get(999999)
except KeyboardInterrupt:
print "\nCaught KeyboardInterrupt, terminating workers."
sys.exit(1)
for i in range(len(files)):
if results[i] > 0:
good_files_cache.RemoveFile(files[i])
total_errors = sum(results)
print "Total errors found: %d" % total_errors
good_files_cache.Save()
return total_errors == 0
COPYRIGHT_HEADER_PATTERN = re.compile(
r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
class SourceProcessor(SourceFileProcessor):
"""
Check that all files include a copyright notice and no trailing whitespaces.
"""
RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', 'SConscript',
'SConstruct', '.status', '.gyp', '.gypi']
# Overwriting the one in the parent class.
def FindFilesIn(self, path):
if os.path.exists(path+'/.git'):
output = subprocess.Popen('git ls-files --full-name',
stdout=PIPE, cwd=path, shell=True)
result = []
for file in output.stdout.read().split():
for dir_part in os.path.dirname(file).split(os.sep):
if self.IgnoreDir(dir_part):
break
else:
if self.IsRelevant(file) and not self.IgnoreFile(file):
result.append(join(path, file))
if output.wait() == 0:
return result
return super(SourceProcessor, self).FindFilesIn(path)
def IsRelevant(self, name):
for ext in SourceProcessor.RELEVANT_EXTENSIONS:
if name.endswith(ext):
return True
return False
def GetPathsToSearch(self):
return ['.']
def IgnoreDir(self, name):
return (super(SourceProcessor, self).IgnoreDir(name)
or (name == 'third_party')
or (name == 'gyp')
or (name == 'out')
or (name == 'obj'))
IGNORE_COPYRIGHTS = ['cpplint.py',
'earley-boyer.js',
'raytrace.js',
'crypto.js',
'libraries.cc',
'libraries-empty.cc',
'jsmin.py',
'regexp-pcre.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
def ProcessContents(self, name, contents):
result = True
base = basename(name)
if not base in SourceProcessor.IGNORE_TABS:
if '\t' in contents:
print "%s contains tabs" % name
result = False
if not base in SourceProcessor.IGNORE_COPYRIGHTS:
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print "%s is missing a correct copyright header." % name
result = False
ext = base.split('.').pop()
if ' \n' in contents or contents.endswith(' '):
line = 0
lines = []
parts = contents.split(' \n')
if not contents.endswith(' '):
parts.pop()
for part in parts:
line += part.count('\n') + 1
lines.append(str(line))
linenumbers = ', '.join(lines)
if len(lines) > 1:
print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
else:
print "%s has trailing whitespaces in line %s." % (name, linenumbers)
result = False
return result
def ProcessFiles(self, files, path):
success = True
violations = 0
for file in files:
try:
handle = open(file)
contents = handle.read()
if not self.ProcessContents(file, contents):
success = False
violations += 1
finally:
handle.close()
print "Total violating files: %s" % violations
return success
def GetOptions():
result = optparse.OptionParser()
result.add_option('--no-lint', help="Do not run cpplint", default=False,
action="store_true")
return result
def Main():
workspace = abspath(join(dirname(sys.argv[0]), '..'))
parser = GetOptions()
(options, args) = parser.parse_args()
success = True
print "Running C++ lint check..."
if not options.no_lint:
success = CppLintProcessor().Run(workspace) and success
print "Running copyright header and trailing whitespaces check..."
success = SourceProcessor().Run(workspace) and success
if success:
return 0
else:
return 1
if __name__ == '__main__':
sys.exit(Main())
|
{
"content_hash": "9942d00089ab1ecdc8a13d66feabd552",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 81,
"avg_line_length": 27.52112676056338,
"alnum_prop": 0.6387922210849539,
"repo_name": "mogoweb/webkit_for_android5.1",
"id": "a5f4c614d04a800c816991bdc6a00f27101aa5f1",
"size": "11366",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v8/tools/presubmit.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "6772"
},
{
"name": "Assembly",
"bytes": "26025"
},
{
"name": "Awk",
"bytes": "2800"
},
{
"name": "Batchfile",
"bytes": "57337"
},
{
"name": "C",
"bytes": "7713030"
},
{
"name": "C++",
"bytes": "153178707"
},
{
"name": "CMake",
"bytes": "192330"
},
{
"name": "CSS",
"bytes": "483041"
},
{
"name": "Common Lisp",
"bytes": "9920"
},
{
"name": "DIGITAL Command Language",
"bytes": "5243"
},
{
"name": "DTrace",
"bytes": "1931"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "HTML",
"bytes": "14998422"
},
{
"name": "Java",
"bytes": "1522083"
},
{
"name": "JavaScript",
"bytes": "18008829"
},
{
"name": "Lex",
"bytes": "42554"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "M4",
"bytes": "49839"
},
{
"name": "Makefile",
"bytes": "476166"
},
{
"name": "Module Management System",
"bytes": "9756"
},
{
"name": "Objective-C",
"bytes": "2798053"
},
{
"name": "Objective-C++",
"bytes": "7846322"
},
{
"name": "PHP",
"bytes": "66595"
},
{
"name": "Perl",
"bytes": "1130475"
},
{
"name": "Perl 6",
"bytes": "445215"
},
{
"name": "Python",
"bytes": "5503045"
},
{
"name": "QML",
"bytes": "3331"
},
{
"name": "QMake",
"bytes": "294800"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Roff",
"bytes": "273562"
},
{
"name": "Ruby",
"bytes": "81928"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "488223"
},
{
"name": "Yacc",
"bytes": "153801"
},
{
"name": "xBase",
"bytes": "328"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from dipy.reconst.dti import fractional_anisotropy, color_fa
from scipy.ndimage.filters import median_filter
try:
from skimage.filters import threshold_otsu as otsu
except ImportError:
from .threshold import otsu
from scipy.ndimage import binary_dilation, generate_binary_structure
def multi_median(input, median_radius, numpass):
""" Applies median filter multiple times on input data.
Parameters
----------
input : ndarray
The input volume to apply filter on.
median_radius : int
Radius (in voxels) of the applied median filter
numpass: int
Number of pass of the median filter
Returns
-------
input : ndarray
Filtered input volume.
"""
    # Array representing the size of the median window in each dimension.
    medarr = np.ones_like(input.shape) * ((median_radius * 2) + 1)
# Multi pass
for i in range(0, numpass):
median_filter(input, medarr, output=input)
return input
def applymask(vol, mask):
""" Mask vol with mask.
Parameters
----------
vol : ndarray
Array with $V$ dimensions
mask : ndarray
Binary mask. Has $M$ dimensions where $M <= V$. When $M < V$, we append
$V - M$ dimensions with axis length 1 to `mask` so that `mask` will
broadcast against `vol`. In the typical case `vol` can be 4D, `mask`
can be 3D, and we append a 1 to the mask shape which (via numpy
        broadcasting) has the effect of applying the 3D mask to each 3D slice
        in `vol` (``vol[..., 0]`` to ``vol[..., -1]``).
Returns
-------
masked_vol : ndarray
`vol` multiplied by `mask` where `mask` may have been extended to match
extra dimensions in `vol`
"""
mask = mask.reshape(mask.shape + (vol.ndim - mask.ndim) * (1,))
return vol * mask
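# Example (added sketch): a 3D mask is broadcast across the last axis of a
# 4D volume:
#
#   >>> vol = np.ones((2, 2, 2, 3))
#   >>> mask = np.zeros((2, 2, 2))
#   >>> mask[0, 0, 0] = 1
#   >>> applymask(vol, mask).sum()  # only the (0, 0, 0) voxel survives
#   3.0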
def bounding_box(vol):
""" Compute the bounding box of nonzero intensity voxels in the volume.
Parameters
----------
vol : ndarray
Volume to compute bounding box on.
Returns
-------
    npmins : list
        Array containing minimum index of each dimension
    npmaxs : list
        Array containing maximum index of each dimension
"""
# Find bounds on first dimension
temp = vol
for i in range(vol.ndim - 1):
temp = temp.any(-1)
mins = [temp.argmax()]
maxs = [len(temp) - temp[::-1].argmax()]
# Check that vol is not all 0
if mins[0] == 0 and temp[0] == 0:
warn('No data found in volume to bound. Returning empty bounding box.')
return [0] * vol.ndim, [0] * vol.ndim
# Find bounds on remaining dimensions
if vol.ndim > 1:
a, b = bounding_box(vol.any(0))
mins.extend(a)
maxs.extend(b)
return mins, maxs
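# Example (added sketch): a single nonzero voxel yields a one-voxel box,
# with `maxs` exclusive, as expected by crop() below:
#
#   >>> vol = np.zeros((3, 3))
#   >>> vol[1, 1] = 1
#   >>> bounding_box(vol)
#   ([1, 1], [2, 2])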
def crop(vol, mins, maxs):
""" Crops the input volume.
Parameters
----------
vol : ndarray
Volume to crop.
    mins : array
        Array containing minimum index of each dimension.
    maxs : array
        Array containing maximum index of each dimension.
Returns
-------
vol : ndarray
The cropped volume.
"""
return vol[tuple(slice(i, j) for i, j in zip(mins, maxs))]
def median_otsu(input_volume, median_radius=4, numpass=4,
autocrop=False, vol_idx=None, dilate=None):
""" Simple brain extraction tool method for images from DWI data
It uses a median filter smoothing of the input_volumes `vol_idx` and an
automatic histogram Otsu thresholding technique, hence the name
*median_otsu*.
    This function is inspired by Mrtrix's bet which has default values
``median_radius=3``, ``numpass=2``. However, from tests on multiple 1.5T
and 3T data from GE, Philips, Siemens, the most robust choice is
``median_radius=4``, ``numpass=4``.
Parameters
----------
input_volume : ndarray
ndarray of the brain volume
median_radius : int
Radius (in voxels) of the applied median filter(default 4)
numpass: int
Number of pass of the median filter (default 4)
autocrop: bool, optional
if True, the masked input_volume will also be cropped using the bounding
box defined by the masked data. Should be on if DWI is upsampled to 1x1x1
resolution. (default False)
vol_idx : None or array, optional
1D array representing indices of ``axis=3`` of a 4D `input_volume`
None (the default) corresponds to ``(0,)`` (assumes first volume in 4D array)
dilate : None or int, optional
number of iterations for binary dilation
Returns
-------
maskedvolume : ndarray
Masked input_volume
mask : 3D ndarray
The binary brain mask
"""
if len(input_volume.shape) == 4:
if vol_idx is not None:
b0vol = np.mean(input_volume[..., tuple(vol_idx)], axis=3)
else:
b0vol = input_volume[..., 0].copy()
else:
b0vol = input_volume.copy()
# Make a mask using a multiple pass median filter and histogram thresholding.
mask = multi_median(b0vol, median_radius, numpass)
thresh = otsu(mask)
mask = mask > thresh
if dilate is not None:
cross = generate_binary_structure(3, 1)
mask = binary_dilation(mask, cross, iterations=dilate)
# Auto crop the volumes using the mask as input_volume for bounding box computing.
if autocrop:
mins, maxs = bounding_box(mask)
mask = crop(mask, mins, maxs)
croppedvolume = crop(input_volume, mins, maxs)
maskedvolume = applymask(croppedvolume, mask)
else:
maskedvolume = applymask(input_volume, mask)
return maskedvolume, mask
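# Example usage (added sketch with synthetic data; a real input would be a
# DWI volume, e.g. loaded with nibabel):
#
#   >>> data = np.random.random((40, 40, 40))
#   >>> maskedvolume, mask = median_otsu(data, median_radius=2, numpass=1)
#   >>> mask.dtype
#   dtype('bool')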
def segment_from_cfa(tensor_fit, roi, threshold, return_cfa=False):
"""
Segment the cfa inside roi using the values from threshold as bounds.
Parameters
-------------
tensor_fit : TensorFit object
TensorFit object
roi : ndarray
A binary mask, which contains the bounding box for the segmentation.
threshold : array-like
An iterable that defines the min and max values to use for the thresholding.
The values are specified as (R_min, R_max, G_min, G_max, B_min, B_max)
return_cfa : bool, optional
If True, the cfa is also returned.
Returns
----------
mask : ndarray
Binary mask of the segmentation.
cfa : ndarray, optional
Array with shape = (..., 3), where ... is the shape of tensor_fit.
The color fractional anisotropy, ordered as a nd array with the last
dimension of size 3 for the R, G and B channels.
"""
FA = fractional_anisotropy(tensor_fit.evals)
FA[np.isnan(FA)] = 0
FA = np.clip(FA, 0, 1) # Clamp the FA to remove degenerate tensors
cfa = color_fa(FA, tensor_fit.evecs)
roi = np.asarray(roi, dtype=bool)
include = (cfa >= threshold[0::2]) & (cfa <= threshold[1::2]) & roi[..., None]
mask = np.all(include, axis=-1)
if return_cfa:
return mask, cfa
return mask
def clean_cc_mask(mask):
"""
Cleans a segmentation of the corpus callosum so no random pixels are included.
Parameters
----------
mask : ndarray
Binary mask of the coarse segmentation.
Returns
-------
new_cc_mask : ndarray
Binary mask of the cleaned segmentation.
"""
from scipy.ndimage.measurements import label
new_cc_mask = np.zeros(mask.shape)
# Flood fill algorithm to find contiguous regions.
labels, numL = label(mask)
volumes = [len(labels[np.where(labels == l_idx+1)]) for l_idx in np.arange(numL)]
biggest_vol = np.arange(numL)[np.where(volumes == np.max(volumes))] + 1
new_cc_mask[np.where(labels == biggest_vol)] = 1
return new_cc_mask
|
{
"content_hash": "fbd61beed04bc769b7e856429550d023",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 86,
"avg_line_length": 30.28352490421456,
"alnum_prop": 0.6266447368421053,
"repo_name": "JohnGriffiths/dipy",
"id": "fd3f2736e46bb25c9081f9b0cf00a99c4ed00772",
"size": "7904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dipy/segment/mask.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2844"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2655097"
}
],
"symlink_target": ""
}
|
"""
@file
@brief Shortcut for ``setuphelper``
"""
from .setup_creation import create_empty_folder_setup, create_folder_setup
|
{
"content_hash": "bed80f4d0266e87ac599547a273457f4",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 74,
"avg_line_length": 21,
"alnum_prop": 0.7380952380952381,
"repo_name": "sdpython/pymyinstall",
"id": "cd0675f52d831e1b8f89e7f617d8cacf768dd6e6",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pymyinstall/setuphelper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "19179"
},
{
"name": "HTML",
"bytes": "1294549"
},
{
"name": "Inno Setup",
"bytes": "7565"
},
{
"name": "Julia",
"bytes": "688"
},
{
"name": "Jupyter Notebook",
"bytes": "38720"
},
{
"name": "Python",
"bytes": "2387148"
},
{
"name": "R",
"bytes": "4370"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
}
|
import simuvex
class setvbuf(simuvex.SimProcedure):
def run(self, stream, buf, type_, size):
return 0
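# Added note: this SimProcedure stubs out libc's setvbuf(), ignoring its
# arguments and modeling the call as always succeeding (returning 0).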
|
{
"content_hash": "9d27109a38a49ae11907a8697c4a65c6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 23,
"alnum_prop": 0.6782608695652174,
"repo_name": "zhuyue1314/simuvex",
"id": "a71640edf4695b00f47abc642d449160f93c4bb5",
"size": "115",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "simuvex/procedures/libc___so___6/setvbuf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "543807"
}
],
"symlink_target": ""
}
|
"""
To change the version of the entire package, just edit this one location.
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
version_info = (0, 2, 2)
__version__ = ".".join(map(str, version_info))
|
{
"content_hash": "c7519d2001839fbd8dbc9fa77461ec2b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 27.2,
"alnum_prop": 0.6323529411764706,
"repo_name": "Factr/newspaper",
"id": "aa77537ed05c18d1443a1657cab3d1330fedac43",
"size": "296",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "newspaper/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159393"
}
],
"symlink_target": ""
}
|
from matplotlib.colors import Normalize,LinearSegmentedColormap,cbook,ma
from matplotlib.cm import datad
import numpy as np
"""These functions allow for the creation of 'divergent' colorbars
out of *any* two existing matplotlib colorbars. They are
stitched together such that the split between the two colorbars
always happens at zero.
Usage
-----
from densityplot import *
from pylab import *
#create some data to plot
x = arange(0, pi, 0.1)
y = arange(0, 2*pi, 0.1)
X, Y = meshgrid(x,y)
Z = cos(X) * sin(Y) * 10
#stitch together two colorbars
#(the second colorbar is automatically reversed)
dub_cm=mk_dub_color('cubehelix_purple','cubehelix_green')
#set the normalization such that the split is at zero
n=MidNorm(vmax=10,vmin=-5)
#use this colormap and normalization to plot the image
imshow(Z, interpolation='nearest', cmap=dub_cm, norm=n)
colorbar()
show()
"""
class MidNorm(Normalize):
"""A subclass on Normalize to map all pisitive values to
the range [.5,1] and all negitve valeus to the range
[0,.5). This means 0 will always be mapped to the
*middle* of a colorbar.
Usage:
from desnityplot import *
#make a normialization that maps [-5,10] to
#[0,1] such that 0 is mapped to 0.5
n=MidNorm(vmax=10,vmin=-5)
"""
def __init__(self,vmin=None,vmax=None,clip=False):
Normalize.__init__(self,vmin,vmax,clip)
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > 0:
raise ValueError("minvalue must be less than 0")
if vmax < 0:
raise ValueError("maxvalue must be more than 0")
elif vmin == vmax:
result.fill(0) # Or should it be all masked? Or 0.5?
else:
vmin = float(vmin)
vmax = float(vmax)
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# ma division is very slow; we can take a shortcut
resdat = result.data
resdat[resdat>0] /= vmax
resdat[resdat<0] /= -vmin
resdat=resdat/2.+0.5
result = np.ma.array(resdat, mask=result.mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
val=2*(val-0.5)
val[val>0]*=vmax
val[val<0]*=-vmin
return val
        else:
            val = 2 * (value - 0.5)
            if val < 0:
                return val * (-vmin)
            return val * vmax
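# Example (added sketch): with MidNorm(vmin=-5, vmax=10) the endpoints and
# zero map to the ends and middle of the colormap:
#
#   >>> n = MidNorm(vmin=-5, vmax=10)
#   >>> float(n(-5.0)), float(n(0.0)), float(n(10.0))
#   (0.0, 0.5, 1.0)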
def mk_dub_color(cmHigh,cmLow):
"""
    This function takes two matplotlib colormaps and makes a single
    colormap out of them.
Parameters
----------
    cmHigh : name of mpl colormap (as string) to be used for
             high values (normalized values > 0.5)
             *note* this colormap will be reversed
    cmLow : name of mpl colormap (as string) to be used for
            low values (normalized values < 0.5)
Return
------
dub_color : matplotlib colormap
Usage
-----
from densityplot import *
from pylab import *
dub_cm=mk_dub_color('cubehelix_purple','cubehelix_green')
"""
cH=datad[cmHigh] #get the color dict for cmHigh
cL=datad[cmLow] #get the color dict for cmLow
norm_high=Normalize(vmin=0.5,vmax=1.0)
norm_low=Normalize(vmin=0.0,vmax=0.5)
def dub_color_get(cH,cL,key):
def color(x):
hdx=(x>=0.5)
ldx=(x<0.5)
out=np.zeros(len(x))
if hdx.any():
xn=norm_high(x[hdx])
xn=abs(xn-1) #invert this color map so it goes white to black
out[hdx]=cH[key](xn)
if ldx.any():
xn=norm_low(x[ldx])
out[ldx]=cL[key](xn)
return out
return color
dub_color_dict={'red':dub_color_get(cH,cL,'red'),
'green':dub_color_get(cH,cL,'green'),
'blue':dub_color_get(cH,cL,'blue')}
return LinearSegmentedColormap('dub_color',dub_color_dict,256)
if __name__=='__main__':
from custom_cm import *
from pylab import *
#create some data to plot
x = arange(0, pi, 0.1)
y = arange(0, 2*pi, 0.1)
X, Y = meshgrid(x,y)
Z = cos(X) * sin(Y) * 10
#stitch together two colorbars
#(the second colorbar is automatically reversed)
dub_cm=mk_dub_color('cubehelix_purple','cubehelix_green')
#set the normalization such that the split is at zero
n=MidNorm(vmax=10,vmin=-5)
    #use this colormap and normalization to plot the image
    imshow(Z, interpolation='nearest', cmap=dub_cm, norm=n)
colorbar()
show()
|
{
"content_hash": "cd3d33744b699f1cac716a8709d67bba",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 78,
"avg_line_length": 32.88535031847134,
"alnum_prop": 0.5800890954871198,
"repo_name": "CKrawczyk/densityplot",
"id": "bb574636eb21e83a832690c2699e10fde89c4387",
"size": "5163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "densityplot/new_norm.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47856"
}
],
"symlink_target": ""
}
|
"""Tests for the mobile_app HTTP API."""
# pylint: disable=redefined-outer-name,unused-import
import pytest
from homeassistant.components.mobile_app.const import CONF_SECRET, DOMAIN
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.setup import async_setup_component
from .const import REGISTER, RENDER_TEMPLATE
async def test_registration(hass, hass_client):
"""Test that registrations happen."""
try:
# pylint: disable=unused-import
from nacl.secret import SecretBox # noqa: F401
from nacl.encoding import Base64Encoder # noqa: F401
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
import json
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
api_client = await hass_client()
resp = await api_client.post(
'/api/mobile_app/registrations', json=REGISTER
)
assert resp.status == 201
register_json = await resp.json()
assert CONF_WEBHOOK_ID in register_json
assert CONF_SECRET in register_json
entries = hass.config_entries.async_entries(DOMAIN)
assert entries[0].data['app_data'] == REGISTER['app_data']
assert entries[0].data['app_id'] == REGISTER['app_id']
assert entries[0].data['app_name'] == REGISTER['app_name']
assert entries[0].data['app_version'] == REGISTER['app_version']
assert entries[0].data['device_name'] == REGISTER['device_name']
assert entries[0].data['manufacturer'] == REGISTER['manufacturer']
assert entries[0].data['model'] == REGISTER['model']
assert entries[0].data['os_name'] == REGISTER['os_name']
assert entries[0].data['os_version'] == REGISTER['os_version']
assert entries[0].data['supports_encryption'] == \
REGISTER['supports_encryption']
keylen = SecretBox.KEY_SIZE
key = register_json[CONF_SECRET].encode("utf-8")
key = key[:keylen]
key = key.ljust(keylen, b'\0')
payload = json.dumps(RENDER_TEMPLATE['data']).encode("utf-8")
data = SecretBox(key).encrypt(payload,
encoder=Base64Encoder).decode("utf-8")
container = {
'type': 'render_template',
'encrypted': True,
'encrypted_data': data,
}
resp = await api_client.post(
'/api/webhook/{}'.format(register_json[CONF_WEBHOOK_ID]),
json=container
)
assert resp.status == 200
webhook_json = await resp.json()
assert 'encrypted_data' in webhook_json
decrypted_data = SecretBox(key).decrypt(webhook_json['encrypted_data'],
encoder=Base64Encoder)
decrypted_data = decrypted_data.decode("utf-8")
assert json.loads(decrypted_data) == {'one': 'Hello world'}
|
{
"content_hash": "fbf0ac789890a0ae9fdb6767c2b5e0d9",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 75,
"avg_line_length": 33.901234567901234,
"alnum_prop": 0.6504005826656956,
"repo_name": "jabesq/home-assistant",
"id": "80f01315f705e9390b023e242edf44fe9227abc8",
"size": "2746",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/mobile_app/test_http_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from freight.api.serializer import serialize
from freight.testutils import TestCase
class TaskSerializerTest(TestCase):
def test_locked(self):
user = self.create_user()
repo = self.create_repo()
app = self.create_app(repository=repo)
result = serialize(app)
assert result['id'] == str(app.id)
assert result['name'] == app.name
|
{
"content_hash": "3a876dc8719fdbff3e1b1079ad278182",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 46,
"avg_line_length": 28.133333333333333,
"alnum_prop": 0.6635071090047393,
"repo_name": "klynton/freight",
"id": "6c02cecebce6f1aec33d2fc48a25f82fe540895c",
"size": "422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/api/serializer/test_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6955"
},
{
"name": "HTML",
"bytes": "918"
},
{
"name": "JavaScript",
"bytes": "23744"
},
{
"name": "Makefile",
"bytes": "808"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "166440"
},
{
"name": "Shell",
"bytes": "395"
}
],
"symlink_target": ""
}
|
import argparse
import sympy
import sys
from sympy import Symbol as Sym
class DuoSymbol:
def __init__(self, name, value):
self.name = name
self.value = float(value)
def __repr__(self):
return "DuoSymbol('{}', {})".format(self.name, self.value)
class KinematicEquation:
def __init__(self, func, variables):
self.func = func
self.variables = variables
def solve(self, symbols, for_var):
solved = sympy.solve(self.func, for_var)
vals = [(sym.name, sym.value) for sym in symbols]
return [eq.subs(vals) for eq in solved]
def __str__(self):
return str(self.func)
def solve(symbols, looking_for):
def find_suitable_equation(variables):
for equation in equations:
if equation.variables <= variables:
return equation
variables = {sym.name for sym in symbols}
variables.add(looking_for)
eq = find_suitable_equation(variables)
if eq:
return eq.solve(symbols, looking_for)
raise ValueError
equations = set()
equations.add(KinematicEquation(
0.5 * Sym('a') * (Sym('t') ** 2) + (Sym('Vi') * Sym('t')) - Sym('Dx'),
set(['a', 't', 'Vi', 'Dx'])))
equations.add(KinematicEquation(
0.5 * (Sym('Vf') + Sym('Vi')) * Sym('t') - Sym('Dx'),
set(['Vf', 'Vi', 'Dx', 't'])))
equations.add(KinematicEquation(
Sym('Vi') + Sym('a') * Sym('t') - Sym('Vf'),
set(['Vf', 'Vi', 'a', 't'])))
equations.add(KinematicEquation(
Sym('Vi') ** 2 + 2 * Sym('a') * Sym('Dx') - Sym('Vf') ** 2,
set(['Vi', 'Dx', 'a', 'Vf'])))
parser = argparse.ArgumentParser()
parser.add_argument('variables', nargs='+', help='t=3.2')
parser.add_argument('find', help='a')
args = parser.parse_args()
symbols = set()
for var in args.variables:
name, value = var.split('=')
symbol = DuoSymbol(name, value)
symbols.add(symbol)
try:
result = solve(symbols, args.find)
except ValueError:
print('Not possible with given values')
sys.exit(1)
for eq in result:
print(eq)
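# Example invocation (added sketch; exact output formatting depends on
# sympy). Given initial velocity, acceleration and time, the first
# kinematic equation is selected and solved for the displacement Dx:
#
#   $ python kinematic.py Vi=0 a=9.8 t=3 Dx
#   44.1000000000000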
|
{
"content_hash": "c61c5e4d46e0691d24ca4a27649d718d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 71,
"avg_line_length": 24.324675324675326,
"alnum_prop": 0.6449546182594768,
"repo_name": "kochman/kinematic",
"id": "1c236a6a891ebc09446f7c5a5de17e68bdbce23e",
"size": "1897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kinematic.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1897"
}
],
"symlink_target": ""
}
|
import os, sys
import imp
from authorizenet import apicontractsv1
from authorizenet.apicontrollers import *
constants = imp.load_source('modulename', 'constants.py')
from decimal import *
def approve_or_decline_held_transaction(transactionId):
merchantAuth = apicontractsv1.merchantAuthenticationType()
merchantAuth.name = constants.apiLoginId
merchantAuth.transactionKey = constants.transactionKey
requesttype = apicontractsv1.heldTransactionRequestType()
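    # The "action" field of a held-transaction update is either "approve"
    # (used here) or "decline".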
requesttype.action = "approve"
requesttype.refTransId = transactionId
transactionrequest = apicontractsv1.updateHeldTransactionRequest()
transactionrequest.merchantAuthentication = merchantAuth
transactionrequest.heldTransactionRequest = requesttype
transactionRequestController = updateHeldTransactionController(transactionrequest)
transactionRequestController.execute()
response = transactionRequestController.getresponse()
if response is not None:
if response.messages.resultCode == "Ok":
if hasattr(response.transactionResponse, 'messages') == True:
print ('Successfully updated transaction with Transaction ID: %s' % response.transactionResponse.transId)
print ('Transaction Response Code: %s' % response.transactionResponse.responseCode)
print ('Message Code: %s' % response.transactionResponse.messages.message[0].code)
print ('Description: %s' % response.transactionResponse.messages.message[0].description)
else:
print ('Failed Transaction.')
if hasattr(response.transactionResponse, 'errors') == True:
print ('Error Code: %s' % str(response.transactionResponse.errors.error[0].errorCode))
print ('Error message: %s' % response.transactionResponse.errors.error[0].errorText)
else:
print ('Failed Transaction.')
if hasattr(response, 'transactionResponse') == True and hasattr(response.transactionResponse, 'errors') == True:
print ('Error Code: %s' % str(response.transactionResponse.errors.error[0].errorCode))
print ('Error message: %s' % response.transactionResponse.errors.error[0].errorText)
else:
print ('Error Code: %s' % response.messages.message[0]['code'].text)
print ('Error message: %s' % response.messages.message[0]['text'].text)
else:
print ('Null Response.')
return response
if(os.path.basename(__file__) == os.path.basename(sys.argv[0])):
    approve_or_decline_held_transaction(constants.transactionId)
|
{
"content_hash": "4409e6e5abf390f69785227fd9bd1cf7",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 124,
"avg_line_length": 50.056603773584904,
"alnum_prop": 0.6871466264606106,
"repo_name": "AuthorizeNet/sample-code-python",
"id": "ce9947e8585b5a3031fcb44a808f8a5c219dbdca",
"size": "2653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FraudManagement/approve-or-decline-held-transaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "212236"
}
],
"symlink_target": ""
}
|
import csv
import json
import time
import itertools
from datetime import timedelta
from django import http
from django.core.exceptions import PermissionDenied
from django.core.files.storage import get_storage_class
from django.db.transaction import non_atomic_requests
from django.http import HttpResponse
from django.template import loader
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.cache import add_never_cache_headers, patch_cache_control
from django.utils.encoding import force_str
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.amo.decorators import allow_cross_site_request
from olympia.amo.utils import AMOJSONEncoder
from olympia.core.languages import ALL_LANGUAGES
from olympia.stats.decorators import addon_view_stats, bigquery_api_view
from olympia.stats.forms import DateForm
from .utils import get_updates_series, get_download_series
logger = olympia.core.logger.getLogger('z.apps.stats.views')
SERIES_GROUPS = ('day', 'week', 'month')
SERIES_GROUPS_DATE = ('date', 'week', 'month') # Backwards compat.
SERIES_FORMATS = ('json', 'csv')
SERIES = (
'downloads',
'usage',
'overview',
'sources',
'os',
'locales',
'versions',
'apps',
'countries',
'mediums',
'contents',
'campaigns',
)
storage = get_storage_class()()
def csv_fields(series):
"""
Figure out all the keys in the `data` dict for csv columns.
Returns (series, fields). The series only contains the `data` dicts, plus
`count` and `date` from the top level.
"""
rv = []
fields = set()
for row in series:
fields.update(row['data'])
rv.append(row['data'])
row['data'].update(count=row['count'], date=row['date'])
# Sort the fields before returning them - we don't care much about column
# ordering, but it helps make the tests stable.
return rv, sorted(fields, key=lambda field: '' if not field else field)
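# Example (added sketch): csv_fields flattens a stats series for the CSV
# writer:
#
#   >>> series = [{'date': '2021-01-01', 'count': 5, 'data': {'firefox': 5}}]
#   >>> rows, fields = csv_fields(series)
#   >>> fields
#   ['firefox']
#   >>> rows[0] == {'firefox': 5, 'count': 5, 'date': '2021-01-01'}
#   True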
def extract(dicts):
"""Turn a list of dicts like we store in ES into one big dict.
Also works if the list of dicts is nested inside another dict.
>>> extract([{'k': 'a', 'v': 1}, {'k': 'b', 'v': 2}])
{'a': 1, 'b': 2}
>>> extract({'k': 'a', 'v': 1})
{'a': 1}
>>> extract([{'mykey': [{'k': 'a', 'v': 1}, {'k': 'b', 'v': 2}]}])
{'mykey': {'a': 1, 'b': 2}}
>>> extract({'mykey': [{'k': 'a', 'v': 1}, {'k': 'b', 'v': 2}]})
{'mykey': {'a': 1, 'b': 2}}
>>> extract([{'mykey': {'k': 'a', 'v': 1}}])
{'mykey': {'a': 1}}
>>> extract({'mykey': {'k': 'a', 'v': 1}})
{'mykey': {'a': 1}}
"""
def _extract_value(data):
# We are already dealing with a dict. If it has 'k' and 'v' keys,
# then we can just return that.
if 'k' in data and 'v' in data:
return ((data['k'], data['v']),)
# Otherwise re-extract the value.
return ((k, extract(v)) for k, v in data.items())
if hasattr(dicts, 'items'):
# If it's already a dict, we just need to call extract_value which will
# iterate if necessary.
return dict(_extract_value(dicts))
extracted = {}
for d in dicts:
extracted.update(extract(d))
return extracted
@bigquery_api_view
@addon_view_stats
@non_atomic_requests
def overview_series(request, addon, group, start, end, format):
"""Combines downloads_series and updates_series into one payload."""
date_range = check_series_params_or_404(group, start, end, format)
start_date, end_date = date_range
check_stats_permission(request, addon)
downloads = get_download_series(
addon=addon, start_date=start_date, end_date=end_date
)
updates = get_updates_series(addon=addon, start_date=start_date, end_date=end_date)
series = zip_overview(downloads, updates)
return render_json(request, addon, series)
def zip_overview(downloads, updates):
# Jump through some hoops to make sure we're matching dates across download
# and update series and inserting zeroes for any missing days.
downloads, updates = list(downloads), list(updates)
if not (downloads or updates):
return
start_date = None
if downloads:
start_date = downloads[0]['date']
if updates:
d = updates[0]['date']
start_date = max(start_date, d) if start_date else d
downloads, updates = iter(downloads), iter(updates)
def iterator(series):
try:
item = next(series)
next_date = start_date
while True:
if item['date'] == next_date:
yield item['count']
item = next(series)
else:
yield 0
next_date = next_date - timedelta(days=1)
except StopIteration:
pass
series = itertools.zip_longest(iterator(downloads), iterator(updates))
for idx, (dl_count, up_count) in enumerate(series):
yield {
'date': start_date - timedelta(days=idx),
'data': {'downloads': dl_count, 'updates': up_count},
}
@bigquery_api_view
@addon_view_stats
@non_atomic_requests
def downloads_series(request, addon, group, start, end, format):
"""Generate download counts grouped by ``group`` in ``format``."""
date_range = check_series_params_or_404(group, start, end, format)
start_date, end_date = date_range
check_stats_permission(request, addon)
series = get_download_series(addon=addon, start_date=start_date, end_date=end_date)
if format == 'csv':
return render_csv(request, addon, series, ['date', 'count'])
elif format == 'json':
return render_json(request, addon, series)
@bigquery_api_view
@addon_view_stats
@non_atomic_requests
def download_breakdown_series(request, addon, group, start, end, format, source):
"""Generate download source breakdown."""
date_range = check_series_params_or_404(group, start, end, format)
start_date, end_date = date_range
check_stats_permission(request, addon)
series = get_download_series(
addon=addon,
start_date=start_date,
end_date=end_date,
source=source,
)
series = rename_unknown_values(series)
if format == 'csv':
series, fields = csv_fields(series)
return render_csv(request, addon, series, ['date', 'count'] + list(fields))
elif format == 'json':
return render_json(request, addon, series)
def rename_unknown_values(series):
"""Rename 'Unknown' values to '(none)' for download stats."""
for row in series:
if 'data' in row:
row['data'] = dict(
('(none)', count) if key == 'Unknown' else (key, count)
for key, count in row['data'].items()
)
yield row
@bigquery_api_view
@addon_view_stats
@non_atomic_requests
def usage_series(request, addon, group, start, end, format):
"""Generate ADU counts grouped by ``group`` in ``format``."""
date_range = check_series_params_or_404(group, start, end, format)
check_stats_permission(request, addon)
series = get_updates_series(
addon=addon, start_date=date_range[0], end_date=date_range[1]
)
if format == 'csv':
return render_csv(request, addon, series, ['date', 'count'])
elif format == 'json':
return render_json(request, addon, series)
@bigquery_api_view
@addon_view_stats
@non_atomic_requests
def usage_breakdown_series(request, addon, group, start, end, format, field):
"""Generate ADU breakdown of ``field``."""
date_range = check_series_params_or_404(group, start, end, format)
check_stats_permission(request, addon)
fields = {
'applications': 'apps',
'countries': 'countries',
'locales': 'locales',
'oses': 'os',
'statuses': 'status',
'versions': 'versions',
}
source = fields[field]
series = get_updates_series(
addon=addon, start_date=date_range[0], end_date=date_range[1], source=source
)
if field == 'locales':
series = process_locales(series)
if format == 'csv':
if field == 'applications':
series = flatten_applications(series)
series, fields = csv_fields(series)
return render_csv(request, addon, series, ['date', 'count'] + list(fields))
elif format == 'json':
return render_json(request, addon, series)
def flatten_applications(series):
"""Convert app guids to pretty names, flatten count structure."""
for row in series:
if 'data' in row:
new = {}
for app, versions in row['data'].items():
app = amo.APP_GUIDS.get(app)
if not app:
continue
# str() to decode the gettext proxy.
appname = str(app.pretty)
for ver, count in versions.items():
key = ' '.join([appname, ver])
new[key] = count
row['data'] = new
yield row
def process_locales(series):
"""Convert locale codes to pretty names, skip any unknown locales."""
languages = {key.lower(): value['native'] for key, value in ALL_LANGUAGES.items()}
for row in series:
if 'data' in row:
new = {}
for key, count in row['data'].items():
if key and key.lower() in languages:
k = f'{languages[key.lower()]} ({key})'
new[k] = count
row['data'] = new
yield row
def check_series_params_or_404(group, start, end, format):
"""Check common series parameters."""
if (group not in SERIES_GROUPS) or (format not in SERIES_FORMATS):
raise http.Http404
return get_daterange_or_404(start, end)
def check_stats_permission(request, addon):
"""
Check if user is allowed to view stats for ``addon``.
Raises PermissionDenied if user is not allowed.
Raises Http404 if ``addon`` does not have stats pages.
"""
user = request.user
if addon.type not in amo.ADDON_TYPES_WITH_STATS:
raise http.Http404
can_view = user.is_authenticated and (
addon.has_author(user)
or acl.action_allowed_for(request.user, amo.permissions.STATS_VIEW)
)
if not can_view:
raise PermissionDenied
@addon_view_stats
@non_atomic_requests
def stats_report(request, addon, report):
check_stats_permission(request, addon)
slug_or_id = addon.id if addon.is_deleted else addon.slug
stats_base_url = reverse('stats.overview', args=[slug_or_id])
view = get_report_view(request)
return TemplateResponse(
request,
'stats/reports/%s.html' % report,
context={
'addon': addon,
'report': report,
'stats_base_url': stats_base_url,
'view': view,
},
)
def get_report_view(request):
"""Parse and validate a pair of YYYMMDD date strings."""
dates = DateForm(data=request.GET)
if not dates.is_valid():
logger.info('Dates parsed were not valid.')
return {}
if dates.cleaned_data.get('start') and dates.cleaned_data.get('end'):
return {
'range': 'custom',
'start': dates.cleaned_data['start'].strftime('%Y%m%d'),
'end': dates.cleaned_data['end'].strftime('%Y%m%d'),
}
elif dates.cleaned_data.get('last'):
return {
'range': dates.cleaned_data['last'],
'last': str(dates.cleaned_data['last']) + ' days',
}
logger.info('Missing "start and end" or "last"')
return {}
def get_daterange_or_404(start, end):
"""Parse and validate a pair of YYYYMMDD date strings."""
dates = DateForm(data={'start': start, 'end': end})
if not dates.is_valid():
logger.info('Dates parsed were not valid.')
raise http.Http404
return (dates.cleaned_data['start'], dates.cleaned_data['end'])
def fudge_headers(response, stats):
"""Alter cache headers. Don't cache content where data could be missing."""
if not stats:
add_never_cache_headers(response)
else:
seven_days = 60 * 60 * 24 * 7
patch_cache_control(response, max_age=seven_days)
@allow_cross_site_request
@non_atomic_requests
def render_csv(request, addon, stats, fields, title=None, show_disclaimer=None):
"""Render a stats series in CSV."""
# Start with a header from the template.
ts = time.strftime('%c %z')
context = {
'addon': addon,
'timestamp': ts,
'title': title,
'show_disclaimer': show_disclaimer,
}
content = loader.render_to_string('stats/csv_header.txt', context, request=request)
response = HttpResponse(content, content_type='text/csv; charset=utf-8')
# Add CSV content by writing directly to the response.
writer = csv.DictWriter(response, fields, restval=0, extrasaction='ignore')
writer.writeheader()
writer.writerows(stats)
fudge_headers(response, stats)
return response
@allow_cross_site_request
@non_atomic_requests
def render_json(request, addon, stats):
"""Render a stats series in JSON."""
response = http.HttpResponse(content_type='application/json')
# Django's encoder supports date and datetime.
json.dump(stats, response, cls=AMOJSONEncoder)
fudge_headers(response, force_str(response.content) != json.dumps([]))
return response
|
{
"content_hash": "2b64cdcf4f204d2f7f608ce857ed66b8",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 87,
"avg_line_length": 31.301624129930396,
"alnum_prop": 0.6150767178118746,
"repo_name": "wagnerand/addons-server",
"id": "0fac8073bb2215fda2b2f3236d110165b92ca252",
"size": "13491",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/stats/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245987"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290334"
},
{
"name": "JavaScript",
"bytes": "749163"
},
{
"name": "Less",
"bytes": "211386"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6780019"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django import VERSION as django_version
from django import forms
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from .utils import get_icon_choices
CHOICES = get_icon_choices()
class IconWidget(forms.Select):
def __init__(self, attrs=None):
super(IconWidget, self).__init__(attrs, choices=CHOICES)
if django_version >= (1, 11):
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
option = super(IconWidget, self).create_option(name, value, label, selected, index, subindex=subindex, attrs=attrs)
option["attrs"]["data-icon"] = value
return option
else:
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option data-icon="{0}" value="{0}"{1}>{2}</option>',
option_value,
selected_html,
force_text(option_label),
)
class Media:
js = (
'fontawesome/js/django_fontawesome.js',
'fontawesome/select2/select2.min.js'
)
css = {
'all': (
getattr(settings, 'FONTAWESOME_CSS_URL', 'fontawesome/css/font-awesome.min.css'),
'fontawesome/select2/select2.css',
'fontawesome/select2/select2-bootstrap.css'
)
}
|
{
"content_hash": "4dd9c2f58131f315375a61697e49ae26",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 127,
"avg_line_length": 36,
"alnum_prop": 0.5893939393939394,
"repo_name": "redouane/django-fontawesome",
"id": "3589cddb4d11f4cc0104f173cf3f41f24970528d",
"size": "1980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fontawesome/widgets.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2019"
},
{
"name": "Python",
"bytes": "7419"
}
],
"symlink_target": ""
}
|
"""
Manage a single benchmark and, when run from the commandline, report
its runtime to stdout.
"""
# !!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!
# This file, unlike most others, must be compatible with as many
# versions of Python as possible and have no dependencies outside of
# the Python standard library. This is the only bit of code from asv
# that is imported into the benchmarking process.
import copy
import cProfile as profile
import ctypes
from ctypes.util import find_library
import errno
import imp
import inspect
import json
import os
import re
import sys
import textwrap
import timeit
# The best timer we can use is time.process_time, but it is not
# available in the Python stdlib until Python 3.3. This is a ctypes
# backport for Pythons that don't have it.
try:
from time import process_time
except ImportError: # Python <3.3
if sys.platform.startswith("linux"):
CLOCK_PROCESS_CPUTIME_ID = 2 # time.h
clockid_t = ctypes.c_int
time_t = ctypes.c_long
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', time_t), # seconds
('tv_nsec', ctypes.c_long) # nanoseconds
]
_clock_gettime = ctypes.CDLL(
find_library('rt'), use_errno=True).clock_gettime
_clock_gettime.argtypes = [clockid_t, ctypes.POINTER(timespec)]
def process_time():
tp = timespec()
if _clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ctypes.byref(tp)) < 0:
err = ctypes.get_errno()
msg = errno.errorcode[err]
if err == errno.EINVAL:
msg += (
"The clk_id (4) specified is not supported on this system")
raise OSError(err, msg)
return tp.tv_sec + tp.tv_nsec * 1e-9
elif sys.platform == 'darwin':
        RUSAGE_SELF = 0  # sys/resource.h
time_t = ctypes.c_long
suseconds_t = ctypes.c_int32
class timeval(ctypes.Structure):
_fields_ = [
('tv_sec', time_t),
('tv_usec', suseconds_t)
]
class rusage(ctypes.Structure):
_fields_ = [
('ru_utime', timeval),
('ru_stime', timeval),
('ru_maxrss', ctypes.c_long),
('ru_ixrss', ctypes.c_long),
('ru_idrss', ctypes.c_long),
('ru_isrss', ctypes.c_long),
('ru_minflt', ctypes.c_long),
('ru_majflt', ctypes.c_long),
('ru_nswap', ctypes.c_long),
('ru_inblock', ctypes.c_long),
('ru_oublock', ctypes.c_long),
('ru_msgsnd', ctypes.c_long),
('ru_msgrcv', ctypes.c_long),
('ru_nsignals', ctypes.c_long),
('ru_nvcsw', ctypes.c_long),
('ru_nivcsw', ctypes.c_long)
]
_getrusage = ctypes.CDLL(find_library('c'), use_errno=True).getrusage
_getrusage.argtypes = [ctypes.c_int, ctypes.POINTER(rusage)]
def process_time():
ru = rusage()
if _getrusage(RUSAGE_SELF, ctypes.byref(ru)) < 0:
err = ctypes.get_errno()
msg = errno.errorcode[err]
if err == errno.EINVAL:
msg += (
"The clk_id (0) specified is not supported on this system")
raise OSError(err, msg)
return float(ru.ru_utime.tv_sec + ru.ru_utime.tv_usec * 1e-6 +
ru.ru_stime.tv_sec + ru.ru_stime.tv_usec * 1e-6)
else:
# Fallback to default timer
process_time = timeit.default_timer
def _get_attr(source, name, ignore_case=False):
if ignore_case:
attrs = [getattr(source, key) for key in dir(source)
if key.lower() == name.lower()]
if len(attrs) > 1:
raise ValueError(
"{0} contains multiple {1} functions.".format(
source.__name__, name))
elif len(attrs) == 1:
return attrs[0]
else:
return None
else:
return getattr(source, name, None)
def _get_all_attrs(sources, name, ignore_case=False):
for source in sources:
val = _get_attr(source, name, ignore_case=ignore_case)
if val is not None:
yield val
def _get_first_attr(sources, name, default, ignore_case=False):
for val in _get_all_attrs(sources, name, ignore_case=ignore_case):
return val
return default
def get_benchmark_type_from_name(name):
for bm_type in benchmark_types:
if bm_type.name_regex.match(name):
return bm_type
return None
class Benchmark(object):
"""
Represents a single benchmark.
"""
# The regex of the name of function or method to be considered as
    # this type of benchmark. The default in the base class will
# match nothing.
name_regex = re.compile('^$')
def __init__(self, name, func, attr_sources):
self.name = name
self.func = func
self._attr_sources = attr_sources
self._setups = list(_get_all_attrs(attr_sources, 'setup', True))[::-1]
self._teardowns = list(_get_all_attrs(attr_sources, 'teardown', True))
self.timeout = _get_first_attr(attr_sources, "timeout", 60.0)
self.code = textwrap.dedent(inspect.getsource(self.func))
self.type = "base"
self.unit = "unit"
def __repr__(self):
return '<{0} {1}>'.format(self.__class__.__name__, self.name)
@classmethod
def from_function(cls, func):
"""
Create a benchmark object from a free function.
"""
module = inspect.getmodule(func)
name = '.'.join(
[module.__name__, func.__name__])
return cls(name, func, [func, inspect.getmodule(func)])
@classmethod
def from_class_method(cls, klass, method_name):
"""
Create a benchmark object from a method.
Parameters
----------
klass : type
The class containing the method.
method_name : str
The name of the method.
"""
module = inspect.getmodule(klass)
instance = klass()
func = getattr(instance, method_name)
name = '.'.join(
[module.__name__, klass.__name__, method_name])
return cls(name, func, [func, instance, module])
@classmethod
def from_name(cls, root, name, quick=False):
"""
Create a benchmark from a fully-qualified benchmark name.
Parameters
----------
root : str
Path to the root of a benchmark suite.
name : str
Fully-qualified name to a specific benchmark.
"""
def find_on_filesystem(root, parts, package):
path = os.path.join(root, parts[0])
if package:
new_package = package + '.' + parts[0]
else:
new_package = parts[0]
if os.path.isfile(path + '.py'):
module = imp.load_source(
new_package, path + '.py')
return find_in_module(module, parts[1:])
elif os.path.isdir(path):
return find_on_filesystem(
path, parts[1:], new_package)
def find_in_module(module, parts):
attr = getattr(module, parts[0], None)
if attr is not None:
if inspect.isfunction(attr):
if len(parts) == 1:
bm_type = get_benchmark_type_from_name(parts[0])
if bm_type is not None:
return bm_type.from_function(attr)
elif inspect.isclass(attr):
if len(parts) == 2:
bm_type = get_benchmark_type_from_name(parts[1])
if bm_type is not None:
return bm_type.from_class_method(attr, parts[1])
raise ValueError(
"Could not find benchmark '{0}'".format(name))
parts = name.split('.')
benchmark = find_on_filesystem(root, parts, '')
if quick:
benchmark.repeat = 1
benchmark.number = 1
return benchmark
def do_setup(self):
for setup in self._setups:
setup()
def do_teardown(self):
for teardown in self._teardowns:
teardown()
def do_run(self):
return self.run()
def do_profile(self, filename=None):
def method_caller():
run()
if filename is not None:
if hasattr(method_caller, 'func_code'):
code = method_caller.func_code
else:
code = method_caller.__code__
profile.runctx(
code, {'run': self.run}, {}, filename)
class TimeBenchmark(Benchmark):
"""
Represents a single benchmark for timing.
"""
name_regex = re.compile(
'^(Time[A-Z_].+)|(time_.+)$')
def __init__(self, name, func, attr_sources):
Benchmark.__init__(self, name, func, attr_sources)
self.type = "time"
self.unit = "seconds"
self.goal_time = _get_first_attr(attr_sources, 'goal_time', 2.0)
self.timer = _get_first_attr(attr_sources, 'timer', process_time)
self.repeat = _get_first_attr(
attr_sources, 'repeat', timeit.default_repeat)
self.number = int(_get_first_attr(attr_sources, 'number', 0))
def run(self):
number = self.number
timer = timeit.Timer(
stmt=self.func,
timer=self.timer)
if number == 0:
# determine number automatically so that
# goal_time / 10 <= total time < goal_time
number = 1
for i in range(1, 10):
if timer.timeit(number) >= self.goal_time / 10.0:
break
number *= 10
self.number = number
all_runs = timer.repeat(self.repeat, self.number)
best = min(all_runs) / number
return best
class MemBenchmark(Benchmark):
"""
Represents a single benchmark for tracking the memory consumption
of an object.
"""
name_regex = re.compile(
'^(Mem[A-Z_].+)|(mem_.+)$')
def __init__(self, name, func, attr_sources):
Benchmark.__init__(self, name, func, attr_sources)
self.type = "memory"
self.unit = "bytes"
def run(self):
# We can't import asizeof directly, because we haven't loaded
# the asv package in the benchmarking process.
path = os.path.join(
os.path.dirname(__file__), 'extern', 'asizeof.py')
asizeof = imp.load_source('asizeof', path)
obj = self.func()
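        # asizeof([obj, obj]) counts the shared object once, whereas
        # asizeof([obj, copy.copy(obj)]) counts two distinct objects; the
        # difference is the size of one copy, with the list overhead
        # cancelled out.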
sizeof2 = asizeof.asizeof([obj, obj])
sizeofcopy = asizeof.asizeof([obj, copy.copy(obj)])
return sizeofcopy - sizeof2
class TrackBenchmark(Benchmark):
"""
Represents a single benchmark for tracking an arbitrary value.
"""
name_regex = re.compile(
'^(Track[A-Z_].+)|(track_.+)$')
def __init__(self, name, func, attr_sources):
Benchmark.__init__(self, name, func, attr_sources)
self.type = _get_first_attr(attr_sources, "type", "track")
self.unit = _get_first_attr(attr_sources, "unit", "unit")
def run(self):
return self.func()
# TODO: Support the creation of custom benchmark types
benchmark_types = [
TimeBenchmark, MemBenchmark, TrackBenchmark
]
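# A minimal example suite (hypothetical file, not part of asv) showing the
# naming conventions the regexes above match:
#
#     def time_range_sum():          # collected as a TimeBenchmark
#         sum(range(1000))
#
#     def mem_list():                # collected as a MemBenchmark
#         return [0] * 10000
#
#     def track_magic_number():      # collected as a TrackBenchmark
#         return 42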
def disc_class(klass):
"""
Iterate over all benchmarks in a given class.
For each method with a special name, yields a Benchmark
object.
"""
for key, val in inspect.getmembers(klass):
bm_type = get_benchmark_type_from_name(key)
if bm_type is not None and (inspect.isfunction(val) or inspect.ismethod(val)):
yield bm_type.from_class_method(klass, key)
def disc_objects(module):
"""
Iterate over all benchmarks in a given module, returning
Benchmark objects.
For each class definition, looks for any methods with a
special name.
For each free function, yields all functions with a special
name.
"""
for key, val in module.__dict__.items():
if key.startswith('_'):
continue
if inspect.isclass(val):
for benchmark in disc_class(val):
yield benchmark
elif inspect.isfunction(val):
bm_type = get_benchmark_type_from_name(key)
if bm_type is not None:
yield bm_type.from_function(val)
def disc_files(root, package=''):
"""
Iterate over all .py files in a given directory tree.
"""
for filename in os.listdir(root):
path = os.path.join(root, filename)
if os.path.isfile(path):
filename, ext = os.path.splitext(filename)
if ext == '.py':
module = imp.load_source(package + filename, path)
yield module
elif os.path.isdir(path):
for x in disc_files(path, package + filename + "."):
yield x
def disc_benchmarks(root):
"""
Discover all benchmarks in a given directory tree.
"""
for module in disc_files(root):
for benchmark in disc_objects(module):
yield benchmark
def list_benchmarks(root):
"""
List all of the discovered benchmarks to stdout as JSON.
"""
# Streaming of JSON back out to the master process
sys.stdout.write('[')
first = True
for benchmark in disc_benchmarks(root):
if not first:
sys.stdout.write(', ')
clean = dict(
(k, v) for (k, v) in benchmark.__dict__.items()
if isinstance(v, (str, int, float, list, dict)) and not
k.startswith('_'))
json.dump(clean, sys.stdout, skipkeys=True)
first = False
sys.stdout.write(']')
if __name__ == '__main__':
mode = sys.argv[1]
args = sys.argv[2:]
if mode == 'discover':
benchmark_dir = args[0]
list_benchmarks(benchmark_dir)
sys.exit(0)
elif mode == 'run':
benchmark_dir, benchmark_id, quick, profile_path = args
quick = (quick == 'True')
if profile_path == 'None':
profile_path = None
benchmark = Benchmark.from_name(
benchmark_dir, benchmark_id, quick=quick)
benchmark.do_setup()
result = benchmark.do_run()
if profile_path is not None:
benchmark.do_profile(profile_path)
benchmark.do_teardown()
# Write the output value as the last line of the output.
sys.stdout.write('\n')
sys.stdout.write(json.dumps(result))
sys.stdout.write('\n')
sys.stdout.flush()
# Not strictly necessary, but it's explicit about the successful
# exit code that we want.
sys.exit(0)
sys.stderr.write("Unknown mode {0}\n".format(mode))
sys.exit(1)
|
{
"content_hash": "ea4f0fe0dbea910032f879a23d1db84e",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 86,
"avg_line_length": 31.018480492813143,
"alnum_prop": 0.5483251688070965,
"repo_name": "cpcloud/asv",
"id": "c177a584e03e4b7ede37967f3cd8d64bcab891ef",
"size": "15195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asv/benchmark.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2209"
},
{
"name": "JavaScript",
"bytes": "35038"
},
{
"name": "Python",
"bytes": "184213"
},
{
"name": "Shell",
"bytes": "7144"
}
],
"symlink_target": ""
}
|
"""
Combiner for sort with Hadoop streaming.
"""
from __future__ import print_function
import sys
def main(stdin):
"""
    Take unsorted standard input from the mapper and print it back as a
    sorted block. The tab-separated value is just a placeholder.
"""
for line_num in sorted(stdin):
# Remove trailing newlines.
line_num = line_num.rstrip()
        # Omit lines that lack a tab-separated value (e.g. empty lines).
try:
(line, num) = line_num.rsplit('\t', 1)
print(("{line}\t{num}").format(line=line, num=num))
except ValueError:
pass
return None
if __name__ == '__main__':
main(stdin=sys.stdin)
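# A quick local sanity check (a sketch, not part of the streaming job):
#
#     import io
#     main(stdin=io.StringIO(u"banana\t2\napple\t1\n"))
#     # prints "apple\t1" then "banana\t2"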
|
{
"content_hash": "ef339ad448b0a82e6c6ea0833971065c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 24.6,
"alnum_prop": 0.5788617886178862,
"repo_name": "stharrold/bench_mapr",
"id": "0983434e90c81fbce56709960612a729d55d50ee",
"size": "637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sort/hadoop_streaming/combiner.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71271"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import configparser
from jnpr.space import xmlutil
from jnpr.space import rest
class TestDevices(object):
def setup_class(self):
# Extract Space URL, userid, password from config file
config = configparser.RawConfigParser()
import os
config.read(os.path.dirname(os.path.realpath(__file__)) +
"/test.conf")
url = config.get('space', 'url')
user = config.get('space', 'user')
passwd = config.get('space', 'passwd')
# Create a Space REST end point
self.space = rest.Space(url, user, passwd)
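        # test.conf is assumed to look roughly like this (values are
        # placeholders):
        #
        #   [space]
        #   url = https://space.example.com
        #   user = super
        #   passwd = secret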
def test_devices_raw_config(self):
devices_list = self.space.device_management.devices.get(
filter_={'managedStatus': 'In Sync'})
assert len(devices_list) > 1, "Not enough devices on Space"
for d in devices_list:
raw = d.configurations.raw.get()
assert raw is not None
raw_config = xmlutil.xml2obj(raw.configuration.text)
assert raw_config.version[:7] == d.OSVersion[:7]
if hasattr(raw_config, 'groups'):
for g in raw_config.groups:
print("Found config group %s on device %s" % (g.name, d.name))
def test_devices_raw_config_post(self):
devices_list = self.space.device_management.devices.get(
filter_={'managedStatus': 'In Sync'})
assert len(devices_list) > 1, "Not enough devices on Space"
for d in devices_list:
raw = d.configurations.raw.post(xpaths=['/configuration/version',
'/configuration/interfaces/interface[starts-with(name, "ge-")]'])
c = raw.configuration
if hasattr(c, 'interface'):
for i in c.interface:
print(i.name)
assert i.name.pyval.startswith('ge-')
else:
print('Device %s has no interfaces' % d.name)
assert c.version[:7] == d.OSVersion[:7]
def test_devices_expanded_config(self):
devices_list = self.space.device_management.devices.get(
filter_={'managedStatus': 'In Sync'})
assert len(devices_list) > 1, "Not enough devices on Space"
for d in devices_list:
exp = d.configurations.expanded.get()
assert exp
exp_config = xmlutil.xml2obj(exp.configuration.text)
import pytest
with pytest.raises(AttributeError):
assert exp_config.groups is None
assert exp_config.version[:7] == d.OSVersion[:7]
def test_devices_expanded_config_post(self):
devices_list = self.space.device_management.devices.get(
filter_={'managedStatus': 'In Sync'},
sortby=['name', 'platform'])
assert len(devices_list) > 1, "Not enough devices on Space"
for d in devices_list:
exp = d.configurations.expanded.post(xpaths=['/configuration/version',
'/configuration/interfaces/interface[starts-with(name, "ge-")]'])
c = exp.configuration
if hasattr(c, 'interface'):
for i in c.interface:
print(i.name)
assert i.name.pyval.startswith('ge-'), \
"Intf name %s failed check" % i.name
"""
if isinstance(i.name, str):
print(i.name)
assert i.name.startswith('ge-'), \
"Intf name %s failed check" % i.name
else:
print(i.name.pyval)
assert i.name.pyval.startswith('ge-'), \
"Intf name %s failed check" % i.name.data
"""
assert c.version[:7] == d.OSVersion[:7]
def test_devices_configs(self):
devices_list = self.space.device_management.devices.get(
filter_={'managedStatus': 'In Sync'})
assert len(devices_list) > 1, "Not enough devices on Space"
for d in devices_list:
configs = d.configurations.get()
assert len(configs) == 2
for c in configs:
xml_config = c.get()
xml_config = xmlutil.xml2obj(xml_config.configuration.text)
assert xml_config.version[:7] == d.OSVersion[:7]
def test_devices_scripts(self):
devices_list = self.space.device_management.devices.get()
assert len(devices_list) > 1, "Not enough devices on Space"
for d in devices_list[:1]:
try:
scripts = d.associated_scripts.get()
assert len(scripts) > 0
for s in scripts:
assert s.script_device_association.device_name == d.name
            except Exception:
pass
def test_devices_softwares(self):
devices_list = self.space.device_management.devices.get()
assert len(devices_list) > 1, "Not enough devices on Space"
for d in devices_list[:1]:
try:
sws = d.associated_softwares.get()
assert len(sws) >= 0
            except Exception:
pass
def test_devices_change_requests(self):
devices_list = self.space.device_management.devices.get()
assert len(devices_list) > 1, "Not enough devices on Space"
for d in devices_list[:1]:
crs = d.change_requests.get()
assert len(crs) >= 0
for cr in crs:
assert int(cr.deviceId) == int(d.key)
|
{
"content_hash": "82b2f95b0e7805bc0a8266802de76407",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 82,
"avg_line_length": 38.56953642384106,
"alnum_prop": 0.5444711538461539,
"repo_name": "Juniper/py-space-platform",
"id": "90631ed251e87df0b637402e88852baf2f95e19a",
"size": "6536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jnpr/space/test/test_devices.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "21680"
},
{
"name": "HTML",
"bytes": "22851644"
},
{
"name": "JavaScript",
"bytes": "70383"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "301916"
},
{
"name": "Smarty",
"bytes": "22058"
}
],
"symlink_target": ""
}
|
"""Tests for `multi_process_runner`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from six.moves import queue as Queue
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data(test_obj, val):
test_obj.assertEqual(val, 3)
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_return_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
def test_multi_process_runner(self):
returned_data, _ = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1),
args=(self, 3))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in returned_data:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
returned_data, _ = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec)
self.assertTrue(returned_data)
self.assertEqual(returned_data[0], 'dummy_data')
self.assertEqual(returned_data[1], 'dummy_data')
returned_data, _ = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec)
self.assertFalse(returned_data)
def test_multi_process_runner_args_passed_correctly(self):
returned_data, _ = multi_process_runner.run(
proc_func_that_return_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={'c_k': 'c_v'})
self.assertEqual(returned_data[0][0], 'a')
self.assertEqual(returned_data[0][1], 'b')
self.assertEqual(returned_data[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.')
return 'This is returned data.'
returned_data, std_stream_data = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
capture_std_stream=True)
num_string_std_stream = len(
[d for d in std_stream_data if d == 'This is something printed.'])
num_string_returned_data = len(
[d for d in returned_data if d == 'This is returned data.'])
self.assertEqual(num_string_std_stream, 2)
self.assertEqual(num_string_returned_data, 2)
def test_process_that_exits(self):
def func_to_exit_in_10_sec():
time.sleep(5)
mpr._add_return_data('foo')
time.sleep(20)
mpr._add_return_data('bar')
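    # With max_run_time=10 the subprocess is killed around t=10s, so 'foo'
    # (added at t=5) makes it into the return data while 'bar' (t=25)
    # never does.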
mpr = multi_process_runner.MultiProcessRunner(
func_to_exit_in_10_sec,
multi_worker_test_base.create_cluster_spec(num_workers=1),
max_run_time=10)
mpr.start()
returned_data, _ = mpr.join()
self.assertLen(returned_data, 1)
def test_signal_doesnt_fire_after_process_exits(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_does_nothing,
multi_worker_test_base.create_cluster_spec(num_workers=1),
max_run_time=10)
mpr.start()
mpr.join()
with self.assertRaisesRegexp(Queue.Empty, ''):
      # If the signal had fired, another message would have been added to
      # the internal queue, so verify that it is empty.
mpr._get_process_status_queue().get(block=False)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print('index {}, iteration {}'.format(self._worker_idx(), i))
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
capture_std_stream=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_result = mpr.join()[1]
# Worker 0 is terminated in the middle, so it should not have iteration 9
# printed.
self.assertIn('index 0, iteration 0', std_stream_result)
self.assertNotIn('index 0, iteration 9', std_stream_result)
self.assertIn('index 1, iteration 0', std_stream_result)
self.assertIn('index 1, iteration 9', std_stream_result)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print('index {}, iteration {}'.format(self._worker_idx(), i))
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
capture_std_stream=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_result = mpr.join()[1]
# Worker 0 is terminated in the middle, but a new worker 0 is added, so it
# should still have iteration 9 printed. Moreover, iteration 0 of worker 0
# should happen twice.
self.assertLen(
[s for s in std_stream_result if s == 'index 0, iteration 0'], 2)
self.assertIn('index 0, iteration 9', std_stream_result)
self.assertIn('index 1, iteration 0', std_stream_result)
self.assertIn('index 1, iteration 9', std_stream_result)
if __name__ == '__main__':
multi_process_runner.test_main()
|
{
"content_hash": "c2ad99409c4811f91f7bb9e8fb4413ea",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 78,
"avg_line_length": 33.98924731182796,
"alnum_prop": 0.6621322366339766,
"repo_name": "jhseu/tensorflow",
"id": "839646a5d1f7f9a8be282b274ff884e7394a908f",
"size": "7011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/multi_process_runner_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "27480"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "875455"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "80051513"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112748"
},
{
"name": "Go",
"bytes": "1853641"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1729057"
},
{
"name": "Makefile",
"bytes": "62498"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "304661"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19515"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "36791185"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "56741"
},
{
"name": "Shell",
"bytes": "685877"
},
{
"name": "Smarty",
"bytes": "35147"
},
{
"name": "Starlark",
"bytes": "3504187"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
"""
flask.ext.security.forms
~~~~~~~~~~~~~~~~~~~~~~~~
Flask-Security forms module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
import inspect
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
from flask import request, current_app
from flask_wtf import Form as BaseForm
from wtforms import TextField, PasswordField, validators, \
SubmitField, HiddenField, BooleanField, ValidationError, Field
from flask_login import current_user
from werkzeug.local import LocalProxy
from .confirmable import requires_confirmation
from .utils import verify_and_update_password, get_message, config_value
# Convenient reference
_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
_default_field_labels = {
'email': 'Email Address',
'password': 'Password',
'remember_me': 'Remember Me',
'login': 'Login',
'retype_password': 'Retype Password',
'register': 'Register',
'send_confirmation': 'Resend Confirmation Instructions',
'recover_password': 'Recover Password',
'reset_password': 'Reset Password',
'new_password': 'New Password',
'change_password': 'Change Password',
'send_login_link': 'Send Login Link'
}
class ValidatorMixin(object):
def __call__(self, form, field):
if self.message and self.message.isupper():
self.message = get_message(self.message)[0]
return super(ValidatorMixin, self).__call__(form, field)
class EqualTo(ValidatorMixin, validators.EqualTo):
pass
class Required(ValidatorMixin, validators.Required):
pass
class Email(ValidatorMixin, validators.Email):
pass
class Length(ValidatorMixin, validators.Length):
pass
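# The UPPERCASE strings below are message keys; ValidatorMixin resolves
# them through get_message() the first time each validator fires.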
email_required = Required(message='EMAIL_NOT_PROVIDED')
email_validator = Email(message='INVALID_EMAIL_ADDRESS')
password_required = Required(message='PASSWORD_NOT_PROVIDED')
password_length = Length(min=6, max=128, message='PASSWORD_INVALID_LENGTH')
def get_form_field_label(key):
return _default_field_labels.get(key, '')
def unique_user_email(form, field):
if _datastore.find_user(email=field.data) is not None:
msg = get_message('EMAIL_ALREADY_ASSOCIATED', email=field.data)[0]
raise ValidationError(msg)
def valid_user_email(form, field):
form.user = _datastore.find_user(email=field.data)
if form.user is None:
raise ValidationError(get_message('USER_DOES_NOT_EXIST')[0])
class Form(BaseForm):
def __init__(self, *args, **kwargs):
if current_app.testing:
self.TIME_LIMIT = None
super(Form, self).__init__(*args, **kwargs)
class EmailFormMixin():
email = TextField(
get_form_field_label('email'),
validators=[email_required, email_validator])
class UserEmailFormMixin():
user = None
email = TextField(
get_form_field_label('email'),
validators=[email_required, email_validator, valid_user_email])
class UniqueEmailFormMixin():
email = TextField(
get_form_field_label('email'),
validators=[email_required, email_validator, unique_user_email])
class PasswordFormMixin():
password = PasswordField(
get_form_field_label('password'), validators=[password_required])
class NewPasswordFormMixin():
password = PasswordField(
get_form_field_label('password'),
validators=[password_required, password_length])
class PasswordConfirmFormMixin():
password_confirm = PasswordField(
get_form_field_label('retype_password'),
validators=[EqualTo('password', message='RETYPE_PASSWORD_MISMATCH')])
class NextFormMixin():
next = HiddenField()
def validate_next(self, field):
if field.data:
url_next = urlsplit(field.data)
url_base = urlsplit(request.host_url)
if url_next.netloc and url_next.netloc != url_base.netloc:
field.data = ''
raise ValidationError(get_message('INVALID_REDIRECT')[0])
class RegisterFormMixin():
submit = SubmitField(get_form_field_label('register'))
def to_dict(form):
def is_field_and_user_attr(member):
return isinstance(member, Field) and \
hasattr(_datastore.user_model, member.name)
fields = inspect.getmembers(form, is_field_and_user_attr)
return dict((key, value.data) for key, value in fields)
class SendConfirmationForm(Form, UserEmailFormMixin):
"""The default forgot password form"""
submit = SubmitField(get_form_field_label('send_confirmation'))
def __init__(self, *args, **kwargs):
super(SendConfirmationForm, self).__init__(*args, **kwargs)
if request.method == 'GET':
self.email.data = request.args.get('email', None)
def validate(self):
if not super(SendConfirmationForm, self).validate():
return False
if self.user.confirmed_at is not None:
self.email.errors.append(get_message('ALREADY_CONFIRMED')[0])
return False
return True
class ForgotPasswordForm(Form, UserEmailFormMixin):
"""The default forgot password form"""
submit = SubmitField(get_form_field_label('recover_password'))
class PasswordlessLoginForm(Form, UserEmailFormMixin):
"""The passwordless login form"""
submit = SubmitField(get_form_field_label('send_login_link'))
def __init__(self, *args, **kwargs):
super(PasswordlessLoginForm, self).__init__(*args, **kwargs)
def validate(self):
if not super(PasswordlessLoginForm, self).validate():
return False
if not self.user.is_active():
self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
return False
return True
class LoginForm(Form, NextFormMixin):
"""The default login form"""
email = TextField(get_form_field_label('email'))
password = PasswordField(get_form_field_label('password'))
remember = BooleanField(get_form_field_label('remember_me'))
submit = SubmitField(get_form_field_label('login'))
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.remember.default = config_value('DEFAULT_REMEMBER_ME')
def validate(self):
if not super(LoginForm, self).validate():
return False
if self.email.data.strip() == '':
self.email.errors.append(get_message('EMAIL_NOT_PROVIDED')[0])
return False
if self.password.data.strip() == '':
self.password.errors.append(get_message('PASSWORD_NOT_PROVIDED')[0])
return False
self.user = _datastore.get_user(self.email.data)
if self.user is None:
self.email.errors.append(get_message('USER_DOES_NOT_EXIST')[0])
return False
if not self.user.password:
self.password.errors.append(get_message('PASSWORD_NOT_SET')[0])
return False
if not verify_and_update_password(self.password.data, self.user):
self.password.errors.append(get_message('INVALID_PASSWORD')[0])
return False
if requires_confirmation(self.user):
self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
return False
if not self.user.is_active():
self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
return False
return True
class ConfirmRegisterForm(Form, RegisterFormMixin,
UniqueEmailFormMixin, NewPasswordFormMixin):
pass
class RegisterForm(ConfirmRegisterForm, PasswordConfirmFormMixin):
pass
class ResetPasswordForm(Form, NewPasswordFormMixin, PasswordConfirmFormMixin):
"""The default reset password form"""
submit = SubmitField(get_form_field_label('reset_password'))
class ChangePasswordForm(Form, PasswordFormMixin):
"""The default change password form"""
new_password = PasswordField(
get_form_field_label('new_password'),
validators=[password_required, password_length])
new_password_confirm = PasswordField(
get_form_field_label('retype_password'),
validators=[EqualTo('new_password', message='RETYPE_PASSWORD_MISMATCH')])
submit = SubmitField(get_form_field_label('change_password'))
def validate(self):
if not super(ChangePasswordForm, self).validate():
return False
if self.password.data.strip() == '':
self.password.errors.append(get_message('PASSWORD_NOT_PROVIDED')[0])
return False
if not verify_and_update_password(self.password.data, current_user):
self.password.errors.append(get_message('INVALID_PASSWORD')[0])
return False
if self.password.data.strip() == self.new_password.data.strip():
self.password.errors.append(get_message('PASSWORD_IS_THE_SAME')[0])
return False
return True
|
{
"content_hash": "1c906aecb8bf1cdfb9e8b4b757e8ffe3",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 81,
"avg_line_length": 31.480836236933797,
"alnum_prop": 0.6598782512451578,
"repo_name": "maxziv/SEApp",
"id": "54876ae8727364b5da0994b76ba2e22dce94e7ed",
"size": "9059",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/lib/flask_security/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "438732"
},
{
"name": "JavaScript",
"bytes": "190797"
},
{
"name": "PHP",
"bytes": "232"
},
{
"name": "Perl",
"bytes": "36"
},
{
"name": "Python",
"bytes": "4621804"
},
{
"name": "Shell",
"bytes": "4561"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class RuleAction(Model):
"""The action that is performed when the alert rule becomes active, and when
an alert condition is resolved.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RuleEmailAction, RuleWebhookAction
All required parameters must be populated in order to send to Azure.
:param odatatype: Required. Constant filled by server.
:type odatatype: str
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odatatype': {'Microsoft.Azure.Management.Insights.Models.RuleEmailAction': 'RuleEmailAction', 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction': 'RuleWebhookAction'}
}
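    # msrest consults this map during deserialization: the 'odata.type'
    # value returned by the server selects the concrete subclass.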
def __init__(self, **kwargs):
super(RuleAction, self).__init__(**kwargs)
self.odatatype = None
|
{
"content_hash": "9c45a502e5359ca1f8486f7875dd3a65",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 187,
"avg_line_length": 31.548387096774192,
"alnum_prop": 0.6697341513292433,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "3fbc729ef113fca8554b24a81fc430ddae58854f",
"size": "1452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-monitor/azure/mgmt/monitor/models/rule_action.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
from f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver import \
iControlDriver
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import \
SystemHelper
import json
import logging
import os
import pytest
import requests
from ..testlib.bigip_client import BigIpClient
from ..testlib.fake_rpc import FakeRPCPlugin
from ..testlib.service_reader import LoadbalancerReader
from ..testlib.resource_validator import ResourceValidator
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def services():
neutron_services_filename = (
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../testdata/service_requests/test_purge_folder.json')
)
return (json.load(open(neutron_services_filename)))
@pytest.fixture(scope="module")
def bigip():
return BigIpClient(pytest.symbols.bigip_floating_ips[0],
pytest.symbols.bigip_username,
pytest.symbols.bigip_password)
@pytest.fixture
def fake_plugin_rpc(services):
rpcObj = FakeRPCPlugin(services)
return rpcObj
@pytest.fixture
def icontrol_driver(icd_config, fake_plugin_rpc):
class ConfFake(object):
def __init__(self, params):
self.__dict__ = params
for k, v in self.__dict__.items():
if isinstance(v, unicode):
self.__dict__[k] = v.encode('utf-8')
def __repr__(self):
return repr(self.__dict__)
icd = iControlDriver(ConfFake(icd_config),
registerOpts=False)
icd.plugin_rpc = fake_plugin_rpc
icd.connect()
return icd
def test_purge_folder(track_bigip_cfg, bigip, services, icd_config,
icontrol_driver):
env_prefix = icd_config['environment_prefix']
service_iter = iter(services)
validator = ResourceValidator(bigip, env_prefix)
# create loadbalancer
service = service_iter.next()
lb_reader = LoadbalancerReader(service)
folder = '{0}_{1}'.format(env_prefix, lb_reader.tenant_id())
icontrol_driver._common_service_handler(service)
assert bigip.folder_exists(folder)
# create listener
service = service_iter.next()
listener = service['listeners'][0]
icontrol_driver._common_service_handler(service)
validator.assert_virtual_valid(listener, folder)
# create pool
service = service_iter.next()
pool = service['pools'][0]
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool, folder)
# create l7policy with l7rule attached to the above created
# listener
service = service_iter.next()
icontrol_driver._common_service_handler(service)
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_policy_valid(listener, folder)
sh = SystemHelper()
sh.purge_folder_contents(bigip.bigip, folder)
# delete folder and check that it does not exist
sh.purge_folder(bigip.bigip, folder)
assert not bigip.folder_exists(folder)
|
{
"content_hash": "bf665043f0bbf0d246dce1e26160c55a",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 29.02803738317757,
"alnum_prop": 0.6748229233741146,
"repo_name": "F5Networks/f5-openstack-agent",
"id": "5b257ec16b94e733965dc00a54992eccf4594a84",
"size": "3721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/neutronless/loadbalancer/test_purge_folder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2220"
},
{
"name": "Makefile",
"bytes": "853"
},
{
"name": "Python",
"bytes": "1395055"
},
{
"name": "Ruby",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "15836"
}
],
"symlink_target": ""
}
|
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import realms
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Napoleon settings
napoleon_use_admonition_for_notes = True
# napoleon_use_ivar = True
# numpydoc settings
# numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'realms'
copyright = u"2017, Zach Mitchell"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = realms.__version__
# The full version, including alpha/beta/rc tags.
release = realms.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
# html_static_path = ['_static']
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'realmsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'realms.tex',
u'realms Documentation',
u'Zach Mitchell', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'realms',
u'realms Documentation',
[u'Zach Mitchell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'realms',
u'realms Documentation',
u'Zach Mitchell',
'realms',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "9cdc986560b72d419af0edf12ec4452a",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 76,
"avg_line_length": 30.3690036900369,
"alnum_prop": 0.701093560145808,
"repo_name": "zmitchell/realms",
"id": "bf547efadee37fafc9b4fc6e2d7c50a72a9990ff",
"size": "8671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2493"
},
{
"name": "Python",
"bytes": "49672"
}
],
"symlink_target": ""
}
|
from tgext.pluggable import PluggableSession
DBSession = PluggableSession()
def init_model(app_session):
DBSession.configure(app_session)
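# Imported last, presumably so that models can import the configured
# DBSession from this module without a circular-import problem.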
from models import TemporaryPhotosBucket
|
{
"content_hash": "a02bda2b7aae32e4d7f564867e03fb7c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 18.7,
"alnum_prop": 0.8128342245989305,
"repo_name": "gasbasd/tgapp-stroller2",
"id": "2d33ec32b7198daeed268b97983b6d54532ec9dd",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stroller2/model/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3359"
},
{
"name": "Python",
"bytes": "74545"
}
],
"symlink_target": ""
}
|
getObject = {
'id': 1000,
'globalIdentifier': '1a2b3c-1701',
'datacenter': {'id': 50, 'name': 'TEST00',
'description': 'Test Data Center'},
'billingItem': {
'id': 6327,
'recurringFee': 1.54,
'nextInvoiceTotalRecurringAmount': 16.08,
'children': [
{'nextInvoiceTotalRecurringAmount': 1},
{'nextInvoiceTotalRecurringAmount': 1},
{'nextInvoiceTotalRecurringAmount': 1},
{'nextInvoiceTotalRecurringAmount': 1},
{'nextInvoiceTotalRecurringAmount': 1},
],
'orderItem': {
'order': {
'userRecord': {
'username': 'chechu',
}
}
}
},
'primaryIpAddress': '172.16.1.100',
'hostname': 'hardware-test1',
'domain': 'test.sftlyr.ws',
'bareMetalInstanceFlag': True,
'fullyQualifiedDomainName': 'hardware-test1.test.sftlyr.ws',
'processorPhysicalCoreAmount': 2,
'memoryCapacity': 2,
'primaryBackendIpAddress': '10.1.0.2',
'networkManagementIpAddress': '10.1.0.3',
'hardwareStatus': {'status': 'ACTIVE'},
'primaryNetworkComponent': {'maxSpeed': 10, 'speed': 10},
'provisionDate': '2013-08-01 15:23:45',
'notes': 'These are test notes.',
'operatingSystem': {
'softwareLicense': {
'softwareDescription': {
'referenceCode': 'UBUNTU_12_64',
'name': 'Ubuntu',
'version': 'Ubuntu 12.04 LTS',
}
},
'passwords': [
{'username': 'root', 'password': 'abc123'}
],
},
'remoteManagementAccounts': [
{'username': 'root', 'password': 'abc123'}
],
'networkVlans': [
{
'networkSpace': 'PRIVATE',
'vlanNumber': 1800,
'id': 9653
},
{
'networkSpace': 'PUBLIC',
'vlanNumber': 3672,
'id': 19082
},
],
'tagReferences': [
{'tag': {'name': 'test_tag'}}
],
'activeTransaction': {
'transactionStatus': {
'name': 'TXN_NAME',
'friendlyName': 'Friendly Transaction Name',
'id': 6660
}
}
}
editObject = True
setTags = True
setPrivateNetworkInterfaceSpeed = True
setPublicNetworkInterfaceSpeed = True
powerOff = True
powerOn = True
powerCycle = True
rebootSoft = True
rebootDefault = True
rebootHard = True
createFirmwareUpdateTransaction = True
setUserMetadata = ['meta']
reloadOperatingSystem = 'OK'
getReverseDomainRecords = [
{'resourceRecords': [{'data': '2.0.1.10.in-addr.arpa'}]}]
bootToRescueLayer = True
getFrontendNetworkComponents = [
{'maxSpeed': 100},
{
'maxSpeed': 1000,
'networkComponentGroup': {
'groupTypeId': 2,
'networkComponents': [{'maxSpeed': 1000}, {'maxSpeed': 1000}]
}
},
{
'maxSpeed': 1000,
'networkComponentGroup': {
'groupTypeId': 2,
'networkComponents': [{'maxSpeed': 1000}, {'maxSpeed': 1000}]
}
},
{
'maxSpeed': 1000,
'networkComponentGroup': {
'groupTypeId': 2,
'networkComponents': [{'maxSpeed': 1000}, {'maxSpeed': 1000}]
}
},
{
'maxSpeed': 1000,
'networkComponentGroup': {
'groupTypeId': 2,
'networkComponents': [{'maxSpeed': 1000}, {'maxSpeed': 1000}]
}
}
]
|
{
"content_hash": "44567040f227f86b0b5f34c43a674c0b",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 73,
"avg_line_length": 28.557377049180328,
"alnum_prop": 0.5229621125143513,
"repo_name": "skraghu/softlayer-python",
"id": "26dc7c3bf690994cd282ed7874531e9f61003b8e",
"size": "3484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SoftLayer/fixtures/SoftLayer_Hardware_Server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "854"
},
{
"name": "Python",
"bytes": "1039495"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
# Hyper-parameters are exposed as keyword arguments with these defaults:
def build_model(train_data, max_features=5000, maxlen=400,
batch_size=32, embedding_dims=50,
filters=250, kernel_size=3, hidden_dims=250):
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
# model.fit(x_train, y_train,
# batch_size=batch_size,
# epochs=epochs,
# validation_data=(x_test, y_test))
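# A sketch of the data preparation the commented-out fit call assumes,
# following the Keras IMDB CNN example this module is adapted from (an
# assumption, not part of this file):
#
#     (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=5000)
#     x_train = sequence.pad_sequences(x_train, maxlen=400)
#     x_test = sequence.pad_sequences(x_test, maxlen=400)
#     model = build_model(x_train)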
|
{
"content_hash": "192f7d7e7daef01022fa49822587e9bc",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 32.96153846153846,
"alnum_prop": 0.6213535589264878,
"repo_name": "Avsecz/concise",
"id": "fe1d715110c22284baa0c572e8acaf134b42145a",
"size": "1714",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/hyopt/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1891454"
},
{
"name": "Makefile",
"bytes": "2389"
},
{
"name": "Python",
"bytes": "385188"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
}
|
class PermissionException(Exception):
pass
|
{
"content_hash": "7deda8d334daa549ad8441666d6fbebe",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 37,
"avg_line_length": 23.5,
"alnum_prop": 0.7872340425531915,
"repo_name": "davidsanfal/bii-ide",
"id": "3d820eb31df49a420ad02cd6d64d28288d0d6054",
"size": "47",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bii-ide/bii_ide/common/exception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68657"
},
{
"name": "Shell",
"bytes": "906"
}
],
"symlink_target": ""
}
|
from urlparse import urlparse, urljoin
from flask import request, redirect, url_for
from flask.ext.wtf import Form
from flask.ext.wtf import HiddenField, BooleanField, TextField, PasswordField
from flask.ext.wtf import Required, Length, EqualTo, Email, URL
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
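# e.g. with request.host_url == 'http://example.com/':
#   is_safe_url('/dashboard')        -> True
#   is_safe_url('http://evil.com/')  -> False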
def get_redirect_target():
for target in request.args.get('next'), request.referrer:
if not target:
continue
if is_safe_url(target):
return target
class RedirectForm(Form):
next = HiddenField()
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
if not self.next.data:
self.next.data = get_redirect_target() or ''
def redirect(self, endpoint='index', **values):
if is_safe_url(self.next.data):
return redirect(self.next.data)
target = get_redirect_target()
return redirect(target or url_for(endpoint, **values))
class LoginForm(RedirectForm):
email = TextField('Email address', [Required(), Email()])
password = PasswordField('Password', [Required()])
remember = BooleanField('Remember me')
class SignupForm(RedirectForm):
name = TextField('Name', [Required()])
email = TextField('Email address', [Email()])
password = PasswordField('Password', [
Required(),
EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Repeat password')
class ClientForm(Form):
name = TextField('Name', [Required()])
description = TextField('Description')
callback = TextField('Callback', [Required(), URL(require_tld=False)])
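# A minimal usage sketch, assuming a Flask app and templates exist; the
# RedirectForm machinery returns the user to the page they came from after
# a successful login. login_user() is a hypothetical auth helper.
#
# @app.route('/login', methods=['GET', 'POST'])
# def login():
#     form = LoginForm()
#     if form.validate_on_submit():
#         login_user(form.email.data, form.password.data)
#         return form.redirect('index')
#     return render_template('login.html', form=form)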
|
{
"content_hash": "abc28a7d27b3bce208a0c276c7357451",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 85,
"avg_line_length": 32.5,
"alnum_prop": 0.6527472527472528,
"repo_name": "Avamagic/mgserver-web-api",
"id": "828e64a679439690231e03b0bccf97d8a90fe8c7",
"size": "1820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mgserver/frontend/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "58516"
},
{
"name": "Python",
"bytes": "50936"
}
],
"symlink_target": ""
}
|
import os
from string import digits
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def get_setting(name, default=None):
return getattr(settings, f"LIPPUKALA_{name}", default)
def get_integer_setting(name, default=0):
try:
value = get_setting(name, default)
return int(value)
except ValueError: # pragma: no cover
raise ImproperlyConfigured(f"LIPPUKALA_{name} must be an integer (got {value!r})")
PREFIXES = get_setting("PREFIXES", {})
LITERATE_KEYSPACES = get_setting("LITERATE_KEYSPACES", {})
CODE_MIN_N_DIGITS = get_integer_setting("CODE_MIN_N_DIGITS", 10)
CODE_MAX_N_DIGITS = get_integer_setting("CODE_MAX_N_DIGITS", 10)
CODE_ALLOW_LEADING_ZEROES = bool(get_setting("CODE_ALLOW_LEADING_ZEROES", True))
PRINT_LOGO_PATH = get_setting("PRINT_LOGO_PATH")
PRINT_LOGO_SIZE_CM = get_setting("PRINT_LOGO_SIZE_CM")
if PREFIXES:
PREFIX_CHOICES = [(p, f"{p} [{t}]") for (p, t) in sorted(PREFIXES.items())]
PREFIX_MAY_BE_BLANK = False
else:
PREFIX_CHOICES = [("", "---")]
PREFIX_MAY_BE_BLANK = True
def validate_settings(): # pragma: no cover
_validate_code()
_validate_prefixes()
_validate_print()
def _validate_code():
if CODE_MIN_N_DIGITS <= 5 or CODE_MAX_N_DIGITS < CODE_MIN_N_DIGITS:
raise ImproperlyConfigured(
"The range (%d .. %d) for Lippukala code digits is invalid"
% (CODE_MIN_N_DIGITS, CODE_MAX_N_DIGITS)
)
def _validate_prefixes():
key_lengths = [len(k) for k in PREFIXES]
if key_lengths and not all(k == key_lengths[0] for k in key_lengths):
raise ImproperlyConfigured("All LIPPUKALA_PREFIXES keys must be the same length!")
for prefix in PREFIXES:
if not all(c in digits for c in prefix):
raise ImproperlyConfigured(
f"The prefix {prefix!r} has invalid characters. Only digits are allowed."
)
for prefix, literate_keyspace in list(LITERATE_KEYSPACES.items()):
if isinstance(literate_keyspace, str):
raise ImproperlyConfigured(
f"A string ({literate_keyspace!r}) was passed as the literate keyspace for prefix {prefix!r}"
)
too_short_keys = any(len(key) <= 1 for key in literate_keyspace)
maybe_duplicate = len(set(literate_keyspace)) != len(literate_keyspace)
if too_short_keys or maybe_duplicate:
raise ImproperlyConfigured(
f"The literate keyspace for prefix {prefix!r} has invalid or duplicate entries."
)
def _validate_print():
if PRINT_LOGO_PATH:
if not os.path.isfile(PRINT_LOGO_PATH):
raise ImproperlyConfigured(
f"PRINT_LOGO_PATH was defined, but does not exist ({PRINT_LOGO_PATH!r})"
)
if not all(float(s) > 0 for s in PRINT_LOGO_SIZE_CM):
raise ImproperlyConfigured(
f"PRINT_LOGO_SIZE_CM values not valid: {PRINT_LOGO_SIZE_CM!r}"
)
validate_settings()
del validate_settings # aaaand it's gone
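# A minimal configuration sketch (all values hypothetical) that the
# validators above would accept, placed in a Django project's settings.py:
#
# LIPPUKALA_PREFIXES = {"10": "Weekend ticket", "11": "Day ticket"}
# LIPPUKALA_CODE_MIN_N_DIGITS = 10
# LIPPUKALA_CODE_MAX_N_DIGITS = 10
# LIPPUKALA_PRINT_LOGO_PATH = "/srv/app/static/logo.png"  # must exist on disk
# LIPPUKALA_PRINT_LOGO_SIZE_CM = (5.0, 2.0)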
|
{
"content_hash": "bf96b2af70ab4d8b8b2bf9baa7e76bcf",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 109,
"avg_line_length": 36.05882352941177,
"alnum_prop": 0.6443719412724307,
"repo_name": "kcsry/lippukala",
"id": "fbba471e281e2ef7bdcfe9abf5514da83e66a091",
"size": "3065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lippukala/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6067"
},
{
"name": "Python",
"bytes": "32790"
}
],
"symlink_target": ""
}
|
from Child import Child
from Node import Node # noqa: I201
ATTRIBUTE_NODES = [
# token-list -> token token-list?
Node('TokenList', kind='SyntaxCollection',
element='Token'),
# attribute -> '@' identifier '('? token-list ')'?
Node('Attribute', kind='Syntax',
children=[
Child('AtSignToken', kind='AtSignToken'),
Child('AttributeName', kind='Token'),
# FIXME: more structure
Child('BalancedTokens', kind='TokenList'),
]),
# attribute-list -> attribute attribute-list?
Node('AttributeList', kind='SyntaxCollection',
element='Attribute'),
]
|
{
"content_hash": "f3ef58bcda6d2f027b759c93bab9c8a4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 55,
"avg_line_length": 31.095238095238095,
"alnum_prop": 0.5865237366003063,
"repo_name": "frootloops/swift",
"id": "6825b5e235f2a18d4e1330a40fba01da9a01468a",
"size": "653",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "utils/gyb_syntax_support/AttributeNodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "34"
},
{
"name": "C",
"bytes": "71563"
},
{
"name": "C++",
"bytes": "26067180"
},
{
"name": "CMake",
"bytes": "386418"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2438"
},
{
"name": "Emacs Lisp",
"bytes": "57055"
},
{
"name": "LLVM",
"bytes": "62046"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "333187"
},
{
"name": "Objective-C++",
"bytes": "200829"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "1018108"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "198717"
},
{
"name": "Swift",
"bytes": "21669370"
},
{
"name": "Vim script",
"bytes": "15610"
}
],
"symlink_target": ""
}
|
from test.ditestcase import DITestCase
from mock import Mock, patch
from os.path import expanduser
class ConfigurationTests(DITestCase):
def setUp(self):
super(ConfigurationTests, self).setUp()
self.patchers = {
'configparser': patch('scrolls.configuration.configparser'),
'os': patch('scrolls.configuration.os'),
}
configparser = self.patchers['configparser'].start()
self.os = self.patchers['os'].start()
self.os.path.isfile.return_value = False # default: no conf file
self.os.path.expanduser = expanduser
self.parser = Mock()
self.parser.sections.return_value = ['scrolls']
configparser.ConfigParser.return_value = self.parser
def tearDown(self):
super(ConfigurationTests, self).tearDown()
for patcher in self.patchers.values():
patcher.stop()
def test_defaults(self):
from scrolls.configuration import Configuration
config = Configuration(self.dependencies)
self.assertEqual(config.server, '0.0.0.0')
def test_useCommandlineArgs_overrides_defaults_and_config_file(self):
from scrolls.configuration import Configuration
config = Configuration(self.dependencies)
self.assertEqual(config.server, '0.0.0.0')
self.assertEqual(config.dry_run, False)
self.parser.get.side_effect = lambda s, k: 'somewhere.else'
args = Mock()
args.server = 'remote.com'
args.dry_run = True
config.useCommandlineArgs(args)
self.assertEqual(config.server, 'remote.com')
self.assertEqual(config.dry_run, True)
def test_If_config_file_reads_it(self):
from scrolls.configuration import Configuration
self.os.path.isfile.return_value = True
Configuration(self.dependencies)
self.parser.read.assert_called_with(expanduser('~/scrolls.conf'))
def test_Uses_values_in_file(self):
from scrolls.configuration import Configuration
self.os.path.isfile.return_value = True
self.parser.getboolean.side_effect = lambda s, k: True
self.parser.get.side_effect = lambda s, k: 'mothership'
config = Configuration(self.dependencies)
self.assertEqual('mothership', config.ticket_secret)
def test_selectApplications(self):
from scrolls.configuration import Configuration
config = Configuration(self.dependencies)
pkgs = {'nginx': False, 'mongodb': True}
self.filesys.hasPackage.side_effect = lambda p: pkgs[p]
apps = config.selectApplications()
self.assertEqual(apps, {
'mongodb': '/var/log/mongodb/mongodb.log'
})
self.log.selectedApplication.assert_any_call(
name='mongodb',
logfile='/var/log/mongodb/mongodb.log'
)
pkgs = {'nginx': True, 'mongodb': False}
self.filesys.hasPackage.side_effect = lambda p: pkgs[p]
config = Configuration(self.dependencies)
apps = config.selectApplications()
self.assertEqual(apps, {
'nginx-access': '/var/log/nginx/access.log',
'nginx-error': '/var/log/nginx/error.log',
})
self.log.selectedApplication.assert_any_call(
name='nginx-access',
logfile='/var/log/nginx/access.log'
)
self.log.selectedApplication.assert_any_call(
name='nginx-error',
logfile='/var/log/nginx/error.log'
)
|
{
"content_hash": "01dfe0173e09fa9bf976d11d21448265",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 73,
"avg_line_length": 40.160919540229884,
"alnum_prop": 0.6402404121350888,
"repo_name": "ilogue/scrolls",
"id": "4eaf4889d56f479c07fa6ee6cf49c372111f18fd",
"size": "3494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/configuration_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1497"
},
{
"name": "Python",
"bytes": "81915"
}
],
"symlink_target": ""
}
|
"""Django Template Tags for the socialprofile module"""
|
{
"content_hash": "8d4ca3e5e42ffebf94c5bb3594a7621a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 55,
"avg_line_length": 56,
"alnum_prop": 0.7678571428571429,
"repo_name": "DLRSP/django-sp",
"id": "08d57a26c9f0df54635171b0a4798d697cbca24d",
"size": "56",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/socialprofile/templatetags/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11526"
},
{
"name": "HTML",
"bytes": "20344"
},
{
"name": "JavaScript",
"bytes": "11529"
},
{
"name": "Python",
"bytes": "192665"
}
],
"symlink_target": ""
}
|
"""Library containing utility functions used for Chrome-specific build tasks."""
from __future__ import print_function
import ast
import functools
import glob
import os
import re
import shlex
import shutil
import sys
from chromite.lib import failures_lib
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# Taken from external/gyp.git/pylib.
def _NameValueListToDict(name_value_list):
"""Converts Name-Value list to dictionary.
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = {}
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
def ProcessShellFlags(defines):
"""Validate and convert a string of shell style flags to a dictionary."""
assert defines is not None
return _NameValueListToDict(shlex.split(defines))
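# For example, a sketch of the conversion documented above:
#   ProcessShellFlags('is_debug=false use_goma rate=3')
#   returns {'is_debug': 'false', 'use_goma': True, 'rate': 3}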
class Conditions(object):
"""Functions that return conditions used to construct Path objects.
Condition functions returned by the public methods have signature
f(gn_args, staging_flags). For descriptions of gn_args and
staging_flags see docstring for StageChromeFromBuildDir().
"""
@classmethod
def _GnSetTo(cls, flag, value, gn_args, _staging_flags):
val = gn_args.get(flag)
return val == value
@classmethod
def _StagingFlagSet(cls, flag, _gn_args, staging_flags):
return flag in staging_flags
@classmethod
def _StagingFlagNotSet(cls, flag, gn_args, staging_flags):
return not cls._StagingFlagSet(flag, gn_args, staging_flags)
@classmethod
def GnSetTo(cls, flag, value):
"""Returns condition that tests a gn flag is set to a value."""
return functools.partial(cls._GnSetTo, flag, value)
@classmethod
def StagingFlagSet(cls, flag):
"""Returns condition that tests a staging_flag is set."""
return functools.partial(cls._StagingFlagSet, flag)
@classmethod
def StagingFlagNotSet(cls, flag):
"""Returns condition that tests a staging_flag is not set."""
return functools.partial(cls._StagingFlagNotSet, flag)
class MultipleMatchError(failures_lib.StepFailure):
"""A glob pattern matches multiple files but a non-dir dest was specified."""
class MissingPathError(failures_lib.StepFailure):
"""An expected path is non-existant."""
class MustNotBeDirError(failures_lib.StepFailure):
"""The specified path should not be a directory, but is."""
class GetRuntimeDepsError(failures_lib.StepFailure):
"""Unable to get runtime deps for a build target."""
class GnIsolateMapFileError(failures_lib.StepFailure):
"""Failed to parse gn isolate map file."""
class Copier(object):
"""File/directory copier.
Provides destination stripping and permission setting functionality.
"""
def __init__(self, strip_bin=None, strip_flags=None, default_mode=0o644,
dir_mode=0o755, exe_mode=0o755):
"""Initialization.
Args:
strip_bin: Path to the program used to strip binaries. If set to None,
binaries will not be stripped.
strip_flags: A list of flags to pass to the |strip_bin| executable.
default_mode: Default permissions to set on files.
dir_mode: Mode to set for directories.
exe_mode: Permissions to set on executables.
"""
self.strip_bin = strip_bin
self.strip_flags = strip_flags
self.default_mode = default_mode
self.dir_mode = dir_mode
self.exe_mode = exe_mode
@staticmethod
def Log(src, dest, directory):
sep = ' [d] -> ' if directory else ' -> '
logging.debug('%s %s %s', src, sep, dest)
def _CopyFile(self, src, dest, path):
"""Perform the copy.
Args:
src: The path of the file/directory to copy.
dest: The exact path of the destination. Does nothing if it already
exists.
path: The Path instance containing copy operation modifiers (such as
Path.exe, Path.strip, etc.)
"""
assert not os.path.isdir(src), '%s: Not expecting a directory!' % src
# This file has already been copied by an earlier Path.
if os.path.exists(dest):
return
osutils.SafeMakedirs(os.path.dirname(dest), mode=self.dir_mode)
if path.exe and self.strip_bin and path.strip and os.path.getsize(src) > 0:
strip_flags = (['--strip-unneeded'] if self.strip_flags is None else
self.strip_flags)
cros_build_lib.dbg_run(
[self.strip_bin] + strip_flags + ['-o', dest, src])
shutil.copystat(src, dest)
else:
shutil.copy2(src, dest)
mode = path.mode
if mode is None:
mode = self.exe_mode if path.exe else self.default_mode
os.chmod(dest, mode)
def Copy(self, src_base, dest_base, path, sloppy=False):
"""Copy artifact(s) from source directory to destination.
Args:
src_base: The directory to apply the src glob pattern match in.
dest_base: The directory to copy matched files to. |Path.dest|.
path: A Path instance that specifies what is to be copied.
sloppy: If set, ignore when mandatory artifacts are missing.
Returns:
A list of the artifacts copied.
"""
copied_paths = []
src = os.path.join(src_base, path.src)
if not src.endswith('/') and os.path.isdir(src):
raise MustNotBeDirError('%s must not be a directory\n'
'Aborting copy...' % (src,))
paths = glob.glob(src)
if not paths:
if path.optional:
logging.debug('%s does not exist and is optional. Skipping.', src)
elif sloppy:
logging.warning('%s does not exist and is required. Skipping anyway.',
src)
else:
msg = ('%s does not exist and is required.\n'
'You can bypass this error with --sloppy.\n'
'Aborting copy...' % src)
raise MissingPathError(msg)
elif len(paths) > 1 and path.dest and not path.dest.endswith('/'):
raise MultipleMatchError(
'Glob pattern %r has multiple matches, but dest %s '
'is not a directory.\n'
'Aborting copy...' % (path.src, path.dest))
else:
for p in paths:
rel_src = os.path.relpath(p, src_base)
if path.IsBlacklisted(rel_src):
continue
if path.dest is None:
rel_dest = rel_src
elif path.dest.endswith('/'):
rel_dest = os.path.join(path.dest, os.path.basename(p))
else:
rel_dest = path.dest
assert not rel_dest.endswith('/')
dest = os.path.join(dest_base, rel_dest)
copied_paths.append(p)
self.Log(p, dest, os.path.isdir(p))
if os.path.isdir(p):
for sub_path in osutils.DirectoryIterator(p):
rel_path = os.path.relpath(sub_path, p)
sub_dest = os.path.join(dest, rel_path)
if path.IsBlacklisted(rel_path):
continue
if sub_path.endswith('/'):
osutils.SafeMakedirs(sub_dest, mode=self.dir_mode)
else:
self._CopyFile(sub_path, sub_dest, path)
else:
self._CopyFile(p, dest, path)
return copied_paths
class Path(object):
"""Represents an artifact to be copied from build dir to staging dir."""
DEFAULT_BLACKLIST = (r'(^|.*/)\.git($|/.*)',)
def __init__(self, src, exe=False, cond=None, dest=None, mode=None,
optional=False, strip=True, blacklist=None):
"""Initializes the object.
Args:
src: The relative path of the artifact. Can be a file or a directory.
Can be a glob pattern.
      exe: Identifies the path as either being an executable or containing
executables. Executables may be stripped during copy, and have
special permissions set. We currently only support stripping of
specified files and glob patterns that return files. If |src| is a
directory or contains directories, the content of the directory will
not be stripped.
cond: A condition (see Conditions class) to test for in deciding whether
to process this artifact.
dest: Name to give to the target file/directory. Defaults to keeping the
same name as the source.
mode: The mode to set for the matched files, and the contents of matched
directories.
optional: Whether to enforce the existence of the artifact. If unset, the
script errors out if the artifact does not exist. In 'sloppy'
mode, the Copier class treats all artifacts as optional.
strip: If |exe| is set, whether to strip the executable.
blacklist: A list of path patterns to ignore during the copy. This gets
added to a default blacklist pattern.
"""
self.src = src
self.exe = exe
self.cond = cond
self.dest = dest
self.mode = mode
self.optional = optional
self.strip = strip
self.blacklist = self.DEFAULT_BLACKLIST
if blacklist is not None:
self.blacklist += tuple(blacklist)
def IsBlacklisted(self, path):
"""Returns whether |path| is in the blacklist.
A file in the blacklist is not copied over to the staging directory.
Args:
path: The path of a file, relative to the path of this Path object.
"""
for pattern in self.blacklist:
if re.match(pattern, path):
return True
return False
def ShouldProcess(self, gn_args, staging_flags):
"""Tests whether this artifact should be copied."""
if not gn_args and not staging_flags:
return True
if self.cond and isinstance(self.cond, list):
for c in self.cond:
if not c(gn_args, staging_flags):
return False
elif self.cond:
return self.cond(gn_args, staging_flags)
return True
_ENABLE_NACL = 'enable_nacl'
_IS_CHROME_BRANDED = 'is_chrome_branded'
_IS_COMPONENT_BUILD = 'is_component_build'
_HIGHDPI_FLAG = 'highdpi'
STAGING_FLAGS = (
_HIGHDPI_FLAG,
)
C = Conditions
# In the below Path lists, if two Paths both match a file, the earlier Path
# takes precedence.
# Files shared between all deployment types.
_COPY_PATHS_COMMON = (
# Copying icudtl.dat has to be optional because in CROS, icudtl.dat will
# be installed by the package "chrome-icu", and icudtl.dat in chrome is
    # deleted in the chromeos-chrome ebuild. But we cannot delete this line
    # entirely because chromite/deploy_chrome is used outside of the ebuild
# (see https://crbug.com/1081884).
Path('icudtl.dat', optional=True),
Path('libosmesa.so', exe=True, optional=True),
# Do not strip the nacl_helper_bootstrap binary because the binutils
# objcopy/strip mangles the ELF program headers.
Path('nacl_helper_bootstrap',
exe=True,
strip=False,
cond=C.GnSetTo(_ENABLE_NACL, True)),
Path('nacl_irt_*.nexe', cond=C.GnSetTo(_ENABLE_NACL, True)),
Path('nacl_helper',
exe=True,
optional=True,
cond=C.GnSetTo(_ENABLE_NACL, True)),
Path('nacl_helper_nonsfi',
exe=True,
optional=True,
cond=C.GnSetTo(_ENABLE_NACL, True)),
Path('natives_blob.bin', optional=True),
Path('pnacl/', cond=C.GnSetTo(_ENABLE_NACL, True)),
Path('snapshot_blob.bin', optional=True),
)
_COPY_PATHS_APP_SHELL = (
Path('app_shell', exe=True),
Path('extensions_shell_and_test.pak'),
) + _COPY_PATHS_COMMON
_COPY_PATHS_CHROME = (
Path('chrome', exe=True),
Path('chrome-wrapper'),
Path('chrome_100_percent.pak'),
Path('chrome_200_percent.pak', cond=C.StagingFlagSet(_HIGHDPI_FLAG)),
# TODO(jperaza): make the handler required when Crashpad is enabled.
Path('crashpad_handler', exe=True, optional=True),
Path('dbus/', optional=True),
Path('keyboard_resources.pak'),
Path('libassistant.so', exe=True, optional=True),
Path('libmojo_core.so', exe=True),
# The ARC++ mojo_core libraries are pre-stripped and don't play well with
# the binutils stripping tools, hence stripping is disabled here.
Path('libmojo_core_arc32.so', exe=True, strip=False),
Path('libmojo_core_arc64.so', exe=True, strip=False),
# Widevine CDM is already pre-stripped. In addition, it doesn't
# play well with the binutils stripping tools, so skip stripping.
# Optional for arm64 builds (http://crbug.com/881022)
Path('libwidevinecdm.so',
exe=True,
strip=False,
cond=C.GnSetTo(_IS_CHROME_BRANDED, True),
optional=C.GnSetTo('target_cpu', 'arm64')),
# In component build, copy so files (e.g. libbase.so) except for the
# blacklist.
Path('*.so',
blacklist=(r'libwidevinecdm.so',),
exe=True,
cond=C.GnSetTo(_IS_COMPONENT_BUILD, True)),
Path('locales/*.pak', optional=True),
Path('locales/*.pak.gz', optional=True),
Path('Packages/chrome_content_browser/manifest.json', optional=True),
Path('Packages/chrome_content_gpu/manifest.json', optional=True),
Path('Packages/chrome_content_plugin/manifest.json', optional=True),
Path('Packages/chrome_content_renderer/manifest.json', optional=True),
Path('Packages/chrome_content_utility/manifest.json', optional=True),
Path('Packages/chrome_mash/manifest.json', optional=True),
Path('Packages/chrome_mash_content_browser/manifest.json', optional=True),
Path('Packages/content_browser/manifest.json', optional=True),
Path('resources/chromeos/'),
Path('resources.pak'),
Path('xdg-settings'),
Path('*.png'),
) + _COPY_PATHS_COMMON
_COPY_PATHS_MAP = {
'app_shell': _COPY_PATHS_APP_SHELL,
'chrome': _COPY_PATHS_CHROME,
}
def _FixPermissions(dest_base):
"""Last minute permission fixes."""
cros_build_lib.dbg_run(['chmod', '-R', 'a+r', dest_base])
cros_build_lib.dbg_run(
['find', dest_base, '-perm', '/110', '-exec', 'chmod', 'a+x', '{}', '+'])
def GetCopyPaths(deployment_type='chrome'):
"""Returns the list of copy paths used as a filter for staging files.
Args:
deployment_type: String describing the deployment type. Either "app_shell"
or "chrome".
Returns:
The list of paths to use as a filter for staging files.
"""
paths = _COPY_PATHS_MAP.get(deployment_type)
if paths is None:
raise RuntimeError('Invalid deployment type "%s"' % deployment_type)
return paths
def _GetGnLabel(build_dir, build_target):
"""Gets the gn label for a build target in a build dir.
Args:
build_dir: The build output directory.
build_target: The build target whose gn label to be returned.
Returns:
Gn label for the build target as a string.
"""
src_dir = os.path.dirname(os.path.dirname(build_dir))
# Look up gn label from testing/buildbot/gn_isolate_map.pyl, which contains
# a mapping of build targets to GN labels. This is faster than extracting the
# gn label from "gn ls" output.
isolate_map_file = os.path.join(src_dir, 'testing', 'buildbot',
'gn_isolate_map.pyl')
try:
isolate_map = ast.literal_eval(osutils.ReadFile(isolate_map_file))
except SyntaxError as e:
raise GnIsolateMapFileError(
'Failed to parse isolate map file "%s": %s' % (isolate_map_file, e))
  if build_target not in isolate_map:
raise GnIsolateMapFileError(
'Target %s not found in %s' % (build_target, isolate_map_file))
gn_label = isolate_map[build_target]['label']
assert gn_label.startswith('//')
return gn_label
def GetChromeRuntimeDeps(build_dir, build_target):
"""Returns a list of runtime deps files for the given build target.
Args:
build_dir: The build output directory.
build_target: A chrome build target.
Returns:
    The list of runtime deps files for |build_target|, relative to the
    directory two levels up from |build_dir|, i.e. the chrome src dir.
"""
gn_label = _GetGnLabel(build_dir, build_target)
# Runtime deps files are generated for test executables of ChromeOS build
# inside SDK env in testing/test.gni.
# https://cs.chromium.org/chromium/src/testing/test.gni?rcl=9b95cd58&l=336
# Check out test.gni if the file is missing and the slow "gn desc" code path
# is used.
generated_runtime_deps_file = os.path.join(build_dir,
'gen.runtime',
gn_label.split(':')[0].lstrip('/'),
build_target,
build_target + '.runtime_deps')
runtime_deps = None
if not os.path.exists(generated_runtime_deps_file):
logging.warning('Unable to find generated runtime deps file: %s',
generated_runtime_deps_file)
else:
runtime_deps = osutils.ReadFile(generated_runtime_deps_file).splitlines()
if not runtime_deps:
result = cros_build_lib.run(['gn', 'desc', build_dir, gn_label,
'runtime_deps'],
capture_output=True, encoding='utf-8')
if result.returncode != 0:
raise GetRuntimeDepsError('Failed to get runtime deps for: %s' %
build_target)
runtime_deps = result.output.splitlines()
# |runtime_deps| is relative to |build_dir|. Make them relative to |src_dir|.
src_dir = os.path.dirname(os.path.dirname(build_dir))
rebased_runtime_deps = []
for f in runtime_deps:
rebased = os.path.relpath(os.path.abspath(os.path.join(build_dir, f)),
src_dir)
    # Dirs from a "data" rule in a gn file do not have a trailing '/' in
    # runtime deps. Ensure such dirs end with a trailing '/'.
if os.path.isdir(rebased) and not rebased.endswith('/'):
rebased += '/'
rebased_runtime_deps.append(rebased)
return rebased_runtime_deps
def GetChromeTestCopyPaths(build_dir, test_target):
"""Returns the list of copy paths for the given chrome test target.
Args:
build_dir: The build output directory that |runtime_deps| is relative to.
test_target: A build target defined in //chrome/test/BUILD.gn
Returns:
    The list of paths to stage for |runtime_deps|, relative to the directory
    two levels up from |build_dir|, i.e. the chrome src dir.
"""
  # Blacklist of file patterns for files that are in the runtime deps of the
  # test target but are not really needed to run the test. Keep in sync with
  # the list in build/chromeos/test_runner.py in chromium code.
_BLACKLIST = [
re.compile(r'.*build/android.*'),
re.compile(r'.*build/chromeos.*'),
re.compile(r'.*build/cros_cache.*'),
re.compile(r'.*testing/(?!buildbot/filters).*'),
re.compile(r'.*third_party/chromite.*'),
re.compile(r'.*tools/swarming_client.*'),
]
src_dir = os.path.dirname(os.path.dirname(build_dir))
copy_paths = []
for f in GetChromeRuntimeDeps(build_dir, test_target):
if not any(regex.match(f) for regex in _BLACKLIST):
local_path = os.path.join(src_dir, f)
is_exe = os.path.isfile(local_path) and os.access(local_path, os.X_OK)
copy_paths.append(Path(f, exe=is_exe))
return copy_paths
def StageChromeFromBuildDir(staging_dir, build_dir, strip_bin, sloppy=False,
gn_args=None, staging_flags=None,
strip_flags=None, copy_paths=_COPY_PATHS_CHROME):
"""Populates a staging directory with necessary build artifacts.
If |gn_args| or |staging_flags| are set, then we decide what to stage
based on the flag values. Otherwise, we stage everything that we know
about that we can find.
Args:
staging_dir: Path to an empty staging directory.
build_dir: Path to location of Chrome build artifacts.
strip_bin: Path to executable used for stripping binaries.
sloppy: Ignore when mandatory artifacts are missing.
    gn_args: A dictionary of args.gn values that Chrome was built with.
staging_flags: A list of extra staging flags. Valid flags are specified in
STAGING_FLAGS.
strip_flags: A list of flags to pass to the tool used to strip binaries.
copy_paths: The list of paths to use as a filter for staging files.
"""
os.mkdir(os.path.join(staging_dir, 'plugins'), 0o755)
if gn_args is None:
gn_args = {}
if staging_flags is None:
staging_flags = []
copier = Copier(strip_bin=strip_bin, strip_flags=strip_flags)
copied_paths = []
for p in copy_paths:
if p.ShouldProcess(gn_args, staging_flags):
copied_paths += copier.Copy(build_dir, staging_dir, p, sloppy=sloppy)
if not copied_paths:
raise MissingPathError("Couldn't find anything to copy!\n"
'Are you looking in the right directory?\n'
'Aborting copy...')
_FixPermissions(staging_dir)
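# A minimal usage sketch (all paths hypothetical): stage a branded,
# non-component Chrome build into an empty staging directory.
#
# StageChromeFromBuildDir(
#     staging_dir='/tmp/chrome-staging',
#     build_dir='/path/to/chrome/src/out_board/Release',
#     strip_bin='/usr/bin/strip',
#     gn_args={'is_chrome_branded': True, 'is_component_build': False},
#     staging_flags=['highdpi'])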
|
{
"content_hash": "273b63b5a4be42266cfd137ae5a403e6",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 80,
"avg_line_length": 36.10016977928693,
"alnum_prop": 0.6511781028076942,
"repo_name": "endlessm/chromium-browser",
"id": "ca024a51aa04376e7796e5df88d6db4b657f7208",
"size": "21457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/lib/chrome_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='JissRenderingService',
description='Service for operations on files.',
version='1.0',
author='Anton Iskov',
author_email='aiskov@jiss-software.com',
url='http://www.jiss-software.com',
packages=[
'core',
'handler',
'utils'
],
install_requires=[
'tornado==4.2.1',
'Pillow',
'bson'
]
)
|
{
"content_hash": "5728aa4d7d7fe8886af56595a5972e55",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 51,
"avg_line_length": 20.75,
"alnum_prop": 0.5614457831325301,
"repo_name": "jiss-software/jiss-rendering-service",
"id": "e650a17e2548277594ffeee879f549c9837d382d",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17262"
},
{
"name": "Shell",
"bytes": "272"
}
],
"symlink_target": ""
}
|
"""Test node responses to invalid locators.
"""
from test_framework.messages import msg_getheaders, msg_getblocks, MAX_LOCATOR_SZ
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
class InvalidLocatorTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0] # convenience reference to the node
node.generatetoaddress(1, node.get_deterministic_priv_key().address) # Get node out of IBD
self.log.info('Test max locator size')
block_count = node.getblockcount()
for msg in [msg_getheaders(), msg_getblocks()]:
self.log.info('Wait for disconnect when sending {} hashes in locator'.format(MAX_LOCATOR_SZ + 1))
node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1)]
node.p2p.send_message(msg)
node.p2p.wait_for_disconnect()
node.disconnect_p2ps()
self.log.info('Wait for response when sending {} hashes in locator'.format(MAX_LOCATOR_SZ))
node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ), -1)]
node.p2p.send_message(msg)
if type(msg) == msg_getheaders:
node.p2p.wait_for_header(int(node.getbestblockhash(), 16))
else:
node.p2p.wait_for_block(int(node.getbestblockhash(), 16))
if __name__ == '__main__':
InvalidLocatorTest().main()
|
{
"content_hash": "00905869cc3dcce2093af196693397a4",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 135,
"avg_line_length": 44.69230769230769,
"alnum_prop": 0.6431440045897877,
"repo_name": "UdjinM6/dash",
"id": "c8c752d1f7cbd59e82cbeccb83356f9f03152fda",
"size": "1957",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "test/functional/p2p_invalid_locator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1866352"
},
{
"name": "C++",
"bytes": "9729795"
},
{
"name": "CMake",
"bytes": "32255"
},
{
"name": "CSS",
"bytes": "113028"
},
{
"name": "Dockerfile",
"bytes": "6344"
},
{
"name": "GDB",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "235904"
},
{
"name": "Makefile",
"bytes": "128711"
},
{
"name": "Objective-C++",
"bytes": "5478"
},
{
"name": "Python",
"bytes": "1899906"
},
{
"name": "QMake",
"bytes": "1389"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "134642"
}
],
"symlink_target": ""
}
|
"""This module implements the :class:`CropObjectClass`, which
represents one possible :class:`CropObject` class, such as
a notehead or a time signature. Aside from defining the "vocabulary"
of available object classes for annotation, it also contains
some information about how objects of the given class should
be displayed in the MUSCIMarker annotation software (ordering
related object classes together in menus, implementing a sensible
color scheme, etc.). There is nothing interesting about this class;
we pulled it into the ``muscima`` package because the object
grammar (i.e. which relationships are allowed and which are not)
depends on having CropObjectClass objects as its "vocabulary",
and you will probably want to manipulate the data somehow based
on the objects' relationships (like reassembling notes from notation
primitives: notehead plus stem plus flags...), and the grammar
file is a reference for doing that.
CropObjectClass is a plain old data class, nothing interesting
about it. The only catch is that colors for rendering
in MUSCIMarker are kept as a ``#RRGGBB`` string in the XML
file, but represented in the ``CropObjectClass.color`` attribute
as a triplet of floats between 0.0 (``00``) and 1.0 (``ff``).
The ``__str__()`` method of the class will output the correct
XML representation.
**XML example**
This is what a single CropObjectClass element might look like::
<CropObjectClass>
<Id>1</Id>
<Name>notehead-empty</Name>
<GroupName>note-primitive/notehead-empty</GroupName>
<Color>#FF7566</Color>
</CropObjectClass>
See e.g. ``test/test_data/mff-muscima-classes-annot.xml``,
which is incidentally the real CropObjectClass list used
for annotating MUSCIMA++.
"""
from __future__ import division
from builtins import object
import logging
from typing import Tuple
__version__ = "1.0"
__author__ = "Jan Hajic jr."
#######################################################################
class CropObjectClass(object):
"""Information about the annotation class. We're using it
mostly to get the color of rendered CropObjects.
CropObjectClass is a Plain Old Data class, there is no other
functionality beyond simply existing and writing itself
out in the appropriate XML format.
"""
def __init__(self, clsid, name, group_name, color):
# type: (int, str, str, str) -> None
self.clsid = clsid
self.name = name
self.group_name = group_name
# Parse the string into a RGB spec.
r, g, b = hex2rgb(color)
logging.debug('CropObjectClass {0}: color {1}'.format(name, (r, g, b)))
self.color = (r, g, b)
def __str__(self):
lines = []
lines.append('<CropObjectClass>')
lines.append('\t<Id>{0}</Id>'.format(self.clsid))
lines.append('\t<Name>{0}</Name>'.format(self.name))
lines.append('\t<GroupName>{0}</GroupName>'.format(self.group_name))
lines.append('\t<Color>{0}</Color>'.format(rgb2hex(self.color)))
lines.append('\t</CropObjectClass>')
return '\n'.join(lines)
#######################################################################
# Utility functions for name/writer conversions
_hex_tr = {
'0': 0,
'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
'8': 8, '9': 9,
'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15,
}
_hex_itr = {v: k for k, v in list(_hex_tr.items())}
def parse_hex(hstr):
# type: (str) -> int
"""Convert a hexadecimal number string to integer.
>>> parse_hex('33')
51
>>> parse_hex('abe8')
44008
"""
out = 0
for i, l in enumerate(reversed(hstr)):
out += (16**i) * _hex_tr[l]
return out
def hex2rgb(hstr):
# type: (str) -> Tuple[float, float, float]
"""Parse a hex-coded color like '#AA0202' into a floating-point representation.
>>> hex2rgb('#abe822')
(0.6705882352941176, 0.9098039215686274, 0.13333333333333333)
"""
if hstr.startswith('#'):
hstr = hstr[1:]
rs, gs, bs = hstr[:2], hstr[2:4], hstr[4:]
r, g, b = parse_hex(rs), parse_hex(gs), parse_hex(bs)
return r / 255.0, g / 255.0, b / 255.0
def rgb2hex(rgb):
# type: (Tuple[float, float, float]) -> str
"""Convert a floating-point representation of R, G, B values
    between 0 and 1 (inclusive) to a hex string (starting with a
hashmark). Will use uppercase letters for 10 - 15.
>>> rgb = (0.6705882352941176, 0.9098039215686274, 0.13333333333333333)
>>> rgb2hex(rgb)
'#ABE822'
"""
rgb_int = [int(ch * 255) for ch in rgb]
return '#' + ''.join(['{:02X}'.format(ch) for ch in rgb_int])
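if __name__ == '__main__':
    # A minimal round-trip sketch: build the class entry from the XML example
    # in the module docstring and render it back out as XML.
    c = CropObjectClass(clsid=1, name='notehead-empty',
                        group_name='note-primitive/notehead-empty',
                        color='#FF7566')
    print(c)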
|
{
"content_hash": "87f25b910e41ff3e0db48d0c7bfe2c20",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 83,
"avg_line_length": 34.43065693430657,
"alnum_prop": 0.6277294890820436,
"repo_name": "hajicj/muscima",
"id": "83d725cc35ec5a081e3a23a20c7ec1c11d2ed1c9",
"size": "4717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muscima/cropobject_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "515599"
},
{
"name": "Shell",
"bytes": "1087"
}
],
"symlink_target": ""
}
|
import maya.cmds as mc
import pymel.core as pm
# appleseedMaya imports.
from appleseedMaya.logger import logger
class AEappleseedNodeTemplate(pm.ui.AETemplate):
def __init__(self, nodeName):
super(AEappleseedNodeTemplate, self).__init__(nodeName)
self.buildBody(nodeName)
logger.debug('Built custom appleseed AETemplate.')
def __buildVisibilitySection(self):
self.beginLayout('Visibility', collapse=1)
self.addControl('asVisibilityCamera', label='Camera')
self.addControl('asVisibilityLight', label='Light')
self.addControl('asVisibilityShadow', label='Shadow')
self.addControl('asVisibilityDiffuse', label='Diffuse')
self.addControl('asVisibilitySpecular', label='Specular')
self.addControl('asVisibilityGlossy', label='Glossy')
self.endLayout()
@staticmethod
def meshAlphaMapCreateNew(node, attr):
logger.debug("Alpha Map create new: %s.%s" % (node, attr))
alphaMap = mc.createNode("appleseedAlphaMap")
mc.connectAttr(alphaMap + ".shape", node + '.' + attr)
def __meshAlphaCreateNewPyCmd(self, attr):
(nodeName, attrName) = attr.split('.')
thisClass = 'appleseedMaya.AETemplates.AEappleseedNodeTemplate'
return "%s.meshAlphaMapCreateNew('%s', '%s')" % (thisClass, nodeName, attrName)
def meshAlphaMapNew(self, attr):
py_cmd = self.__meshAlphaCreateNewPyCmd(attr)
pm.attrNavigationControlGrp(
'asAlphaMap',
label='Alpha Map',
createNew='python("%s")' % py_cmd,
at=attr)
def meshAlphaMapUpdate(self, attr):
py_cmd = self.__meshAlphaCreateNewPyCmd(attr)
pm.attrNavigationControlGrp(
'asAlphaMap',
edit=True,
createNew='python("%s")' % py_cmd,
at=attr)
def buildBody(self, nodeName):
self.thisNode = pm.PyNode(nodeName)
if self.thisNode.type() == 'areaLight':
self.beginLayout('appleseed', collapse=1)
self.addControl('asIntensityScale', label='Intensity Scale')
self.addControl('asExposure', label='Exposure')
self.addSeparator()
self.addControl('asNormalize', label='Normalize')
self.addSeparator()
self.__buildVisibilitySection()
self.endLayout()
elif self.thisNode.type() in {'pointLight', 'spotLight', 'directionalLight'}:
self.beginLayout('appleseed', collapse=1)
self.addControl('asCastIndirectLight', label='Cast Indirect Light')
self.addSeparator()
self.__buildVisibilitySection()
self.endLayout()
elif self.thisNode.type() == 'bump2d':
self.beginLayout('appleseed', collapse=1)
self.addControl('asNormalMapMode', label='Map Mode')
self.addSeparator()
self.addControl('asNormalMapFlipR', label='Flip Red Channel')
self.addControl('asNormalMapFlipG', label='Flip Green Channel')
self.addControl('asNormalMapSwapRG', label='Swap R/G Channels')
self.endLayout()
elif self.thisNode.type() == 'camera':
self.beginLayout('appleseed', collapse=1)
self.addControl('asAutofocus', label='Enable Autofocus')
self.addControl('asHorizontalTarget', label='Horizontal Target')
self.addControl('asVerticalTarget', label='Vertical Target')
self.endLayout()
elif self.thisNode.type() == 'mesh':
self.beginLayout('appleseed', collapse=1)
self.__buildVisibilitySection()
self.beginLayout('Alpha Map', collapse=1)
self.callCustom(
self.meshAlphaMapNew, self.meshAlphaMapUpdate, 'asAlphaMap')
self.endLayout()
self.beginLayout('Rendering', collapse=1)
self.addControl('asMediumPriority', label='Medium Priority')
self.addSeparator()
self.addControl('asSubsurfaceSet', label='SSS Set')
self.addSeparator()
self.addControl('asIsPhotonTarget', label='SPPM Photon Target')
self.addSeparator()
self.addControl('asShadowTerminatorCorrection', label='Shadow Terminator Fix')
self.endLayout()
self.beginLayout('Export', collapse=1)
self.addControl('asExportUVs', label='Export UVs')
self.addControl('asExportNormals', label='Export Normals')
self.addControl('asSmoothTangents', label='Smooth Tangents')
self.endLayout()
self.endLayout()
elif self.thisNode.type() == 'shadingEngine':
self.beginLayout('appleseed', collapse=1)
self.addControl('asDoubleSided', label='Double Sided')
self.addSeparator()
self.addControl('asShadingSamples', label='Shading Samples')
self.endLayout()
def appleseedAETemplateCallback(nodeName):
AEappleseedNodeTemplate(nodeName)
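# A minimal usage sketch: Maya normally invokes the callback when the
# Attribute Editor opens for a supported node; calling it by hand on a
# hypothetical node name builds the same template.
#
# appleseedAETemplateCallback('myAreaLightShape')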
|
{
"content_hash": "14a5a7a1e5a32632552daba51e54f9d7",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 90,
"avg_line_length": 40.216,
"alnum_prop": 0.6246270141237319,
"repo_name": "dictoon/appleseed-maya",
"id": "cab47effd444022671b9f6b3a962c34154200ede",
"size": "6335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/appleseedMaya/AETemplates/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17782"
},
{
"name": "C++",
"bytes": "631253"
},
{
"name": "CMake",
"bytes": "29880"
},
{
"name": "Python",
"bytes": "165826"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('enhance', '0018_auto_20170306_0714'),
]
operations = [
migrations.CreateModel(
name='DiffTokenVectorElement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('frequency', models.IntegerField(default=0)),
('dic_token_index', models.IntegerField(default=0)),
('tfidf', models.FloatField(default=0.0)),
('dic_token', models.ForeignKey(related_name='diff_token_vector_elements', to='enhance.DictToken')),
('dictionary', models.ForeignKey(default=None, blank=True, to='enhance.Dictionary', null=True, db_index=False)),
('script_diff', models.ForeignKey(related_name='diff_token_vector_elements', to='enhance.ScriptDiff')),
],
options={
},
bases=(models.Model,),
),
]
|
{
"content_hash": "140288faa0fe3f16f1b9ccdfd8697eee",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 128,
"avg_line_length": 38.964285714285715,
"alnum_prop": 0.5866177818515124,
"repo_name": "nanchenchen/script-analysis",
"id": "be5c9fbf24994c9c49c6f3fb49cb7c81c4e8ca45",
"size": "1115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyanalysis/apps/enhance/migrations/0019_difftokenvectorelement.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53354"
},
{
"name": "HTML",
"bytes": "92901"
},
{
"name": "JavaScript",
"bytes": "286195"
},
{
"name": "Python",
"bytes": "202204"
},
{
"name": "Ruby",
"bytes": "8396"
},
{
"name": "Shell",
"bytes": "13182"
}
],
"symlink_target": ""
}
|
"""Collect Treadmill node information after a crash.
"""
import os
import glob
import logging
import shutil
import tempfile
from treadmill import fs
from treadmill import subproc
_LOGGER = logging.getLogger(__name__)
_IFCONFIG = 'ifconfig'
_SYSCTL = 'sysctl'
_DMESG = 'dmesg'
_TAIL = 'tail'
def _safe_copy(src, dest):
"""Copy file from src to dest if need, generate sub directory for dest"""
parent = os.path.dirname(dest)
if not os.path.exists(parent):
os.makedirs(parent)
try:
shutil.copyfile(src, dest)
_LOGGER.debug('file copied %s => %s', src, dest)
except OSError:
_LOGGER.exception('unable to copy %s => %s', src, dest)
def collect(approot, archive_filename):
"""Collect node information in case of blackout.
:param approot:
treadmill root, usually /var/tmp/treadmill
:type approot:
``str``
:param archive_filename:
archive path file
:type archive_filename:
``str``
"""
destroot = tempfile.mkdtemp()
_LOGGER.info('save node info in %s', destroot)
collect_init_services(approot, destroot)
collect_running_app(approot, destroot)
collect_sysctl(destroot)
collect_cgroup(approot, destroot)
collect_localdisk(approot, destroot)
collect_network(approot, destroot)
collect_message(destroot)
try:
archive_filename = fs.tar(sources=destroot,
target=archive_filename,
compression='gzip').name
_LOGGER.info('node info archive file: %s', archive_filename)
shutil.rmtree(destroot)
return archive_filename
except: # pylint: disable=W0702
        # if the tarball is not generated successfully, we keep destroot;
        # its path is in the log, so the files can still be inspected
_LOGGER.exception('Failed to generate node info archive')
return None
def collect_init_services(approot, destroot):
"""Get treadmill init services information in node."""
pattern = '%s/init/*/log/current' % approot
for current in glob.glob(pattern):
target = '%s%s' % (destroot, current)
_safe_copy(current, target)
def collect_running_app(approot, destroot):
"""Get treadmill running application information in node."""
pattern = '%s/running/*/run.*' % approot
for run_log in glob.glob(pattern):
target = '%s%s' % (destroot, run_log)
_safe_copy(run_log, target)
pattern = '%s/running/*/sys/*/log/current' % approot
for current in glob.glob(pattern):
target = '%s%s' % (destroot, current)
_safe_copy(current, target)
def collect_sysctl(destroot):
"""Get host sysctl (related to kernel)."""
sysctl = subproc.check_output([_SYSCTL, '-a'])
with open('%s/sysctl' % destroot, 'w+') as f:
f.write(sysctl)
def collect_cgroup(approot, destroot):
"""Get host treadmill cgroups inforamation."""
src = "%s/cgroup_svc" % approot
dest = "%s%s" % (destroot, src)
try:
shutil.copytree(src, dest)
except (shutil.Error, OSError):
_LOGGER.exception('fail to copy %s => %s', src, dest)
pattern = '/cgroup/*/treadmill/core'
for cgrp_core in glob.glob(pattern):
core_dest = '%s%s' % (destroot, cgrp_core)
try:
shutil.copytree(cgrp_core, core_dest)
except (shutil.Error, OSError):
_LOGGER.exception('fail to copy %s => %s', src, dest)
def collect_localdisk(approot, destroot):
"""Get host local disk information."""
src = '%s/localdisk_svc' % approot
dest = '%s%s' % (destroot, src)
try:
shutil.copytree(src, dest)
except (shutil.Error, OSError):
_LOGGER.exception('fail to copy %s => %s', src, dest)
# FIXME vgdisplay requires root
def collect_network(approot, destroot):
"""Get host network information."""
src = '%s/network_svc' % approot
dest = '%s%s' % (destroot, src)
try:
shutil.copytree(src, dest)
except (shutil.Error, OSError):
_LOGGER.exception('fail to copy %s => %s', src, dest)
ifconfig = subproc.check_output([_IFCONFIG])
with open('%s/ifconfig' % destroot, 'w') as f:
f.write(ifconfig)
def collect_message(destroot):
"""Get messages on the host."""
dmesg = subproc.check_output([_DMESG])
with open('%s/dmesg' % destroot, 'w') as f:
f.write(dmesg)
messages = subproc.check_output(
[_TAIL, '-n', '100', '/var/log/messages']
)
dest_messages = '%s/var/log/messages' % destroot
if not os.path.exists(os.path.dirname(dest_messages)):
os.makedirs(os.path.dirname(dest_messages))
with open(dest_messages, 'w') as f:
f.write(messages)
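# A minimal usage sketch (paths are hypothetical): collect diagnostics from
# the default treadmill root into a compressed archive.
#
# archive = collect('/var/tmp/treadmill', '/var/tmp/node_info.tar.gz')
# if archive is None:
#     _LOGGER.error('collection failed; see the log for the temp directory')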
|
{
"content_hash": "9bbcd63141d52b31ef22bbf362f8cdb9",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 77,
"avg_line_length": 28.226190476190474,
"alnum_prop": 0.6193589202867988,
"repo_name": "gaocegege/treadmill",
"id": "c6f19058fa44b4ecb49d6a3f9175806fcd600239",
"size": "4742",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev/2017-5-18",
"path": "treadmill/postmortem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "63"
},
{
"name": "HTML",
"bytes": "3973869"
},
{
"name": "Python",
"bytes": "2127593"
},
{
"name": "R",
"bytes": "2119"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "41660"
}
],
"symlink_target": ""
}
|
"""Backend objects for saving and loading data
DataStores provide a uniform interface for saving and loading data in different
formats. They should not be used directly, but rather through Dataset objects.
"""
from .common import AbstractDataStore
from .memory import InMemoryDataStore
from .netCDF4_ import NetCDF4DataStore
from .pydap_ import PydapDataStore
from .pynio_ import NioDataStore
from .scipy_ import ScipyDataStore
from .h5netcdf_ import H5NetCDFStore
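# A minimal usage sketch: these stores are normally selected through the
# ``engine`` argument of ``xarray.open_dataset`` rather than instantiated
# directly, e.g.:
#
# import xarray as xr
# ds = xr.open_dataset('data.nc', engine='netcdf4')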
|
{
"content_hash": "d329dcd4722f0ab9f1be54530779efd4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 79,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.8240343347639485,
"repo_name": "NicWayand/xray",
"id": "a082bd53e5ebe9f7a553adb70adb9e07b4509495",
"size": "466",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "xarray/backends/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3148"
},
{
"name": "Python",
"bytes": "954209"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import json
import geojson as gj
import bson.json_util as bju
import os
# Our imports
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache as enua
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.pipeline_queries as epq
import emission.core.wrapper.motionactivity as ecwm
import emission.analysis.intake.segmentation.trip_segmentation as eaist
import emission.analysis.intake.segmentation.section_segmentation as eaiss
import emission.analysis.intake.cleaning.location_smoothing as eaicl
import emission.analysis.intake.cleaning.clean_and_resample as eaicr
import emission.analysis.classification.inference.mode.pipeline as eacimp
import emission.analysis.plotting.geojson.geojson_feature_converter as gjfc
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.stop_queries as esdst
import emission.storage.decorations.section_queries as esds
import emission.storage.decorations.timeline as esdtl
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
# Test imports
import emission.tests.common as etc
class TestGeojsonFeatureConverter(unittest.TestCase):
def setUp(self):
self.copied_model_path = etc.copy_dummy_seed_for_inference()
etc.setupRealExample(self, "emission/tests/data/real_examples/shankari_2015-aug-27")
eaicf.filter_accuracy(self.testUUID)
def tearDown(self):
self.clearRelatedDb()
os.remove(self.copied_model_path)
def clearRelatedDb(self):
edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.testUUID})
def testTripGeojson(self):
eaist.segment_current_trips(self.testUUID)
eaiss.segment_current_sections(self.testUUID)
eaicl.filter_current_sections(self.testUUID)
tl = esdtl.get_raw_timeline(self.testUUID, 1440658800, 1440745200)
self.assertEqual(len(tl.trips), 9)
eaicr.clean_and_resample(self.testUUID)
eacimp.predict_mode(self.testUUID)
tl = esdtl.get_cleaned_timeline(self.testUUID, 1440658800, 1440745200)
tl.fill_start_end_places()
created_trips = tl.trips
self.assertEqual(len(created_trips), 9)
trip_geojson = gjfc.trip_to_geojson(created_trips[0], tl)
logging.debug("first trip_geojson = %s" % bju.dumps(trip_geojson, indent=4))
self.assertEqual(trip_geojson.type, "FeatureCollection")
self.assertEqual(trip_geojson.properties["feature_type"], "trip")
self.assertEqual(len(trip_geojson.features), 5)
day_geojson = gjfc.get_geojson_for_timeline(self.testUUID, tl)
self.assertEqual(len(day_geojson), 8)
self.assertEqual(day_geojson[-1].type, "FeatureCollection")
self.assertEqual(day_geojson[-1].properties["feature_type"], "trip")
self.assertEqual(len(day_geojson[-1].features), 5)
if __name__ == '__main__':
etc.configLogging()
unittest.main()
|
{
"content_hash": "9061bb507f413b7bbb3f92e66d1bb938",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 92,
"avg_line_length": 38.98837209302326,
"alnum_prop": 0.7450044736057262,
"repo_name": "e-mission/e-mission-server",
"id": "f043e198eeb39b2ace872619fa33435d17cbd2e9",
"size": "3353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "emission/tests/analysisTests/plottingTests/TestGeojsonFeatureConverter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "97039"
},
{
"name": "Dockerfile",
"bytes": "1326"
},
{
"name": "HTML",
"bytes": "64875"
},
{
"name": "JavaScript",
"bytes": "116761"
},
{
"name": "Jupyter Notebook",
"bytes": "4656584"
},
{
"name": "Python",
"bytes": "2219428"
},
{
"name": "SCSS",
"bytes": "41755"
},
{
"name": "Shell",
"bytes": "11419"
}
],
"symlink_target": ""
}
|
from __future__ import division
from fable \
import unsigned_integer_scan, \
identifier_scan, \
find_closing_parenthesis, \
SemanticError
from fable import tokenization
from fable import intrinsics
from fable import equivalence
from fable import utils
import sys
class Error(Exception): pass
class raise_errors_mixin(object):
__slots__ = []
def text_location(O, i):
sl, i = O.stmt_location(i)
if (i is not None and sl.stmt_offs is not None):
i += sl.stmt_offs
return sl, i
def format_error(O, i, msg, prefix=""):
sl, i = O.stmt_location(i)
from libtbx.str_utils import expandtabs_track_columns
t, js = expandtabs_track_columns(s=sl.text)
if (i is None):
ptr = ""
else:
if (i < 0):
j = -i - 1
t += " " * (j - len(t) + 1)
else:
j = js[sl.stmt_offs + i]
ptr = "\n" + "-"*(3+j) + "^"
if (msg is None): intro = ""
else: intro = "%s:\n at " % msg
result = "%s%s:\n |%s|%s" % (
intro, sl.format_file_name_and_line_number(), t, ptr)
if (prefix is None or prefix == ""):
return result
return "\n".join([(prefix + line).rstrip()
for line in result.splitlines()])
def raise_error(O, msg, i=None, ErrorType=None):
if (ErrorType is None): ErrorType = Error
raise ErrorType(O.format_error(i=i, msg=msg))
def raise_syntax_error(O, i=None):
O.raise_error(msg="Syntax error", i=i)
def raise_syntax_error_or_not_implemented(O, i=None):
O.raise_error(msg="Syntax error or not implemented", i=i)
def raise_semantic_error(O, msg=None, i=None):
O.raise_error(msg=msg, i=i, ErrorType=SemanticError)
def raise_internal_error(O, i=None):
O.raise_error(
msg="Sorry: fable internal error", i=i, ErrorType=AssertionError)
class source_line(raise_errors_mixin):
__slots__ = [
"global_line_index",
"file_name",
"line_number",
"text",
"label",
"stmt",
"stmt_offs",
"is_cont",
"index_of_exclamation_mark"]
def format_file_name_and_line_number(O):
return "%s(%d)" % (O.file_name, O.line_number)
def stmt_location(O, i):
return O, i
def __init__(O, global_line_index_generator, file_name, line_number, text):
O.global_line_index = global_line_index_generator.next()
O.file_name = file_name
O.line_number = line_number
O.text = text
O.label = None
O.stmt = ""
O.stmt_offs = None
i = text.find("\t", 0, 6)
if (i >= 0):
soff = i + 1
O.is_cont = False
l = text[:i].strip()
s = text[soff:72]
else:
soff = 6
c = text[5:6]
O.is_cont = (c != " " and c != "\t" and c != "")
l = text[:5].strip()
s = text[6:72]
if (len(l) == 0):
if (len(s) != 0):
O.stmt = s
O.stmt_offs = soff
else:
i = unsigned_integer_scan(code=l)
if (i < 0 or i != len(l)):
O.is_cont = False
else:
if (O.is_cont):
O.raise_error(
msg="A continuation character is illegal on a line with"
" a statement label",
i=-6)
O.label = l
if (len(s) == 0):
O.raise_error(msg="Labelled statement is empty", i=-7)
O.stmt = s
O.stmt_offs = soff
if (not O.is_cont and len(O.stmt.rstrip()) == 0):
O.stmt_offs = None
O.index_of_exclamation_mark = None
class stripped_source_line(raise_errors_mixin):
__slots__ = [
"source_line_cluster",
"label",
"code0_locations",
"start",
"code",
"strings",
"strings_locs",
"string_indices"]
def __init__(O,
source_line_cluster,
code0_locations,
code,
start,
strings,
strings_locs,
string_indices):
if (source_line_cluster is None):
assert code0_locations is None
else:
assert len(source_line_cluster) != 0
assert len(code0_locations) >= start + len(code)
assert len(strings) == len(string_indices)
O.source_line_cluster = source_line_cluster
O.label = None
for sl in O.source_line_cluster:
if (sl.label is not None):
O.label = sl.label
break
O.code0_locations = code0_locations
O.start = start
O.code = code
O.strings = strings
O.strings_locs = strings_locs
O.string_indices = string_indices
def code_with_strings(O):
result = []
j = 0
for c in O.code:
if (c == "'"):
result.append("'" + O.strings[j].replace("'","''") + "'")
j += 1
else:
result.append(c)
assert j == len(O.strings)
return "".join(result)
def stmt_location(O, i):
if (i is None):
return O.source_line_cluster[0], None
if (i < 0):
return O.source_line_cluster[0], i
return O.code0_locations[O.start + i]
def is_comment(O):
return (O.source_line_cluster[0].stmt_offs is None)
def __getitem__(O, key):
if (isinstance(key, slice)):
start, stop, step = key.indices(len(O.code))
assert step == 1
del step
else:
start = key
stop = key + 1
slice_strings = []
slice_strings_locs = []
slice_string_indices = []
for s,locs,si in zip(O.strings, O.strings_locs, O.string_indices):
if (si < start): continue
if (si >= stop): break
slice_strings.append(s)
slice_strings_locs.append(locs)
slice_string_indices.append(si-start)
return stripped_source_line_slice(
source_line_cluster=O.source_line_cluster,
code0_locations=O.code0_locations,
start=O.start+start,
code=O.code[key],
strings=slice_strings,
strings_locs=slice_strings_locs,
string_indices=slice_string_indices)
def raise_if_not_identifier(O):
i = identifier_scan(code=O.code)
if (i < 0 or i != len(O.code)):
O.raise_error("Not an identifier: %s" % repr(O.code), i=0)
def extract_identifier(O):
O.raise_if_not_identifier()
return O.code
def index_of_closing_parenthesis(O, start=0):
i = find_closing_parenthesis(code=O.code, start=start)
if (i < 0):
O.raise_error(msg='Missing a closing ")"', i=max(0, start-1))
return i
def comma_scan(O, start=0):
code = O.code
n = len(code)
i = start
while (i < n):
c = code[i]
if (c == ","):
return i
i += 1
if (c == "("):
i = O.index_of_closing_parenthesis(start=i) + 1
return -1
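# Scans backwards over the digits of a possible Hollerith count (the "4"
# in 4habcd) and returns the index where the digit run starts, or None
# if the preceding character cannot introduce a Hollerith constant.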
def get_hollerith_count_index(code):
i = len(code)
while (i != 0):
i -= 1
c = code[i]
digit = "0123456789".find(c)
if (digit < 0):
if (i+1 == len(code)):
return None
if (",(/$".find(c) >= 0):
return i+1
return None
return None
class stripped_source_line_slice(stripped_source_line):
__slots__ = stripped_source_line.__slots__
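# Core scanner: walks a statement (possibly spanning continuation
# lines), drops blanks and tabs, lower-cases code characters, and pulls
# quoted strings and Hollerith constants out verbatim.  E.g. the
# statement "DATA N /4HABCD/" is reduced to code "datan/'/" with
# strings == ["ABCD"].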
def strip_spaces_separate_strings(source_line_cluster):
code = []
locs = []
strings = []
strings_locs = []
string_indices = []
ca = code.append
la = locs.append
n_sl = len(source_line_cluster)
i_sl = 0
while (i_sl < n_sl):
sl = source_line_cluster[i_sl]
s = sl.stmt
n = len(s)
i = 0
while (i < n):
c = s[i]
if (c == "!"):
sl.index_of_exclamation_mark = i
break
if (c == "'" or c == '"'):
opening_quote = c
string_indices.append(len(code))
ca("'")
la((sl,i))
i += 1
string_chars = []
string_chars_locs = []
in_string = True
while in_string:
while (i < n):
c = s[i]
ci = i
i += 1
if (c == opening_quote):
if (not s.startswith(opening_quote, i)):
in_string = False
break
i += 1
string_chars.append(c)
string_chars_locs.append((sl,ci))
else:
i_sl += 1
if (i_sl == n_sl):
locs[-1][0].raise_error(
msg="Missing terminating %s character" % opening_quote,
i=locs[-1][1])
sl = source_line_cluster[i_sl]
s = sl.stmt
n = len(s)
i = 0
strings.append("".join(string_chars))
strings_locs.append(string_chars_locs)
elif (" \t".find(c) < 0):
c = c.lower()
if (c == 'h'):
j = get_hollerith_count_index(code)
else:
j = None
if (j is None):
          ca(c)
la((sl,i))
i += 1
else:
hollerith_count = int("".join(code[j:]))
del code[j:]
del locs[j:]
string_indices.append(len(code))
ca("'")
la((sl,i))
i += 1
string_chars = []
string_chars_locs = []
while True:
if (i < n):
string_chars.append(s[i])
string_chars_locs.append((sl,i))
i += 1
if (len(string_chars) == hollerith_count):
break
else:
i_sl += 1
if (i_sl == n_sl):
break
sl = source_line_cluster[i_sl]
s = sl.stmt
n = len(s)
i = 0
if (len(string_chars) != hollerith_count):
locs[-1][0].raise_error(
msg="Missing characters for Hollerith constant",
i=locs[-1][1])
strings.append("".join(string_chars))
strings_locs.append(string_chars_locs)
else:
i += 1
i_sl += 1
return stripped_source_line(
source_line_cluster=source_line_cluster,
code0_locations=locs,
code="".join(code),
start=0,
strings=strings,
strings_locs=strings_locs,
string_indices=string_indices)
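# Re-tokenizes the body of a format string, e.g. the '(2i5)' in
# write(6,'(2i5)'): the outer parentheses are stripped and nested
# character literals are separated out, mirroring
# strip_spaces_separate_strings.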
class fmt_string_stripped(raise_errors_mixin):
__slots__ = ["code", "locs", "strings", "strings_locs", "string_indices"]
def __init__(O, fmt_tok):
ssl = fmt_tok.ssl
i = ssl.string_indices.index(fmt_tok.i_code)
fmt_string = ssl.strings[i]
fmt_string_locs = ssl.strings_locs[i]
assert len(fmt_string) == len(fmt_string_locs)
code = []
O.locs = []
O.strings = []
O.strings_locs = []
O.string_indices = []
ca = code.append
la = O.locs.append
n = len(fmt_string)
have_leading_parenthesis = False
    # bind these error helpers before the scan loop below, which calls them
    def raise_must_start():
      fmt_tok.raise_error(msg='Format string must start with "("')
    def raise_must_end():
      fmt_tok.raise_error(msg='Format string must end with ")"')
    i = 0
while (i < n):
c = fmt_string[i]
loc = fmt_string_locs[i]
if (c == "'" or c == '"'):
if (not have_leading_parenthesis):
raise_must_start()
opening_quote = c
O.string_indices.append(len(code))
ca("'")
la(loc)
i += 1
string_chars = []
string_chars_locs = []
in_string = True
while in_string:
while (i < n):
c = fmt_string[i]
loc = fmt_string_locs[i]
i += 1
if (c == opening_quote):
if (not fmt_string.startswith(opening_quote, i)):
in_string = False
break
i += 1
string_chars.append(c)
string_chars_locs.append(loc)
else:
loc = O.locs[-1]
loc[0].raise_error(
msg='Missing terminating %s within character format'
' specifier "%s"' % (opening_quote, fmt_string),
i=loc[1])
O.strings.append("".join(string_chars))
O.strings_locs.append(string_chars_locs)
else:
if (" \t".find(c) < 0):
if (have_leading_parenthesis):
ca(c.lower())
la(loc)
else:
if (c != "("):
raise_must_start()
have_leading_parenthesis = True
i += 1
if (len(code) == 0):
if (have_leading_parenthesis):
raise_must_end()
raise_must_start()
elif (code[-1] != ")"):
raise_must_end()
code.pop()
O.locs.pop()
O.code = "".join(code)
def stmt_location(O, i):
if (i is None): i = 0
return O.locs[i]
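# Groups each statement with its continuation lines into one stripped
# statement; comment lines found between continuations are emitted
# (stripped) just before the combined statement.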
def combine_continuation_lines_and_strip_spaces(source_lines):
result = []
rapp = result.append
n_sl = len(source_lines)
i_sl = 0
while (i_sl < n_sl):
sl = source_lines[i_sl]
if (sl.stmt_offs is None):
rapp(strip_spaces_separate_strings(source_line_cluster=[sl]))
i_sl += 1
else:
assert not sl.is_cont
code_sls = [sl]
k_sl = i_sl
for j_sl in xrange(i_sl+1, n_sl):
sl = source_lines[j_sl]
if (sl.is_cont):
code_sls.append(sl)
k_sl = j_sl
elif (sl.stmt_offs is not None):
break
for j_sl in xrange(i_sl+1, k_sl):
sl = source_lines[j_sl]
if (not sl.is_cont):
rapp(strip_spaces_separate_strings(source_line_cluster=[sl]))
rapp(strip_spaces_separate_strings(source_line_cluster=code_sls))
i_sl = k_sl + 1
return result
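# Replaces include'...' statements with the stripped lines of the named
# file, resolved relative to the directory of the including file.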
def load_includes(global_line_index_generator, stripped_source_lines):
import os.path as op
result = []
for ssl in stripped_source_lines:
if (ssl.code == "include'"):
assert len(ssl.strings) == 1
file_name = ssl.strings[0]
if (op.isabs(file_name)):
file_path = file_name
else:
sl = ssl.code0_locations[-1][0]
file_path = op.join(op.dirname(sl.file_name), file_name)
if (not op.isfile(file_path)):
ssl.raise_semantic_error(msg="Missing include file", i=7)
# TODO potential performance problem if deeply nested includes
result.extend(load(
global_line_index_generator=global_line_index_generator,
file_name=file_path))
else:
result.append(ssl)
return result
def load(global_line_index_generator, file_name, skip_load_includes=False):
source_lines = []
for i_line,line in enumerate(open(file_name).read().splitlines()):
source_lines.append(source_line(
global_line_index_generator=global_line_index_generator,
file_name=file_name,
line_number=i_line+1,
text=line))
stripped_source_lines = combine_continuation_lines_and_strip_spaces(
source_lines=source_lines)
if (skip_load_includes):
return stripped_source_lines
return load_includes(
global_line_index_generator=global_line_index_generator,
stripped_source_lines=stripped_source_lines)
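# Tokenizes ssl.code[start:stop]; parenthesized groups become nested
# tk_parentheses tokens and, when allow_commas is set, comma-separated
# parts are wrapped in tk_seq groups.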
def tokenize_expression(
ssl,
start=0,
stop=None,
allow_commas=False,
allow_equal_signs=False):
result = []
if (stop is None): stop = len(ssl.code)
tokenize_expression_impl(
tokens=result,
tokenizer=tokenization.ssl_iterator(ssl=ssl, start=start, stop=stop),
allow_commas=allow_commas,
allow_equal_signs=allow_equal_signs,
tok_opening_parenthesis=None)
return result
def tokenize_expression_impl(
tokens,
tokenizer,
allow_commas,
allow_equal_signs,
tok_opening_parenthesis):
  from fable.tokenization import tk_seq, tk_parentheses
if (allow_commas):
tlist = []
tokens.append(tk_seq(ssl=tokenizer.ssl, i_code=tokenizer.i, value=tlist))
else:
tlist = tokens
tapp = tlist.append
for tok in tokenizer:
if (tok.is_op()):
tv = tok.value
if (tv == "("):
nested_tokens = []
tokenize_expression_impl(
tokens=nested_tokens,
tokenizer=tokenizer,
allow_commas=True,
allow_equal_signs=allow_equal_signs,
tok_opening_parenthesis=tok)
tapp(tk_parentheses(
ssl=tok.ssl, i_code=tok.i_code, value=nested_tokens))
continue
if (tv == ")"):
if (tok_opening_parenthesis is None):
tok.raise_missing_opening()
return
if (tv == ","):
if (not allow_commas):
tok.ssl.raise_syntax_error(i=tok.i_code)
tlist = []
tokens.append(tk_seq(
ssl=tokenizer.ssl, i_code=tokenizer.i, value=tlist))
tapp = tlist.append
continue
if (tv == "="):
if (not allow_equal_signs):
tok.ssl.raise_syntax_error(i=tok.i_code)
tapp(tok)
continue
tapp(tok)
continue
if (tok_opening_parenthesis is not None):
tok_opening_parenthesis.raise_missing_closing()
def indices_of_tokenized_equal_signs(tokens):
result = []
for i,tok in enumerate(tokens):
if (tok.is_op() and tok.value == "="):
result.append(i)
return result
# variable types
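# (empty classes serve as identity-comparable enum values, tested with
# "is" throughout)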
class vt_used(object): pass
class vt_scalar(object): pass
class vt_string(object): pass
class vt_array(object): pass
class vt_intrinsic(object): pass
class vt_external(object): pass
class vt_function(object): pass
class vt_subroutine(object): pass
# variable storage
class vs_fproc_name(object): pass
class vs_argument(object): pass
class vs_common(object): pass
class vs_save(object): pass
class vs_local(object): pass
class vs_parameter(object): pass
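# Everything known about one Fortran name: how it is used (var_type),
# where it lives (var_storage), and its declared type, size and
# dimensions.  Filled in incrementally by fproc.build_fdecl_by_identifier.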
class fdecl_info(object):
__slots__ = [
"id_tok",
"var_type",
"var_storage",
"data_type",
"size_tokens",
"dim_tokens",
"parameter_assignment_tokens",
"f90_decl",
"is_modified",
"use_count",
"passed_as_arg",
"passed_as_arg_plain"]
def __init__(O,
id_tok,
var_type,
var_storage,
data_type,
size_tokens,
dim_tokens,
f90_decl=None):
assert size_tokens is None or isinstance(size_tokens, list)
assert dim_tokens is None or isinstance(dim_tokens, list)
O.id_tok = id_tok
O.var_type = var_type
O.var_storage = var_storage
O.data_type = data_type
O.size_tokens = size_tokens
O.dim_tokens = dim_tokens
O.parameter_assignment_tokens = None
O.f90_decl = f90_decl
O.is_modified = False
O.use_count = 0
O.passed_as_arg = {}
O.passed_as_arg_plain = {}
def is_used(O): return O.var_type is vt_used
def is_scalar(O): return O.var_type is vt_scalar
def is_string(O): return O.var_type is vt_string
def is_array(O): return O.var_type is vt_array
def is_intrinsic(O): return O.var_type is vt_intrinsic
def is_external(O): return O.var_type is vt_external
def is_function(O): return O.var_type is vt_function
def is_subroutine(O): return O.var_type is vt_subroutine
def is_user_defined_callable(O):
vt = O.var_type
return (vt is vt_external or vt is vt_function or vt is vt_subroutine)
def is_fproc_name(O): return O.var_storage is vs_fproc_name
def is_argument(O): return O.var_storage is vs_argument
def is_common(O): return O.var_storage is vs_common
def is_save(O): return O.var_storage is vs_save
def is_local(O): return O.var_storage is vs_local
def is_parameter(O): return O.var_storage is vs_parameter
def required_parameter_assignment_tokens(O):
result = O.parameter_assignment_tokens
if (result is None):
O.id_tok.raise_internal_error()
return result
def extract_size_tokens(ssl, start):
code = ssl.code
c = code[start]
if (c == "("):
i_clp = ssl.index_of_closing_parenthesis(start=start+1)
return i_clp+1, tokenize_expression(
ssl=ssl,
start=start+1,
stop=i_clp)
i_size = unsigned_integer_scan(code=code, start=start)
if (i_size < 0):
ssl.raise_syntax_error(i=start)
return \
i_size, \
[tokenization.tk_integer(ssl=ssl, i_code=start, value=code[start:i_size])]
data_types = """\
byte
character
complex
doublecomplex
doubleprecision
integer
logical
real
""".splitlines()
def extract_data_type(ssl, start=0, optional=False):
sw = ssl.code.startswith
for data_type in data_types:
if (sw(data_type, start)):
return (
start + len(data_type),
tokenization.tk_identifier(ssl=ssl, i_code=start, value=data_type))
if (not optional):
ssl.raise_syntax_error()
return None, None
def extract_data_type_and_size(ssl, start=0, optional=False):
i_code, data_type = extract_data_type(
ssl=ssl, start=start, optional=optional)
if (optional and i_code is None):
return None, None, None
if (not ssl.code.startswith("*", i_code)):
return i_code, data_type, None
i_code, size_tokens = extract_size_tokens(ssl=ssl, start=i_code+1)
return i_code, data_type, size_tokens
def extract_f90_decl(ssl, start):
code = ssl.code
if (start == len(code)):
ssl.raise_syntax_error(i=start)
i_cc = code.find("::", start)
if (i_cc >= 0):
return i_cc+2, ssl[start:i_cc]
if (code.startswith("(", start)):
i_clp = ssl.index_of_closing_parenthesis(start=start+1)
return i_clp+1, ssl[start+1:i_clp]
return start, None
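# Parses a comma-separated declarator list such as "a(10),b*8,c(n,m)"
# and appends one fdecl_info per declared name to result.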
def extract_fdecl(
result,
ssl,
start,
data_type,
size_tokens,
allow_size,
f90_decl=None):
code = ssl.code
stop = len(code)
def parse_decl(start):
item_size_tokens = None
dim_tokens = None
i_id = identifier_scan(code=code, start=start)
if (i_id < 0):
ssl.raise_syntax_error(i=start)
i_code = i_id
while True:
if (i_code == stop):
break
c = code[i_code]
if (c == ","):
i_code += 1
break
if (c == "("):
if (dim_tokens is not None):
ssl.raise_syntax_error(i=i_code)
i_clp = ssl.index_of_closing_parenthesis(start=i_code+1)
dim_tokens = tokenize_expression(
ssl=ssl,
start=i_code+1,
stop=i_clp,
allow_commas=True)
i_code = i_clp + 1
elif (c == "*"):
if (not allow_size or item_size_tokens is not None):
ssl.raise_syntax_error(i=i_code)
i_code, item_size_tokens = extract_size_tokens(ssl=ssl, start=i_code+1)
else:
ssl.raise_syntax_error(i=i_code)
if (item_size_tokens is None):
item_size_tokens = size_tokens
result.append(fdecl_info(
id_tok=tokenization.tk_identifier(
ssl=ssl, i_code=start, value=code[start:i_id]),
var_type=None,
var_storage=None,
data_type=data_type,
size_tokens=item_size_tokens,
dim_tokens=dim_tokens,
f90_decl=f90_decl))
return i_code
if (ssl.code.startswith(",", start)):
start += 1
while (start < stop):
start = parse_decl(start=start)
def dimensions_are_simple(dim_tokens):
is_star = tokenization.tok_seq_is_star
for tok_seq in dim_tokens:
if (is_star(tok_seq=tok_seq)):
return False
for tok in tok_seq.value:
if (tok.is_op_with(value=":")):
return False
return True
def process_labels_list(ssl, start, stop, len_min, len_max):
if (start == stop):
ssl.raise_syntax_error(i=start)
result = []
code = ssl.code
i = start
while True:
if (len_max is not None and len(result) >= len_max):
ssl.raise_syntax_error(i=i)
j = unsigned_integer_scan(code=code, start=i, stop=stop)
if (j < 0):
ssl.raise_syntax_error(i=i)
result.append(
tokenization.tk_integer(ssl=ssl, i_code=i, value=code[i:j]))
if (j == stop):
break
if (code[j] != ","):
ssl.raise_syntax_error(i=j)
i = j + 1
if (len(result) < len_min):
ssl.raise_syntax_error(i=i)
return result
class executable_info(object):
__slots__ = []
def __init__(O, **ks):
O.key = O.__class__.__name__[3:]
O.ssl = ks["ssl"]
O.start = ks["start"]
for k,v in ks.items():
setattr(O, k, v)
def s4it(O, callback, tokens):
tokenization.search_for_id_tokens(
callback=callback, tokens=tokens, with_next_token=True)
def s4it_slots(O, callback, obj_with_slots):
for s in obj_with_slots.__slots__:
attr = getattr(obj_with_slots, s)
if (attr is not None):
O.s4it(callback, attr)
def set_is_modified(O, fdecl_by_identifier):
pass
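# mksl builds the __slots__ tuple for the ei_* classes below: every
# executable statement records its kind ("key"), its stripped source
# line, and its start offset within that line.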
def mksl(*names): return ("key", "ssl", "start") + names
class ei_allocate(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass # TODO
class ei_assign(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass # TODO
class ei_assignment(executable_info):
__slots__ = mksl("lhs_tokens", "rhs_tokens")
def search_for_id_tokens(O, callback):
O.s4it(callback, O.lhs_tokens)
O.s4it(callback, O.rhs_tokens)
def set_is_modified(O, fdecl_by_identifier):
assert len(O.lhs_tokens) != 0
id_tok = O.lhs_tokens[0]
if (not id_tok.is_identifier()):
id_tok.raise_syntax_error()
tf = fdecl_by_identifier.get(id_tok.value)
assert tf is not None
tf.is_modified = True
class ei_file_positioning(executable_info):
__slots__ = mksl("io_function", "alist")
def search_for_id_tokens(O, callback):
if (O.alist is not None):
O.s4it_slots(callback, O.alist)
class ei_call(executable_info):
__slots__ = mksl("subroutine_name", "arg_token")
def search_for_id_tokens(O, callback):
callback(O.subroutine_name, O.arg_token)
if (O.arg_token is not None):
O.s4it(callback, O.arg_token.value)
class ei_close(executable_info):
__slots__ = mksl("cllist")
def search_for_id_tokens(O, callback):
O.s4it_slots(callback, O.cllist)
class ei_continue(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass
class ei_cycle(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass
class ei_deallocate(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass # TODO
class ei_do(executable_info):
__slots__ = mksl("label", "id_tok", "tokens")
def search_for_id_tokens(O, callback):
callback(O.id_tok, None)
O.s4it(callback, O.tokens)
def set_is_modified(O, fdecl_by_identifier):
fdecl = fdecl_by_identifier[O.id_tok.value]
fdecl.is_modified = True
class ei_dowhile(executable_info):
__slots__ = mksl("label", "cond_tokens")
def search_for_id_tokens(O, callback):
O.s4it(callback, O.cond_tokens)
class ei_else(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass
class ei_elseif_then(executable_info):
__slots__ = mksl("cond_tokens")
def search_for_id_tokens(O, callback):
O.s4it(callback, O.cond_tokens)
class ei_enddo(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass
class ei_endif(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass
class ei_entry(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass # TODO
class ei_exit(executable_info):
__slots__ = mksl()
def search_for_id_tokens(O, callback):
pass
class ei_goto(executable_info):
__slots__ = mksl("label")
def search_for_id_tokens(O, callback):
pass
class ei_goto_computed(executable_info):
__slots__ = mksl("labels", "tokens")
def search_for_id_tokens(O, callback):
O.s4it(callback, O.tokens)
class ei_if(executable_info):
__slots__ = mksl("cond_tokens")
def search_for_id_tokens(O, callback):
O.s4it(callback, O.cond_tokens)
class ei_if_then(executable_info):
__slots__ = mksl("cond_tokens")
def search_for_id_tokens(O, callback):
O.s4it(callback, O.cond_tokens)
class ei_if_arithmetic(executable_info):
__slots__ = mksl("cond_tokens", "labels")
def search_for_id_tokens(O, callback):
O.s4it(callback, O.cond_tokens)
class ei_inquire(executable_info):
__slots__ = mksl("iuflist")
def search_for_id_tokens(O, callback):
O.s4it_slots(callback, O.iuflist)
class ei_open(executable_info):
__slots__ = mksl("olist")
def search_for_id_tokens(O, callback):
O.s4it_slots(callback, O.olist)
class ei_print(executable_info):
__slots__ = mksl("cilist", "iolist", "fmt_tokens")
def search_for_id_tokens(O, callback):
O.s4it_slots(callback, O.cilist)
O.s4it(callback, O.iolist)
class ei_read(executable_info):
__slots__ = mksl("cilist", "iolist", "fmt_tokens")
def search_for_id_tokens(O, callback):
if (O.cilist is not None):
O.s4it_slots(callback, O.cilist)
if (O.iolist is not None):
O.s4it(callback, O.iolist)
def set_is_modified(O, fdecl_by_identifier):
if (O.iolist is not None):
def callback(tok):
fdecl = fdecl_by_identifier[tok.value]
fdecl.is_modified = True
tokenization.search_for_data_or_read_target_tokens(
callback=callback, tokens=O.iolist)
class ei_return(executable_info):
__slots__ = mksl("return_label")
def search_for_id_tokens(O, callback):
pass
class ei_stop(executable_info):
__slots__ = mksl("arg_token")
def search_for_id_tokens(O, callback):
pass
class ei_write(executable_info):
__slots__ = mksl("cilist", "iolist", "fmt_tokens")
def search_for_id_tokens(O, callback):
O.s4it_slots(callback, O.cilist)
O.s4it(callback, O.iolist)
def set_is_modified(O, fdecl_by_identifier):
if ( O.cilist is not None
and O.cilist.unit is not None
and len(O.cilist.unit) != 0):
first_tok = O.cilist.unit[0]
if (first_tok.is_identifier()):
fdecl = fdecl_by_identifier[first_tok.value]
if ( fdecl.data_type is not None
and fdecl.data_type.value == "character"):
fdecl.is_modified = True
del mksl
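# The p_* parser methods below are dispatched by statement keyword via
# getattr(fproc_p_methods, "p_"+keyword) from fproc.process_body_line.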
class fproc_p_methods(object):
"Separated from class fproc for clarity and a minor getattr speed gain."
__slots__ = []
def p_allocate(O, ssl, start):
O.executable.append(ei_allocate(ssl=ssl, start=start)) # TODO
def p_assign(O, ssl, start):
O.executable.append(ei_assign(ssl=ssl, start=start)) # TODO
def p_file_positioning(O, ssl, start, io_function):
liof = len(io_function)
if (ssl.code.startswith("(", start+liof)):
tz = tokenization.ssl_iterator(ssl=ssl, start=start+liof)
alist = collect_io_alist(tz=tz, unit=None)
if (alist.unit is None):
ssl.raise_semantic_error(
msg="Required UNIT information is not defined", i=start)
else:
alist = collect_io_alist(
tz=None,
unit=tokenize_expression(ssl=ssl, start=start+liof))
O.executable.append(ei_file_positioning(
ssl=ssl, start=start, io_function=io_function, alist=alist))
def p_backspace(O, ssl, start):
O.p_file_positioning(ssl=ssl, start=start, io_function="backspace")
def p_call(O, ssl, start):
tokens = tokenize_expression(ssl=ssl, start=start+4)
if ( len(tokens) == 0
or len(tokens) > 2
or not tokens[0].is_identifier()):
ssl.raise_syntax_error()
subroutine_name = tokens[0]
if (len(tokens) == 1):
arg_token = None
else:
if (not tokens[1].is_parentheses()):
ssl.raise_syntax_error()
arg_token = tokens[1]
O.executable.append(ei_call(ssl=ssl, start=start,
subroutine_name=subroutine_name,
arg_token=arg_token))
def p_close(O, ssl, start):
tz = tokenization.ssl_iterator(ssl=ssl, start=start+5)
cllist = collect_io_cllist(tz=tz)
tok = tz.look_ahead(optional=True)
if (tok is not None):
tok.raise_syntax_error()
O.executable.append(ei_close(ssl=ssl, start=start, cllist=cllist))
O.uses_io = True
def p_common(O, ssl, start):
assert start == 0
code = ssl.code
if (len(code) == 6):
ssl.raise_syntax_error()
c = code[6]
if (c == "/"):
i = code.find("/", 7)
if (i < 0):
ssl.raise_syntax_error_or_not_implemented()
if (i == 7):
common_name = "commonymous"
i_code = 8
else:
common_name = ssl[7:i].extract_identifier()
i_code = i + 1
else:
common_name = "commonymous"
i_code = 6
extract_fdecl(
result=O.common.get(key=common_name),
ssl=ssl,
start=i_code,
data_type=None,
size_tokens=None,
allow_size=False)
def p_continue(O, ssl, start):
O.executable.append(ei_continue(ssl=ssl, start=start))
def p_cycle(O, ssl, start):
if (len(ssl.code) != start+5):
ssl.raise_syntax_error(i=start+5)
O.executable.append(ei_cycle(ssl=ssl, start=start))
def p_data(O, ssl, start):
assert start == 0
tz = tokenization.ssl_iterator(ssl=ssl, start=4)
tok = None
while True: # loop over nlist, clist pairs
nlist = []
while True:
if (tok is None):
tok = tz.get()
if (tok.is_identifier()):
ntoks = [tok]
nlist.append(
tokenization.tk_seq(ssl=tz.ssl, i_code=tz.i, value=ntoks))
tok = tz.get()
if (tok.is_op_with(value="(")):
tz.collect_to_matching_parenthesis(
callback=ntoks.append, opening_token=tok)
tok = tz.get()
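          # a second parenthesized group, apparently to cover substrings
          # of array elements, e.g. name(i)(j:k)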
if (tok.is_op_with(value="(")):
tz.collect_to_matching_parenthesis(
callback=ntoks.append, opening_token=tok)
tok = tz.get()
elif (tok.is_op_with(value="(")):
ntoks = []
nlist.append(
tokenization.tk_seq(ssl=tz.ssl, i_code=tz.i, value=ntoks))
ntoks.append(tz.get_implied_do(opening_token=tok))
tok = tz.get()
else:
tok.raise_syntax_error()
if (not tok.is_op_with(value=",")):
break
tok = None
if (not tok.is_op_with(value="/")):
tok.raise_syntax_error()
clist = []
repetition_tok = None
sign_count = 0
ctoks = []
while True:
tok = tz.get()
if ( len(ctoks) == 0
and repetition_tok is None
and (tok.is_integer() or tok.is_identifier())
and tz.look_ahead().is_op_with(value="*")):
repetition_tok = tok
tz.get()
tok = tz.get()
if (tok.is_op()):
if (tok.value in ["+", "-"]):
if (sign_count != 0 or len(ctoks) != 0):
tok.raise_syntax_error()
sign_count = 1
ctoks.append(tok)
elif (tok.value == "("):
if (len(ctoks) != sign_count):
tok.raise_syntax_error()
ctoks.append(tz.get_complex_literal(opening_token=tok))
elif (tok.value == "/"):
if (len(ctoks) == sign_count):
tok.raise_syntax_error()
clist.append((repetition_tok, ctoks))
break
elif (tok.value == ","):
if (len(ctoks) == sign_count):
tok.raise_syntax_error()
clist.append((repetition_tok, ctoks))
repetition_tok = None
sign_count = 0
ctoks = []
else:
tok.raise_syntax_error()
else:
if (len(ctoks) != sign_count):
tok.raise_syntax_error()
ctoks.append(tok)
O.data.append((nlist, clist))
tok = tz.get(optional=True)
if (tok is None):
break
if (tok.is_op_with(value=",")):
tok = None
def p_deallocate(O, ssl, start):
O.executable.append(ei_deallocate(ssl=ssl, start=start)) # TODO
def p_dimension(O, ssl, start):
assert start == 0
extract_fdecl(
result=O.dimension,
ssl=ssl,
start=9,
data_type=None,
size_tokens=None,
allow_size=False)
def p_do(O, ssl, start):
assert start == 0
code = ssl.code
i = unsigned_integer_scan(code=code, start=2)
if (i < 3):
i = 2
label = None
else:
label = code[2:i]
if (code[i] == ","):
i += 1
j = identifier_scan(code=code, start=i)
assert j >= 3
assert code[j] == "="
tokens = tokenize_expression(ssl=ssl, start=j+1, allow_commas=True)
if (not (2 <= len(tokens) <= 3)):
ssl.raise_syntax_error(i=j+1)
O.executable.append(ei_do(ssl=ssl, start=start,
label=label,
id_tok=tokenization.tk_identifier(ssl=ssl, i_code=i, value=code[i:j]),
tokens=tokens))
def p_dowhile(O, ssl, start, label_end=None):
assert start == 0
if (label_end is None):
i = 7
label = None
else:
i = label_end + 5
label = ssl.code[2:label_end]
cond_tokens = tokenize_expression(ssl=ssl, start=i)
if (len(cond_tokens) != 1):
ssl.raise_syntax_error(i=i)
O.executable.append(ei_dowhile(ssl=ssl, start=start,
label=label,
cond_tokens=cond_tokens))
def p_else(O, ssl, start):
assert start == 0
O.executable.append(ei_else(ssl=ssl, start=start))
def p_elseif(O, ssl, start):
assert start == 0
O.p_if_elseif(ssl=ssl, keyword="elseif", start=0)
def p_enddo(O, ssl, start):
assert start == 0
O.executable.append(ei_enddo(ssl=ssl, start=start))
def p_endfile(O, ssl, start):
O.p_file_positioning(ssl=ssl, start=start, io_function="endfile")
def p_endif(O, ssl, start):
assert start == 0
O.executable.append(ei_endif(ssl=ssl, start=start))
def p_entry(O, ssl, start):
assert start == 0
O.executable.append(ei_entry(ssl=ssl, start=start)) # TODO
def p_equivalence(O, ssl, start):
assert start == 0
buffer = []
tz = tokenization.ssl_iterator(ssl=ssl, start=11)
while True:
tok = tz.get()
if (not tok.is_op_with(value="(")):
tok.raise_syntax_error()
def callback(tok):
if (len(tok.value) == 0):
tok.raise_syntax_error()
for tok_seq in tok.value:
if (len(tok_seq.value) == 0):
tok.raise_syntax_error()
id_tok = tok_seq.value[0]
if (not id_tok.is_identifier()):
id_tok.raise_syntax_error()
O.equivalence.append(tok)
tz.collect_to_matching_parenthesis(
callback=callback,
opening_token=tok)
tok = tz.get(optional=True)
if (tok is None):
break
if (not tok.is_op_with(value=",")):
tok.raise_syntax_error()
def p_exit(O, ssl, start):
if (len(ssl.code) != start+4):
ssl.raise_syntax_error(i=start+4)
O.executable.append(ei_exit(ssl=ssl, start=start))
def p_external(O, ssl, start):
assert start == 0
tokenization.ssl_iterator(
ssl=ssl, start=8).collect_comma_separated_identifiers(
callback=O.external.append, one_required=True)
def p_format(O, ssl, start):
assert start == 0
code = ssl.code
assert code.startswith("format(")
assert code.endswith(")")
if (ssl.label is None):
ssl.raise_error(
msg="FORMAT without a statement label in columns 1-5", i=0)
if (ssl.label in O.format):
ssl.raise_error(
msg="Duplicate statement label in columns 1-5", i=-1)
O.format[ssl.label] = list(tokenization.fss_iterator(fss=ssl[7:-1]))
def p_goto(O, ssl, start):
code = ssl.code
i = start + 4
if (i == len(code)):
ssl.raise_syntax_error(i=i)
j = unsigned_integer_scan(code=code, start=i)
if (j == len(code)):
O.executable.append(ei_goto(ssl=ssl, start=start,
label=tokenization.tk_integer(ssl=ssl, i_code=i, value=code[i:])))
return
if (j > 0):
ssl.raise_syntax_error(i=i)
if (code[i] == "("):
# GO TO (s [,s]...)[,] i
j = ssl.index_of_closing_parenthesis(start=i+1)
labels = process_labels_list(
ssl=ssl, start=i+1, stop=j, len_min=1, len_max=None)
j += 1
if (j == len(code)):
ssl.raise_syntax_error(i=j)
if (code[j] == ","):
j += 1
if (j == len(code)):
ssl.raise_syntax_error(i=j)
tokens = tokenize_expression(ssl=ssl, start=j)
O.executable.append(ei_goto_computed(ssl=ssl, start=start,
labels=labels,
tokens=tokens))
return
# GO TO i [[,](s [,s]...)]
if (code[-1] != ")"):
ssl.raise_syntax_error()
k = code.rfind("(")
if (k < 0):
ssl.raise_syntax_error()
j = k - 1
if (code[j] == ","):
j -= 1
    tokens = tokenize_expression(ssl=ssl, start=i, stop=j+1)
labels = process_labels_list(
ssl=ssl, start=k+1, stop=len(code)-1, len_min=1, len_max=None)
O.executable.append(ei_goto_computed(ssl=ssl, start=start,
labels=labels,
tokens=tokens))
def p_if(O, ssl, start):
i = O.p_if_elseif(ssl=ssl, keyword="if", start=start)
if (i is not None):
O.process_body_line(ssl=ssl, start=i)
def p_if_elseif(O, ssl, keyword, start):
i_open = start + len(keyword) + 1
i_clp = ssl.index_of_closing_parenthesis(start=i_open)
cond_tokens = tokenize_expression(ssl=ssl, start=i_open, stop=i_clp)
if (len(cond_tokens) == 0):
ssl.raise_syntax_error(i=i_open)
code = ssl.code
if (code.startswith("then", i_clp+1) and len(code) == i_clp+5):
if (start != 0):
ssl.raise_syntax_error()
if (keyword == "if"): ei = ei_if_then
else: ei = ei_elseif_then
O.executable.append(ei(ssl=ssl, start=start, cond_tokens=cond_tokens))
return None
if (keyword != "if"):
ssl.raise_syntax_error()
i = i_clp + 1
if (i == len(code)):
ssl.raise_syntax_error(i=i)
j = unsigned_integer_scan(code=code, start=i, stop=i+1)
if (j < 0):
if (start != 0):
ssl.raise_syntax_error()
O.executable.append(ei_if(ssl=ssl, start=start, cond_tokens=cond_tokens))
return i
labels = process_labels_list(
ssl=ssl, start=i, stop=len(code), len_min=3, len_max=3)
O.executable.append(ei_if_arithmetic(ssl=ssl, start=start,
cond_tokens=cond_tokens,
labels=labels))
return None
def p_implicit(O, ssl, start):
assert start == 0
if (ssl.code == "implicitnone"):
O.implicit = {}
return
i_code, data_type = extract_data_type(ssl=ssl, start=8)
if ( not ssl.code.startswith("(", i_code)
or not ssl.code.endswith(")")):
ssl.raise_syntax_error_or_not_implemented()
letters = "abcdefghijklmnopqrstuvwxyz"
def get(c):
i = letters.find(c)
if (i < 0):
ssl.raise_syntax_error_or_not_implemented()
return i
for part in ssl.code[i_code+1:-1].split(","):
if (len(part) == 3 and part[1] == "-"):
i = get(part[0])
j = get(part[2])
for c in letters[i:j+1]:
O.implicit[c] = data_type
else:
for c in part:
if (c not in "abcdefghijklmnopqrstuvwxyz"):
ssl.raise_syntax_error_or_not_implemented()
O.implicit[c] = data_type
def p_inquire(O, ssl, start):
tz = tokenization.ssl_iterator(ssl=ssl, start=start+7)
iuflist = collect_io_iuflist(tz=tz)
tok = tz.look_ahead(optional=True)
if (tok is not None):
tok.raise_syntax_error()
O.executable.append(ei_inquire(ssl=ssl, start=start, iuflist=iuflist))
O.uses_io = True
def p_intrinsic(O, ssl, start):
assert start == 0
tokenization.ssl_iterator(
ssl=ssl, start=9).collect_comma_separated_identifiers(
callback=O.intrinsic.append, one_required=True)
def p_open(O, ssl, start):
tz = tokenization.ssl_iterator(ssl=ssl, start=start+4)
olist = collect_io_olist(tz=tz)
tok = tz.look_ahead(optional=True)
if (tok is not None):
tok.raise_syntax_error()
O.executable.append(ei_open(ssl=ssl, start=start, olist=olist))
O.uses_io = True
def p_parameter(O, ssl, start):
assert start == 0
code = ssl.code
if ( not code.startswith("(", 9)
or not code.endswith(")")):
ssl.raise_syntax_error()
tokens_ll = tokenize_expression(
ssl=ssl,
start=10,
stop=len(code)-1,
allow_commas=True,
allow_equal_signs=True)
for tokens_l in tokens_ll:
i_equal_signs = indices_of_tokenized_equal_signs(tokens=tokens_l.value)
if (len(i_equal_signs) != 1 or i_equal_signs[0] != 1):
ssl.raise_syntax_error()
key_token = tokens_l.value[0]
if (not key_token.is_identifier()):
key_token.raise_syntax_error()
O.parameter.append((key_token, tokens_l.value[2:]))
def p_print(O, ssl, start):
tz = tokenization.ssl_iterator(ssl=ssl, start=start+5)
fmt_buffer = []
tz.collect_comma_separated_expressions(
callback=fmt_buffer.append,
first_get_optional=False,
stop_after_given_number_of_commas=1)
assert len(fmt_buffer) == 1
cilist = collect_io_cilist(tz=None, fmt=fmt_buffer[0].value)
fmt_tokens = None
if (len(cilist.fmt) == 1 and cilist.fmt[0].is_string()):
fmt_tokens = list(tokenization.fss_iterator(
fss=fmt_string_stripped(fmt_tok=cilist.fmt[0])))
iolist = collect_iolist(tz=tz)
O.executable.append(ei_print(ssl=ssl, start=start,
cilist=cilist, fmt_tokens=fmt_tokens, iolist=iolist))
O.uses_io = True
O.uses_write = True
def p_read_write(O, ssl, start, ei_type):
tz = tokenization.ssl_iterator(ssl=ssl, start=start)
cilist = collect_io_cilist(tz=tz)
if (cilist.unit is None):
ssl.raise_semantic_error(
msg="Required UNIT information is not defined", i=start)
fmt_tokens = None
if (cilist.fmt is not None):
if (len(cilist.fmt) == 1 and cilist.fmt[0].is_string()):
fmt_tokens = list(tokenization.fss_iterator(
fss=fmt_string_stripped(fmt_tok=cilist.fmt[0])))
if (ei_type is ei_write and cilist.end is not None):
cilist.end[0].raise_semantic_error(
msg="END is invalid for WRITE statements")
iolist = collect_iolist(tz=tz)
O.executable.append(ei_type(ssl=ssl, start=start,
cilist=cilist, fmt_tokens=fmt_tokens, iolist=iolist))
def p_read(O, ssl, start):
code = ssl.code
if (code.startswith("*", start+4)):
if (code.startswith(",", start+5)):
tz = tokenization.ssl_iterator(ssl=ssl, start=start+6)
iolist = []
tz.collect_comma_separated_expressions(
callback=iolist.append,
enable_implied_do=1)
iolist = tokenization.remove_redundant_parentheses(tokens=iolist)
if (len(iolist) == 0):
ssl.raise_syntax_error(i=start+5)
elif (len(code) == start+5):
iolist = None
else:
ssl.raise_syntax_error(i=start+5)
O.executable.append(ei_read(ssl=ssl, start=start,
cilist=None, fmt_tokens=None, iolist=iolist))
elif (code.startswith("(", start+4)):
O.p_read_write(ssl=ssl, start=start+4, ei_type=ei_read)
else:
ssl.raise_syntax_error(i=start+4)
O.uses_io = True
O.uses_read = True
def p_return(O, ssl, start):
O.executable.append(ei_return(ssl=ssl, start=start,
return_label=ssl[start:]))
def p_rewind(O, ssl, start):
O.p_file_positioning(ssl=ssl, start=start, io_function="rewind")
def p_save(O, ssl, start):
assert start == 0
if (O.save is not None):
if (tokenization.ssl_iterator(
ssl=ssl, start=4).collect_comma_separated_identifiers(
callback=O.save.append, enable_common=True) == 0):
O.save = None
def p_stop(O, ssl, start):
tz = tokenization.ssl_iterator(ssl=ssl, start=start+4)
tok = tz.get(optional=True)
if (tok is not None):
if (not (tok.is_integer() or tok.is_string())):
tok.raise_syntax_error()
next_tok = tz.get(optional=True)
if (next_tok is not None):
next_tok.raise_syntax_error()
O.executable.append(ei_stop(ssl=ssl, start=start, arg_token=tok))
def p_write(O, ssl, start):
O.p_read_write(ssl=ssl, start=start+5, ei_type=ei_write)
O.uses_io = True
O.uses_write = True
def collect_keyword_arguments(O, tz, n_implied):
for known in O.__slots__:
setattr(O, known, None)
tok = tz.get()
if (not tok.is_op()):
tok.raise_syntax_error()
if (tok.value == "*"):
tok = tz.get()
if (not tok.is_op_with(value=",")):
tok.raise_syntax_error()
O.fmt = [tok]
else:
if (tok.value != "("):
tok.raise_syntax_error()
while True: # loop over comma-separated arguments
tok = tz.get()
if (tok.is_op_with(value=")")):
break
value_tokens = []
next_tok = tz.look_ahead()
if (next_tok.is_op_with(value="=")):
tz.get()
if (not tok.is_identifier()):
tok.raise_syntax_error()
key = tok.value
if (key not in O.__slots__):
tok.raise_syntax_error()
else:
value_tokens.append(tok)
for key in O.__slots__[:n_implied]:
if (getattr(O, key) is None):
break
else:
tok.raise_syntax_error()
while True:
tok = tz.look_ahead()
if (tok.is_op_with(value=")")):
break
tz.get()
if (tok.is_op_with(value=",")):
break
if (tok.is_op_with(value="(")):
nested_tokens = []
tz.collect_comma_separated_expressions(
callback=nested_tokens.append,
opening_token=tok)
value_tokens.append(tokenization.tk_parentheses(
ssl=tok.ssl, i_code=tok.i_code, value=nested_tokens))
else:
value_tokens.append(tok)
setattr(O, key, value_tokens)
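# The collect_io_* classes below map the control-information lists of
# Fortran I/O statements onto __slots__; the first n_implied slots may
# also be filled positionally (e.g. unit and fmt in read(5,'(i3)')).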
class collect_io_cilist(object):
"Control Information List f77_std 12.8"
__slots__ = ["unit", "fmt", "rec", "iostat", "err", "end"]
def __init__(O, tz, fmt=None):
if (fmt is None):
collect_keyword_arguments(O=O, tz=tz, n_implied=2)
else:
for known in O.__slots__:
setattr(O, known, None)
O.fmt = fmt
class collect_io_olist(object):
"Open List f77_std 12.10.1"
chain = ["access", "form", "recl", "blank", "status", "iostat"]
__slots__ = ["unit", "file", "err"] + chain
def __init__(O, tz):
collect_keyword_arguments(O=O, tz=tz, n_implied=1)
class collect_io_cllist(object):
"Close List f77_std 12.10.2"
chain = ["iostat", "status"]
__slots__ = ["unit", "err"] + chain
def __init__(O, tz):
collect_keyword_arguments(O=O, tz=tz, n_implied=1)
class collect_io_iuflist(object):
"iulist or iflist f77_std 12.10.3"
chain = [
"iostat", "exist", "opened", "number", "named", "name", "access",
"sequential", "direct", "form", "formatted", "unformatted", "recl",
"nextrec", "blank"]
__slots__ = ["unit", "file", "err"] + chain
def __init__(O, tz):
collect_keyword_arguments(O=O, tz=tz, n_implied=1)
class collect_io_alist(object):
"f77_std 12.10.4"
chain = ["iostat"]
__slots__ = ["unit", "err"] + chain
def __init__(O, tz, unit):
if (tz is not None):
assert unit is None
collect_keyword_arguments(O=O, tz=tz, n_implied=1)
else:
O.unit = unit
O.iostat = None
O.err = None
def collect_iolist(tz):
result = []
tok = tz.look_ahead(optional=True)
if (tok is not None):
if (tok.is_op_with(value=",")):
tz.get()
tz.collect_comma_separated_expressions(
callback=result.append,
enable_implied_do=1)
result = tokenization.remove_redundant_parentheses(tokens=result)
return result
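# One program unit (program, function, subroutine or block data):
# collects declarations, data, common blocks and executable statements
# while its body lines are parsed.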
class fproc(fproc_p_methods):
__slots__ = [
"leading_comments",
"trailing_comments",
"top_ssl", "fproc_type", "data_type", "size_tokens",
"body_lines", "end_ssl",
"name_plain", "name", "args",
"body_lines_processed_already",
"common",
"data",
"declarations",
"dimension",
"equivalence",
"executable",
"external",
"format",
"implicit",
"intrinsic",
"parameter",
"save",
"fdecl_by_identifier",
"args_fdecl",
"uses_common",
"uses_save",
"uses_io",
"uses_read",
"uses_write",
"uses_iargc_getarg",
"_fmt_counts_by_statement_label",
"_common_name_by_identifier",
"_equivalence_info",
"_classified_equivalence_info",
"_target_statement_labels",
"dynamic_parameters",
"needs_cmn",
"is_passed_as_external",
"externals_passed_by_arg_identifier",
"conv_hook"]
def __init__(O,
leading_comments,
top_ssl,
fproc_type,
i_code,
data_type,
size_tokens,
body_lines,
end_ssl):
assert fproc_type in ["program", "function", "subroutine", "blockdata"]
O.leading_comments = leading_comments
O.trailing_comments = []
O.top_ssl = top_ssl
O.fproc_type = fproc_type
O.body_lines = body_lines
O.end_ssl = end_ssl
O.data_type = data_type
O.size_tokens = size_tokens
O.set_name_and_args(i_code=i_code)
O.body_lines_processed_already = False
O.common = utils.keyed_lists()
O.data = []
O.declarations = []
O.dimension = []
O.equivalence = []
O.executable = []
O.external = []
O.format = {}
O.init_implicit()
O.intrinsic = []
O.parameter = []
O.save = []
O.fdecl_by_identifier = None
O.args_fdecl = None
O.uses_common = None
O.uses_save = None
O.uses_io = False
O.uses_read = False
O.uses_write = False
O.uses_iargc_getarg = False
O._fmt_counts_by_statement_label = None
O._common_name_by_identifier = None
O._equivalence_info = None
O._classified_equivalence_info = None
O._target_statement_labels = None
O.dynamic_parameters = set()
O.needs_cmn = None
O.is_passed_as_external = False
O.externals_passed_by_arg_identifier = {}
O.conv_hook = None
def is_program(O): return (O.fproc_type == "program")
def is_function(O): return (O.fproc_type == "function")
def is_subroutine(O): return (O.fproc_type == "subroutine")
def is_blockdata(O): return (O.fproc_type == "blockdata")
def first_body_source_line(O):
assert len(O.body_lines) != 0
assert len(O.body_lines[0].source_line_cluster) != 0
return O.body_lines[0].source_line_cluster[0]
def set_name_and_args(O, i_code):
O.name_plain = None
O.name = None
O.args = []
if (O.top_ssl is None):
assert O.is_program()
assert i_code == 0
O.name = tokenization.tk_identifier(
ssl=None, i_code=None, value=O.fproc_type+"_unnamed")
return
j_code = i_code + len(O.fproc_type)
pat = 'recursive'
    if (O.top_ssl.code.startswith(pat, i_code)):
j_code += len(pat)
tz = tokenization.ssl_iterator(ssl=O.top_ssl, start=j_code)
O.name = tz.get(optional=True)
if (O.name is None):
if (not O.is_program() and not O.is_blockdata()):
O.top_ssl.raise_syntax_error(i=j_code-1)
O.name = tokenization.tk_identifier(
ssl=O.top_ssl, i_code=0, value=O.fproc_type+"_unnamed")
return
opening_token = tz.get(optional=True)
if (opening_token is None):
if (O.is_program() or O.is_blockdata()):
O.name_plain = O.name
O.name = tokenization.tk_identifier(
ssl=O.name.ssl,
i_code=O.name.i_code,
value=O.fproc_type+"_"+O.name.value)
return
if (not opening_token.is_op_with(value="(") or O.is_blockdata()):
opening_token.raise_syntax_error()
need_arg = False
while True:
tok = tz.get_inside_parentheses(opening_token)
if (tok.is_identifier()):
O.args.append(tok)
elif (tok.is_op_with(value="*")):
if (O.fproc_type != "subroutine"):
tok.raise_syntax_error()
O.args.append(tok)
elif (need_arg):
tok.raise_syntax_error()
elif (tok.is_op_with(value=")")):
break
else:
tok.raise_syntax_error()
tok = tz.get_inside_parentheses(opening_token)
if (tok.is_op_with(value=")")):
break
if (not tok.is_op_with(value=",")):
tok.raise_syntax_error()
need_arg = True
tok = tz.get(optional=True)
if (tok is not None):
tok.raise_syntax_error()
def all_ssl(O):
result = list(O.leading_comments)
if (O.top_ssl is not None):
result.append(O.top_ssl)
result.extend(O.body_lines)
result.append(O.end_ssl)
result.extend(O.trailing_comments)
return result
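  # default implicit typing: names starting with i-n are integer,
  # everything else real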
def init_implicit(O):
O.implicit = {}
data_type = tokenization.tk_identifier(
ssl=None, i_code=None, value="real")
for c in "abcdefghopqrstuvwxyz":
O.implicit[c] = data_type
data_type = tokenization.tk_identifier(
ssl=None, i_code=None, value="integer")
for c in "ijklmn":
O.implicit[c] = data_type
def process_body_line(O, ssl, start):
code = ssl.code
if (len(code) == start): return
i_lid = identifier_scan(code, start=start) # i_leading_identifier
if (i_lid < 0):
ssl.raise_syntax_error()
if (i_lid == len(code)):
if (code.endswith("continue", start)):
O.p_continue(ssl=ssl, start=start)
return
for s in [
"assign",
"backspace",
"call",
"cycle",
"endfile",
"exit",
"goto",
"print",
"return",
"rewind",
"stop"]:
if (code.startswith(s, start)):
p = getattr(fproc_p_methods, "p_"+s)
p(O, ssl=ssl, start=start)
return
if (start != 0):
ssl.raise_syntax_error(i=start)
if (code in ["else", "enddo", "endif"]):
p = getattr(fproc_p_methods, "p_"+code)
p(O, ssl=ssl, start=start)
return
for s in [
"common",
"external",
"entry",
"implicit",
"intrinsic",
"save"]:
if (code.startswith(s)):
p = getattr(fproc_p_methods, "p_"+s)
p(O, ssl=ssl, start=start)
return
O.process_declaration(ssl=ssl, start=start, enable_size=False)
return
c = code[i_lid]
if (c == "="):
i = ssl.comma_scan(start=i_lid+1)
if (i < 0):
O.process_assignment(ssl=ssl, start=start, i_equal_sign=i_lid)
return
if (start != 0):
ssl.raise_syntax_error()
if (code.startswith("do")):
O.p_do(ssl=ssl, start=start)
return
ssl.raise_syntax_error()
if (c == "("):
i_clp = ssl.index_of_closing_parenthesis(start=i_lid+1)
if (i_clp+1 == len(code)):
cid = code[start:i_lid]
if (cid in [
"allocate",
"backspace",
"close",
"deallocate",
"endfile",
"inquire",
"open",
"read",
"rewind",
"write"]):
p = getattr(fproc_p_methods, "p_"+cid)
p(O, ssl=ssl, start=start)
return
for s in ["call", "goto"]:
if (code.startswith(s, start)):
p = getattr(fproc_p_methods, "p_"+s)
p(O, ssl=ssl, start=start)
return
if (start != 0):
ssl.raise_syntax_error(i=start)
if (cid.startswith("do") and cid.endswith("while")):
label_end = unsigned_integer_scan(code=cid, start=2)
if (label_end == len(cid)-5):
O.p_dowhile(ssl=ssl, start=start, label_end=label_end)
return
for s in [
"common",
"dimension",
"dowhile",
"entry",
"equivalence",
"format",
"implicit",
"parameter"]:
if (code.startswith(s)):
p = getattr(fproc_p_methods, "p_"+s)
p(O, ssl=ssl, start=start)
return
O.process_declaration(ssl=ssl, start=start, enable_size=True)
return
c = code[i_clp+1]
if (c == "="):
O.process_assignment(ssl=ssl, start=start, i_equal_sign=i_clp+1)
return
if (c == "("):
i_clp2 = ssl.index_of_closing_parenthesis(start=i_clp+2)
if (i_clp2+1 < len(code) and code[i_clp2+1] == "="):
O.process_assignment(ssl=ssl, start=start, i_equal_sign=i_clp2+1)
return
for s in ["allocate(", "backspace(", "deallocate(", "read(", "write("]:
if (code.startswith(s, start)):
p = getattr(fproc_p_methods, "p_"+s[:-1])
p(O, ssl=ssl, start=start)
return
if (code.startswith("data", start)):
O.p_data(ssl=ssl, start=start)
return
ssl.raise_syntax_error_or_not_implemented()
if (c == ","):
cid = code[start:i_lid]
if (cid == "goto"):
O.p_goto(ssl=ssl, start=start)
return
if (cid in [
"allocate",
"backspace",
"equivalence",
"deallocate",
"read",
"write"]):
p = getattr(fproc_p_methods, "p_"+cid)
p(O, ssl=ssl, start=start)
return
if (start != 0):
ssl.raise_syntax_error(i=start)
for s in ["common", "data", "dimension", "print"]:
if (code.startswith(s)):
p = getattr(fproc_p_methods, "p_"+s)
p(O, ssl=ssl, start=start)
return
O.process_declaration(ssl=ssl, start=start, enable_size=True)
return
for s in [
"allocate(",
"backspace(",
"deallocate(",
"goto(",
"if(",
"read(",
"write("]:
if (code.startswith(s, start)):
p = getattr(fproc_p_methods, "p_"+s[:-1])
p(O, ssl=ssl, start=start)
return
if (start != 0):
ssl.raise_syntax_error(i=start)
if (code.startswith("elseif(")):
O.p_elseif(ssl=ssl, start=start)
return
if (code.startswith("data")):
O.p_data(ssl=ssl, start=start)
return
O.process_declaration(ssl=ssl, start=start, enable_size=True)
return
if (c == "/"):
if (start != 0):
ssl.raise_syntax_error(i=start)
for s in ["common", "data", "save"]:
if (code.startswith(s)):
p = getattr(fproc_p_methods, "p_"+s)
p(O, ssl=ssl, start=start)
return
ssl.raise_syntax_error_or_not_implemented()
if (c == ","):
if (code.startswith("goto", start)):
O.p_goto(ssl=ssl, start=start)
return
if (code.startswith("print", start)):
O.p_print(ssl=ssl, start=start)
return
if (start != 0):
ssl.raise_syntax_error(i=start)
for s in ["common", "data", "external", "intrinsic", "save"]:
if (code.startswith(s)):
p = getattr(fproc_p_methods, "p_"+s)
p(O, ssl=ssl, start=start)
return
if ( code.startswith("do")
and unsigned_integer_scan(code=code, start=2) == i_lid):
O.p_do(ssl=ssl, start=start)
return
O.process_declaration(ssl=ssl, start=start, enable_size=False)
return
if (code.endswith("stop'", start)):
O.p_stop(ssl=ssl, start=start)
return
for s in ["backspace", "print", "read", "rewind"]:
if (code.startswith(s, start)):
p = getattr(fproc_p_methods, "p_"+s)
p(O, ssl=ssl, start=start)
return
if (start != 0):
ssl.raise_syntax_error(i=start)
O.process_declaration(ssl=ssl, start=start, enable_size=True)
def process_declaration(O, ssl, start, enable_size):
assert start == 0
if (enable_size):
i_code, data_type, size_tokens = extract_data_type_and_size(ssl=ssl)
else:
i_code, data_type = extract_data_type(ssl=ssl)
size_tokens = None
code = ssl.code
if (i_code == len(code)):
ssl.raise_syntax_error(i=start)
i_code, f90_decl = extract_f90_decl(ssl=ssl, start=i_code)
extract_fdecl(
result=O.declarations,
ssl=ssl,
start=i_code,
data_type=data_type,
size_tokens=size_tokens,
allow_size=True,
f90_decl=f90_decl)
def process_assignment(O, ssl, start, i_equal_sign):
if (i_equal_sign+1 == len(ssl.code)):
ssl.raise_syntax_error()
lhs_tokens = tokenize_expression(ssl=ssl, start=start, stop=i_equal_sign)
rhs_tokens = tokenize_expression(ssl=ssl, start=i_equal_sign+1)
O.executable.append(ei_assignment(ssl=ssl, start=start,
lhs_tokens=lhs_tokens, rhs_tokens=rhs_tokens))
def process_body_lines(O):
assert not O.body_lines_processed_already
O.body_lines_processed_already = True
for ssl in O.body_lines:
O.process_body_line(ssl=ssl, start=0)
def show_fdecl(O):
"for debugging; not exercised"
assert O.fdecl_by_identifier is not None
print O.name.value
for key in sorted(O.fdecl_by_identifier.keys()):
fdecl = O.fdecl_by_identifier[key]
print " ", fdecl.id_tok.value
print " ", fdecl.var_type
if (fdecl.var_storage is not None):
print " ", fdecl.var_storage
if (fdecl.data_type is not None):
print " ", fdecl.data_type.value
if (fdecl.parameter_assignment_tokens is not None):
print " parameter"
print
def build_fdecl_by_identifier(O):
if (not O.body_lines_processed_already):
O.process_body_lines()
assert O.fdecl_by_identifier is None
O.fdecl_by_identifier = {}
def make_fdecl(
id_tok,
var_type=None,
var_storage=None,
data_type=None,
size_tokens=None,
dim_tokens=None):
O.fdecl_by_identifier[id_tok.value] = result = fdecl_info(
id_tok=id_tok,
var_type=var_type,
var_storage=var_storage,
data_type=data_type,
size_tokens=size_tokens,
dim_tokens=dim_tokens)
return result
if (O.is_function()):
vt = vt_function
elif (O.is_subroutine() or O.is_blockdata()):
vt = vt_subroutine
else:
vt = None
if (vt is not None):
make_fdecl(
id_tok=O.name,
var_type=vt,
var_storage=vs_fproc_name,
data_type=O.data_type,
size_tokens=O.size_tokens)
def raise_confl_decl(id_tok):
id_tok.raise_semantic_error(
msg="Conflicting declaration: %s" % id_tok.value)
def raise_confl_or_repeated_decl(id_tok):
id_tok.raise_semantic_error(
msg="Conflicting or repeated declaration: %s" % id_tok.value)
for fdecl in O.declarations:
id_tok = fdecl.id_tok
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
make_fdecl(
id_tok=id_tok,
data_type=fdecl.data_type,
size_tokens=fdecl.size_tokens,
dim_tokens=fdecl.dim_tokens)
elif (tf.var_storage is vs_fproc_name):
if (tf.data_type is not None):
raise_confl_or_repeated_decl(id_tok=id_tok)
if (fdecl.dim_tokens is not None):
raise_confl_or_repeated_decl(id_tok=id_tok)
tf.data_type = fdecl.data_type
tf.size_tokens = fdecl.size_tokens
elif (tf.data_type is not None):
raise_confl_or_repeated_decl(id_tok=id_tok)
for id_tok in O.args:
if (id_tok.value == "*"): continue
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is not None):
if (tf.var_storage is not None):
raise_confl_or_repeated_decl(id_tok=id_tok)
tf.var_storage = vs_argument
else:
make_fdecl(id_tok=id_tok, var_storage=vs_argument)
for id_tok,assignment_tokens in O.parameter:
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
tf = make_fdecl(id_tok=id_tok, var_storage=vs_parameter)
else:
if (tf.var_storage is not None):
raise_confl_or_repeated_decl(id_tok=id_tok)
if (tf.dim_tokens is not None):
          raise_confl_or_repeated_decl(id_tok=id_tok)
tf.var_storage = vs_parameter
tf.parameter_assignment_tokens = assignment_tokens
def get_implicit_data_type(id_tok, optional=False):
result = O.implicit.get(id_tok.value[0])
if (result is None and not optional):
id_tok.raise_semantic_error("Unknown data type: %s" % id_tok.value)
return result
def set_dim_tokens(fdecl):
tf = O.fdecl_by_identifier.get(fdecl.id_tok.value)
if (tf is None):
tf = make_fdecl(
id_tok=fdecl.id_tok,
size_tokens=fdecl.size_tokens,
dim_tokens=fdecl.dim_tokens)
elif (fdecl.dim_tokens is not None):
if (tf.dim_tokens is not None):
fdecl.id_tok.raise_semantic_error(
msg="Conflicting or repeated dimension: %s" % fdecl.id_tok.value)
tf.dim_tokens = fdecl.dim_tokens
return tf
for fdecl in O.dimension:
set_dim_tokens(fdecl=fdecl)
for fdecl_list in O.common.lists:
for fdecl in fdecl_list:
tf = set_dim_tokens(fdecl=fdecl)
if (tf.var_storage is not None):
raise_confl_or_repeated_decl(id_tok=fdecl.id_tok)
tf.var_storage = vs_common
if (tf.data_type is None):
tf.data_type = get_implicit_data_type(id_tok=fdecl.id_tok)
if (O.save is not None):
for id_tok in O.save:
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
make_fdecl(id_tok=id_tok, var_storage=vs_save)
else:
vs = tf.var_storage
if (vs is None):
tf.var_storage = vs_save
elif (vs is not vs_common):
raise_confl_or_repeated_decl(id_tok=id_tok)
for id_tok in O.external:
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
make_fdecl(id_tok=id_tok, var_type=vt_external)
elif (tf.dim_tokens is not None):
raise_confl_or_repeated_decl(id_tok=id_tok)
else:
vs = tf.var_storage
if (vs is not None and vs is not vs_argument):
raise_confl_or_repeated_decl(id_tok=id_tok)
vt = tf.var_type
        if (    vt is not None
            and vt is not vt_external
            and vt is not vt_function):
raise_confl_or_repeated_decl(id_tok=id_tok)
if (tf.data_type is None):
tf.var_type = vt_external
else:
tf.var_type = vt_function
for id_tok in O.intrinsic:
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
make_fdecl(id_tok=id_tok, var_type=vt_intrinsic)
elif (tf.dim_tokens is not None):
raise_confl_or_repeated_decl(id_tok=id_tok)
else:
vs = tf.var_storage
if (vs is not None):
raise_confl_or_repeated_decl(id_tok=id_tok)
#
for nlist,clist in O.data:
id_toks = tokenization.extract_identifiers(tokens=nlist)
for id_tok in id_toks:
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
make_fdecl(id_tok=id_tok, var_type=vt_scalar)
def callback(tok):
tf = O.fdecl_by_identifier.get(tok.value)
if (tf is not None):
if (tf.var_type is None):
tf.var_type = vt_scalar
if (tf.var_storage is None or tf.var_storage is vs_local):
tf.var_storage = vs_save
tf.is_modified = True
tf.use_count += 1
tokenization.search_for_data_or_read_target_tokens(
callback=callback, tokens=nlist)
#
for id_tok in O.args:
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is not None and tf.dim_tokens is not None):
dim_id_toks = tokenization.extract_identifiers(tokens=tf.dim_tokens)
for dim_id_tok in dim_id_toks:
dim_tf = O.fdecl_by_identifier.get(dim_id_tok.value)
if (dim_tf is not None and dim_tf.var_type is None):
dim_tf.var_type = vt_used
dim_tf.use_count += 1
#
for equiv_tok in O.equivalence:
for tok_seq in equiv_tok.value:
id_tok = tok_seq.value[0]
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
make_fdecl(
id_tok=id_tok,
data_type=get_implicit_data_type(id_tok=id_tok))
#
for ei in O.executable:
if (ei.key == "call"):
id_tok = ei.subroutine_name
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
if (id_tok.value in intrinsics.extra_set_lower):
make_fdecl(id_tok=id_tok, var_type=vt_intrinsic)
elif (id_tok.value in intrinsics.io_set_lower):
make_fdecl(id_tok=id_tok, var_type=vt_intrinsic)
O.uses_io = True
else:
make_fdecl(id_tok=id_tok, var_type=vt_subroutine)
else:
vt = tf.var_type
if (vt is None or vt is vt_external):
if (tf.data_type is not None):
raise_confl_decl(id_tok=id_tok)
if (tf.dim_tokens is not None):
raise_confl_decl(id_tok=id_tok)
tf.var_type = vt_subroutine
tf.use_count += 1
elif ( vt is vt_intrinsic
and id_tok.value in intrinsics.extra_set_lower):
pass
elif (vt is not vt_subroutine):
raise_confl_decl(id_tok=id_tok)
def search_for_id_tokens_callback(id_tok, next_tok):
followed_by_parenthesis = (
next_tok is not None and next_tok.is_parentheses())
tf = O.fdecl_by_identifier.get(id_tok.value)
if (tf is None):
if (not followed_by_parenthesis):
tf = make_fdecl(id_tok=id_tok, var_type=vt_used)
elif (id_tok.value in intrinsics.set_lower):
tf = make_fdecl(id_tok=id_tok, var_type=vt_intrinsic)
else:
tf = make_fdecl(id_tok=id_tok, var_type=vt_external)
tf.use_count += 1
return
tf.use_count += 1
if (tf.var_type is vt_intrinsic):
if ( id_tok.value not in intrinsics.set_lower
and id_tok.value not in intrinsics.extra_set_lower
and id_tok.value not in intrinsics.io_set_lower):
id_tok.raise_semantic_error(
msg="Unknown intrinsic: %s" % id_tok.value)
if (not followed_by_parenthesis):
id_tok.raise_semantic_error(
msg="Improper use of intrinsic: %s" % id_tok.value)
elif (followed_by_parenthesis):
vt = tf.var_type
vs = tf.var_storage
if (tf.dim_tokens is not None):
if (tf.var_type is None):
tf.var_type = vt_used
if (tf.data_type is None):
tf.data_type = get_implicit_data_type(id_tok=id_tok)
elif ( tf.data_type is not None
and tf.data_type.value == "character"):
if (tf.var_type is None):
tf.var_type = vt_used
elif (vs is vs_argument):
if (vt is None):
tf.var_type = vt_external
elif ( vt is not vt_external
and vt is not vt_function
and vt is not vt_subroutine):
raise_confl_decl(id_tok=id_tok)
elif ( vt is vt_external
or vt is vt_function
or vt is vt_subroutine):
pass
elif (vt is vt_intrinsic):
if (id_tok.value not in intrinsics.set_lower):
id_tok.raise_semantic_error(
msg="Unknown intrinsic: %s" % id_tok.value)
elif (vt is vt_used):
raise_confl_decl(id_tok=id_tok)
else:
if (tf.var_storage is not None):
pass # XXX should be error; ignored due to
# lack of proper handling of f90 declarations
tf.var_storage = None
if ( id_tok.value in intrinsics.extra_set_lower
or id_tok.value in intrinsics.set_lower):
tf.var_type = vt_intrinsic
else:
tf.var_type = vt_function
else:
if (tf.var_type is None):
tf.var_type = vt_used
if (tf.data_type is None
and tf.var_type is not vt_external
and tf.var_type is not vt_subroutine):
tf.data_type = get_implicit_data_type(
id_tok=tf.id_tok, optional=True)
ei.search_for_id_tokens(callback=search_for_id_tokens_callback)
ei.set_is_modified(fdecl_by_identifier=O.fdecl_by_identifier)
#
O.uses_common = False
for tf in O.fdecl_by_identifier.values():
vt = tf.var_type
vs = tf.var_storage
if ( vt is vt_external
or vt is vt_function
or vt is vt_subroutine):
if (not (vs is None or vs is vs_fproc_name or vs is vs_argument)):
tf.id_tok.raise_internal_error()
elif (vt is vt_intrinsic):
if (vs is not None):
tf.id_tok.raise_internal_error()
else:
if (vs is None):
if (O.save is None):
tf.var_storage = vs_save
else:
tf.var_storage = vs_local
if (vt is vt_used):
tf.var_type = vt_scalar
if (tf.data_type is None):
tf.data_type = get_implicit_data_type(id_tok=tf.id_tok)
elif (vt is None and vs is vs_argument and tf.data_type is None):
tf.data_type = get_implicit_data_type(id_tok=tf.id_tok)
if (tf.is_common()):
O.uses_common = True
#
for ei in O.executable:
def search_for_id_tokens_callback(id_tok, next_tok):
if ( next_tok is None
or not next_tok.is_parentheses()):
return
tf = O.fdecl_by_identifier.get(id_tok.value)
assert tf is not None
if (tf.is_intrinsic()):
intrinsic_is_modified_info = intrinsics.is_modified_info_by_name.get(
id_tok.value)
if (intrinsic_is_modified_info is None):
return
elif (not tf.is_user_defined_callable()):
return
else:
intrinsic_is_modified_info = None
called_identifier = id_tok.value
for i_arg,tok_seq in enumerate(next_tok.value):
assert tok_seq.is_seq()
if (len(tok_seq.value) == 0):
continue
first_arg_tok = tok_seq.value[0]
if (not first_arg_tok.is_identifier()):
continue
tf_arg = O.fdecl_by_identifier.get(first_arg_tok.value)
assert tf_arg is not None
if (tf_arg.is_fproc_name()):
return
if (intrinsic_is_modified_info is None):
tf_arg.passed_as_arg.setdefault(
called_identifier, set()).add(i_arg)
if (len(tok_seq.value) == 1):
tf_arg.passed_as_arg_plain.setdefault(
called_identifier, set()).add(i_arg)
elif (i_arg in intrinsic_is_modified_info):
tf_arg.is_modified = True
ei.search_for_id_tokens(callback=search_for_id_tokens_callback)
#
assert O.args_fdecl is None
O.args_fdecl = []
for id_tok in O.args:
if (id_tok.value == "*"): continue
tf = O.fdecl_by_identifier.get(id_tok.value)
assert tf is not None
O.args_fdecl.append(tf)
#
for identifier in ["iargc", "getarg"]:
tf = O.fdecl_by_identifier.get(identifier)
if (tf is not None and tf.is_intrinsic()):
O.uses_iargc_getarg = True
break
#
equiv_info = O.equivalence_info()
for equiv_tok_cluster in equiv_info.equiv_tok_clusters:
cluster_is_modified = False
tf_cluster = []
for equiv_tok in equiv_tok_cluster:
for tok_seq in equiv_tok.value:
id_tok = tok_seq.value[0]
tf = O.fdecl_by_identifier[id_tok.value]
tf_cluster.append(tf)
if (tf.is_modified):
cluster_is_modified = tf.is_modified
if (cluster_is_modified):
for tf in tf_cluster:
tf.is_modified = True
def get_fdecl(O, id_tok):
return O.fdecl_by_identifier[id_tok.value]
def fmt_counts_by_statement_label(O):
assert O.body_lines_processed_already
result = O._fmt_counts_by_statement_label
if (result is None):
from libtbx import dict_with_default_0
result = dict_with_default_0()
for ei in O.executable:
if (ei.key in ["read", "write", "print"] and ei.fmt_tokens is None):
tl = ei.cilist.fmt
if (tl is not None and len(tl) == 1):
tok = tl[0]
if (tok.is_integer()):
              result[tok.value] += 1
      O._fmt_counts_by_statement_label = result
    return result
def common_name_by_identifier(O):
result = O._common_name_by_identifier
if (result is None):
result = {}
for common_name,fdecl_list in O.common.items():
for fdecl in fdecl_list:
identifier = fdecl.id_tok.value
if (identifier in result):
fdecl.id_tok.raise_semantic_error(
msg="Identifier appears in multiple COMMON statements: %s" %
identifier)
result[identifier] = common_name
O._common_name_by_identifier = result
return result
def equivalence_info(O):
result = O._equivalence_info
if (result is None):
cu = equivalence.cluster_unions()
for equiv_tok in O.equivalence:
cu.add(
key_cluster=[tok_seq.value[0].value for tok_seq in equiv_tok.value])
cu.tidy()
result = equivalence_info()
for i in xrange(len(cu.unions)):
result.equiv_tok_clusters.append([])
for equiv_tok in O.equivalence:
result.equiv_tok_clusters[
cu.indices[equiv_tok.value[0].value[0].value]].append(
equiv_tok)
result.set_derived()
O._equivalence_info = result
return result
def classified_equivalence_info(O):
assert O.fdecl_by_identifier is not None
result = O._classified_equivalence_info
if (result is None):
result = classified_equivalence_info()
equiv_info = O.equivalence_info()
for equiv_tok_cluster in equiv_info.equiv_tok_clusters:
highest_priority = 0
for equiv_tok in equiv_tok_cluster:
for tok_seq in equiv_tok.value:
identifier = tok_seq.value[0].value
fdecl = O.fdecl_by_identifier[identifier]
if (fdecl.is_common()):
priority = 3
elif (fdecl.is_save()):
priority = 2
elif (fdecl.is_local()):
priority = 1
else:
tok_seq.raise_semantic_error(msg="Invalid EQUIVALENCE")
highest_priority = max(highest_priority, priority)
assert highest_priority != 0
slot = getattr(result, ["local", "save", "common"][highest_priority-1])
slot.equiv_tok_clusters.append(
equiv_info.equiv_tok_cluster_by_identifier[identifier])
result.set_derived()
O._classified_equivalence_info = result
return result
def set_uses_save(O):
cei = O.classified_equivalence_info()
O.uses_save = (len(O.data) != 0)
if (not O.uses_save):
for fdecl in O.fdecl_by_identifier.values():
if (fdecl.is_save()):
equiv_tok_cluster = cei.common.equiv_tok_cluster_by_identifier.get(
fdecl.id_tok.value)
if (equiv_tok_cluster is None):
O.uses_save = True
break
def target_statement_labels(O):
result = O._target_statement_labels
if (O._target_statement_labels is None):
result = {}
for ei in O.executable:
if (ei.key == "goto"):
result.setdefault(ei.label.value, []).append(ei.label)
elif (ei.key in ["goto_computed", "if_arithmetic"]):
for tok in ei.labels:
result.setdefault(tok.value, []).append(tok)
elif (ei.key == "open"):
if (ei.olist.err is not None):
tok = tokenization.get_statement_label_token(tokens=ei.olist.err)
result.setdefault(tok.value, []).append(tok)
elif (ei.key == "close"):
if (ei.cllist.err is not None):
tok = tokenization.get_statement_label_token(tokens=ei.cllist.err)
result.setdefault(tok.value, []).append(tok)
elif (ei.key == "inquire"):
if (ei.iuflist.err is not None):
tok = tokenization.get_statement_label_token(tokens=ei.iuflist.err)
result.setdefault(tok.value, []).append(tok)
elif (ei.key == "file_positioning"):
if (ei.alist.err is not None):
tok = tokenization.get_statement_label_token(tokens=ei.alist.err)
result.setdefault(tok.value, []).append(tok)
elif (ei.key in ["read", "write"]):
cilist = ei.cilist
if (cilist is not None):
for slot in ["end", "err"]:
tokens = getattr(cilist, slot)
if (tokens is not None):
tok = tokenization.get_statement_label_token(tokens=tokens)
result.setdefault(tok.value, []).append(tok)
O._target_statement_labels = result
return result
def _eval_const_expression_simple_identifier(O,
identifier, buffer, allow_power):
if (identifier in O.dynamic_parameters):
return False
fdecl = O.fdecl_by_identifier.get(identifier)
if (fdecl is None):
return False
tokens = fdecl.parameter_assignment_tokens
if (tokens is None):
return False
code = tokenization.tokens_as_python_code(
tokens=tokens, allow_power=allow_power)
if (code is None):
return False
expr = "%s = %s" % (identifier, code)
buffer.append(expr)
return O._eval_const_expression_simple_tokens(
tokens=tokens, buffer=buffer, allow_power=allow_power)
def _eval_const_expression_simple_tokens(O,
tokens, buffer, allow_power):
for id_tok in tokenization.extract_identifiers(tokens=tokens):
if (not O._eval_const_expression_simple_identifier(
identifier=id_tok.value, buffer=buffer, allow_power=allow_power)):
return False
return True
def eval_const_expression_simple(O,
identifier=None, tokens=None, allow_power=True):
assert O.fdecl_by_identifier is not None
assert "_" not in O.fdecl_by_identifier # not supported
assert [identifier, tokens].count(None) == 1
buffer = []
if (identifier is None):
code = tokenization.tokens_as_python_code(
tokens=tokens, allow_power=allow_power)
if (code is None):
return None
buffer.append("_ = %s" % code)
if (not O._eval_const_expression_simple_tokens(
tokens=tokens, buffer=buffer, allow_power=allow_power)):
return None
else:
if (not O._eval_const_expression_simple_identifier(
identifier=identifier, buffer=buffer, allow_power=allow_power)):
return None
buffer.reverse()
code = "\n".join(buffer)
exec_globals = {}
exec_locals = {}
exec(code, exec_globals, exec_locals)
if (identifier is None):
result = exec_locals["_"]
else:
result = exec_locals[identifier]
if (isinstance(result, float) and int(result) == result):
result = int(result)
return result
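  # Worked sketch (illustrative, not taken from any real input): given the
  # Fortran declarations PARAMETER (n=3) and PARAMETER (m=n*2), calling
  # eval_const_expression_simple(identifier="m") appends "m = n*2" to the
  # buffer, recurses into the tokens of that expression to append "n = 3",
  # reverses the buffer so dependencies are assigned first, exec's
  #   n = 3
  #   m = n*2
  # and returns exec_locals["m"], i.e. 6.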
def eval_dimensions_simple(O, dim_tokens, allow_power=True):
vals = []
for tok_seq in dim_tokens:
if (tokenization.tok_seq_is_star(tok_seq=tok_seq)):
vals.append(None)
else:
for i,tok in enumerate(tok_seq.value):
if (tok.is_op_with(value=":")):
fl = []
for tokens in (tok_seq.value[:i], tok_seq.value[i+1:]):
fl.append(O.eval_const_expression_simple(
tokens=tokens, allow_power=allow_power))
f,l = fl
if (f is None or l is None):
vals.append(None)
else:
vals.append(l-f+1)
break
else:
vals.append(O.eval_const_expression_simple(
tokens=tok_seq.value, allow_power=allow_power))
return vals
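# Example (illustrative): for DIMENSION a(2:5, *), the first dimension
# tok_seq contains a ":" operator, so both bounds are evaluated with
# eval_const_expression_simple and vals receives 5 - 2 + 1 = 4; the "*"
# sequence maps to None, giving vals == [4, None].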
class equivalence_info(object):
__slots__ = [
"equiv_tok_clusters",
"equiv_tok_cluster_by_identifier",
"identifier_clusters",
"identifier_cluster_by_identifier"]
def __init__(O):
O.equiv_tok_clusters = []
def set_derived(O):
O.equiv_tok_cluster_by_identifier = {}
O.identifier_clusters = []
O.identifier_cluster_by_identifier = {}
for equiv_tok_cluster in O.equiv_tok_clusters:
identifier_cluster = []
O.identifier_clusters.append(identifier_cluster)
for equiv_tok in equiv_tok_cluster:
for tok_seq in equiv_tok.value:
identifier = tok_seq.value[0].value
O.equiv_tok_cluster_by_identifier[identifier] = equiv_tok_cluster
identifier_cluster.append(identifier)
O.identifier_cluster_by_identifier[identifier] = identifier_cluster
class classified_equivalence_info(object):
__slots__ = ["common", "save", "local"]
def __init__(O):
for slot in O.__slots__:
setattr(O, slot, equivalence_info())
def set_derived(O):
for slot in O.__slots__:
getattr(O, slot).set_derived()
def has_save(O):
return (len(O.save.equiv_tok_clusters) != 0)
class split_fprocs(object):
__slots__ = [
"program",
"subroutine",
"function",
"blockdata",
"all_in_input_order",
"_fprocs_by_name",
"_fprocs_by_name_plain"]
def __init__(O):
O.program = []
O.subroutine = []
O.function = []
O.blockdata = []
O.all_in_input_order = []
O._fprocs_by_name = None
O._fprocs_by_name_plain = None
def by_type(O):
return [O.program, O.blockdata, O.subroutine, O.function]
def process(O, stripped_source_lines):
ssls = iter(stripped_source_lines)
leading_comments = []
for curr_ssl in ssls:
if (curr_ssl.is_comment()):
leading_comments.append(curr_ssl)
continue
assert len(curr_ssl.code) != 0
def collect_until_end(
fproc_type, top_ssl, i_code, data_type, size_tokens,
first_body_line=None):
body_lines = []
if (first_body_line is not None):
body_lines.append(first_body_line)
specific_end = "end"+fproc_type
for ssl in ssls:
if (ssl.code in ["end", specific_end]):
result = fproc(
leading_comments=leading_comments,
top_ssl=top_ssl,
fproc_type=fproc_type,
i_code=i_code,
data_type=data_type,
size_tokens=size_tokens,
body_lines=body_lines,
end_ssl=ssl)
O.all_in_input_order.append(result)
return result
body_lines.append(ssl)
if (top_ssl is None):
top_ssl = first_body_line
top_ssl.raise_error(msg="Missing END for %s" % (fproc_type.upper()))
for fproc_type in ["program", "blockdata", "subroutine", "function"]:
if (fproc_type == "subroutine" and \
curr_ssl.code.startswith('recursivesubroutine') ) \
or curr_ssl.code.startswith(fproc_type):
getattr(O, fproc_type).append(collect_until_end(
fproc_type=fproc_type,
top_ssl=curr_ssl,
i_code=0,
data_type=None,
size_tokens=None))
break
else:
i_code, data_type, size_tokens = extract_data_type_and_size(
ssl=curr_ssl, optional=True)
if (i_code is None or
not curr_ssl.code.startswith("function", i_code)):
O.program.append(collect_until_end(
fproc_type="program",
top_ssl=None,
i_code=0,
data_type=None,
size_tokens=None,
first_body_line=curr_ssl))
else:
O.function.append(collect_until_end(
fproc_type="function",
top_ssl=curr_ssl,
i_code=i_code,
data_type=data_type,
size_tokens=size_tokens))
leading_comments = []
if (len(leading_comments) != 0 and len(O.all_in_input_order) != 0):
O.all_in_input_order[-1].trailing_comments = leading_comments
def show_counts_by_type(O, out=None, prefix=""):
if (out is None): out = sys.stdout
print >> out, prefix + "Counts by Fortran procedure type:"
for attr in O.__slots__[:4]:
print >> out, prefix + " %s: %s" % (attr, len(getattr(O, attr)))
def process_body_lines(O):
for fproc in O.all_in_input_order:
fproc.process_body_lines()
def build_fdecl_by_identifier(O):
for fproc in O.all_in_input_order:
fproc.build_fdecl_by_identifier()
def fprocs_by_name(O, plain=False):
if (O._fprocs_by_name is None):
O._fprocs_by_name = {}
O._fprocs_by_name_plain = {}
for fprocs in O.by_type():
for fproc in fprocs:
other = O._fprocs_by_name.get(fproc.name.value)
if (other is not None):
msg = ["Fortran procedure name conflict:"]
for name in [other.name, fproc.name]:
if (name.ssl is None):
msg.append(
" %d. definition: %s (implied)\n"
" before %s" % (
len(msg),
fproc.name.value,
fproc.first_body_source_line()
.format_file_name_and_line_number()))
else:
msg.append(name.format_error(
msg="%d. definition" % len(msg), prefix=" "))
from libtbx.utils import Sorry
raise Sorry("\n".join(msg))
O._fprocs_by_name[fproc.name.value] = fproc
if (fproc.name_plain is not None):
O._fprocs_by_name_plain[fproc.name_plain.value] = fproc
if (plain):
return O._fprocs_by_name_plain
return O._fprocs_by_name
def build_bottom_up_fproc_list_following_calls(O, top_procedures=None):
return build_bottom_up_fproc_list_following_calls(
all_fprocs=O, top_procedures=top_procedures)
class build_bottom_up_fproc_list_following_calls(object):
__slots__ = [
"all_fprocs",
"top_procedures",
"deps_by_fproc_identifier",
"bottom_up_list",
"forward_uses_by_identifier",
"dependency_cycles",
"missing_external_fdecls_by_identifier"]
def __init__(O, all_fprocs, top_procedures=None):
O.all_fprocs = all_fprocs
O.top_procedures = top_procedures
fprocs_by_name = O.all_fprocs.fprocs_by_name()
#
for fproc in O.all_fprocs.all_in_input_order:
for fdecl in fproc.fdecl_by_identifier.values():
if ( fdecl.is_user_defined_callable()
and not fdecl.var_storage is vs_argument
and len(fdecl.passed_as_arg_plain) != 0):
def recursively_update_externals_passed(
primary_external_identifier,
procs_visited_already,
caller_fdecl):
for called_name,i_arg_set in \
caller_fdecl.passed_as_arg_plain.items():
called_fproc = fprocs_by_name.get(called_name)
if (called_fproc is None):
continue
for i_arg in i_arg_set:
arg_identifier = called_fproc.args[i_arg].value
primaries = called_fproc.externals_passed_by_arg_identifier \
.setdefault(arg_identifier, set())
if (not primary_external_identifier in primaries):
primaries.add(primary_external_identifier)
if (called_name not in procs_visited_already):
procs_visited_already.add(called_name)
recursively_update_externals_passed(
primary_external_identifier=primary_external_identifier,
procs_visited_already=procs_visited_already,
caller_fdecl=called_fproc.fdecl_by_identifier[
arg_identifier])
primary_external_identifier = fdecl.id_tok.value
primary_fproc = fprocs_by_name.get(primary_external_identifier)
if (primary_fproc is not None):
primary_fproc.is_passed_as_external = True
recursively_update_externals_passed(
primary_external_identifier=primary_external_identifier,
procs_visited_already=set([fproc.name.value]),
caller_fdecl=fdecl)
#
O.deps_by_fproc_identifier = {}
external_fdecls = {}
def get_dependencies(fproc):
deps = set()
for primaries in fproc.externals_passed_by_arg_identifier.values():
deps.update(primaries)
for identifier in sorted(fproc.fdecl_by_identifier.keys()):
if (identifier == fproc.name.value): continue
fdecl = fproc.fdecl_by_identifier[identifier]
if ( fdecl.is_user_defined_callable()
and fdecl.var_storage is not vs_argument):
deps.add(fdecl.id_tok.value)
external_fdecls.setdefault(identifier, []).append(fdecl)
if (fproc.is_program()):
for b in O.all_fprocs.blockdata:
deps.add(b.name.value)
result = sorted(deps)
O.deps_by_fproc_identifier[fproc.name.value] = result
return result
if (O.top_procedures is None or len(O.top_procedures) == 0):
connections_for_topological_sort = []
for fproc in O.all_fprocs.all_in_input_order:
connections_for_topological_sort.append(
(fproc.name.value, get_dependencies(fproc=fproc)))
else:
top_procedures_tidy = []
for top_procedure_or_procedures in O.top_procedures:
for top_procedure in top_procedure_or_procedures.split(","):
top_fproc = fprocs_by_name.get(top_procedure)
if (top_fproc is None):
top_fproc = fprocs_by_name.get("program_"+top_procedure)
if (top_fproc is None):
raise RuntimeError(
"Unknown Fortran procedure name: %s" % top_procedure)
top_procedures_tidy.append(top_procedure)
def recurse(fproc):
for identifier in get_dependencies(fproc=fproc):
if (identifier in O.deps_by_fproc_identifier):
continue
next_fproc = fprocs_by_name.get(identifier)
if (next_fproc is not None):
recurse(fproc=next_fproc)
recurse(fproc=top_fproc)
O.top_procedures = top_procedures_tidy
connections_for_topological_sort = []
for fproc in O.all_fprocs.all_in_input_order:
if (fproc.name.value in O.deps_by_fproc_identifier):
connections_for_topological_sort.append(
(fproc.name.value, get_dependencies(fproc=fproc)))
#
from libtbx import topological_sort
successors_by_node = dict(connections_for_topological_sort)
O.bottom_up_list = []
bottom_up_set = set()
O.forward_uses_by_identifier = {}
forward_uses_set = set()
O.missing_external_fdecls_by_identifier = {}
for identifier in topological_sort.stable(
connections=connections_for_topological_sort):
fproc = fprocs_by_name.get(identifier)
if (fproc is not None):
O.bottom_up_list.append(fproc)
bottom_up_set.add(identifier)
for dep in successors_by_node[identifier]:
if ( dep in successors_by_node
and dep not in bottom_up_set
and dep not in forward_uses_set):
O.forward_uses_by_identifier.setdefault(identifier, []).append(dep)
forward_uses_set.add(dep)
elif (identifier not in all_fprocs.fprocs_by_name(plain=True)):
O.missing_external_fdecls_by_identifier[identifier] = \
external_fdecls[identifier]
O.dependency_cycles = topological_sort.strongly_connected_components(
successors_by_node=successors_by_node)
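  # Illustrative outcome (hypothetical procedure names): if MAIN calls SUB1
  # and SUB1 calls SUB2, the stable topological sort yields bottom_up_list
  # == [SUB2, SUB1, MAIN], so every procedure appears before its callers
  # and can be analyzed or converted first.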
def each_fproc_update_is_modified(O):
fprocs_by_name = O.all_fprocs.fprocs_by_name()
for caller_fproc in O.bottom_up_list:
for caller_fdecl in caller_fproc.fdecl_by_identifier.values():
for called_identifier, i_args in caller_fdecl.passed_as_arg.items():
primaries = caller_fproc.externals_passed_by_arg_identifier.get(
called_identifier)
if (primaries is None):
primaries = [called_identifier]
for called_identifier in primaries:
called_fproc = fprocs_by_name.get(called_identifier)
if (called_fproc is not None):
for i_arg in sorted(i_args):
if (i_arg >= len(called_fproc.args_fdecl)):
continue
arg_fdecl = called_fproc.args_fdecl[i_arg]
if (arg_fdecl.is_modified):
caller_fdecl.is_modified = True
return O
def each_fproc_update_needs_cmn(O):
fprocs_by_name = O.all_fprocs.fprocs_by_name()
have_blockdata = (len(O.all_fprocs.blockdata) != 0)
for caller_fproc in O.bottom_up_list:
caller_fproc.needs_cmn = (
caller_fproc.uses_common
or caller_fproc.uses_save
or caller_fproc.uses_io
or caller_fproc.uses_iargc_getarg
or (have_blockdata and caller_fproc.is_program())
or len(caller_fproc.dynamic_parameters) != 0)
if (not caller_fproc.needs_cmn):
for dependency in O.deps_by_fproc_identifier.get(
caller_fproc.name.value, []):
called_fproc = fprocs_by_name.get(dependency)
if (called_fproc is not None and called_fproc.needs_cmn):
caller_fproc.needs_cmn = True
break
return O
def process(file_names, basic_only=False, skip_load_includes=False):
assert not skip_load_includes or basic_only
all_fprocs = split_fprocs()
import itertools
global_line_index_generator = itertools.count()
for file_name in file_names:
all_fprocs.process(stripped_source_lines=load(
global_line_index_generator=global_line_index_generator,
file_name=file_name,
skip_load_includes=skip_load_includes))
if (not basic_only):
all_fprocs.build_fdecl_by_identifier()
for fproc in all_fprocs.all_in_input_order:
fproc.common_name_by_identifier()
fproc.set_uses_save()
fproc.target_statement_labels()
return all_fprocs
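# Usage sketch (hypothetical file names; assumes Fortran sources on disk):
#   all_fprocs = process(file_names=["prog.f", "subs.f"])
#   all_fprocs.show_counts_by_type()
#   bottom_up = all_fprocs.build_bottom_up_fproc_list_following_calls()
#   for fproc in bottom_up.bottom_up_list:
#     print fproc.name.value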
|
{
"content_hash": "90e8eae2f7b4007417ad1ca1fb98c281",
"timestamp": "",
"source": "github",
"line_count": 3116,
"max_line_length": 79,
"avg_line_length": 32.41463414634146,
"alnum_prop": 0.5775909864955844,
"repo_name": "hickerson/bbn",
"id": "582e4c20bbdb50da27b6373f5a6f8f30b90514e8",
"size": "101004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fable/fable_sources/fable/read.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "264"
},
{
"name": "C",
"bytes": "1096307"
},
{
"name": "C++",
"bytes": "738771"
},
{
"name": "DTrace",
"bytes": "1453"
},
{
"name": "Fortran",
"bytes": "955353"
},
{
"name": "Makefile",
"bytes": "13894"
},
{
"name": "Python",
"bytes": "1842383"
},
{
"name": "Shell",
"bytes": "9842"
},
{
"name": "TeX",
"bytes": "9489"
}
],
"symlink_target": ""
}
|
import os
from conjureup.app_config import app
from conjureup.controllers.summary import common
from conjureup.ui.views.summary import SummaryView
from ubuntui.ev import EventLoop
class SummaryController:
def __init__(self):
self.view = None
self.save_path = os.path.join(app.config['spell-dir'],
'results.txt')
def finish(self):
EventLoop.remove_alarms()
EventLoop.exit(0)
def render(self, results):
app.log.debug("Rendering summary results: {}".format(results))
common.write_results(results, self.save_path)
self.view = SummaryView(app, results, self.finish)
app.ui.set_header(title="Deploy Summary",
excerpt="Deployment summary for {}".format(
app.config['spell']))
app.ui.set_body(self.view)
app.ui.set_footer("Your big software is deployed, press "
"(Q) key to return to shell.")
_controller_class = SummaryController
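# Usage sketch (hedged: conjure-up resolves _controller_class dynamically
# and the shape of `results` depends on the spell being deployed):
#   controller = _controller_class()
#   controller.render(results={'some-step': 'passed'})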
|
{
"content_hash": "024662b11f2c5c5c7736833307e59532",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 70,
"avg_line_length": 32.59375,
"alnum_prop": 0.6069031639501438,
"repo_name": "battlemidget/conjure-up",
"id": "4a1483826e90987de6eb89db527ee7e57c58f119",
"size": "1043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conjureup/controllers/summary/gui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2635"
},
{
"name": "Python",
"bytes": "532618"
},
{
"name": "Shell",
"bytes": "2717"
}
],
"symlink_target": ""
}
|
""" ID3v2 Frames """
__author__ = "Alastair Tse <alastair@tse.id.au>"
__license__ = "BSD"
__copyright__ = "Copyright (c) 2004, Alastair Tse"
__revision__ = "$Id: id3v2frame.py,v 1.4 2004/12/21 12:02:06 acnt2 Exp $"
from tagger.constants import *
from tagger.exceptions import *
from tagger.utility import *
from tagger.debug import *
from tagger.encoding import *
from encodings import normalize_encoding
import struct, types, tempfile
class ID3v2BaseFrame:
""" Base ID3v2 Frame for 2.2, 2.3 and 2.4
Abstract class that defines basic functions that are common for
2.2, 2.3 and 2.4.
    o_* functions mean output_*: they output a bytestring encoding
    the given data
    x_* functions mean extract_*: they extract data into accessible
    structures when given a suitably long bytestream
@cvar header_length: header portion length
@cvar supported: supported frame ids
@cvar status_flags: status flags required
@cvar format_flags: format flags required
@ivar fid: frame id code
@ivar rawdata: rawdata of the rest of the frame minus the header
@ivar length: length of the frame in bytes
@ivar flags: dictionary of flags for this frame
@ivar encoding: optional - for text fields we have the encoding name
@ivar strings: a list of strings for text fields
@ivar shortcomment: set if this frame is a comment
@ivar longcomment: set if this frame is a comment (optional)
@ivar language: set if this frame is a comment (2 character code)
@ivar mimetype: mimetype for GEOB, APIC
@ivar filename: filename for GEOB
@ivar obj: data for GEOB
@ivar desc: for geob and URL
@ivar url: for URL
@ivar counter: for playcount (PCNT)
"""
supported = {}
header_length = 0
status_flags = {}
format_flags = {}
fid = None
rawdata = None
length = 0
flags = 0
encoding = ''
strings = []
shortcomment = ''
longcomment = ''
language = ''
mimetype = ''
filename = ''
obj = None
desc = ''
url = ''
def __init__(self, frame=None, fid=None):
"""
creates an ID3v2BaseFrame structure. If you specify frame,
then it will go into parse mode. If you specify the fid,
then it will create a new frame.
@param frame: frame bytestring
@param fid: frame id for creating a new frame
"""
if fid and not frame and fid not in self.supported.keys():
raise ID3ParameterException("Unsupported ID3v2 Field: %s" % fid)
elif fid and not frame:
self.fid = fid
self.new_frame_header()
elif frame:
self.parse_frame_header(frame)
self.parse_field()
def parse_frame_header(self, frame):
"""
Parse the frame header from a bytestring
@param frame: bytestring of the frame
@type frame: string
@todo: apple's id3 tags doesn't seem to follow the unsync safe format
"""
self.rawdata = ''
self.length = 0
raise ID3NotImplementedException("parse_frame_header")
def new_frame_header(self):
"""
creates a new frame header
"""
self.flags = {}
for flagname, bit in self.status_flags + self.format_flags:
self.flags[flagname] = 0
def output(self):
"""
Create a bytestring representing the frame contents
and the field
@todo: no syncsafing
@todo: no status format flags used
"""
raise ID3NotImplementedException("output")
def parse_field(self):
if self.fid not in self.supported.keys():
raise ID3FrameException("Unsupported ID3v2 Field: %s" % self.fid)
parser = self.supported[self.fid][0]
        getattr(self, 'x_' + parser)()
def output_field(self):
if self.fid not in self.supported.keys():
raise ID3FrameException("Unsupported ID3v2 Field: %s" % self.fid)
parser = self.supported[self.fid][0]
        return getattr(self, 'o_' + parser)()
def o_string(self, s, toenc, fromenc='latin_1'):
"""
Converts a String or Unicode String to a byte string of specified encoding.
@param toenc: Encoding which we wish to convert to. This can be either ID3V2_FIELD_ENC_* or the actual python encoding type
@param fromenc: converting from encoding specified
"""
# sanitise input - convert to string repr
try:
if type(encodings[toenc]) == types.StringType:
toenc = encodings[toenc]
except KeyError:
toenc = 'latin_1'
outstring = ''
# make sure string is of a type we understand
if type(s) not in [types.StringType, types.UnicodeType]:
s = unicode(s)
if type(s) == types.StringType:
if toenc == fromenc:
# don't need any conversion here
outstring = s
else:
try:
outstring = s.decode(fromenc).encode(toenc)
except (UnicodeEncodeError, UnicodeDecodeError):
warn("o_string: frame conversion failed. leaving as is.")
outstring = s
elif type(s) == types.UnicodeType:
try:
outstring = s.encode(toenc)
except UnicodeEncodeError, err:
warn("o_string: frame conversion failed - leaving empty. %s" %\
err)
outstring = ''
return outstring
def o_text(self):
"""
Output text bytestring
"""
newstrings = []
for s in self.strings:
newstrings.append(self.o_string(s, self.encoding))
output = chr(encodings[self.encoding])
for s in newstrings:
output += null_terminate(self.encoding, s)
"""
# strip the last null terminator
if is_double_byte(self.encoding) and len(output) > 1:
output = output[:-2]
elif not is_double_byte(self.encoding) and len(output) > 0:
output = output[:-1]
"""
return output
def x_text(self):
"""
Extract Text Fields
        @todo: handle multiple strings separated by \x00
sets: encoding, strings
"""
data = self.rawdata
self.encoding = encodings[ord(data[0])]
rawtext = data[1:]
if normalize_encoding(self.encoding) == 'latin_1':
text = rawtext
self.strings = text.split('\x00')
else:
text = rawtext.decode(self.encoding)
if is_double_byte(self.encoding):
self.strings = text.split('\x00\x00')
else:
self.strings = text.split('\x00')
try:
dummy = text.encode('utf_8')
debug('Read Field: %s Len: %d Enc: %s Text: %s' %
(self.fid, self.length, self.encoding, str([text])))
except UnicodeDecodeError:
debug('Read Field: %s Len: %d Enc: %s Text: %s (Err)' %
(self.fid, self.length, self.encoding, str([text])))
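    # Example (illustrative, assuming encodings[0] maps to 'latin_1' in
    # tagger.encoding): for rawdata == '\x00Artist\x00' the first byte
    # selects latin_1, rawtext == 'Artist\x00', and splitting on '\x00'
    # leaves self.strings == ['Artist', ''].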
def set_text(self, s, encoding = 'utf_16'):
self.strings = [s]
self.encoding = encoding
def o_comm(self):
if is_double_byte(self.encoding):
sep = '\x00\x00'
else:
sep = '\x00'
return chr(encodings[self.encoding]) + self.language + \
self.o_string(self.shortcomment, self.encoding) + sep + \
self.o_string(self.longcomment, self.encoding) + sep
def x_comm(self):
"""
extract comment field
sets: encoding, lang, shortcomment, longcomment
"""
data = self.rawdata
self.encoding = encodings[ord(data[0])]
self.language = data[1:4]
self.shortcomment = ''
self.longcomment = ''
if is_double_byte(self.encoding):
for i in range(4,len(data)-1):
if data[i:i+2] == '\x00\x00':
self.shortcomment = data[4:i].strip('\x00')
self.longcomment = data[i+2:].strip('\x00')
break
else:
for i in range(4,len(data)):
if data[i] == '\x00':
self.shortcomment = data[4:i].strip('\x00')
self.longcomment = data[i+1:].strip('\x00')
break
debug('Read Field: %s Len: %d Enc: %s Lang: %s Comm: %s' %
(self.fid, self.length, self.encoding, self.language,
str([self.shortcomment, self.longcomment])))
def o_pcnt(self):
counter = ''
if self.length == 4:
counter = struct.pack('!I', self.counter)
else:
for i in range(0, self.length):
x = (self.counter >> (i*8) ) & 0xff
counter = counter + struct.pack('!B',x)
return counter
def x_pcnt(self):
"""
Extract Play Count
sets: counter
"""
data = self.rawdata
bytes = self.length
counter = 0
if bytes == 4:
counter = struct.unpack('!I',data)[0]
else:
for i in range(0,bytes):
counter += struct.unpack('B',data[i]) * pow(256,i)
debug('Read Field: %s Len: %d Count: %d' % (self.fid, bytes, counter))
self.counter = counter
def o_bin(self):
return self.rawdata
def x_bin(self):
pass
def o_wxxx(self):
if is_double_byte(self.encoding):
return chr(encodings[self.encoding]) + \
self.o_string(self.desc, self.encoding) + '\x00\x00' + \
self.o_string(self.url, self.encoding) + '\x00\x00'
else:
return chr(encodings[self.encoding]) + \
self.o_string(self.desc, self.encoding) + '\x00' + \
self.o_string(self.url, self.encoding) + '\x00'
def x_wxxx(self):
"""
Extract URL
set: encoding, desc, url
"""
data = self.rawdata
self.encoding = encodings[ord(data[0])]
if is_double_byte(self.encoding):
for i in range(1,len(data)-1):
if data[i:i+2] == '\x00\x00':
self.desc = data[1:i]
self.url = data[i+2:]
break
else:
for i in range(1,len(data)):
if data[i] == '\x00':
self.desc = data[1:i]
self.url = data[i+1:]
break
debug("Read field: %s Len: %s Enc: %s Desc: %s URL: %s" %
(self.fid, self.length, self.encoding,
self.desc, str([self.url])))
def o_apic(self):
enc = encodings[self.encoding]
sep = '\x00'
if is_double_byte(self.encoding):
sep = '\x00\x00'
return '%c%s\x00%c%s%s%s' % (enc, self.mimetype, self.picttype,
self.o_string(self.desc, self.encoding),
sep, self.pict)
def x_apic(self):
"""
Extract APIC
set: encoding, mimetype, desc, pict, picttype
"""
data = self.rawdata
self.encoding = encodings[ord(data[0])]
self.mimetype = ''
self.desc = ''
self.pict = ''
self.picttype = 0
# get mime type (must be latin-1)
for i in range(1,len(data)):
if data[i] == '\x00':
self.mimetype = data[1:i]
break
if not self.mimetype:
raise ID3FrameException("APIC extraction failed. Missing mimetype")
        self.picttype = ord(data[len(self.mimetype) + 2])
# get picture description
for i in range(len(self.mimetype) + 2, len(data)-1):
if data[i] == '\x00':
self.desc = data[len(self.mimetype)+2:i]
if data[i+1] == '\x00':
self.pict = data[i+2:]
else:
self.pict = data[i+1:]
break
debug('Read Field: %s Len: %d PicType: %d Mime: %s Desc: %s PicLen: %d' %
(self.fid, self.length, self.picttype, self.mimetype,
self.desc, len(self.pict)))
# open("test.png","w").write(pictdata)
def o_url(self):
return self.rawdata
def x_url(self):
debug("Read Field: %s Len: %d Data: %s" %
(self.fid, self.length, [self.rawdata]))
return
def o_geob(self):
if is_double_byte(self.encoding):
return chr(encodings[self.encoding]) + self.mimetype + '\x00' + \
self.filename + '\x00\x00' + self.desc + \
'\x00\x00' + self.obj
else:
return chr(encodings[self.encoding]) + self.mimetype + '\x00' + \
self.filename + '\x00' + self.desc + \
'\x00' + self.obj
def x_geob(self):
"""
Extract GEOB
set: encoding, mimetype, filename, desc, obj
"""
data = self.rawdata
self.encoding = encodings[ord(data[0])]
self.mimetype = ''
self.filename = ''
self.desc = ''
self.obj = ''
for i in range(1,len(data)):
if data[i] == '\x00':
self.mimetype = data[1:i]
break
if not self.mimetype:
raise ID3FrameException("Unable to extract GEOB. Missing mimetype")
# FIXME: because filename and desc are optional, we should be
# smarter about splitting
if is_double_byte(self.encoding):
for i in range(len(self.mimetype)+2,len(data)-1):
if data[i:i+2] == '\x00\x00':
self.filename = data[len(self.mimetype)+2:i]
ptr = len(self.mimetype) + len(self.filename) + 4
break
else:
for i in range(len(self.mimetype)+2,len(data)-1):
if data[i] == '\x00':
self.filename = data[len(self.mimetype)+2:i]
ptr = len(self.mimetype) + len(self.filename) + 3
break
if is_double_byte(self.encoding):
for i in range(ptr,len(data)-1):
if data[i:i+2] == '\x00\x00':
self.desc = data[ptr:i]
self.obj = data[i+2:]
break
else:
for i in range(ptr,len(data)-1):
if data[i] == '\x00':
self.desc = data[ptr:i]
self.obj = data[i+1:]
break
debug("Read Field: %s Len: %d Enc: %s Mime: %s Filename: %s Desc: %s ObjLen: %d" %
(self.fid, self.length, self.encoding, self.mimetype,
self.filename, self.desc, len(self.obj)))
class ID3v2_2_Frame(ID3v2BaseFrame):
supported = ID3V2_2_FRAME_SUPPORTED_IDS
header_length = ID3V2_2_FRAME_HEADER_LENGTH
version = '2.2'
status_flags = []
format_flags = []
def parse_frame_header(self, frame):
header = frame[:self.header_length]
self.fid = header[0:3]
self.rawdata = frame[self.header_length:]
self.length = struct.unpack('!I', '\x00' + header[3:6])[0]
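    # Example (illustrative): for a v2.2 header 'TT2\x00\x00\x0a' followed
    # by ten bytes of field data, fid == 'TT2' and length == 10; the three
    # size bytes are left-padded with '\x00' so struct can unpack them as a
    # four-byte '!I'.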
def output(self):
fieldstr = self.output_field()
# FIXME: no syncsafe
# NOTE: ID3v2 uses only 3 bytes for size, so we strip of MSB
header = self.fid + struct.pack('!I', len(fieldstr))[1:]
return header + fieldstr
def o_text(self):
"""
Output Text Field
        ID3v2.2 text frames do not support multiple strings
"""
newstring = self.o_string(self.strings[0], self.encoding)
enc = encodings[self.encoding]
return chr(enc) + null_terminate(self.encoding, newstring)
def o_apic(self):
enc = encodings[self.encoding]
if is_double_byte(self.encoding):
sep = '\x00\x00'
else:
sep = '\x00'
imgtype = self.mimetype
if len(imgtype) != 3:
#attempt conversion
if imgtype in ID3V2_2_FRAME_MIME_TYPE_TO_IMAGE_FORMAT.keys():
imgtype = ID3V2_2_FRAME_MIME_TYPE_TO_IMAGE_FORMAT[imgtype]
else:
raise ID3FrameException("ID3v2.2 picture format must be three characters")
return '%c%s%c%s%s%s' % (enc, imgtype, self.picttype,
self.o_string(self.desc, self.encoding),
sep, self.pict)
def x_apic(self):
"""
Extract APIC
set: encoding, mimetype, desc, pict, picttype
"""
data = self.rawdata
self.encoding = encodings[ord(data[0])]
self.mimetype = ''
self.desc = ''
self.pict = ''
self.picttype = 0
# get mime type (must be latin-1)
imgtype = data[1:4]
if not imgtype:
raise ID3FrameException("APIC extraction failed. Missing mimetype")
if imgtype not in ID3V2_2_FRAME_IMAGE_FORMAT_TO_MIME_TYPE.keys():
raise ID3FrameException("Unrecognised mime-type")
else:
self.mimetype = ID3V2_2_FRAME_IMAGE_FORMAT_TO_MIME_TYPE[imgtype]
        self.picttype = ord(data[len(imgtype) + 1])
# get picture description
for i in range(len(imgtype) + 2, len(data) - 1):
            # print [data[i:i+3]]  # stray debug output, disabled
if data[i] == '\x00':
self.desc = data[len(imgtype)+2:i]
if data[i+1] == '\x00':
self.pict = data[i+2:]
else:
self.pict = data[i+1:]
break
debug('Read Field: %s Len: %d PicType: %d Mime: %s Desc: %s PicLen: %d' %
(self.fid, self.length, self.picttype, self.mimetype,
self.desc, len(self.pict)))
# open("test.png","w").write(pictdata)
class ID3v2_3_Frame(ID3v2BaseFrame):
supported = ID3V2_3_ABOVE_SUPPORTED_IDS
header_length = ID3V2_3_FRAME_HEADER_LENGTH
status_flags = ID3V2_3_FRAME_STATUS_FLAGS
format_flags = ID3V2_3_FRAME_FORMAT_FLAGS
version = '2.3'
def parse_frame_header(self, frame):
frame_header = frame[:self.header_length]
(fid, rawsize, status, format) = struct.unpack("!4sIBB", frame_header)
self.fid = fid
self.rawdata = frame[self.header_length:]
self.length = rawsize
self.flags = {}
for flagname, bit in self.status_flags:
self.flags[flagname] = (status >> bit) & 0x01
for flagname, bit in self.format_flags:
self.flags[flagname] = (format >> bit) & 0x01
def output(self):
fieldstr = self.output_field()
header = self.fid + struct.pack('!IBB', len(fieldstr), \
self.getstatus(), \
self.getformat())
return header + fieldstr
def getstatus(self):
status_word = 0
if self.flags and self.status_flags:
for flag, bit in self.status_flags:
                # OR the bit in when the flag is set; the original "&"
                # always produced 0
                if self.flags.has_key(flag) and self.flags[flag]:
                    status_word = status_word | (0x01 << bit)
return status_word
def getformat(self):
format_word = 0
if self.flags and self.format_flags:
for flag, bit in self.format_flags:
                # OR the bit in when the flag is set; the original "&"
                # always produced 0
                if self.flags.has_key(flag) and self.flags[flag]:
                    format_word = format_word | (0x01 << bit)
return format_word
class ID3v2_4_Frame(ID3v2_3_Frame):
supported = ID3V2_3_ABOVE_SUPPORTED_IDS
header_length = ID3V2_3_FRAME_HEADER_LENGTH
flags = ID3V2_3_FRAME_FLAGS
version = '2.4'
ID3v2Frame = ID3v2_4_Frame
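# Usage sketch (hedged: assumes 'TIT2' appears in
# ID3V2_3_ABOVE_SUPPORTED_IDS with a text parser, as in standard
# ID3v2.3/2.4 tags):
#   frame = ID3v2Frame(fid='TIT2')
#   frame.set_text(u'Song Title')
#   raw = frame.output()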
|
{
"content_hash": "c0fc0b5510439fe7d4a98f1295cc8fea",
"timestamp": "",
"source": "github",
"line_count": 617,
"max_line_length": 131,
"avg_line_length": 32.6612641815235,
"alnum_prop": 0.5219829297340214,
"repo_name": "Ciantic/pytagger",
"id": "f4fdcbaae11e7e5990be94319c398ed241d6da49",
"size": "20152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tagger/id3v2frame.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "115178"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
import numpy as np
from scipy.stats import sem
from agaro.output_utils import get_recent_model
from agaro.measure_utils import measures, t_measures, params
def seg_intersect(p1, p2, yi):
x1, y1 = p1
x2, y2 = p2
m = (y2 - y1) / (x2 - x1)
c = y1 - m * x1
xi = (yi - c) / m
if x1 < xi < x2:
return xi
else:
raise ValueError
def curve_intersect(xs, ys, yi):
i_big = np.where(ys > yi)[0][0]
p1 = [xs[i_big - 1], ys[i_big - 1]]
p2 = [xs[i_big], ys[i_big]]
return seg_intersect(p1, p2, yi)
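# Worked example (illustrative): for xs = [0, 1, 2], ys = [0, 2, 4] and
# yi = 3, the first index with ys > yi is 2, so the crossing is interpolated
# on the segment (1, 2)-(2, 4): m = 2, c = 0, and xi = (3 - 0) / 2 = 1.5.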
def get_vd_coeff(x, t):
return x / t
def get_diff_coeff(x, t):
return x ** 2 / (2.0 * t)
# Measure getters
def get_ud_vector(m):
dr = m.ships.agents.positions.dr
uds = get_vd_coeff(dr, m.ships.time.t) / m.ships.agents.swimmers.v_0
return np.mean(uds, axis=0), sem(uds, axis=0)
def get_ud_scalar(m):
dr = m.ships.agents.positions.dr_mag
uds = get_vd_coeff(dr, m.ships.time.t) / m.ships.agents.swimmers.v_0
return np.mean(uds, axis=0), sem(uds, axis=0)
def get_ud_abs(m):
dr = m.ships.agents.positions.dr
uds = np.abs(get_vd_coeff(dr, m.ships.time.t) /
m.ships.agents.swimmers.v_0)
return np.mean(uds, axis=0), sem(uds, axis=0)
def get_D_vector(m):
dr = m.ships.agents.positions.dr
Ds = get_diff_coeff(dr, m.ships.time.t)
return np.mean(Ds, axis=0), sem(Ds, axis=0)
def get_D_scalar(m):
dr = m.ships.agents.positions.dr_mag
Ds = get_diff_coeff(dr, m.ships.time.t)
return np.mean(Ds, axis=0), sem(Ds, axis=0)
def get_r_vector(m):
dr = m.ships.agents.positions.dr
return np.mean(dr, axis=0), sem(dr, axis=0)
def get_r_scalar(m):
dr = m.ships.agents.positions.dr_mag
return np.mean(dr, axis=0), sem(dr, axis=0)
def get_r_abs(m):
dr = np.abs(m.ships.agents.positions.dr)
return np.mean(dr, axis=0), sem(dr, axis=0)
def get_u_net_vector(m):
us = m.ships.agents.directions.u
return np.mean(us, axis=0), sem(us, axis=0)
def get_u_net_scalar(m):
u_net, u_net_err = get_u_net_vector(m)
u_net_sq = np.square(u_net)
u_net_sq_err = 2.0 * u_net * u_net_err
u_net_mag_sq = np.sum(u_net_sq)
u_net_mag_sq_err = np.sqrt(np.sum(np.square(u_net_sq_err)))
u_net_mag = np.sqrt(u_net_mag_sq)
u_net_mag_err = 0.5 * u_net_mag_sq_err / u_net_mag
return u_net_mag, u_net_mag_err
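# Error-propagation sketch (standard first-order rules, spelled out rather
# than taken from the source): for f = u**2, df = 2*u*du; independent errors
# add in quadrature, hence the sqrt of summed squared errors; and for
# g = sqrt(s), dg = ds / (2*sqrt(s)), which is the final 0.5 * err / value
# step above.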
# Parameter getters
def get_chi(m):
return m.ships.agents.chi
def get_pf(m):
return m.ships.obstructor.fraction_occupied
def get_Dr_0(m):
return m.ships.agents.rudder_sets.Dr_0
def get_p_0(m):
return m.ships.agents.rudder_sets.p_0
def get_noise_0_tot(m):
return m.ships.agents.noise_0_tot
def get_time(m):
return m.ships.time.t
# Time dependence
def t_uds_vector(dirname):
"""Calculate the particle drift speed over time along each axis
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
uds: numpy.ndarray[dtype=float]
Drift speeds, normalised by the swimmer speed.
"""
return t_measures(dirname, get_time, get_ud_vector)
def t_uds_scalar(dirname):
"""Calculate the overall particle drift speed over time
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
uds: numpy.ndarray[dtype=float]
Particle drift speeds.
"""
return t_measures(dirname, get_time, get_ud_scalar)
def t_uds_abs(dirname):
"""Calculate the overall particle absolute drift speed over time
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
uds_abs: numpy.ndarray[dtype=float]
Particle absolute drift speeds.
"""
return t_measures(dirname, get_time, get_ud_abs)
def t_Ds_scalar(dirname):
"""Calculate the overall particle diffusion constant over time
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
Ds: numpy.ndarray[dtype=float]
Particle diffusion constants.
"""
return t_measures(dirname, get_time, get_D_scalar)
def t_Ds_vector(dirname):
"""Calculate the particle diffusion constant over time along each axis
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
Ds: numpy.ndarray[dtype=float]
Particle diffusion constants.
"""
return t_measures(dirname, get_time, get_D_vector)
def t_rs_scalar(dirname):
"""Calculate the overall particle displacement over time
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
rs: numpy.ndarray[dtype=float]
Particle displacements
"""
return t_measures(dirname, get_time, get_r_scalar)
def t_rs_vector(dirname):
"""Calculate the particle displacement over time along each axis
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
rs: numpy.ndarray[dtype=float]
Particle displacements
"""
return t_measures(dirname, get_time, get_r_vector)
def t_rs_abs(dirname):
"""Calculate the absolute particle displacement over time along each axis
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
rs: numpy.ndarray[dtype=float]
Particle absolute displacements
"""
return t_measures(dirname, get_time, get_r_abs)
def t_u_nets_scalar(dirname):
"""Calculate the particles' overall centre-of-mass speed over time
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
v_nets: numpy.ndarray[dtype=float]
Centre-of-mass particle speeds.
"""
return t_measures(dirname, get_time, get_u_net_scalar)
def t_u_nets_vector(dirname):
"""Calculate the particle's centre-of-mass velocity over time
for a model output directory.
Parameters
----------
dirname: str
A model output directory path
Returns
-------
ts: numpy.ndarray[dtype=float]
Times.
v_nets: numpy.ndarray[dtype=float]
Centre-of-mass particle velocities.
"""
return t_measures(dirname, get_time, get_u_net_vector)
# Parameter to measure relations
def chi_uds_x(dirnames, t_steady=None):
chis = params(dirnames, get_chi)
uds, uds_err = measures(dirnames, get_ud_vector, t_steady)
return chis, uds[:, 0], uds_err[:, 0]
def pf_Ds_scalar(dirnames, t_steady=None):
pfs = params(dirnames, get_pf)
Ds, Ds_err = measures(dirnames, get_D_scalar, t_steady)
return pfs, Ds, Ds_err
def pf_uds_x(dirnames, t_steady=None):
pfs = params(dirnames, get_pf)
uds, uds_err = measures(dirnames, get_ud_vector, t_steady)
return pfs, uds[:, 0], uds_err[:, 0]
def Dr_0_Ds_scalar(dirnames, t_steady=None):
Dr_0s = params(dirnames, get_Dr_0)
Ds, Ds_err = measures(dirnames, get_D_scalar, t_steady)
return Dr_0s, Ds, Ds_err
def p_0_Ds_scalar(dirnames, t_steady=None):
p_0s = params(dirnames, get_p_0)
Ds, Ds_err = measures(dirnames, get_D_scalar, t_steady)
return p_0s, Ds, Ds_err
def noise_0_tot_Ds_scalar(dirnames, t_steady=None):
noise_0_tots = params(dirnames, get_noise_0_tot)
Ds, Ds_err = measures(dirnames, get_D_scalar, t_steady)
return noise_0_tots, Ds, Ds_err
# Related to finding equivalent chis to give equal drift speeds
def get_equiv_chi(ud_0, dirnames):
chis, uds, uds_err = chi_uds_x(dirnames)
i_sort = np.argsort(chis)
chis, uds, uds_err = chis[i_sort], uds[i_sort], uds_err[i_sort]
return curve_intersect(chis, uds, ud_0)
def get_equiv_chi_key(m):
noise_var_key = 'p_0' if m.ships.agents.does_tumbling else 'Dr_0'
chemo_rudders = m.ships.agents.rudder_sets.chemo_rudders
key = (noise_var_key, chemo_rudders.is_onesided,
chemo_rudders.noise_measurer.is_temporal)
return key
def get_equiv_chi_item(ud_0, dirnames):
key = get_equiv_chi_key(get_recent_model(dirnames[0]))
chi_equiv = get_equiv_chi(ud_0, dirnames)
return key, chi_equiv
def get_equiv_chi_dict(ud_0, dirname_sets):
params_to_chi = {}
for dirnames in dirname_sets:
key, chi_equiv = get_equiv_chi_item(ud_0, dirnames)
params_to_chi[key] = chi_equiv
return params_to_chi
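# Usage sketch (hypothetical directory names; assumes ahoy model output
# directories on disk):
#   params_to_chi = get_equiv_chi_dict(
#       ud_0=0.1,
#       dirname_sets=[['run_a_1', 'run_a_2'], ['run_b_1', 'run_b_2']])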
# Density distributions
def circle_segment_angle(d, R):
return 2.0 * np.arccos(d / R)
def circle_segment_area(d, R):
if d > R:
return 0.0
elif d < -R:
return np.pi * R ** 2.0
else:
theta = circle_segment_angle(d, R)
return ((R ** 2) / 2.0) * (theta - np.sin(theta))
def circle_cross_section_area(d_1, d_2, R):
return np.abs(circle_segment_area(d_1, R) - circle_segment_area(d_2, R))
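# Sanity check (illustrative): with d_1 = R and d_2 = -R the strip spans the
# whole circle, so circle_cross_section_area(R, -R, R) ==
# abs(0 - np.pi * R ** 2) == np.pi * R ** 2.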
def linear_areas(rs, R, Lx, Ly, nx):
x = np.linspace(-Lx / 2.0, Lx / 2.0, nx)
linear_areas = np.full_like(x, Lx * Ly / nx)
for i_r in range(rs.shape[0]):
for i in range(nx - 1):
d_1 = rs[i_r] - x[i]
d_2 = rs[i_r] - x[i + 1]
cross_section_area = circle_cross_section_area(d_1, d_2, R)
linear_areas[i] -= cross_section_area
return linear_areas
def linear_density(xs, xcs, R, Lx, Ly, dx):
nx = int(round(Lx / dx))
ns, x_bins = np.histogram(xs, bins=nx, range=(-Lx / 2.0, Lx / 2.0))
areas = linear_areas(xcs, R, Lx, Ly, nx)
densities = ns.astype(np.float) / areas
return densities, x_bins
def get_linear_density(m, dx):
xs = m.ships.agents.positions.r[:, 0]
try:
xcs = m.ships.obstructor.rs[:, 0]
R = m.ships.obstructor.R
except AttributeError:
xcs = np.array([])
R = 0.0
Lx, Ly = m.ships.agents.positions.L
return linear_density(xs, xcs, R, Lx, Ly, dx)
def angle_density(ths, dth):
nth = int(round(2.0 * np.pi / dth))
ns, th_bins = np.histogram(ths, bins=nth, range=(-np.pi, np.pi))
dth = th_bins[1] - th_bins[0]
densities = ns.astype(np.float) / (ns * dth).sum()
return densities, th_bins
def get_angle_density(m, dth):
ths = m.ships.agents.directions.th
return angle_density(ths, dth)
|
{
"content_hash": "84c16e8a4e0e9ed232f32a482de52558",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 77,
"avg_line_length": 24.11597374179431,
"alnum_prop": 0.6162780146992106,
"repo_name": "eddiejessup/ahoy",
"id": "a60666e736b821ea2da4566ee042a9a6b9c52048",
"size": "11021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ahoy/utils/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "180250"
},
{
"name": "Shell",
"bytes": "277"
}
],
"symlink_target": ""
}
|
import timeit
from scapy.layers.inet import TCP, IP, ICMP, UDP
__author__ = 'tal'
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import socket
class Scanner():
def __init__(self, dst_ip, src_ip, timeout, scan_type, ports_list=None):
if not ports_list or len(ports_list) == 0:
ports_list = [21, 23, 25, 53, 80, 110, 443]
print '******Starting port scanning******'
self.start = timeit.default_timer()
self.dst_ip = socket.gethostbyname(dst_ip)
# self.dst_ip = dst_ip
self.port_list = ports_list
self.src_ip = src_ip
self.src_port = RandShort()
self.timeout = int(timeout)
self.scan_type = scan_type
self.open_ports = 0
self.results = {1: "Open", 2: "Closed", 3: "Filtered"}
def set_print(self, ip, port, state):
print ip, ':', port, "- ", state
def tcp_scan(self):
print 'starting tcp_scan'
for port in self.port_list:
port = int(port)
resp = sr1(IP(dst=self.dst_ip) / TCP(sport=self.src_port, dport=port, flags="S"), timeout=self.timeout,
verbose=0)
            if resp is None:
self.set_print(self.dst_ip, port, self.results[2])
elif resp.haslayer(TCP):
if resp.getlayer(TCP).flags == 0x12:
send_rst = sr(IP(dst=self.dst_ip) / TCP(sport=self.src_port, dport=port, flags="AR"), verbose=0,
timeout=self.timeout)
self.set_print(self.dst_ip, port, self.results[1])
self.open_ports += 1
elif resp.getlayer(TCP).flags == 0x14:
self.set_print(self.dst_ip, port, self.results[2])
def udp_scan(self):
print 'starting udp_scan'
for port in self.port_list:
port = int(port)
resp = sr1(IP(dst=self.dst_ip) / UDP(sport=self.src_port, dport=port), timeout=self.timeout, verbose=0)
            if resp is None:
retrans = []
for count in range(0, 3):
retrans.append(
sr1(IP(dst=self.dst_ip) / UDP(sport=self.src_port, dport=port), timeout=self.timeout,
verbose=0))
for item in retrans:
                    if item is not None:
self.udp_scan()
self.set_print(self.dst_ip, port, self.results[1] + "|" + self.results[3])
elif resp.haslayer(UDP):
self.set_print(self.dst_ip, port, self.results[1])
self.open_ports += 1
elif resp.haslayer(ICMP):
if int(resp.getlayer(ICMP).type) == 3 and int(resp.getlayer(ICMP).code) == 3:
self.set_print(self.dst_ip, port, self.results[2])
elif int(resp.getlayer(ICMP).type) == 3 and int(resp.getlayer(ICMP).code) in [1, 2, 9, 10, 13]:
self.set_print(self.dst_ip, port, self.results[3])
else:
print 'Checked'
def ack_scan(self):
print 'starting ack_scan'
for port in self.port_list:
port = int(port)
resp = sr1(IP(dst=self.dst_ip) / TCP(sport=self.src_port, dport=port, flags="A"), timeout=self.timeout,
verbose=0)
            if resp is None:
self.set_print(self.dst_ip, port, self.results[3])
elif resp.haslayer(TCP):
if resp.getlayer(TCP).flags == 0x4:
self.set_print(self.dst_ip, port, "Unfiltered)")
self.open_ports += 1
elif resp.haslayer(ICMP):
if int(resp.getlayer(ICMP).type) == 3 and int(resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]:
self.set_print(self.dst_ip, port, self.results[3])
def stealth_connection(self):
print 'starting stealth_connection'
for port in self.port_list:
port = int(port)
resp = sr1(IP(dst=self.dst_ip) / TCP(sport=self.src_port, dport=port, flags="S"), timeout=self.timeout,
verbose=0)
            if resp is None:
self.set_print(self.dst_ip, port, self.results[3])
elif resp.haslayer(TCP):
if resp.getlayer(TCP).flags == 0x12:
send_rst = sr(IP(dst=self.dst_ip) / TCP(sport=self.src_port, dport=port, flags="R"), verbose=0,
timeout=self.timeout)
self.set_print(self.dst_ip, port, self.results[1])
self.open_ports += 1
elif resp.getlayer(TCP).flags == 0x14:
self.set_print(self.dst_ip, port, self.results[2])
elif resp.haslayer(ICMP):
if int(resp.getlayer(ICMP).type) == 3 and int(resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]:
print self.set_print(self.dst_ip, port, self.results[3])
def fin_scan(self):
"""
Purpose: Send a fin packet over tcp, target should send back RST
"""
print 'starting fin_scan'
for port in self.port_list:
port = int(port)
resp = sr1(IP(dst=self.dst_ip) / TCP(sport=self.src_port, dport=port, flags="F"), timeout=self.timeout,
verbose=0)
            if resp is None:
self.set_print(self.dst_ip, port, self.results[1] + "|" + self.results[3])
self.open_ports += 1
elif resp.haslayer(TCP):
if resp.getlayer(TCP).flags == 0x14:
self.set_print(self.dst_ip, port, self.results[2])
elif resp.haslayer(ICMP):
if int(resp.getlayer(ICMP).type) == 3 and int(resp.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]:
self.set_print(self.dst_ip, port, self.results[3])
def run_scan(self, ):
"""
Choose the right type by the t argument
char -> func dict
"""
options = {'t': self.tcp_scan,
'u': self.udp_scan,
'a': self.ack_scan,
's': self.stealth_connection,
'f': self.fin_scan,
}
try:
options[self.scan_type]()
print "Scan Took:", timeit.default_timer() - self.start
print "Total Open ports:", self.open_ports
except Exception, ex:
            print '%s is not a valid scan_type, bye bye' % self.scan_type
exit(1)
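# Usage sketch (illustrative addresses; scapy generally requires root
# privileges to send raw packets):
#   scanner = Scanner(dst_ip='scanme.example.org', src_ip='192.0.2.1',
#                     timeout=2, scan_type='s', ports_list=[22, 80, 443])
#   scanner.run_scan()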
|
{
"content_hash": "b4d3fe3235351036a42d33b341ac55f8",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 116,
"avg_line_length": 46.14383561643836,
"alnum_prop": 0.5150660531393796,
"repo_name": "talp101/Scanning-tool",
"id": "0ab28adbc78eecaa39825e954ba0bbca2a0f7766",
"size": "6737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scanner/Scanner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12597"
}
],
"symlink_target": ""
}
|
import sys
from easygui import *
from getNBlinkN2nanpyLCD import taha
def phase1():
msg = "Hi, Please type a positive integer :)"
n = integerbox(msg=msg, default=1, lowerbound=1)
taha(n)
msgbox(msg='Your number is {}. :)'.format(n))
|
{
"content_hash": "88ca7536d3fc883a22a5b554cfb9abc7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 25.1,
"alnum_prop": 0.6693227091633466,
"repo_name": "pooyapooya/rizpardazande",
"id": "838a2a20f881772264499f50c35a9a7908e39666",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phase1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "881"
},
{
"name": "C++",
"bytes": "82831"
},
{
"name": "Makefile",
"bytes": "14984"
},
{
"name": "Processing",
"bytes": "36089"
},
{
"name": "Python",
"bytes": "1657256"
},
{
"name": "Shell",
"bytes": "3783"
}
],
"symlink_target": ""
}
|
import random
import numpy as np
import tensorflow as tf
from language_model import LM
from hparams import HParams
def get_test_hparams():
return HParams(
batch_size=21,
num_steps=12,
num_shards=2,
num_layers=1,
learning_rate=0.2,
max_grad_norm=1.0,
vocab_size=1000,
emb_size=14,
state_size=17,
projected_size=15,
num_sampled=500,
num_gpus=1,
average_params=True,
run_profiler=False,
)
def simple_data_generator(batch_size, num_steps):
x = np.zeros([batch_size, num_steps], np.int32)
y = np.zeros([batch_size, num_steps], np.int32)
for i in range(batch_size):
first = random.randrange(0, 20)
for j in range(num_steps):
x[i, j] = first + j
y[i, j] = first + j + 1
return x, y, np.ones([batch_size, num_steps], np.uint8)
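# Example (illustrative): if random.randrange picks first == 5 with
# num_steps == 3, one row comes out as x == [5, 6, 7] and y == [6, 7, 8],
# i.e. the target is always the next integer, which the toy model can learn
# almost exactly.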
class TestLM(tf.test.TestCase):
def test_lm(self):
hps = get_test_hparams()
with tf.variable_scope("model"):
model = LM(hps)
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_local_variables().run()
loss = 1e5
for i in range(50):
x, y, w = simple_data_generator(hps.batch_size, hps.num_steps)
loss, _ = sess.run([model.loss, model.train_op], {model.x: x, model.y: y, model.w: w})
print("%d: %.3f %.3f" % (i, loss, np.exp(loss)))
if np.isnan(loss):
print("NaN detected")
break
self.assertLess(loss, 1.0)
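# Assumed standard TensorFlow test entry point so the file also runs standalone.
if __name__ == "__main__":
tf.test.main()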
|
{
"content_hash": "2d6fcb1a87a01b4a4a7bab51dd6f8aeb",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 102,
"avg_line_length": 28.25423728813559,
"alnum_prop": 0.5410917816436712,
"repo_name": "DorRosenblum/tf_flstm_f-lm",
"id": "3d38c53792381c830cf49c16bd2da76800e0efc8",
"size": "1667",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "language_model_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "57"
},
{
"name": "Perl",
"bytes": "11141"
},
{
"name": "Python",
"bytes": "116702"
},
{
"name": "Shell",
"bytes": "3514"
}
],
"symlink_target": ""
}
|
from typing import Type, TypeVar
class MyClass:
class_attr = 42
def __init__(self, attr):
self.inst_attr = attr
T = TypeVar('T', bound=MyClass)
def func(x: Type[T]):
# It will be resolved on "Go to Declaration" in the editor due to the "implicit" resolve context.
x.inst_attr
# <ref>
|
{
"content_hash": "305180a9d69d1d86e20132597953bdbb",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 102,
"avg_line_length": 18.941176470588236,
"alnum_prop": 0.6242236024844721,
"repo_name": "google/intellij-community",
"id": "a08ebfbd5ca724a3a1ce28b3194ba300a80effef",
"size": "322",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "python/testData/resolve/TypeVarClassObjectBoundAttribute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import unittest
def path_sum(root, given_sum):
all_paths = []
if root is not None:
path_sum_recursive(root, 0, [], given_sum, all_paths)
return all_paths
def path_sum_recursive(root, prev, path, given_sum, all_paths):
if root is not None:
path.append(root.val())
current = root.val() + prev
if root.is_leaf() and given_sum == current:
all_paths.append(path)
else:
path_sum_recursive(root.left(), current, list(path), given_sum, all_paths)
path_sum_recursive(root.right(), current, list(path), given_sum, all_paths)
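# Note that list(path) copies the accumulated path before each recursive
# branch, so the left and right subtrees cannot mutate each other's paths.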
class TreeNode(object):
def __init__(self, val, left=None, right=None):
self._val = val
self._left = left
self._right = right
def val(self):
return self._val
def is_leaf(self):
return self._left is None and self._right is None
def left(self):
return self._left
def right(self):
return self._right
class PathSumTest(unittest.TestCase):
def testPathSumOfNullTree(self):
self.assertEqual(path_sum(None, 10), [])
def testOnlyRoot_sumEqRootVal(self):
self.assertEqual(path_sum(TreeNode(10), 10), [[10]])
def testOnlyRoot_sumNotEqRootVal(self):
self.assertEqual(path_sum(TreeNode(20), 10), [])
def testTwoLevelTree_leftInPath(self):
self.assertEqual(path_sum(TreeNode(3, TreeNode(5), TreeNode(4)), 8), [[3, 5]])
def testNegativeSum(self):
self.assertEqual(path_sum(TreeNode(0, TreeNode(-1), TreeNode(-1)), -1), [[0, -1], [0, -1]])
def testBigTree(self):
self.assertEqual(
path_sum(
TreeNode(
5,
TreeNode(
4,
TreeNode(
11,
TreeNode(7),
TreeNode(2)
),
),
TreeNode(
8,
TreeNode(13),
TreeNode(
4,
TreeNode(5),
TreeNode(1)
)
)
),
22
),
[[5, 4, 11, 2], [5, 8, 4, 5]]
)
|
{
"content_hash": "e8f9f21e9de6cde5713c29f734cf831d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 99,
"avg_line_length": 28.397590361445783,
"alnum_prop": 0.4705133644463301,
"repo_name": "Alex-Diez/python-tdd-katas",
"id": "7249349decb442a13e10bf31578f8de6ebcdff57",
"size": "2357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "path_sum_kata/day_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "335247"
}
],
"symlink_target": ""
}
|
from typing import MutableMapping, MutableSequence
import proto # type: ignore
from google.cloud.appengine_admin_v1.types import (
network_settings as ga_network_settings,
)
__protobuf__ = proto.module(
package="google.appengine.v1",
manifest={
"Service",
"TrafficSplit",
},
)
class Service(proto.Message):
r"""A Service resource is a logical component of an application
that can share state and communicate in a secure fashion with
other services. For example, an application that handles
customer requests might include separate services to handle
tasks such as backend data analysis or API requests from mobile
devices. Each service has a collection of versions that define a
specific set of code used to implement the functionality of that
service.
Attributes:
name (str):
Full path to the Service resource in the API. Example:
``apps/myapp/services/default``.
@OutputOnly
id (str):
Relative name of the service within the application.
Example: ``default``.
@OutputOnly
split (google.cloud.appengine_admin_v1.types.TrafficSplit):
Mapping that defines fractional HTTP traffic
diversion to different versions within the
service.
labels (MutableMapping[str, str]):
A set of labels to apply to this service.
Labels are key/value pairs that describe the
service and all resources that belong to it
(e.g., versions). The labels can be used to
search and group resources, and are propagated
to the usage and billing reports, enabling
fine-grain analysis of costs. An example of
using labels is to tag resources belonging to
different environments (e.g., "env=prod",
"env=qa").
<p>Label keys and values can be no longer than
63 characters and can only contain lowercase
letters, numeric characters, underscores,
dashes, and international characters. Label keys
must start with a lowercase letter or an
international character. Each service can have
at most 32 labels.
network_settings (google.cloud.appengine_admin_v1.types.NetworkSettings):
Ingress settings for this service. Will apply
to all versions.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
id: str = proto.Field(
proto.STRING,
number=2,
)
split: "TrafficSplit" = proto.Field(
proto.MESSAGE,
number=3,
message="TrafficSplit",
)
labels: MutableMapping[str, str] = proto.MapField(
proto.STRING,
proto.STRING,
number=4,
)
network_settings: ga_network_settings.NetworkSettings = proto.Field(
proto.MESSAGE,
number=6,
message=ga_network_settings.NetworkSettings,
)
class TrafficSplit(proto.Message):
r"""Traffic routing configuration for versions within a single
service. Traffic splits define how traffic directed to the
service is assigned to versions.
Attributes:
shard_by (google.cloud.appengine_admin_v1.types.TrafficSplit.ShardBy):
Mechanism used to determine which version a
request is sent to. The traffic selection
algorithm will be stable for either type until
allocations are changed.
allocations (MutableMapping[str, float]):
Mapping from version IDs within the service to fractional
(0.000, 1] allocations of traffic for that version. Each
version can be specified only once, but some versions in the
service may not have any traffic allocation. Services that
have traffic allocated cannot be deleted until either the
service is deleted or their traffic allocation is removed.
Allocations must sum to 1. Up to two decimal place precision
is supported for IP-based splits and up to three decimal
places is supported for cookie-based splits.
"""
class ShardBy(proto.Enum):
r"""Available sharding mechanisms."""
UNSPECIFIED = 0
COOKIE = 1
IP = 2
RANDOM = 3
shard_by: ShardBy = proto.Field(
proto.ENUM,
number=1,
enum=ShardBy,
)
allocations: MutableMapping[str, float] = proto.MapField(
proto.STRING,
proto.DOUBLE,
number=2,
)
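# A minimal construction sketch (version IDs and fractions are illustrative):
#     split = TrafficSplit(
#         shard_by=TrafficSplit.ShardBy.IP,
#         allocations={"v1": 0.75, "v2": 0.25},
#     )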
__all__ = tuple(sorted(__protobuf__.manifest))
|
{
"content_hash": "261ee8c2def9ebbf9eab0de7bc168fac",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 81,
"avg_line_length": 34.97744360902256,
"alnum_prop": 0.6352106620808254,
"repo_name": "googleapis/python-appengine-admin",
"id": "e09e6285230d43adecced55492fdf14e92ba07e2",
"size": "5252",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/appengine_admin_v1/types/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1721758"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
}
|
import logging
import signal
import time
import warnings
from datetime import datetime, timedelta
from itertools import repeat
from rq.exceptions import NoSuchJobError
from rq.job import Job
from rq.queue import Queue
from redis import WatchError
from .utils import from_unix, to_unix, get_next_scheduled_time
logger = logging.getLogger(__name__)
class Scheduler(object):
scheduler_key = 'rq:scheduler'
scheduled_jobs_key = 'rq:scheduler:scheduled_jobs'
def __init__(self, queue_name='default', interval=60, connection=None):
from rq.connections import resolve_connection
self.connection = resolve_connection(connection)
self.queue_name = queue_name
self._interval = interval
self.log = logger
def register_birth(self):
if self.connection.exists(self.scheduler_key) and \
not self.connection.hexists(self.scheduler_key, 'death'):
raise ValueError("There's already an active RQ scheduler")
key = self.scheduler_key
now = time.time()
with self.connection._pipeline() as p:
p.delete(key)
p.hset(key, 'birth', now)
# Set scheduler key to expire a few seconds after polling interval
# This way, the key will automatically expire if scheduler
# quits unexpectedly
p.expire(key, int(self._interval) + 10)
p.execute()
def register_death(self):
"""Registers its own death."""
with self.connection._pipeline() as p:
p.hset(self.scheduler_key, 'death', time.time())
p.expire(self.scheduler_key, 60)
p.execute()
def _install_signal_handlers(self):
"""
Installs signal handlers for handling SIGINT and SIGTERM
gracefully.
"""
def stop(signum, frame):
"""
Register scheduler's death and exit.
"""
self.log.info('Shutting down RQ scheduler...')
self.register_death()
raise SystemExit()
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGTERM, stop)
def _create_job(self, func, args=None, kwargs=None, commit=True,
result_ttl=None, ttl=None, id=None, description=None, queue_name=None):
"""
Creates an RQ job and saves it to Redis.
"""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
job = Job.create(func, args=args, connection=self.connection,
kwargs=kwargs, result_ttl=result_ttl, ttl=ttl, id=id, description=description)
job.origin = queue_name or self.queue_name
if commit:
job.save()
return job
def enqueue_at(self, scheduled_time, func, *args, **kwargs):
"""
Pushes a job to the scheduler queue. The scheduled queue is a Redis sorted
set ordered by timestamp, which in this case is the job's scheduled execution time.
Usage:
from datetime import datetime
from redis import Redis
from rq.scheduler import Scheduler
from foo import func
redis = Redis()
scheduler = Scheduler(queue_name='default', connection=redis)
scheduler.enqueue_at(datetime(2020, 1, 1), func, 'argument', keyword='argument')
"""
job = self._create_job(func, args=args, kwargs=kwargs)
self.connection._zadd(self.scheduled_jobs_key,
to_unix(scheduled_time),
job.id)
return job
def enqueue_in(self, time_delta, func, *args, **kwargs):
"""
Similar to ``enqueue_at``, but accepts a timedelta instead of datetime object.
The job's scheduled execution time will be calculated by adding the timedelta
to datetime.utcnow().
"""
job = self._create_job(func, args=args, kwargs=kwargs)
self.connection._zadd(self.scheduled_jobs_key,
to_unix(datetime.utcnow() + time_delta),
job.id)
return job
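# Usage sketch, mirroring the enqueue_at example above (names illustrative):
#     from datetime import timedelta
#     scheduler.enqueue_in(timedelta(minutes=5), func, 'argument')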
def enqueue_periodic(self, scheduled_time, interval, repeat, func,
*args, **kwargs):
"""
Schedule a job to be periodically executed, at a certain interval.
"""
warnings.warn("'enqueue_periodic()' has been deprecated in favor of '.schedule()'"
"and will be removed in a future release.", DeprecationWarning)
return self.schedule(scheduled_time, func, args=args, kwargs=kwargs,
interval=interval, repeat=repeat)
def schedule(self, scheduled_time, func, args=None, kwargs=None, interval=None,
repeat=None, result_ttl=None, ttl=None, timeout=None, id=None, description=None, queue_name=None):
"""
Schedule a job to be periodically executed, at a certain interval.
"""
# Set result_ttl to -1 for periodic jobs, if result_ttl not specified
if interval is not None and result_ttl is None:
result_ttl = -1
job = self._create_job(func, args=args, kwargs=kwargs, commit=False,
result_ttl=result_ttl, ttl=ttl, id=id, description=description, queue_name=queue_name)
if interval is not None:
job.meta['interval'] = int(interval)
if repeat is not None:
job.meta['repeat'] = int(repeat)
if repeat and interval is None:
raise ValueError("Can't repeat a job without interval argument")
if timeout is not None:
job.timeout = timeout
job.save()
self.connection._zadd(self.scheduled_jobs_key,
to_unix(scheduled_time),
job.id)
return job
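# Usage sketch (values illustrative): run func every 60 seconds, indefinitely,
# starting now:
#     scheduler.schedule(datetime.utcnow(), func, interval=60)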
def cron(self, cron_string, func, args=None, kwargs=None, repeat=None,
queue_name=None, id=None, timeout=None, description=None):
"""
Schedule a cronjob
"""
scheduled_time = get_next_scheduled_time(cron_string)
# Set result_ttl to -1, as jobs scheduled via cron are periodic ones.
# Otherwise the job would expire after 500 sec.
job = self._create_job(func, args=args, kwargs=kwargs, commit=False,
result_ttl=-1, id=id, queue_name=queue_name, description=description)
job.meta['cron_string'] = cron_string
if repeat is not None:
job.meta['repeat'] = int(repeat)
if timeout is not None:
job.timeout = timeout
job.save()
self.connection._zadd(self.scheduled_jobs_key,
to_unix(scheduled_time),
job.id)
return job
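# Usage sketch (cron string and callable are illustrative): run func at 9:00
# every weekday, at most 10 times:
#     scheduler.cron('0 9 * * 1-5', func=func, repeat=10)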
def enqueue(self, scheduled_time, func, args=None, kwargs=None,
interval=None, repeat=None, result_ttl=None, queue_name=None):
"""
This method is deprecated and only left in as a backwards compatibility
alias for schedule().
"""
warnings.warn("'enqueue()' has been deprecated in favor of '.schedule()'"
"and will be removed in a future release.", DeprecationWarning)
return self.schedule(scheduled_time, func, args, kwargs, interval,
repeat, result_ttl, queue_name=queue_name)
def cancel(self, job):
"""
Pulls a job from the scheduler queue. This function accepts either a
job_id or a job instance.
"""
if isinstance(job, Job):
self.connection.zrem(self.scheduled_jobs_key, job.id)
else:
self.connection.zrem(self.scheduled_jobs_key, job)
def __contains__(self, item):
"""
Returns a boolean indicating whether the given job instance or job id is
scheduled for execution.
"""
job_id = item
if isinstance(item, Job):
job_id = item.id
return self.connection.zscore(self.scheduled_jobs_key, job_id) is not None
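# Enables membership tests such as ``job in scheduler`` or
# ``'some-job-id' in scheduler``.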
def change_execution_time(self, job, date_time):
"""
Change a job's execution time. Wrap this in a transaction to prevent race condition.
"""
with self.connection._pipeline() as pipe:
while True:
try:
pipe.watch(self.scheduled_jobs_key)
if pipe.zscore(self.scheduled_jobs_key, job.id) is None:
raise ValueError('Job not in scheduled jobs queue')
pipe.zadd(self.scheduled_jobs_key, to_unix(date_time), job.id)
break
except WatchError:
# If the job is still in the queue, retry; if it's gone, it was
# already executed, so we raise an error
if pipe.zscore(self.scheduled_jobs_key, job.id) is None:
raise ValueError('Job not in scheduled jobs queue')
continue
def get_jobs(self, until=None, with_times=False):
"""
Returns a list of job instances that will be queued until the given time.
If no 'until' argument is given, all jobs are returned. This function
accepts datetime and timedelta instances as well as integers representing
epoch values.
If with_times is True, a list of tuples consisting of the job instance and
its scheduled execution time is returned.
"""
def epoch_to_datetime(epoch):
return from_unix(float(epoch))
if until is None:
until = "+inf"
elif isinstance(until, datetime):
until = to_unix(until)
elif isinstance(until, timedelta):
until = to_unix((datetime.utcnow() + until))
job_ids = self.connection.zrangebyscore(self.scheduled_jobs_key, 0,
until, withscores=with_times,
score_cast_func=epoch_to_datetime)
if not with_times:
job_ids = zip(job_ids, repeat(None))
jobs = []
for job_id, sched_time in job_ids:
job_id = job_id.decode('utf-8')
try:
job = Job.fetch(job_id, connection=self.connection)
if with_times:
jobs.append((job, sched_time))
else:
jobs.append(job)
except NoSuchJobError:
# Delete jobs that aren't there from scheduler
self.cancel(job_id)
return jobs
def get_jobs_to_queue(self, with_times=False):
"""
Returns a list of job instances that should be queued
(score lower than current timestamp).
If with_times is True, a list of tuples consisting of the job instance and
its scheduled execution time is returned.
"""
return self.get_jobs(to_unix(datetime.utcnow()), with_times=with_times)
def get_queue_for_job(self, job):
"""
Returns a queue to put job into.
"""
key = '{0}{1}'.format(Queue.redis_queue_namespace_prefix, job.origin)
return Queue.from_queue_key(key, connection=self.connection)
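# With rq's default namespace prefix this resolves keys such as
# 'rq:queue:default' for a job whose origin is the 'default' queue.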
def enqueue_job(self, job):
"""
Move a scheduled job to a queue. In addition, it puts the job
back into the scheduler if needed.
"""
self.log.debug('Pushing {0} to {1}'.format(job.id, job.origin))
interval = job.meta.get('interval', None)
repeat = job.meta.get('repeat', None)
cron_string = job.meta.get('cron_string', None)
# If job is a repeated job, decrement counter
if repeat:
job.meta['repeat'] = int(repeat) - 1
queue = self.get_queue_for_job(job)
queue.enqueue_job(job)
self.connection.zrem(self.scheduled_jobs_key, job.id)
if interval:
# If this is a repeat job and counter has reached 0, don't repeat
if repeat is not None:
if job.meta['repeat'] == 0:
return
self.connection._zadd(self.scheduled_jobs_key,
to_unix(datetime.utcnow()) + int(interval),
job.id)
elif cron_string:
# If this is a repeat job and counter has reached 0, don't repeat
if repeat is not None:
if job.meta['repeat'] == 0:
return
self.connection._zadd(self.scheduled_jobs_key,
to_unix(get_next_scheduled_time(cron_string)),
job.id)
def enqueue_jobs(self):
"""
Move scheduled jobs into queues.
"""
self.log.info('Checking for scheduled jobs...')
jobs = self.get_jobs_to_queue()
for job in jobs:
self.enqueue_job(job)
# Refresh scheduler key's expiry
self.connection.expire(self.scheduler_key, int(self._interval) + 10)
return jobs
def run(self):
"""
Periodically check whether there's any job that should be put in the queue (score
lower than current time).
"""
self.log.info('Running RQ scheduler...')
self.register_birth()
self._install_signal_handlers()
try:
while True:
self.enqueue_jobs()
time.sleep(self._interval)
finally:
self.register_death()
|
{
"content_hash": "759bdd8481f80ebea83a29a6b8409ef9",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 117,
"avg_line_length": 38.87356321839081,
"alnum_prop": 0.5700029568302779,
"repo_name": "lechup/rq-scheduler",
"id": "6a6598bbddd03d6322a3d21987ece89f36a69dcc",
"size": "13528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rq_scheduler/scheduler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "22"
},
{
"name": "Python",
"bytes": "42797"
}
],
"symlink_target": ""
}
|
"""Tests for tpu_trainer_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from DLT2T.tpu import tpu_trainer_lib as lib
from DLT2T.utils import trainer_utils
from DLT2T.utils import trainer_utils_test
import tensorflow as tf
class TpuTrainerTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
trainer_utils_test.TrainerUtilsTest.setUpClass()
def testSmoke(self):
data_dir = trainer_utils_test.TrainerUtilsTest.data_dir
problem_name = "tiny_algo"
model_name = "transformer"
hparams_set = "transformer_tpu"
hparams = trainer_utils.create_hparams(hparams_set, data_dir)
trainer_utils.add_problem_hparams(hparams, problem_name)
problem = hparams.problem_instances[0]
model_fn = lib.get_model_fn(model_name, hparams, use_tpu=False)
input_fn = lib.get_input_fn(data_dir, problem, hparams)
params = {"batch_size": 16}
config = tf.contrib.tpu.RunConfig(
tpu_config=tf.contrib.tpu.TPUConfig(num_shards=2))
features, targets = input_fn(tf.estimator.ModeKeys.TRAIN, params)
with tf.variable_scope("training"):
spec = model_fn(features, targets, tf.estimator.ModeKeys.TRAIN, params,
config)
self.assertTrue(spec.loss is not None)
self.assertTrue(spec.train_op is not None)
with tf.variable_scope("eval"):
spec = model_fn(features, targets, tf.estimator.ModeKeys.EVAL, params,
config)
self.assertTrue(spec.eval_metrics is not None)
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "52253feae7913ca059eeec4d18d96e21",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 30.452830188679247,
"alnum_prop": 0.6933085501858736,
"repo_name": "renqianluo/DLT2T",
"id": "d5c6bdc1059c8be1a23e94fc5d278ed81218c3b4",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DLT2T/tpu/tpu_trainer_lib_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10699"
},
{
"name": "Jupyter Notebook",
"bytes": "14574"
},
{
"name": "Python",
"bytes": "1155627"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
}
|
"""
Django settings for dsap project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from src.web.dsapcfg import (
get_epp_conf,
get_dsapcfg_parser,
get_database_settings
)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONF_DIR = os.path.join(BASE_DIR, 'conf')
LOGIN_REDIRECT_URL = '/portal/'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
with open(os.path.join(CONF_DIR, 'secret_key')) as skf:
SECRET_KEY = skf.read()
ADMINS = [
('Lucas Estienne', 'lucas.estienne@cira.ca'),
# ('Elson Oliveira', 'elson.oliveira@cira.ca'),
]
# SECURITY WARNING: don't run with debug turned on in production!
# Run `manage.py collectstatic -c` before running the application when DEBUG is False
DEBUG = False
ALLOWED_HOSTS = ['*']
# EPP Config
EPP_CFG = get_epp_conf(CONF_DIR)
# DSAP Config file
DSAP_CFG_PARSER = get_dsapcfg_parser(CONF_DIR)
DSAPCFG_DEFAULT = DSAP_CFG_PARSER['DEFAULT']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'src.web.api',
'rest_framework',
'crispy_forms',
)
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'src.web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web', 'static', 'portal', 'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.csrf',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'builtins': [
'src.web.templatetags.app_filters',
],
},
},
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
APPEND_SLASH = False
WSGI_APPLICATION = 'src.web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = get_database_settings()
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'staticcache')
STATIC_URL = '/static/'
STATIC_APP_DIR = os.path.join(BASE_DIR, 'web', 'static')
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'src.web.finders.StaticFinder',
)
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
{
"content_hash": "be81850b41ed1ff56fefe646ea227dd8",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 82,
"avg_line_length": 28.607843137254903,
"alnum_prop": 0.6922549691569568,
"repo_name": "CIRALabs/DSAP",
"id": "368ff57a3a5f367337bfd0eb814e056038254f52",
"size": "4377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/web/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6372"
},
{
"name": "HTML",
"bytes": "13320"
},
{
"name": "JavaScript",
"bytes": "8002"
},
{
"name": "Python",
"bytes": "96806"
}
],
"symlink_target": ""
}
|
import base64
import re
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import stevedore
import webob
from webob import exc
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import servers as schema_servers
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.image import glance
from nova import objects
from nova import utils
ALIAS = 'servers'
TAG_SEARCH_FILTERS = ('tags', 'tags-any', 'not-tags', 'not-tags-any')
CONF = cfg.CONF
CONF.import_opt('enable_instance_password',
'nova.api.openstack.compute.legacy_v2.servers')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
CONF.import_opt('extensions_blacklist', 'nova.api.openstack',
group='osapi_v21')
CONF.import_opt('extensions_whitelist', 'nova.api.openstack',
group='osapi_v21')
LOG = logging.getLogger(__name__)
authorize = extensions.os_compute_authorizer(ALIAS)
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
EXTENSION_CREATE_NAMESPACE = 'nova.api.v21.extensions.server.create'
EXTENSION_REBUILD_NAMESPACE = 'nova.api.v21.extensions.server.rebuild'
EXTENSION_UPDATE_NAMESPACE = 'nova.api.v21.extensions.server.update'
EXTENSION_RESIZE_NAMESPACE = 'nova.api.v21.extensions.server.resize'
_view_builder_class = views_servers.ViewBuilderV21
schema_server_create = schema_servers.base_create
schema_server_update = schema_servers.base_update
schema_server_rebuild = schema_servers.base_rebuild
schema_server_resize = schema_servers.base_resize
schema_server_create_v20 = schema_servers.base_create_v20
schema_server_update_v20 = schema_servers.base_update_v20
schema_server_rebuild_v20 = schema_servers.base_rebuild_v20
schema_server_create_v219 = schema_servers.base_create_v219
schema_server_update_v219 = schema_servers.base_update_v219
schema_server_rebuild_v219 = schema_servers.base_rebuild_v219
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = [l for l in robj.obj['server']['links'] if l['rel'] == 'self']
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, **kwargs):
def _check_load_extension(required_function):
def should_load_extension(ext):
# Check whitelist is either empty or if not then the extension
# is in the whitelist
whitelist = CONF.osapi_v21.extensions_whitelist
blacklist = CONF.osapi_v21.extensions_blacklist
if not whitelist:
# if there is no whitelist, we accept everything,
# so we only care about the blacklist.
if ext.obj.alias in blacklist:
return False
else:
return True
else:
if ext.obj.alias in whitelist:
if ext.obj.alias in blacklist:
LOG.warning(
_LW(
"Extension %s is both in whitelist and "
"blacklist, blacklisting takes precedence"
),
ext.obj.alias)
return False
else:
return True
else:
return False
def check_load_extension(ext):
if isinstance(ext.obj, extensions.V21APIExtensionBase):
# Filter out for the existence of the required
# function here rather than on every request. We
# don't have a new abstract base class to reduce
# duplication in the extensions as they may want
# to implement multiple server (and other) entry
# points.
if hasattr(ext.obj, required_function):
LOG.debug('extension %(ext_alias)s detected by '
'servers extension for function %(func)s',
{'ext_alias': ext.obj.alias,
'func': required_function})
return should_load_extension(ext)
else:
LOG.debug(
'extension %(ext_alias)s is missing %(func)s',
{'ext_alias': ext.obj.alias,
'func': required_function})
return False
else:
return False
return check_load_extension
self.extension_info = kwargs.pop('extension_info')
super(ServersController, self).__init__(**kwargs)
self.compute_api = compute.API(skip_policy_check=True)
# Look for implementation of extension point of server creation
self.create_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('server_create'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.create_extension_manager):
LOG.debug("Did not find any server create extensions")
# Look for implementation of extension point of server rebuild
self.rebuild_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('server_rebuild'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.rebuild_extension_manager):
LOG.debug("Did not find any server rebuild extensions")
# Look for implementation of extension point of server update
self.update_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('server_update'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.update_extension_manager):
LOG.debug("Did not find any server update extensions")
# Look for implementation of extension point of server resize
self.resize_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_RESIZE_NAMESPACE,
check_func=_check_load_extension('server_resize'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.resize_extension_manager):
LOG.debug("Did not find any server resize extensions")
# Look for API schema of server create extension
self.create_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('get_server_create_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.create_schema_manager):
self.create_schema_manager.map(self._create_extension_schema,
self.schema_server_create_v219,
'2.19')
self.create_schema_manager.map(self._create_extension_schema,
self.schema_server_create, '2.1')
self.create_schema_manager.map(self._create_extension_schema,
self.schema_server_create_v20,
'2.0')
else:
LOG.debug("Did not find any server create schemas")
# Look for API schema of server update extension
self.update_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('get_server_update_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.update_schema_manager):
self.update_schema_manager.map(self._update_extension_schema,
self.schema_server_update_v219,
'2.19')
self.update_schema_manager.map(self._update_extension_schema,
self.schema_server_update, '2.1')
self.update_schema_manager.map(self._update_extension_schema,
self.schema_server_update_v20,
'2.0')
else:
LOG.debug("Did not find any server update schemas")
# Look for API schema of server rebuild extension
self.rebuild_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('get_server_rebuild_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.rebuild_schema_manager):
self.rebuild_schema_manager.map(self._rebuild_extension_schema,
self.schema_server_rebuild_v219,
'2.19')
self.rebuild_schema_manager.map(self._rebuild_extension_schema,
self.schema_server_rebuild, '2.1')
self.rebuild_schema_manager.map(self._rebuild_extension_schema,
self.schema_server_rebuild_v20,
'2.0')
else:
LOG.debug("Did not find any server rebuild schemas")
# Look for API schema of server resize extension
self.resize_schema_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_RESIZE_NAMESPACE,
check_func=_check_load_extension('get_server_resize_schema'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if list(self.resize_schema_manager):
self.resize_schema_manager.map(self._resize_extension_schema,
self.schema_server_resize, '2.1')
else:
LOG.debug("Did not find any server resize schemas")
@extensions.expected_errors((400, 403))
def index(self, req):
"""Returns a list of server names and ids for a given user."""
context = req.environ['nova.context']
authorize(context, action="index")
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@extensions.expected_errors((400, 403))
def detail(self, req):
"""Returns a list of server details for a given user."""
context = req.environ['nova.context']
authorize(context, action="detail")
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options(req))
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
search_opts.pop('status', None)
if 'status' in req.GET.keys():
statuses = req.GET.getall('status')
states = common.task_and_vm_state_from_status(statuses)
vm_state, task_state = states
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
# should return recently deleted instances according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
else:
# Convert deleted filter value to a valid boolean.
# Return non-deleted servers if an invalid value
# is passed with deleted filter.
search_opts['deleted'] = strutils.bool_from_string(
search_opts['deleted'], default=False)
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
if api_version_request.is_supported(req, min_version='2.26'):
for tag_filter in TAG_SEARCH_FILTERS:
if tag_filter in search_opts:
search_opts[tag_filter] = search_opts[
tag_filter].split(',')
# If tenant_id is passed as a search parameter this should
# imply that all_tenants is also enabled unless explicitly
# disabled. Note that the tenant_id parameter is filtered out
# by remove_invalid_options above unless the requestor is an
# admin.
# TODO(gmann): The 'all_tenants' flag should not be required while
# searching with 'tenant_id'. Ref bug #1185290
# + microversions to achieve the above-mentioned behavior by
# uncommenting the code below.
# if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
# We do not need to add the all_tenants flag if the tenant
# id associated with the token is the tenant id
# specified. This is done so a request that does not need
# the all_tenants flag does not fail because of lack of
# policy permission for compute:get_all_tenants when it
# doesn't actually need it.
# if context.project_id != search_opts.get('tenant_id'):
# search_opts['all_tenants'] = 1
all_tenants = common.is_all_tenants(search_opts)
# use the boolean from here on out so remove the entry from search_opts
# if it's present
search_opts.pop('all_tenants', None)
elevated = None
if all_tenants:
if is_detail:
authorize(context, action="detail:get_all_tenants")
else:
authorize(context, action="index:get_all_tenants")
elevated = context.elevated()
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
sort_keys, sort_dirs = common.get_sort_params(req.params)
expected_attrs = ['pci_devices']
if is_detail:
if api_version_request.is_supported(req, '2.26'):
expected_attrs.append("tags")
# merge our expected attrs with what the view builder needs for
# showing details
expected_attrs = self._view_builder.get_show_expected_attrs(
expected_attrs)
try:
instance_list = self.compute_api.get_all(elevated or context,
search_opts=search_opts, limit=limit, marker=marker,
want_objects=True, expected_attrs=expected_attrs,
sort_keys=sort_keys, sort_dirs=sort_dirs)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
LOG.debug("Flavor '%s' could not be found ",
search_opts['flavor'])
instance_list = objects.InstanceList()
if is_detail:
instance_list._context = context
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid, is_detail=False):
"""Utility function for looking up an instance by uuid.
:param context: request context for auth
:param req: HTTP request. The instance is cached in this request.
:param instance_uuid: UUID of the server instance to get
:param is_detail: True if you plan on showing the details of the
instance in the response, False otherwise.
"""
expected_attrs = ['flavor', 'pci_devices', 'numa_topology']
if is_detail:
expected_attrs = self._view_builder.get_show_expected_attrs(
expected_attrs)
instance = common.get_instance(self.compute_api, context,
instance_uuid,
expected_attrs=expected_attrs)
req.cache_db_instance(instance)
return instance
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
network_uuids = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
# fixed IP address is optional
# if the fixed IP address is not provided then
# it will use one of the available IP address from the network
request.address = network.get('fixed_ip', None)
request.port_id = network.get('port', None)
if request.port_id:
request.network_id = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
if request.address is not None:
msg = _("Specified Fixed IP '%(addr)s' cannot be used "
"with port '%(port)s': port already has "
"a Fixed IP allocated.") % {
"addr": request.address,
"port": request.port_id}
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
if (not request.port_id and
not uuidutils.is_uuid_like(request.network_id)):
br_uuid = request.network_id.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % request.network_id
raise exc.HTTPBadRequest(explanation=msg)
# duplicate networks are allowed only for neutron v2.0
if (not utils.is_neutron() and request.network_id and
request.network_id in network_uuids):
expl = (_("Duplicate networks"
" (%s) are not allowed") %
request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks)
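# An illustrative 'networks' attribute that this method parses:
#     [{"uuid": "<network-uuid>", "fixed_ip": "10.0.0.5"},
#      {"port": "<neutron-port-uuid>"}]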
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile(r'^(?:[A-Za-z0-9+/]{4})*'
r'(?:[A-Za-z0-9+/]{2}=='
r'|[A-Za-z0-9+/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
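# For example, on Python 2 base64.b64decode('aGk=!!') silently drops the
# illegal '!' bytes and decodes to 'hi'; the regex check above rejects such
# input outright (example input is illustrative).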
@extensions.expected_errors(404)
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
authorize(context, action="show")
instance = self._get_server(context, req, id, is_detail=True)
return self._view_builder.show(req, instance)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 409, 413))
@validation.schema(schema_server_create_v20, '2.0', '2.0')
@validation.schema(schema_server_create, '2.1', '2.18')
@validation.schema(schema_server_create_v219, '2.19')
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
name = common.normalize_name(server_dict['name'])
if api_version_request.is_supported(req, min_version='2.19'):
if 'description' in server_dict:
# This is allowed to be None
description = server_dict['description']
else:
# No default description
description = None
else:
description = name
# Arguments to be passed to instance create function
create_kwargs = {}
# Query extensions which want to manipulate the keyword
# arguments.
# NOTE(cyeoh): This is the hook that extensions use
# to replace the extension specific code below.
# When the extensions are ported this will also result
# in some convenience function from this class being
# moved to the extension
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point,
server_dict, create_kwargs, body)
availability_zone = create_kwargs.pop("availability_zone", None)
target = {
'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
authorize(context, target, 'create')
# TODO(Shao He, Feng) move this policy check to os-availability-zone
# extension after refactor it.
parse_az = self.compute_api.parse_availability_zone
availability_zone, host, node = parse_az(context, availability_zone)
if host or node:
authorize(context, {}, 'create:forced_host')
block_device_mapping = create_kwargs.get("block_device_mapping")
# TODO(Shao He, Feng) move this policy check to os-block-device-mapping
# extension after refactor it.
if block_device_mapping:
authorize(context, target, 'create:attach_volume')
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
# NOTE(cyeoh): Although an extension can set
# return_reservation_id in order to request that a reservation
# id be returned to the client instead of the newly created
# instance information we do not want to pass this parameter
# to the compute create call which always returns both. We use
# this flag after the instance create call to determine what
# to return to the client
return_reservation_id = create_kwargs.pop('return_reservation_id',
False)
requested_networks = None
if ('os-networks' in self.extension_info.get_extensions()
or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if requested_networks is not None:
requested_networks = self._get_requested_networks(
requested_networks)
if requested_networks and len(requested_networks):
authorize(context, target, 'create:attach_network')
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(
flavor_id, ctxt=context, read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=description,
availability_zone=availability_zone,
forced_host=host, forced_node=node,
metadata=server_dict.get('metadata', {}),
admin_password=password,
requested_networks=requested_networks,
check_server_group_quota=True,
**create_kwargs)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ExternalNetworkAttachForbidden as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % error
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.ImageBadRequest,
exception.FixedIpNotFoundForAddress,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.InvalidVolume,
exception.MultiplePortsNotApplicable,
exception.InvalidFixedIpAndMaxCountRequest,
exception.InstanceUserDataMalformed,
exception.InstanceUserDataTooLarge,
exception.PortNotFound,
exception.FixedIpAlreadyInUse,
exception.SecurityGroupNotFound,
exception.PortRequiresFixedIP,
exception.NetworkRequiresSubnet,
exception.NetworkNotFound,
exception.NetworkDuplicated,
exception.InvalidBDM,
exception.InvalidBDMSnapshot,
exception.InvalidBDMVolume,
exception.InvalidBDMImage,
exception.InvalidBDMBootSequence,
exception.InvalidBDMLocalsLimit,
exception.InvalidBDMVolumeNotBootable,
exception.InvalidBDMEphemeralSize,
exception.InvalidBDMFormat,
exception.InvalidBDMSwapSize,
exception.AutoDiskConfigDisabledByImage,
exception.ImageNUMATopologyIncomplete,
exception.ImageNUMATopologyForbidden,
exception.ImageNUMATopologyAsymmetric,
exception.ImageNUMATopologyCPUOutOfRange,
exception.ImageNUMATopologyCPUDuplicates,
exception.ImageNUMATopologyCPUsUnassigned,
exception.ImageNUMATopologyMemoryOutOfRange,
exception.InstanceGroupNotFound) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.InstanceExists,
exception.NetworkAmbiguous,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if return_reservation_id:
# NOTE(cyeoh): In v3 reservation_id was wrapped in
# servers_reservation but this is reverted for V2 API
# compatibility. In the long term with the tasks API we
# will probably just drop the concept of reservation_id
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
# NOTE(gmann): Parameter 'req_body' is placed to handle the scheduler_hint
# extension for V2.1. No other extension is supposed to use this, as
# it will be removed soon.
def _create_extension_point(self, ext, server_dict,
create_kwargs, req_body):
handler = ext.obj
LOG.debug("Running _create_extension_point for %s", ext.obj)
handler.server_create(server_dict, create_kwargs, req_body)
def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs):
handler = ext.obj
LOG.debug("Running _rebuild_extension_point for %s", ext.obj)
handler.server_rebuild(rebuild_dict, rebuild_kwargs)
def _resize_extension_point(self, ext, resize_dict, resize_kwargs):
handler = ext.obj
LOG.debug("Running _resize_extension_point for %s", ext.obj)
handler.server_resize(resize_dict, resize_kwargs)
def _update_extension_point(self, ext, update_dict, update_kwargs):
handler = ext.obj
LOG.debug("Running _update_extension_point for %s", ext.obj)
handler.server_update(update_dict, update_kwargs)
def _create_extension_schema(self, ext, create_schema, version):
handler = ext.obj
LOG.debug("Running _create_extension_schema for %s", ext.obj)
schema = handler.get_server_create_schema(version)
if ext.obj.name == 'SchedulerHints':
# NOTE(oomichi): The request parameter position of scheduler-hint
# extension is different from the other extensions, so here handles
# the difference.
create_schema['properties'].update(schema)
else:
create_schema['properties']['server']['properties'].update(schema)
def _update_extension_schema(self, ext, update_schema, version):
handler = ext.obj
LOG.debug("Running _update_extension_schema for %s", ext.obj)
schema = handler.get_server_update_schema(version)
update_schema['properties']['server']['properties'].update(schema)
def _rebuild_extension_schema(self, ext, rebuild_schema, version):
handler = ext.obj
LOG.debug("Running _rebuild_extension_schema for %s", ext.obj)
schema = handler.get_server_rebuild_schema(version)
rebuild_schema['properties']['rebuild']['properties'].update(schema)
def _resize_extension_schema(self, ext, resize_schema, version):
handler = ext.obj
LOG.debug("Running _resize_extension_schema for %s", ext.obj)
schema = handler.get_server_resize_schema(version)
resize_schema['properties']['resize']['properties'].update(schema)
def _delete(self, context, req, instance_uuid):
authorize(context, action='delete')
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): an instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@extensions.expected_errors((400, 404))
@validation.schema(schema_server_update_v20, '2.0', '2.0')
@validation.schema(schema_server_update, '2.1', '2.18')
@validation.schema(schema_server_update_v219, '2.19')
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
ctxt = req.environ['nova.context']
update_dict = {}
authorize(ctxt, action='update')
if 'name' in body['server']:
update_dict['display_name'] = common.normalize_name(
body['server']['name'])
if 'description' in body['server']:
# This is allowed to be None (remove description)
update_dict['display_description'] = body['server']['description']
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point,
body['server'], update_dict)
instance = self._get_server(ctxt, req, id, is_detail=True)
try:
# NOTE(mikal): this try block needs to stay because save() still
# might throw an exception.
instance.update(update_dict)
instance.save()
return self._view_builder.show(req, instance,
extend_address=False)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
# NOTE(gmann): Returns 204 for backwards compatibility but should be 202
# for representing an async API, as this API just accepts the request and
# asks the hypervisor driver to complete it asynchronously.
@wsgi.response(204)
@extensions.expected_errors((400, 404, 409))
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
authorize(context, action='confirm_resize')
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize', id)
@wsgi.response(202)
@extensions.expected_errors((400, 404, 409))
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
authorize(context, action='revert_resize')
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize', id)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('reboot')
@validation.schema(schema_servers.reboot)
def _action_reboot(self, req, id, body):
reboot_type = body['reboot']['type'].upper()
context = req.environ['nova.context']
authorize(context, action='reboot')
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot', id)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
authorize(context, action='resize')
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.QuotaError as error:
raise exc.HTTPForbidden(
explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize', instance_id)
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.AutoDiskConfigDisabledByImage,
exception.CannotResizeDisk,
exception.CannotResizeToSameFlavor,
exception.FlavorNotFound,
exception.NoValidHost) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(204)
@extensions.expected_errors((404, 409))
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete', id)
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
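    # For illustration (hypothetical href): given
    # 'http://glance.example.com/v2/images/<uuid>', the final path segment is
    # taken as the image id; a value that is not UUID-like raises
    # HTTPBadRequest.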
def _image_from_req_data(self, server_dict, create_kwargs):
"""Get image data from the request or raise appropriate
exceptions.
The field imageRef is mandatory when no block devices have been
defined and must be a proper uuid when present.
"""
image_href = server_dict.get('imageRef')
if not image_href and create_kwargs.get('block_device_mapping'):
return ''
elif image_href:
return self._image_uuid_from_href(six.text_type(image_href))
else:
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
flavor_ref = data['server']['flavorRef']
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@extensions.expected_errors((400, 401, 403, 404, 409))
@wsgi.action('resize')
@validation.schema(schema_server_resize)
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
resize_dict = body['resize']
flavor_ref = str(resize_dict["flavorRef"])
resize_kwargs = {}
if list(self.resize_extension_manager):
self.resize_extension_manager.map(self._resize_extension_point,
resize_dict, resize_kwargs)
self._resize(req, id, flavor_ref, **resize_kwargs)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.action('rebuild')
@validation.schema(schema_server_rebuild_v20, '2.0', '2.0')
@validation.schema(schema_server_rebuild, '2.1', '2.18')
@validation.schema(schema_server_rebuild_v219, '2.19')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
image_href = rebuild_dict["imageRef"]
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
authorize(context, action='rebuild')
instance = self._get_server(context, req, id)
attr_map = {
'name': 'display_name',
'description': 'display_description',
'metadata': 'metadata',
}
rebuild_kwargs = {}
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point,
rebuild_dict, rebuild_kwargs)
for request_attribute, instance_attribute in attr_map.items():
try:
if request_attribute == 'name':
rebuild_kwargs[instance_attribute] = common.normalize_name(
rebuild_dict[request_attribute])
else:
rebuild_kwargs[instance_attribute] = rebuild_dict[
request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
**rebuild_kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild', id)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id, is_detail=True)
view = self._view_builder.show(req, instance, extend_address=False)
# Add on the admin_password attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('createImage')
@common.check_snapshots_enabled
@validation.schema(schema_servers.create_image, '2.0', '2.0')
@validation.schema(schema_servers.create_image, '2.1')
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
authorize(context, action='create_image')
entity = body["createImage"]
image_name = common.normalize_name(entity["name"])
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
authorize(context, action="create_image:allow_volume_backed")
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_name,
                    extra_properties=metadata)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=metadata)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage', id)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
except KeyError:
password = utils.generate_password()
return password
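    # Behaviour sketch (value hypothetical): a body of {'adminPass': 's3cret'}
    # returns 's3cret' as-is; a body without 'adminPass' yields a freshly
    # generated random password.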
def _get_server_search_options(self, req):
"""Return server search options allowed by non-admin."""
opt_list = ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
if api_version_request.is_supported(req, min_version='2.5'):
opt_list += ('ip6',)
if api_version_request.is_supported(req, min_version='2.26'):
opt_list += TAG_SEARCH_FILTERS
return opt_list
def _get_instance(self, context, instance_uuid):
try:
attrs = ['system_metadata', 'metadata']
return objects.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=attrs)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-start')
def _start_server(self, req, id, body):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorize(context, instance, 'start')
LOG.debug('start instance', instance=instance)
try:
self.compute_api.start(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'start', id)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorize(context, instance, 'stop')
LOG.debug('stop instance', instance=instance)
try:
self.compute_api.stop(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'stop', id)
@wsgi.Controller.api_version("2.17")
@wsgi.response(202)
@extensions.expected_errors((400, 404, 409))
@wsgi.action('trigger_crash_dump')
@validation.schema(schema_servers.trigger_crash_dump)
def _action_trigger_crash_dump(self, req, id, body):
"""Trigger crash dump in an instance"""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorize(context, instance, 'trigger_crash_dump')
try:
self.compute_api.trigger_crash_dump(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'trigger_crash_dump', id)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.TriggerCrashDumpNotSupported as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Only remove parameters for sorting and pagination
for key in ('sort_key', 'sort_dir', 'limit', 'marker'):
search_options.pop(key, None)
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
if unknown_options:
LOG.debug("Removing options '%s' from query",
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
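# Behaviour sketch for remove_invalid_options (values hypothetical): with
# allowed_search_options=('name', 'status'), a non-admin query of
# {'name': 'vm1', 'host': 'c1', 'limit': 10} is reduced to {'name': 'vm1'};
# an admin keeps 'host' but still loses pagination keys such as 'limit'
# and 'marker'.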
class Servers(extensions.V21APIExtensionBase):
"""Servers."""
name = "Servers"
alias = ALIAS
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'detail': 'GET'}
resources = [
extensions.ResourceExtension(
ALIAS,
ServersController(extension_info=self.extension_info),
member_name='server', collection_actions=collection_actions,
member_actions=member_actions)]
return resources
def get_controller_extensions(self):
return []
|
{
"content_hash": "1d33bb37d0f2e8b32e35e410b7d6939a",
"timestamp": "",
"source": "github",
"line_count": 1246,
"max_line_length": 79,
"avg_line_length": 44.86677367576244,
"alnum_prop": 0.5892064968517459,
"repo_name": "zhimin711/nova",
"id": "c34dffda9b14fccaa060cad77bb6fed1e32261e7",
"size": "56585",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/servers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16549579"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259485"
}
],
"symlink_target": ""
}
|
"""
Produces colormaps by curving through Lch color space.
Chroma should probably be limited to the RGB cube in a way that never
produces sharp angles in the curve, which appear as bands in the gradient.
Examples:
This is similar to the 'hsv' map, but isoluminant:
lab_color_scale(hue=30, rot=1, l=75, chroma=41)
This is similar to 'cubehelix', except in Lch space instead of RGB space:
lab_color_scale(hue=0, rot=-1.5, chroma=30)
"""
from numpy import asarray, linspace, cos, sin, vstack, array, ones, radians
from matplotlib import cm
from skimage.color import lab2rgb
def lch2lab(L, c, h):
"""
L is lightness, 0 (black) to 100 (white)
c is chroma, 0-100 or more
h is hue, in degrees, 0 = red, 90 = yellow, 180 = green, 270 = blue
"""
a = c * cos(radians(h))
b = c * sin(radians(h))
return L, a, b
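# Quick sanity check of the conversion above: lch2lab(50, 30, 90) returns
# approximately (50, 0.0, 30.0), since a = 30*cos(90 deg) ~ 0 and
# b = 30*sin(90 deg) = 30.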
def lab_color_scale(lutsize=256, hue=0, chroma=50, rot=0.25, l=None):
"""
Color map created by drawing arcs through the L*c*h*
(lightness, chroma, hue) cylindrical coordinate color space, also called
the L*a*b* color space when using Cartesian coordinates.
Parameters
----------
lutsize : int
The number of elements in the colormap lookup table. (Default is 256.)
hue : float
Hue angle at which the colormap starts, in degrees. Default 0 is
reddish.
chroma : float
Chroma radius for the colormap path. If chroma is 0, the colormap is
grayscale. If chroma is too large, the colors will exceed the RGB
gamut and produce ugly bands. Since the RGB cube is pointy at the
black and white ends, this always clips somewhat.
rot : float
        Number of hue rotations. If 0, hue is constant and only lightness is
        varied. If 1, all hues are passed through once; if 2, the map circles
        through all hues twice, etc.
        For counterclockwise rotation, make the value negative.
l : float
Lightness value for constant-lightness (isoluminant) colormaps. If
not specified, lightness is varied from 0 at minimum to 100 at maximum.
Returns
-------
cmap : matplotlib.colors.LinearSegmentedColormap
The resulting colormap object
"""
hue = linspace(hue, hue + rot * 360, lutsize)
# or use atleast1d
if l is None:
L = linspace(0, 100, lutsize)
elif hasattr(l, "__len__"):
if len(l) == 2:
L = linspace(l[0], l[1], lutsize)
elif len(l) == 1:
L = l * ones(lutsize)
elif len(l) == lutsize:
L = asarray(l)
else:
raise ValueError('lightness argument not understood')
else:
L = l * ones(lutsize)
L, a, b = lch2lab(L, chroma, hue)
rgbs = []
Lab = vstack([L, a, b])
for item in Lab.T:
rgb = lab2rgb(item.reshape(1, 1, 3))
rgbs.append((rgb[0, 0, 0], rgb[0, 0, 1], rgb[0, 0, 2]))
return cm.colors.LinearSegmentedColormap.from_list('lab_color_scale', rgbs,
lutsize)
if __name__ == "__main__":
import numpy as np
from matplotlib import pyplot as plt
dx, dy = 0.01, 0.01
x = np.arange(-2.0, 2.0001, dx)
y = np.arange(-2.0, 2.0001, dy)
X, Y = np.meshgrid(x, y)
# Matplotlib's 2-bump example shows banding and other problems clearly
Z = X * np.exp(-X**2 - Y**2)
plt.figure()
plt.imshow(Z, vmax=abs(Z).max(), vmin=-abs(Z).max(),
cmap=lab_color_scale(hue=0, chroma=100, l=45, rot=1)
)
plt.colorbar()
plt.show()
|
{
"content_hash": "30b2a7b8b7dc839fa21b882ae9459976",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 32.73394495412844,
"alnum_prop": 0.6070627802690582,
"repo_name": "sealhuang/brainDecodingToolbox",
"id": "3fe5ff442e0af135febbdca6378629ef71394a41",
"size": "3592",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "braincode/vim2/color_scale.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "774"
},
{
"name": "Python",
"bytes": "181593"
}
],
"symlink_target": ""
}
|
import sys, os
import re
from subprocess import call, Popen, PIPE
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
from repo_util import run_cmd_get_output
# Call Doxygen to get XML files from the header files
print "Calling Doxygen to generate latest XML files"
call('doxygen')
# Generate 'api_name.inc' files using the XML files by Doxygen
os.system("python gen-dxd.py")
# Generate 'kconfig.inc' file from components' Kconfig files
os.system("python gen-kconfig-doc.py > _build/inc/kconfig.inc")
# http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
#
suppress_warnings = ['image.nonlocal_uri']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe',
'link-roles',
'sphinxcontrib.blockdiag',
'sphinxcontrib.seqdiag',
'sphinxcontrib.actdiag',
'sphinxcontrib.nwdiag',
'sphinxcontrib.rackdiag',
'sphinxcontrib.packetdiag'
]
# Set up font for blockdiag, nwdiag, rackdiag and packetdiag
blockdiag_fontpath = '_static/DejaVuSans.ttf'
seqdiag_fontpath = '_static/DejaVuSans.ttf'
actdiag_fontpath = '_static/DejaVuSans.ttf'
nwdiag_fontpath = '_static/DejaVuSans.ttf'
rackdiag_fontpath = '_static/DejaVuSans.ttf'
packetdiag_fontpath = '_static/DejaVuSans.ttf'
# Breathe extension variables
breathe_projects = { "esp32-idf": "xml/" }
breathe_default_project = "esp32-idf"
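# With Breathe configured, .rst sources can pull in Doxygen output; a typical
# directive (function name shown for illustration) looks like:
#
#   .. doxygenfunction:: esp_wifi_init
#      :project: esp32-idf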
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ESP-IDF Programming Guide'
copyright = u'2016 - 2017, Espressif'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Readthedocs largely ignores 'version' and 'release', and displays one of
# 'latest', tag name, or branch name, depending on the build type.
# Still, this is useful for non-RTD builds.
# This is supposed to be "the short X.Y version", but it's the only version
# visible when you open index.html.
# Display full version to make things less confusing.
version = run_cmd_get_output('git describe')
# The full version, including alpha/beta/rc tags.
# If needed, nearest tag is returned by 'git describe --abbrev=0'.
release = version
print 'Version: {0} Release: {1}'.format(version, release)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
u'Read the Docs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
[u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Use sphinx_rtd_theme for local builds --------------------------------
# ref. https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
#
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
|
{
"content_hash": "f727fba170ec0e5d4340799d7792ce0d",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 106,
"avg_line_length": 33.91059602649007,
"alnum_prop": 0.708231618006054,
"repo_name": "empoweredhomes/esp-idf",
"id": "caa233c93c6fcbb1ff3a8649384700effa7e49be",
"size": "10676",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "140730"
},
{
"name": "C",
"bytes": "25454916"
},
{
"name": "C++",
"bytes": "1438938"
},
{
"name": "Lex",
"bytes": "7280"
},
{
"name": "Makefile",
"bytes": "89591"
},
{
"name": "Objective-C",
"bytes": "45515"
},
{
"name": "Perl",
"bytes": "15204"
},
{
"name": "Python",
"bytes": "542036"
},
{
"name": "Shell",
"bytes": "36955"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
}
|
'''This example runs some iRODS commands
If something doesn't work as expected, try to set
SAGA_VERBOSE=3 in your environment before you run the
script in order to get some debug output.
If you think you have encountered a defect, please
report it at: https://github.com/saga-project/saga-python/issues
'''
__author__ = "Ashley Zebrowski"
__copyright__ = "Copyright 2012, Ashley Zebrowski"
__license__ = "MIT"
import sys, time
import saga
import os
FILE_SIZE = 1 # in megs, approx
NUM_REPLICAS = 5 # num replicas to create
TEMP_FILENAME = "test.txt" # filename to create and use for testing
TEMP_DIR = "/irods_test_dir/" #directory to create and use for testing
IRODS_DIRECTORY = "/osg/home/azebro1/" #directory to store our iRODS files in, don't forget trailing and leading /
IRODS_RESOURCE = "osgGridFtpGroup" #iRODS resource or resource group to upload files to
def main():
try:
#myfile = saga.logicalfile.LogicalFile('irods://'+IRODS_DIRECTORY+TEMP_FILENAME)
#myfile = saga.replica.LogicalFile('irods://'+IRODS_DIRECTORY+TEMP_FILENAME)
#myfile.add_location("irods:////data/cache/AGLT2_CE_2_FTPplaceholder/whatever?resource=AGLT2_CE_2_FTP")
#myfile.add_location("irods:///osg/home/azebro1/test_file?resource=AGLT2_CE_2_FTP")
# grab our home directory (tested on Linux)
home_dir = os.path.expanduser("~"+"/")
print "Creating temporary file of size %dM : %s" % \
(FILE_SIZE, home_dir+TEMP_FILENAME)
# create a file for us to use with iRODS
with open(home_dir+TEMP_FILENAME, "wb") as f:
f.write ("x" * (FILE_SIZE * pow(2,20)) )
print "Creating iRODS directory object"
mydir = saga.replica.LogicalDirectory("irods://localhost/" + IRODS_DIRECTORY)
import subprocess
subprocess.call(["irm", IRODS_DIRECTORY+TEMP_FILENAME])
print "Uploading file to iRODS"
myfile = saga.replica.LogicalFile('irods://'+IRODS_DIRECTORY+TEMP_FILENAME)
myfile.upload(home_dir + TEMP_FILENAME, \
"irods:///this/path/is/ignored/?resource="+IRODS_RESOURCE)
print "Deleting file locally : %s" % (home_dir + TEMP_FILENAME)
os.remove(home_dir + TEMP_FILENAME)
print "Printing iRODS directory listing for %s " % ("irods://" + IRODS_DIRECTORY)
for entry in mydir.list():
print entry
print "Creating iRODS file object"
myfile = saga.replica.LogicalFile('irods://' + IRODS_DIRECTORY+TEMP_FILENAME)
print "Size of test file %s on iRODS in bytes:" % (IRODS_DIRECTORY + TEMP_FILENAME)
print myfile.get_size()
print "Creating",NUM_REPLICAS,"replicas for",IRODS_DIRECTORY+TEMP_FILENAME
for i in range(NUM_REPLICAS):
myfile.replicate("irods:///this/path/is/ignored/?resource="+IRODS_RESOURCE)
print "Locations the file is stored at on iRODS:"
for entry in myfile.list_locations():
print entry
print "Downloading logical file %s to current/default directory" % \
(IRODS_DIRECTORY + TEMP_FILENAME)
myfile.download(TEMP_FILENAME)
print "Downloading logical file %s to /tmp/" % \
(IRODS_DIRECTORY + TEMP_FILENAME)
myfile.download("/tmp/")
#exit(0)
print "Deleting downloaded file locally : %s" % (os.getcwd() + TEMP_FILENAME)
#os.remove(os.getcwd() +"/" + TEMP_FILENAME)
print "Deleting downloaded file locally : %s" % ("/tmp" + TEMP_FILENAME)
#os.remove("/tmp/" + TEMP_FILENAME)
print "Making test dir %s on iRODS" % (IRODS_DIRECTORY+TEMP_DIR)
mydir.make_dir("irods://"+IRODS_DIRECTORY+TEMP_DIR)
#commented because iRODS install on gw68 doesn't support move
#print "Moving file to %s test dir on iRODS" % (IRODS_DIRECTORY+TEMP_DIR)
#myfile.move("irods://"+IRODS_DIRECTORY+TEMP_DIR)
print "Deleting test dir %s from iRODS" % (IRODS_DIRECTORY+TEMP_DIR)
mydir.remove("irods://"+IRODS_DIRECTORY+TEMP_DIR)
print "Deleting file %s from iRODS" % (IRODS_DIRECTORY+TEMP_FILENAME)
myfile.remove()
print "iRODS test script finished execution"
except saga.SagaException, ex:
print "An error occured while executing the test script! %s" % (str(ex))
import traceback
print traceback.format_exc()
if __name__ == "__main__":
main()
|
{
"content_hash": "790fc40254fc8f116248d7b355258d1d",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 114,
"avg_line_length": 39.421052631578945,
"alnum_prop": 0.6395193591455274,
"repo_name": "telamonian/saga-python",
"id": "46ac966e4b66b79d8188dd05e03ebc74b73f944b",
"size": "4545",
"binary": false,
"copies": "5",
"ref": "refs/heads/devel",
"path": "tests/adaptors/irods/irods_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "2790"
},
{
"name": "Makefile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "1551101"
},
{
"name": "Shell",
"bytes": "55277"
}
],
"symlink_target": ""
}
|
from .request import make_request
from .response import process_response
from .util import _build_tracking_url
def create_tracking(tracking, **kwargs):
"""Create a tracking.
"""
response = make_request('POST', 'trackings', json=dict(tracking=tracking), **kwargs)
return process_response(response)
def get_tracking(*, tracking_id=None, slug=None, tracking_number=None,
**kwargs):
"""Create a tracking.
"""
optional_keys = ('tracking_postal_code', 'tracking_ship_date', 'tracking_account_number', 'tracking_key',
'tracking_origin_country', 'tracking_destination_country', 'tracking_state',
'fields', 'lang')
params = {key: kwargs.pop(key) for key in optional_keys if key in kwargs}
url = 'trackings/{}'.format(_build_tracking_url(tracking_id, slug, tracking_number))
response = make_request('GET', url, params=params, **kwargs)
return process_response(response)
def update_tracking(*, tracking, tracking_id=None, slug=None, tracking_number=None, **kwargs):
"""Update a tracking.
"""
url = 'trackings/{}'.format(_build_tracking_url(tracking_id, slug, tracking_number))
response = make_request('PUT', url, json=dict(tracking=tracking), **kwargs)
return process_response(response)
def retrack(*, tracking_id=None, slug=None, tracking_number=None, **kwargs):
"""Retrack an expired tracking. Max 3 times per tracking.
"""
url = 'trackings/{}/retrack'.format(_build_tracking_url(tracking_id, slug, tracking_number))
response = make_request('POST', url, **kwargs)
return process_response(response)
def get_last_checkpoint(*, tracking_id=None, slug=None, tracking_number=None, **kwargs):
"""Return the tracking information of the last checkpoint of a single tracking.
"""
url = 'last_checkpoint/{}'.format(_build_tracking_url(tracking_id, slug, tracking_number))
response = make_request('GET', url, **kwargs)
return process_response(response)
def delete_tracking(*, tracking_id=None, slug=None, tracking_number=None, **kwargs):
"""Delete a tracking.
"""
url = 'trackings/{}'.format(_build_tracking_url(tracking_id, slug, tracking_number))
response = make_request('DELETE', url, **kwargs)
return process_response(response)
def list_trackings(**kwargs):
"""Get tracking results of multiple trackings.
"""
optional_keys = (
'page',
'limit',
'keyword',
'tracking_numbers',
'slug',
'delivery_time',
'origin',
'destination',
'tag',
'created_at_min',
'created_at_max',
'updated_at_min',
'updated_at_max',
'fields',
'lang',
'last_updated_at',
'return_to_sender',
'courier_destination_country_iso3',
)
params = {key: kwargs.pop(key) for key in optional_keys if key in kwargs}
response = make_request('GET', 'trackings', params=params, **kwargs)
return process_response(response)
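# A minimal usage sketch (not part of this module). It assumes an AfterShip
# API key is forwarded through the **kwargs that every helper above passes to
# make_request; the exact authentication mechanism lives in the request layer.
#
#     from aftership import tracking
#     t = tracking.get_tracking(slug='ups',
#                               tracking_number='1Z999AA10123456784',  # sample number
#                               fields='tag,checkpoints',
#                               api_key='YOUR_API_KEY')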
|
{
"content_hash": "f301a1b681c46a429fd48eff9ede471a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 109,
"avg_line_length": 35.65882352941176,
"alnum_prop": 0.6417024084460574,
"repo_name": "AfterShip/aftership-sdk-python",
"id": "5d48a27a9cb9cac5dba4acb1414686424b306da6",
"size": "3031",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aftership/tracking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "405"
},
{
"name": "Python",
"bytes": "16392"
}
],
"symlink_target": ""
}
|
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
NAMESPACES = {
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
zin = zipfile.ZipFile(filename)
styles = zin.read("styles.xml")
root = etree.fromstring(styles)
for el in root.xpath("//style:page-layout-properties",
namespaces=NAMESPACES):
        for attr in list(el.attrib):  # copy keys; we delete while iterating
if attr.startswith("{%s}" % NAMESPACES["fo"]):
del el.attrib[attr]
tempname = mkstemp()
zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w",
zipfile.ZIP_DEFLATED)
for item in zin.infolist():
if item.filename == "styles.xml":
zout.writestr(item, etree.tostring(root))
else:
zout.writestr(item, zin.read(item.filename))
zout.close()
zin.close()
shutil.move(tempname[1], filename)
def main():
args = sys.argv[1:]
if len(args) != 1:
print >> sys.stderr, __doc__
print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0]
sys.exit(1)
filename = args[0]
prepstyle(filename)
if __name__ == '__main__':
main()
# vim:tw=78:sw=4:sts=4:et:
|
{
"content_hash": "c3bc56537551c3830bab5a8dfd3be198",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 24.672131147540984,
"alnum_prop": 0.6132890365448505,
"repo_name": "j-windsor/iRiot-WebApp",
"id": "a48a3499bad6f47a5511068dec7d5ac23660b6e3",
"size": "1744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/rst2odt_prepstyles.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "23695"
},
{
"name": "Python",
"bytes": "46591"
},
{
"name": "Shell",
"bytes": "3258"
}
],
"symlink_target": ""
}
|
r"""Code to extract a tensorflow checkpoint from a bundle file.
To run this code on your local machine:
$ python magenta/scripts/unpack_bundle.py \
--bundle_path 'path' --checkpoint_path 'path'
"""
from magenta.models.shared import sequence_generator_bundle
import tensorflow.compat.v1 as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('bundle_path', '',
'Path to .mag file containing the bundle')
tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/model.ckpt',
                           'Path where the extracted checkpoint should '
'be saved')
def main(_):
bundle_file = FLAGS.bundle_path
checkpoint_file = FLAGS.checkpoint_path
metagraph_filename = checkpoint_file + '.meta'
bundle = sequence_generator_bundle.read_bundle_file(bundle_file)
with tf.gfile.Open(checkpoint_file, 'wb') as f:
f.write(bundle.checkpoint_file[0])
with tf.gfile.Open(metagraph_filename, 'wb') as f:
f.write(bundle.metagraph_file)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.app.run()
|
{
"content_hash": "a82eb543b1d46edf747cb5f7f89d354f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 71,
"avg_line_length": 31.5,
"alnum_prop": 0.6685340802987861,
"repo_name": "magenta/magenta",
"id": "7cefa63ada41349f4d16b920cdee533d9cd58cc0",
"size": "1656",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "magenta/scripts/unpack_bundle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2717663"
},
{
"name": "Shell",
"bytes": "22832"
}
],
"symlink_target": ""
}
|