text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""
byceps.services.brand.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from typing import Optional
from ...database import db
from ...typing import BrandID
from .dbmodels.brand import Brand as DbBrand
from .dbmodels.setting import Setting as DbSetting
from .transfer.models import Brand
def create_brand(brand_id: BrandID, title: str) -> Brand:
    """Create and persist a new brand, returning its transfer object."""
    db_brand = DbBrand(brand_id, title)
    db.session.add(db_brand)
    db.session.commit()
    return _db_entity_to_brand(db_brand)
def update_brand(
    brand_id: BrandID,
    title: str,
    image_filename: Optional[str],
    archived: bool,
) -> Brand:
    """Update a brand's title, image filename, and archival flag."""
    db_brand = _get_db_brand(brand_id)
    db_brand.title = title
    db_brand.image_filename = image_filename
    db_brand.archived = archived
    db.session.commit()
    return _db_entity_to_brand(db_brand)
def delete_brand(brand_id: BrandID) -> None:
    """Delete a brand and its settings."""
    # Remove the brand's settings first, then the brand row itself.
    settings_query = db.session.query(DbSetting).filter_by(brand_id=brand_id)
    settings_query.delete()
    brand_query = db.session.query(DbBrand).filter_by(id=brand_id)
    brand_query.delete()
    db.session.commit()
def find_brand(brand_id: BrandID) -> Optional[Brand]:
    """Return the brand with that id, or `None` if not found."""
    db_brand = _get_db_brand(brand_id)
    return _db_entity_to_brand(db_brand) if db_brand is not None else None
def _get_db_brand(brand_id: BrandID) -> Optional[DbBrand]:
    """Return the database entity for that brand ID, or `None`.

    Fix: the return annotation previously claimed `DbBrand`, but
    `db.session.get` returns `None` for an unknown primary key —
    `find_brand` already relies on that `None` case.
    """
    return db.session.get(DbBrand, brand_id)
def get_brand(brand_id: BrandID) -> Brand:
    """Return the brand with that id, or raise an exception."""
    brand = find_brand(brand_id)
    if brand is not None:
        return brand
    raise ValueError(f'Unknown brand ID "{brand_id}"')
def get_all_brands() -> list[Brand]:
    """Return all brands, ordered by title."""
    query = db.session.query(DbBrand).order_by(DbBrand.title)
    return [_db_entity_to_brand(db_brand) for db_brand in query.all()]
def get_active_brands() -> set[Brand]:
    """Return active (i.e. non-archived) brands."""
    query = db.session.query(DbBrand).filter_by(archived=False)
    return {_db_entity_to_brand(db_brand) for db_brand in query.all()}
def count_brands() -> int:
    """Return the number of brands."""
    query = db.session.query(DbBrand)
    return query.count()
def _db_entity_to_brand(brand: DbBrand) -> Brand:
    """Map a database entity to a `Brand` transfer object."""
    if brand.image_filename:
        image_url_path: Optional[str] = (
            f'/data/global/brand_images/{brand.image_filename}'
        )
    else:
        image_url_path = None
    return Brand(
        id=brand.id,
        title=brand.title,
        image_filename=brand.image_filename,
        image_url_path=image_url_path,
        archived=brand.archived,
    )
|
{
"content_hash": "2698e3ada6dbbb542937c0274bb373ba",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 76,
"avg_line_length": 23.532258064516128,
"alnum_prop": 0.6288553803975325,
"repo_name": "homeworkprod/byceps",
"id": "21740a1bc08c2d03657e18745bada034e9461c6b",
"size": "2918",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "byceps/services/brand/service.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38198"
},
{
"name": "HTML",
"bytes": "318830"
},
{
"name": "JavaScript",
"bytes": "8541"
},
{
"name": "Python",
"bytes": "935249"
}
],
"symlink_target": ""
}
|
"""
isort:skip_file
"""
from django_mysql.models.base import Model # noqa
from django_mysql.models.aggregates import BitAnd, BitOr, BitXor, GroupConcat # noqa
from django_mysql.models.expressions import ListF, SetF # noqa
from django_mysql.models.query import ( # noqa
add_QuerySetMixin,
ApproximateInt,
SmartChunkedIterator,
SmartIterator,
pt_visual_explain,
QuerySet,
QuerySetMixin,
)
from django_mysql.models.fields import ( # noqa
Bit1BooleanField,
DynamicField,
EnumField,
JSONField,
ListCharField,
ListTextField,
NullBit1BooleanField,
SetCharField,
SetTextField,
SizedBinaryField,
SizedTextField,
)
|
{
"content_hash": "c90e4b07f39e1488edfe85d5466658d6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 85,
"avg_line_length": 24.357142857142858,
"alnum_prop": 0.7199413489736071,
"repo_name": "arnau126/django-mysql",
"id": "d162b77e508802975fd6c32a97bbd3aa90fc02fb",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/django_mysql/models/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "538625"
},
{
"name": "Shell",
"bytes": "1756"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse
def index(request):
    """Minimal view: always answer with a plain 'Ok' body."""
    response = HttpResponse('Ok')
    return response
|
{
"content_hash": "8b707f2259ce0f84221c00406f85a3b6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 17.8,
"alnum_prop": 0.7528089887640449,
"repo_name": "igr-santos/tracker",
"id": "940346a4721f3e290a5773d4d0029d4a83f3e585",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "135"
},
{
"name": "Python",
"bytes": "4786"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import copy
import six
import requests
import json
import jsonschema
import datapackage_registry
from datapackage_registry.exceptions import DataPackageRegistryException
from .exceptions import (
SchemaError,
ValidationError,
RegistryError,
)
class Schema(object):
    '''Abstracts a JSON Schema and allows validation of data against it.
    Args:
        schema (str or dict): The JSON Schema itself as a dict, a local path
            or URL to it.
    Raises:
        SchemaError: If unable to load schema or it was invalid.
    Warning:
        The schema objects created with this class are read-only. You should
        not change any of its attributes after creation.
    '''
    def __init__(self, schema):
        # Load the registry first: a plain identifier resolves through the
        # registry before being treated as a file path or URL.
        self._registry = self._load_registry()
        self._schema = self._load_schema(schema, self._registry)
        self._validator = self._load_validator(self._schema, self._registry)
        # Fail fast if the loaded document is not itself a valid JSON Schema.
        self._check_schema()
    def to_dict(self):
        '''dict: Convert this :class:`.Schema` to dict.'''
        # Deep copy so callers cannot mutate our internal schema.
        return copy.deepcopy(self._schema)
    def validate(self, data):
        '''Validates a data dict against this schema.
        Args:
            data (dict): The data to be validated.
        Raises:
            ValidationError: If the data is invalid.
        '''
        try:
            self._validator.validate(data)
        except jsonschema.ValidationError as e:
            # Re-raise as our own exception type, keeping the original
            # jsonschema error as the cause (py2/py3-compatible chaining).
            six.raise_from(ValidationError.create_from(e), e)
    def _load_registry(self):
        # Wrap registry failures in our own RegistryError.
        try:
            return datapackage_registry.Registry()
        except DataPackageRegistryException as e:
            six.raise_from(RegistryError(str(e)), e)
    def _load_schema(self, schema, registry):
        # Accepted inputs: registry identifier, local file path, URL, dict.
        the_schema = schema
        if isinstance(schema, six.string_types):
            try:
                the_schema = registry.get(schema)
                if not the_schema:
                    if os.path.isfile(schema):
                        with open(schema, 'r') as f:
                            the_schema = json.load(f)
                    else:
                        # Not a registry id or a local file: try it as a URL.
                        req = requests.get(schema)
                        req.raise_for_status()
                        the_schema = req.json()
            except (IOError,
                    ValueError,
                    DataPackageRegistryException,
                    requests.exceptions.RequestException) as e:
                msg = 'Unable to load schema at "{0}"'
                six.raise_from(SchemaError(msg.format(schema)), e)
        elif isinstance(the_schema, dict):
            # Deep copy so later mutation by the caller cannot affect us.
            the_schema = copy.deepcopy(the_schema)
        else:
            msg = 'Schema must be a "dict", but was a "{0}"'
            raise SchemaError(msg.format(type(the_schema).__name__))
        return the_schema
    def _load_validator(self, schema, registry):
        resolver = None
        if registry.base_path:
            # Resolve relative "$ref"s against the registry's local copy.
            path = 'file://{base_path}/'.format(base_path=registry.base_path)
            resolver = jsonschema.RefResolver(path, schema)
        validator_class = jsonschema.validators.validator_for(schema)
        return validator_class(schema, resolver=resolver)
    def _check_schema(self):
        try:
            self._validator.check_schema(self._schema)
        except jsonschema.exceptions.SchemaError as e:
            six.raise_from(SchemaError.create_from(e), e)
    def __getattr__(self, name):
        # Expose top-level schema keys as read-only attributes
        # (deep-copied, so returned values cannot mutate the schema).
        if name in self.__dict__.get('_schema', {}):
            return copy.deepcopy(self._schema[name])
        msg = '\'{0}\' object has no attribute \'{1}\''
        raise AttributeError(msg.format(self.__class__.__name__, name))
    def __setattr__(self, name, value):
        # Forbid rebinding attributes that would shadow schema keys.
        if name in self.__dict__.get('_schema', {}):
            raise AttributeError('can\'t set attribute')
        super(self.__class__, self).__setattr__(name, value)
    def __dir__(self):
        # Advertise schema keys alongside regular instance attributes.
        return list(self.__dict__.keys()) + list(self._schema.keys())
|
{
"content_hash": "18ab681ca23235ed2f70149b17063bd9",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 77,
"avg_line_length": 33.36585365853659,
"alnum_prop": 0.5869883040935673,
"repo_name": "okfn/datapackage-validate-py",
"id": "6ada4b7aea676d4095a5ed15926c996876e279b2",
"size": "4104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datapackage_validate/schema.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
This module is largely inspired by django-rest-framework settings.
Settings for the OAuth2 Provider are all namespaced in the OAUTH2_PROVIDER setting.
For example your project's `settings.py` file might look like this:
OAUTH2_PROVIDER = {
'CLIENT_ID_GENERATOR_CLASS':
'oauth2_provider.generators.ClientIdGenerator',
'CLIENT_SECRET_GENERATOR_CLASS':
'oauth2_provider.generators.ClientSecretGenerator',
}
This module provides the `oauth2_settings` object, that is used to access
OAuth2 Provider settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
import six
from django.conf import settings
try:
# Available in Python 2.7+
import importlib
except ImportError:
from django.utils import importlib
# Overrides supplied by the host project via the OAUTH2_PROVIDER setting.
USER_SETTINGS = getattr(settings, 'OAUTH2_PROVIDER', None)
# Fallback values used when a key is absent from USER_SETTINGS.
DEFAULTS = {
    'CLIENT_ID_GENERATOR_CLASS': 'oauth2_provider.generators.ClientIdGenerator',
    'CLIENT_SECRET_GENERATOR_CLASS': 'oauth2_provider.generators.ClientSecretGenerator',
    'CLIENT_SECRET_GENERATOR_LENGTH': 128,
    'OAUTH2_VALIDATOR_CLASS': 'oauth2_provider.oauth2_validators.OAuth2Validator',
    'SCOPES': {"read": "Reading scope", "write": "Writing scope"},
    'READ_SCOPE': 'read',
    'WRITE_SCOPE': 'write',
    'AUTHORIZATION_CODE_EXPIRE_SECONDS': 60,
    'ACCESS_TOKEN_EXPIRE_SECONDS': 36000,
    'APPLICATION_MODEL': getattr(settings, 'OAUTH2_PROVIDER_APPLICATION_MODEL', 'oauth2_provider.Application'),
    'REQUEST_APPROVAL_PROMPT': 'force',
    'ALLOWED_REDIRECT_URI_SCHEMES': ['http', 'https'],
    # Special settings that will be evaluated at runtime
    # ('_SCOPES' is derived from SCOPES inside OAuth2ProviderSettings).
    '_SCOPES': [],
}
# List of settings that cannot be empty
MANDATORY = (
    'CLIENT_ID_GENERATOR_CLASS',
    'CLIENT_SECRET_GENERATOR_CLASS',
    'OAUTH2_VALIDATOR_CLASS',
    'SCOPES',
    'ALLOWED_REDIRECT_URI_SCHEMES',
)
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
    'CLIENT_ID_GENERATOR_CLASS',
    'CLIENT_SECRET_GENERATOR_CLASS',
    'OAUTH2_VALIDATOR_CLASS',
)
def perform_import(val, setting_name):
    """
    Resolve a setting value given in string import notation.

    A string is imported as a single dotted path; lists and tuples are
    resolved element-wise. Anything else is returned untouched.
    """
    if isinstance(val, (list, tuple)):
        return [import_from_string(item, setting_name) for item in val]
    if isinstance(val, six.string_types):
        return import_from_string(val, setting_name)
    return val
def import_from_string(val, setting_name):
    """
    Attempt to import a class from a string representation.
    """
    # Everything before the last dot is the module; the last piece
    # is the attribute to fetch from it.
    module_path, _, class_name = val.rpartition('.')
    try:
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except ImportError as e:
        msg = "Could not import '%s' for setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e)
        raise ImportError(msg)
class OAuth2ProviderSettings(object):
    """
    Attribute-style accessor for OAuth2 Provider settings.

    Lookups consult the user-supplied settings first and fall back to
    the defaults. Values named in ``import_strings`` are resolved from
    dotted import paths to actual classes before being returned, and
    results are cached on the instance.
    """
    def __init__(self, user_settings=None, defaults=None, import_strings=None, mandatory=None):
        self.user_settings = user_settings if user_settings else {}
        self.defaults = defaults if defaults else {}
        self.import_strings = import_strings if import_strings else ()
        self.mandatory = mandatory if mandatory else ()
    def __getattr__(self, attr):
        # Only names declared in the defaults are valid settings.
        if attr not in self.defaults.keys():
            raise AttributeError("Invalid OAuth2Provider setting: '%s'" % attr)
        # User-provided value wins; otherwise use the default.
        if attr in self.user_settings:
            value = self.user_settings[attr]
        else:
            value = self.defaults[attr]
        # Coerce dotted-path strings into the classes they name.
        if value and attr in self.import_strings:
            value = perform_import(value, attr)
        # '_SCOPES' is computed from SCOPES rather than configured.
        if attr == '_SCOPES':
            value = list(six.iterkeys(self.SCOPES))
        self.validate_setting(attr, value)
        # Cache on the instance so __getattr__ is not invoked again.
        setattr(self, attr, value)
        return value
    def validate_setting(self, attr, val):
        # Mandatory settings must be truthy.
        if attr in self.mandatory and not val:
            raise AttributeError("OAuth2Provider setting: '%s' is mandatory" % attr)
# Module-level singleton used throughout the app; each setting is
# resolved lazily on first attribute access and then cached.
oauth2_settings = OAuth2ProviderSettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS, MANDATORY)
|
{
"content_hash": "39ad38abae9b05a4600355a5fc70085f",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 111,
"avg_line_length": 32.8978102189781,
"alnum_prop": 0.671400044375416,
"repo_name": "ramcn/demo3",
"id": "fa7eef1355db378ecec5cd875ac6b426afeeaa94",
"size": "4507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python3.4/site-packages/oauth2_provider/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "330662"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Groff",
"bytes": "7"
},
{
"name": "HTML",
"bytes": "252755"
},
{
"name": "JavaScript",
"bytes": "136464"
},
{
"name": "Python",
"bytes": "11000226"
},
{
"name": "Shell",
"bytes": "3753"
}
],
"symlink_target": ""
}
|
__author__ = 'luckydonald'
from pytg.receiver import Receiver
from pytg.utils import coroutine
@coroutine
def example_function(receiver):
    # Coroutine sink: the receiver pushes each incoming message in via
    # .send(); it arrives here as the value of the bare `yield`.
    try:
        while True:
            msg = (yield)
            print('Full dump: {array}'.format(array=str( msg )))
    except KeyboardInterrupt:
        # Ctrl+C: shut the receiver down cleanly before leaving.
        receiver.stop()
        print("Exiting")
if __name__ == '__main__':
    # Demo entry point: connect to a running telegram-cli on port 4458
    # and dump every incoming message until interrupted.
    receiver = Receiver(port=4458) # get a Receiver Connector instance
    receiver.start() # start the Connector.
    receiver.message(example_function(receiver)) # add "example_function" function as listeners. You can supply arguments here (like receiver).
    # continues here, after exiting while loop in example_function()
    receiver.stop()
|
{
"content_hash": "cae8c6976006419ad8865ae29565514a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 140,
"avg_line_length": 32.75,
"alnum_prop": 0.732824427480916,
"repo_name": "kalloc/pytg",
"id": "1ecfb7a81771871527c93c361e1812de1f188670",
"size": "679",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/dump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64348"
}
],
"symlink_target": ""
}
|
"""The share snapshots api."""
from oslo_log import log
import six
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import wsgi
from manila.api.v1 import share_snapshots
from manila.api.views import share_snapshots as snapshot_views
from manila.common import constants
from manila import exception
from manila.i18n import _, _LI
from manila import share
LOG = log.getLogger(__name__)
class ShareSnapshotsController(share_snapshots.ShareSnapshotMixin,
                               wsgi.Controller, wsgi.AdminActionsMixin):
    """The Share Snapshots API V2 controller for the OpenStack API."""
    resource_name = 'share_snapshot'
    _view_builder_class = snapshot_views.ViewBuilder
    def __init__(self):
        super(ShareSnapshotsController, self).__init__()
        self.share_api = share.API()
    @wsgi.Controller.authorize('unmanage_snapshot')
    def _unmanage(self, req, id, body=None):
        """Unmanage a share snapshot."""
        context = req.environ['manila.context']
        LOG.info(_LI("Unmanage share snapshot with id: %s."), id)
        try:
            snapshot = self.share_api.get_snapshot(context, id)
            # NOTE: this local deliberately shadows the module-level
            # `share` import for the rest of the method.
            share = self.share_api.get(context, snapshot['share_id'])
            # Unmanage is refused for share-server-backed shares,
            # replicated shares, and snapshots in transitional states.
            if share.get('share_server_id'):
                msg = _("Operation 'unmanage_snapshot' is not supported for "
                        "snapshots of shares that are created with share"
                        " servers (created with share-networks).")
                raise exc.HTTPForbidden(explanation=msg)
            elif share.get('has_replicas'):
                msg = _("Share %s has replicas. Snapshots of this share "
                        "cannot currently be unmanaged until all replicas "
                        "are removed.") % share['id']
                raise exc.HTTPConflict(explanation=msg)
            elif snapshot['status'] in constants.TRANSITIONAL_STATUSES:
                msg = _("Snapshot with transitional state cannot be "
                        "unmanaged. Snapshot '%(s_id)s' is in '%(state)s' "
                        "state.") % {'state': snapshot['status'],
                                     's_id': snapshot['id']}
                raise exc.HTTPForbidden(explanation=msg)
            self.share_api.unmanage_snapshot(context, snapshot, share['host'])
        except (exception.ShareSnapshotNotFound, exception.ShareNotFound) as e:
            raise exc.HTTPNotFound(explanation=six.text_type(e))
        # 202 Accepted: unmanage proceeds asynchronously.
        return webob.Response(status_int=202)
    @wsgi.Controller.authorize('manage_snapshot')
    def _manage(self, req, body):
        """Instruct Manila to manage an existing snapshot.
        Required HTTP Body:
        {
            "snapshot":
            {
                "share_id": <Manila share id>,
                "provider_location": <A string parameter that identifies the
                                      snapshot on the backend>
            }
        }
        Optional elements in 'snapshot' are:
            name            A name for the new snapshot.
            description     A description for the new snapshot.
            driver_options  Driver specific dicts for the existing snapshot.
        """
        context = req.environ['manila.context']
        snapshot_data = self._validate_manage_parameters(context, body)
        # NOTE(vponomaryov): compatibility actions are required between API and
        # DB layers for 'name' and 'description' API params that are
        # represented in DB as 'display_name' and 'display_description'
        # appropriately.
        name = snapshot_data.get('display_name',
                                 snapshot_data.get('name'))
        description = snapshot_data.get(
            'display_description', snapshot_data.get('description'))
        snapshot = {
            'share_id': snapshot_data['share_id'],
            'provider_location': snapshot_data['provider_location'],
            'display_name': name,
            'display_description': description,
        }
        driver_options = snapshot_data.get('driver_options', {})
        try:
            snapshot_ref = self.share_api.manage_snapshot(context, snapshot,
                                                          driver_options)
        except (exception.ShareNotFound, exception.ShareSnapshotNotFound) as e:
            raise exc.HTTPNotFound(explanation=six.text_type(e))
        except (exception.InvalidShare,
                exception.ManageInvalidShareSnapshot) as e:
            raise exc.HTTPConflict(explanation=six.text_type(e))
        return self._view_builder.detail(req, snapshot_ref)
    def _validate_manage_parameters(self, context, body):
        # Reject requests without a 'snapshot' wrapper object.
        if not (body and self.is_valid_body(body, 'snapshot')):
            msg = _("Snapshot entity not found in request body.")
            raise exc.HTTPUnprocessableEntity(explanation=msg)
        data = body['snapshot']
        required_parameters = ('share_id', 'provider_location')
        self._validate_parameters(data, required_parameters)
        return data
    def _validate_parameters(self, data, required_parameters,
                             fix_response=False):
        # fix_response=True emits 400 (Bad Request) instead of the
        # historical 422 (Unprocessable Entity) on validation errors.
        if fix_response:
            exc_response = exc.HTTPBadRequest
        else:
            exc_response = exc.HTTPUnprocessableEntity
        for parameter in required_parameters:
            if parameter not in data:
                msg = _("Required parameter %s not found.") % parameter
                raise exc_response(explanation=msg)
            if not data.get(parameter):
                msg = _("Required parameter %s is empty.") % parameter
                raise exc_response(explanation=msg)
    def _allow(self, req, id, body):
        # Grant an access rule on a mountable snapshot.
        context = req.environ['manila.context']
        if not (body and self.is_valid_body(body, 'allow_access')):
            msg = _("Access data not found in request body.")
            raise exc.HTTPBadRequest(explanation=msg)
        access_data = body.get('allow_access')
        required_parameters = ('access_type', 'access_to')
        self._validate_parameters(access_data, required_parameters,
                                  fix_response=True)
        access_type = access_data['access_type']
        access_to = access_data['access_to']
        common.validate_access(access_type=access_type, access_to=access_to)
        snapshot = self.share_api.get_snapshot(context, id)
        self._check_mount_snapshot_support(context, snapshot)
        try:
            access = self.share_api.snapshot_allow_access(
                context, snapshot, access_type, access_to)
        except exception.ShareSnapshotAccessExists as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        return self._view_builder.detail_access(req, access)
    def _deny(self, req, id, body):
        # Revoke an access rule previously granted on a snapshot.
        context = req.environ['manila.context']
        if not (body and self.is_valid_body(body, 'deny_access')):
            msg = _("Access data not found in request body.")
            raise exc.HTTPBadRequest(explanation=msg)
        access_data = body.get('deny_access')
        self._validate_parameters(
            access_data, ('access_id',), fix_response=True)
        access_id = access_data['access_id']
        snapshot = self.share_api.get_snapshot(context, id)
        self._check_mount_snapshot_support(context, snapshot)
        access = self.share_api.snapshot_access_get(context, access_id)
        # Guard against revoking a rule that belongs to another snapshot.
        if access['share_snapshot_id'] != snapshot['id']:
            msg = _("Access rule provided is not associated with given"
                    " snapshot.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        self.share_api.snapshot_deny_access(context, snapshot, access)
        return webob.Response(status_int=202)
    def _check_mount_snapshot_support(self, context, snapshot):
        # Access control only makes sense when the parent share allows
        # its snapshots to be mounted.
        share = self.share_api.get(context, snapshot['share_id'])
        if not share['mount_snapshot_support']:
            msg = _("Cannot control access to the snapshot %(snap)s since the "
                    "parent share %(share)s does not support mounting its "
                    "snapshots.") % {'snap': snapshot['id'],
                                     'share': share['id']}
            raise exc.HTTPBadRequest(explanation=msg)
    def _access_list(self, req, snapshot_id):
        context = req.environ['manila.context']
        snapshot = self.share_api.get_snapshot(context, snapshot_id)
        self._check_mount_snapshot_support(context, snapshot)
        access_list = self.share_api.snapshot_access_get_all(context, snapshot)
        return self._view_builder.detail_list_access(req, access_list)
    # Versioned action wrappers below: pre-2.7 microversions use the
    # legacy 'os-' action names; newer microversions drop the prefix.
    @wsgi.Controller.api_version('2.0', '2.6')
    @wsgi.action('os-reset_status')
    def snapshot_reset_status_legacy(self, req, id, body):
        return self._reset_status(req, id, body)
    @wsgi.Controller.api_version('2.7')
    @wsgi.action('reset_status')
    def snapshot_reset_status(self, req, id, body):
        return self._reset_status(req, id, body)
    @wsgi.Controller.api_version('2.0', '2.6')
    @wsgi.action('os-force_delete')
    def snapshot_force_delete_legacy(self, req, id, body):
        return self._force_delete(req, id, body)
    @wsgi.Controller.api_version('2.7')
    @wsgi.action('force_delete')
    def snapshot_force_delete(self, req, id, body):
        return self._force_delete(req, id, body)
    @wsgi.Controller.api_version('2.12')
    @wsgi.response(202)
    def manage(self, req, body):
        return self._manage(req, body)
    @wsgi.Controller.api_version('2.12')
    @wsgi.action('unmanage')
    def unmanage(self, req, id, body=None):
        return self._unmanage(req, id, body)
    @wsgi.Controller.api_version('2.32')
    @wsgi.action('allow_access')
    @wsgi.response(202)
    @wsgi.Controller.authorize
    def allow_access(self, req, id, body=None):
        return self._allow(req, id, body)
    @wsgi.Controller.api_version('2.32')
    @wsgi.action('deny_access')
    @wsgi.Controller.authorize
    def deny_access(self, req, id, body=None):
        return self._deny(req, id, body)
    @wsgi.Controller.api_version('2.32')
    @wsgi.Controller.authorize
    def access_list(self, req, snapshot_id):
        return self._access_list(req, snapshot_id)
def create_resource():
    """Build the WSGI resource wrapping the snapshots controller."""
    controller = ShareSnapshotsController()
    return wsgi.Resource(controller)
|
{
"content_hash": "77094c2840b2a56027737507af95b0cf",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 79,
"avg_line_length": 38.35793357933579,
"alnum_prop": 0.6096200096200096,
"repo_name": "vponomaryov/manila",
"id": "257469020c0a9a7f1805702655bf182a8c0b82d0",
"size": "11051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/api/v2/share_snapshots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9697997"
},
{
"name": "Shell",
"bytes": "103800"
}
],
"symlink_target": ""
}
|
"""
Vanilla product models
"""
from oscar.apps.catalogue.abstract_models import * # noqa
class ProductClass(AbstractProductClass):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class Category(AbstractCategory):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class ProductCategory(AbstractProductCategory):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class Product(AbstractProduct):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class ContributorRole(AbstractContributorRole):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class Contributor(AbstractContributor):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class ProductContributor(AbstractProductContributor):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class ProductAttribute(AbstractProductAttribute):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class ProductAttributeValue(AbstractProductAttributeValue):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class AttributeOptionGroup(AbstractAttributeOptionGroup):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class AttributeOption(AbstractAttributeOption):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class AttributeEntity(AbstractAttributeEntity):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class AttributeEntityType(AbstractAttributeEntityType):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class Option(AbstractOption):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
class ProductImage(AbstractProductImage):
    # Vanilla concrete model: inherits everything from Oscar's abstract base.
    pass
from .receivers import * # noqa
|
{
"content_hash": "4755a60e96d909acad2fdbe7c035604e",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 59,
"avg_line_length": 14.833333333333334,
"alnum_prop": 0.780388151174668,
"repo_name": "Idematica/django-oscar",
"id": "e5e40f636519bb1223a8967182b4011fc41852c4",
"size": "979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/apps/catalogue/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1099824"
},
{
"name": "JavaScript",
"bytes": "818932"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "4079718"
},
{
"name": "Shell",
"bytes": "5760"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
'''
This module provides one useful class: NodeHandler
The NodeHandler class is designed to be subclassed. Each subclass
should support the processing that createpdf.RstToPdf needs to do
on a particular type of node that could appear in a document tree.
When the subclass is defined, it should reference NodeHandler as
the first base class, and one or more docutils node classes as
subsequent base classes.
These docutils node classes will not actually wind up in the
base classes of the subclass. Instead, they will be used as
keys in a dispatch dictionary which is used to find the correct
NodeHandler subclass to use to process an instance of a given
docutils node class.
When an instance of createpdf.RstToPdf is created, a NodeHandler
instance will be called to return dispatchers for gather_elements
and gather_pdftext, wrapped up as methods of the createpdf.RstToPdf
class.
When a dispatcher is called, it will dispatch to the correct subclass
to handle the given docutils node instance.
If no NodeHandler subclass has been created to handle that particular
type of docutils node, then default processing will occur and a warning
will be logged.
'''
import types
import inspect
from log import log, nodeid
from smartypants import smartyPants
import docutils.nodes
from flowables import BoundByWidth, TocEntry
class MetaHelper(type):
    ''' MetaHelper is designed to generically enable a few of the benefits of
        using metaclasses by encapsulating some of the complexity of setting
        them up.
        If a base class uses MetaHelper (by assigning __metaclass__ = MetaHelper),
        then that class (and its metaclass inheriting subclasses) can control
        class creation behavior by defining a couple of helper functions.
        1) A base class can define a _classpreinit function.  This function
           is called during __new__ processing of the class object itself,
           but only during subclass creation (not when the class defining
           the _classpreinit is itself created).
           The subclass object does not yet exist at the time _classpreinit
           is called.  _classpreinit accepts all the parameters of the
           __new__ function for the class itself (not the same as the __new__
           function for the instantiation of class objects!) and must return
           a tuple of the same objects.  A typical use of this would be to
           modify the class bases before class creation.
        2) Either a base class or a subclass can define a _classinit() function.
           This function will be called immediately after the actual class has
           been created, and can do whatever setup is required for the class.
           Note that every base class (but not every subclass) which uses
           MetaHelper MUST define _classinit, even if that definition is None.
        MetaHelper also places an attribute into each class created with it.
        _baseclass is set to None if this class has no superclasses which
        also use MetaHelper, or to the first such MetaHelper-using baseclass.
        _baseclass can be explicitly set inside the class definition, in
        which case MetaHelper will not override it.
    '''
    def __new__(clstype, name, bases, clsdict):
        # Our base class is the first base in the class definition which
        # uses MetaHelper, or None if no such base exists.
        base = ([x for x in bases if type(x) is MetaHelper] + [None])[0]
        # Only set our base into the class if it has not been explicitly
        # set
        clsdict.setdefault('_baseclass', base)
        # See if the base class defined a preinit function, and call it
        # if so.  It may rewrite the class type, name, bases and dict.
        preinit = getattr(base, '_classpreinit', None)
        if preinit is not None:
            clstype, name, bases, clsdict = preinit(clstype, name, bases, clsdict)
        # Delegate the real work to type
        return type.__new__(clstype, name, bases, clsdict)
    def __init__(cls, name, bases, clsdict):
        # Let type build the class for us
        type.__init__(cls, name, bases, clsdict)
        # Call the class's initialization function if defined
        # (runs once per created class, right after creation).
        if cls._classinit is not None:
            cls._classinit()
class NodeHandler(object):
''' NodeHandler classes are used to dispatch
to the correct class to handle some node class
type, via a dispatchdict in the main class.
'''
__metaclass__ = MetaHelper
    @classmethod
    def _classpreinit(baseclass, clstype, name, bases, clsdict):
        # _classpreinit is called before the actual class is built
        # Perform triage on the class bases to separate actual
        # inheritable bases from the target docutils node classes
        # which we want to dispatch for.
        new_bases = []
        targets = []
        for target in bases:
            if target is not object:
                # Bool-index trick: issubclass(...) is False (0) for a
                # docutils node class -> appended to `targets`; True (1)
                # for a real NodeHandler ancestor -> kept in `new_bases`.
                (targets, new_bases)[issubclass(target, NodeHandler)].append(target)
        clsdict['_targets'] = targets
        return clstype, name, tuple(new_bases), clsdict
    @classmethod
    def _classinit(cls):
        # _classinit() is called once the subclass has actually
        # been created.
        # For the base class, just add a dispatch dictionary
        if cls._baseclass is None:
            cls.dispatchdict = {}
            return
        # for subclasses, instantiate them, and then add
        # the class to the dispatch dictionary for each of its targets.
        self = cls()
        for target in cls._targets:
            # setdefault returning a different instance means another
            # handler already claimed this target; log and override.
            if cls.dispatchdict.setdefault(target, self) is not self:
                t = repr(target)
                old = repr(cls.dispatchdict[target])
                new = repr(self)
                log.debug('Dispatch handler %s for node type %s overridden by %s' %
                    (old, t, new))
                cls.dispatchdict[target] = self
@staticmethod
def getclassname(obj):
cln = repr(obj.__class__)
info = cln.split("'")
if len(info) == 3:
return info[1]
return cln
def log_unknown(self, node, during):
if not hasattr(self, 'unkn_node'):
self.unkn_node = set()
cln=self.getclassname(node)
if not cln in self.unkn_node:
self.unkn_node.add(cln)
log.warning("Unkn. node (self.%s): %s [%s]",
during, cln, nodeid(node))
try:
log.debug(node)
except (UnicodeDecodeError, UnicodeEncodeError):
log.debug(repr(node))
def findsubclass(self, node, during):
handlerinfo = '%s.%s' % (self.getclassname(self), during)
log.debug("%s: %s", handlerinfo, self.getclassname(node))
log.debug("%s: [%s]", handlerinfo, nodeid(node))
try:
log.debug("%s: %s", handlerinfo, node)
except (UnicodeDecodeError, UnicodeEncodeError):
log.debug("%s: %r", handlerninfo, node)
log.debug("")
# Dispatch to the first matching class in the MRO
dispatchdict = self.dispatchdict
for baseclass in inspect.getmro(node.__class__):
result = dispatchdict.get(baseclass)
if result is not None:
break
else:
self.log_unknown(node, during)
result = self
return result
def __call__(self, client):
''' Get the dispatchers, wrapped up as methods for the client'''
textdispatch = types.MethodType(self.textdispatch, client)
elemdispatch = types.MethodType(self.elemdispatch, client)
return textdispatch, elemdispatch
# This overridable attribute will be set true in the instance
# if handling a sphinx document
sphinxmode = False
# Begin overridable attributes and methods for elemdispatch
def gather_elements(self, client, node, style):
return client.gather_elements(node, style=style)
def getstyle(self, client, node, style):
    """Resolve the stylesheet style to use for *node*.

    The node's first docutils class (if any, and if known to the
    stylesheet) overrides the incoming *style*; otherwise the stylesheet's
    per-node-type lookup decides.
    """
    try:
        if node['classes'] and node['classes'][0]:
            # FIXME: Supports only one class, sorry ;-)
            # BUG FIX: dict.has_key() was removed in Python 3; the ``in``
            # operator is equivalent and works on Python 2 as well.
            if node['classes'][0] in client.styles.StyleSheet:
                style = client.styles[node['classes'][0]]
            else:
                log.info("Unknown class %s, ignoring. [%s]",
                         node['classes'][0], nodeid(node))
    except TypeError:  # Happens when a docutils.node.Text reaches here
        pass
    if style is None or style == client.styles['bodytext']:
        style = client.styles.styleForNode(node)
    return style
def getelements(self, client, node, style):
    """Resolve the node's style, gather its elements, honour any width.

    Styles that declare a ``width`` get their content wrapped so it is
    shrunk to fit inside that width.
    """
    resolved = self.getstyle(client, node, style)
    elements = self.gather_elements(client, node, resolved)
    if 'width' in resolved.__dict__:
        elements = [BoundByWidth(resolved.width,
                                 elements, resolved, mode="shrink")]
    return elements
# End overridable attributes and methods for elemdispatch
def elemdispatch(self, client, node, style=None):
    # Dispatch flowable generation for *node*: swap ``self`` for the
    # handler registered for the node's class, record pending anchor
    # targets, then build, cache on the node, and return the elements.
    self = self.findsubclass(node, 'elemdispatch')
    # set anchors for internal references
    try:
        for i in node['ids']:
            client.pending_targets.append(i)
    except TypeError:  # Happens with docutils.node.Text
        pass
    elements = self.getelements(client, node, style)
    # When line-level PDF debugging is enabled, prepend a TOC entry that
    # records the node's source line number.
    if node.line and client.debugLinesPdf:
        elements.insert(0, TocEntry(client.depth-1, 'LINE-%s' % node.line))
    node.elements = elements
    return elements
# Begin overridable attributes and methods for textdispatch
pre = ''
post = ''
def get_pre_post(self, client, node, replaceEnt):
    """Return the (prefix, suffix) markup wrapped around the node text."""
    return (self.pre, self.post)
def get_text(self, client, node, replaceEnt):
    """Default text gatherer: delegate straight to the client."""
    text = client.gather_pdftext(node)
    return text
def apply_smartypants(self, text, smarty, node):
    """Run smartypants on *text*, but only for prose-like node types."""
    # Try to be clever about when to use smartypants: paragraphs, block
    # quotes and titles are prose; everything else is passed through.
    prose_nodes = (docutils.nodes.paragraph,
                   docutils.nodes.block_quote,
                   docutils.nodes.title)
    if node.__class__ in prose_nodes:
        return smartyPants(text, smarty)
    return text
# End overridable attributes and methods for textdispatch
def textdispatch(self, client, node, replaceEnt=True):
    # Dispatch inline-text generation: swap ``self`` for the handler
    # registered for the node's class, wrap the gathered text in the
    # handler's pre/post markup, and cache the result on the node.
    self = self.findsubclass(node, 'textdispatch')
    pre, post = self.get_pre_post(client, node, replaceEnt)
    text = self.get_text(client, node, replaceEnt)
    text = pre + text + post
    try:
        log.debug("%s.textdispatch: %s" % (self.getclassname(self), text))
    except UnicodeDecodeError:
        pass
    # Smart quotes/dashes are applied only for prose-like node types.
    text = self.apply_smartypants(text, client.smarty, node)
    node.pdftext = text
    return text
|
{
"content_hash": "7a8d3075e5743f6bfb9d1917754f23ce",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 84,
"avg_line_length": 39.47017543859649,
"alnum_prop": 0.6204995999644413,
"repo_name": "ddd332/presto",
"id": "2aaa0526bddd17fa559142017f707bb0839634fc",
"size": "11471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presto-docs/target/sphinx/rst2pdf/basenodehandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "130017"
},
{
"name": "GAP",
"bytes": "41169"
},
{
"name": "Java",
"bytes": "6836515"
},
{
"name": "JavaScript",
"bytes": "135954"
},
{
"name": "Python",
"bytes": "8056702"
},
{
"name": "TeX",
"bytes": "55016"
}
],
"symlink_target": ""
}
|
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
    """HTMLParser subclass that echoes every tag it encounters.

    Start and empty (self-closing) tags are printed together with their
    attribute name/value pairs; end tags are printed by name only.
    """

    def handle_starttag(self, tag, attrs):
        print('Start :', tag)
        for name, value in attrs:
            print('->', name, '>', value)

    def handle_endtag(self, tag):
        print('End :', tag)

    def handle_startendtag(self, tag, attrs):
        print('Empty :', tag)
        for name, value in attrs:
            print('->', name, '>', value)
# Read the line count from stdin, then feed the remaining lines
# (stripped and concatenated) to the parser in a single pass.
MyParser = MyHTMLParser()
MyParser.feed(''.join([input().strip() for _ in range(int(input()))]))
|
{
"content_hash": "3ae6266253365c1590499a045551294a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 32.8235294117647,
"alnum_prop": 0.525089605734767,
"repo_name": "nifannn/HackerRank",
"id": "dce79df1e2155076549d87e4fb2b0b0e47649565",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Practice/Python/Regex_and_Parsing/html_parser1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "28121"
},
{
"name": "PLpgSQL",
"bytes": "194"
},
{
"name": "Python",
"bytes": "28357"
},
{
"name": "Shell",
"bytes": "3568"
}
],
"symlink_target": ""
}
|
import argparse
from barbicanclient import client
IDENTITY = 'https://identity.api.rackspacecloud.com/v2.0'
ENDPOINT = 'https://barbican.api.rackspacecloud.com/v1/'
def connect(username, password, tenant, endpoint):
    """Open an authenticated barbican client connection.

    Authenticates against the module-level IDENTITY keystone endpoint and
    talks to the given barbican *endpoint*.
    """
    return client.Connection(
        IDENTITY,
        username,
        password,
        tenant,
        endpoint=endpoint,
    )
def parse_args(argv=None):
    """Parse command-line options for the delete-secret test script.

    Args:
        argv: optional argument list; defaults to ``sys.argv[1:]`` (the
            default argparse behavior), so existing callers are unaffected.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(
        description='Testing code for deleting barbican secret.'
    )
    # BUG FIX: every help string below read "used for for authentication".
    parser.add_argument(
        '--username',
        help='The keystone username used for authentication'
    )
    parser.add_argument(
        '--password',
        help='The keystone password used for authentication'
    )
    parser.add_argument(
        '--tenant',
        help='The keystone tenant used for authentication'
    )
    parser.add_argument(
        '--keystone',
        default=IDENTITY,
        help='The keystone endpoint used for authentication'
    )
    parser.add_argument(
        '--endpoint',
        default=ENDPOINT,
        help='The barbican endpoint to test against'
    )
    parser.add_argument(
        '--secret-id',
        default=None,
        help='ID of secret'
    )
    parser.add_argument(
        '--secret-href',
        default=None,
        help='href of secret'
    )
    args = parser.parse_args(argv)
    return args
if __name__ == '__main__':
    # Delete by secret id when given, otherwise fall back to the href.
    args = parse_args()
    conn = connect(args.username, args.password, args.tenant, args.endpoint)
    if args.secret_id is not None:
        conn.delete_secret_by_id(args.secret_id)
    else:
        conn.delete_secret(args.secret_href)
|
{
"content_hash": "0cf8dba83d1b4a843986b3ded9f1064c",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 76,
"avg_line_length": 27.046153846153846,
"alnum_prop": 0.5819112627986348,
"repo_name": "wyllys66/python-barbicanclient",
"id": "7aefee8fba4aa891c0ed213411ed3862b902e3b2",
"size": "1758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/delete_secret.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "145662"
}
],
"symlink_target": ""
}
|
import pymongo # setup: sudo pip install pymongo
from ConfigParser import ConfigParser
import os.path
import sys
# Build a MongoDB connection for the swift_usage database, with settings
# optionally overridden by /etc/swift/swift-usage.conf.
# (Python 2 script: uses the old ConfigParser module name.)
conf_file = ""
conf = ConfigParser({ # defaults
        "mongo-db-name":"swift_usage",
        "mongo-db-host":"localhost",
        "mongo-db-port":"27017"
    })
if os.path.exists("/etc/swift/swift-usage.conf"):
    conf_file = "/etc/swift/swift-usage.conf"
conf.read(conf_file)
# load config from the config file if it exists
mongo_db_name = conf.get('DEFAULT', 'mongo-db-name')
mongo_db_host = conf.get('DEFAULT', 'mongo-db-host')
# Port arrives as a string from ConfigParser; pymongo needs an int.
mongo_db_port = int(conf.get('DEFAULT', 'mongo-db-port'))
# NOTE(review): pymongo.Connection was removed in pymongo 3.x in favour of
# MongoClient -- confirm which pymongo version this deployment pins.
db = pymongo.Connection(mongo_db_host, mongo_db_port)[mongo_db_name] # use the swift_usage database
|
{
"content_hash": "9c4d8f401d8d18e372a730ce48e58232",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 99,
"avg_line_length": 31.869565217391305,
"alnum_prop": 0.6671214188267395,
"repo_name": "cloudops/swift_usage",
"id": "45afc7718884f6b2faad6ac1accd50d589f627cb",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swift_usage/utils/db_connect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53920"
}
],
"symlink_target": ""
}
|
"""
Inside the .../moose-examples/CA1PyramidalCell/ directory supplied with MOOSE, run
python testNeuroML_CA1.py
(other channels and morph xml files are already present in this same directory).
The soma name below is hard coded for CA1, else any other file can be used by modifying this script.
"""
import moose
from moose.utils import *
from moose.neuroml.NeuroML import NeuroML
from pylab import *
simdt = 10e-6 # s
plotdt = 10e-6 # s
runtime = 0.2 # s
cells_path = '/cells' # neuromlR.readNeuroMLFromFile creates cells in '/cells'
def loadGran98NeuroML_L123(filename,params):
    """Load a NeuroML model, simulate it under hsolve, and plot soma Vm.

    Python 2 script (print statements).  The soma path below is
    hard-coded for the CA1 model's 'CA1group' population.
    """
    neuromlR = NeuroML()
    populationDict, projectionDict = \
        neuromlR.readNeuroMLFromFile(filename,params=params)
    print "Number of compartments =",\
        len(moose.Neuron(populationDict['CA1group'][1][0].path).children)
    # Record the soma membrane potential into a MOOSE table.
    soma_path = populationDict['CA1group'][1][0].path+'/Seg0_soma_0_0'
    somaVm = setupTable('somaVm',moose.Compartment(soma_path),'Vm')
    #somaCa = setupTable('somaCa',moose.CaConc(soma_path+'/Gran_CaPool_98'),'Ca')
    #somaIKCa = setupTable('somaIKCa',moose.HHChannel(soma_path+'/Gran_KCa_98'),'Gk')
    #KDrX = setupTable('ChanX',moose.HHChannel(soma_path+'/Gran_KDr_98'),'X')
    soma = moose.Compartment(soma_path)
    print "Reinit MOOSE ... "
    resetSim(['/elec','/cells'],simdt,plotdt,simmethod='hsolve') # from moose.utils
    print "Running ... "
    moose.start(runtime)
    # Plot the recorded trace; the first sample is dropped to align the
    # time vector with the table contents.
    tvec = arange(0.0,runtime,simdt)
    plot(tvec,somaVm.vector[1:])
    title('Soma Vm')
    xlabel('time (s)')
    ylabel('Voltage (V)')
    print "Showing plots ..."
    show()
if __name__ == "__main__":
    # BUG FIX: ``sys`` was used below but never imported anywhere in the
    # file's import block.
    import sys
    if len(sys.argv) < 2:
        filename = "CA1soma.net.xml"
        params = {}
    else:
        filename = sys.argv[1]
        params = {}
        if len(sys.argv) > 2:
            # sys.argv[2] should be True or False.
            # BUG FIX: bool(sys.argv[2]) was True for ANY non-empty string,
            # including the literal "False"; parse the flag text instead.
            params = {'combineSegments':
                      sys.argv[2].strip().lower() in ('true', '1', 'yes')}
    loadGran98NeuroML_L123(filename, params)
|
{
"content_hash": "b697235f434a938b67d03d5eb66802e7",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 100,
"avg_line_length": 35.41818181818182,
"alnum_prop": 0.6591375770020534,
"repo_name": "h-mayorquin/camp_india_2016",
"id": "06f437ec27534244d4291f65767de66dc9b98d2d",
"size": "1987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/chemical switches/moose/neuroml/CA1PyramidalCell/CA1_hsolve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "33891"
},
{
"name": "C",
"bytes": "205445"
},
{
"name": "GAP",
"bytes": "71247"
},
{
"name": "Jupyter Notebook",
"bytes": "2211795"
},
{
"name": "OpenEdge ABL",
"bytes": "1723"
},
{
"name": "Python",
"bytes": "251481"
},
{
"name": "Shell",
"bytes": "564"
}
],
"symlink_target": ""
}
|
__author__ = 'ganeshchand'
# This is a comment
print('Hello, World')
|
{
"content_hash": "297d4e5ec6b806a7917960cc0928c4dd",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 23,
"alnum_prop": 0.6666666666666666,
"repo_name": "ganeshchand/python3",
"id": "833330aa3059aa06f05e8a99564150dbd9afb95f",
"size": "69",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/helloWorld.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17939"
}
],
"symlink_target": ""
}
|
from app.admin.auth_view import AuthView
from app.pages.models import ShortLink
class ShortLinkModelView(AuthView):
column_display_pk = True
form_columns = ShortLink.__table__.columns._data.keys()
|
{
"content_hash": "dae29f87544586d74426c5446ca3ea91",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 57,
"avg_line_length": 33.5,
"alnum_prop": 0.7810945273631841,
"repo_name": "nickofbh/kort2",
"id": "bfbb6b9ca074bc8bfdf426ab8935908c390bf03e",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/admin/short_link_model_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58244"
},
{
"name": "HTML",
"bytes": "358042"
},
{
"name": "JavaScript",
"bytes": "4758632"
},
{
"name": "Python",
"bytes": "23278"
}
],
"symlink_target": ""
}
|
from org.apache.helix.participant.statemachine.StateModelFactory import StateModelFactory
class DistClusterControllerStateModelFactory(StateModelFactory):
    """Factory producing DistClusterControllerStateModel instances.

    Translated from the Java generic
    ``StateModelFactory<DistClusterControllerStateModel>``.  Python has no
    generics syntax, so the raw ``<...>`` in the original class header was
    a SyntaxError; the parameterization is recorded here instead.

    Parameters:
        String zkAddr
    """

    def __init__(self, zkAddr):
        self._zkAddr = zkAddr

    def createNewStateModel(self, stateUnitKey):
        """
        Returns DistClusterControllerStateModel
        Parameters:
            stateUnitKey: String
        @Override
        """
        # BUG FIX: the original returned DistClusterControllerStateModel(
        # _zkAddr) -- a bare ``_zkAddr`` is an unbound name and raised
        # NameError; the instance attribute set in __init__ is intended.
        return DistClusterControllerStateModel(self._zkAddr)
|
{
"content_hash": "0b68e231e8d4422ad51c4e7898b499f9",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 97,
"avg_line_length": 19.482758620689655,
"alnum_prop": 0.6761061946902654,
"repo_name": "davzhang/helix-python-binding",
"id": "de38bdefa3749a4f662306f87318eccdf2c6b71f",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "org/apache/helix/participant/DistClusterControllerStateModelFactory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "33558"
},
{
"name": "Python",
"bytes": "760674"
}
],
"symlink_target": ""
}
|
import datetime
import os
import sys
import time
import warnings
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db import connection
from django.db.models import Min, Max
from django.http import HttpRequest
from django.template import Context, RequestContext, Template, TemplateSyntaxError
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.tzinfo import FixedOffset
from django.utils.unittest import skipIf, skipUnless
from .forms import EventForm, EventSplitForm, EventModelForm
from .models import Event, MaybeEvent, Session, SessionEvent, Timestamp, AllDayEvent
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# who don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = FixedOffset(180) # Africa/Nairobi
ICT = FixedOffset(420) # Asia/Bangkok
TZ_SUPPORT = hasattr(time, 'tzset')
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
    """Datetime round-trip and query behavior with USE_TZ = False."""

    def test_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipIfDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt, dt.replace(microsecond=0))

    @skipUnlessDBFeature('supports_timezones')
    def test_aware_datetime_in_local_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    @skipUnlessDBFeature('supports_timezones')
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    # This combination actually never happens.
    @skipUnlessDBFeature('supports_timezones')
    @skipIfDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))

    @skipUnlessDBFeature('supports_timezones')
    @skipIfDBFeature('needs_datetime_string_cast')
    def test_aware_datetime_in_utc(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    # This combination is no longer possible since timezone support
    # was removed from the SQLite backend -- it didn't work.
    @skipUnlessDBFeature('supports_timezones')
    @skipUnlessDBFeature('needs_datetime_string_cast')
    def test_aware_datetime_in_utc_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # django.db.backend.utils.typecast_dt will just drop the
        # timezone, so a round-trip in the database alters the data (!)
        # interpret the naive datetime in local time and you get a wrong value
        self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
        # interpret the naive datetime in original time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=UTC), dt)

    @skipUnlessDBFeature('supports_timezones')
    @skipIfDBFeature('needs_datetime_string_cast')
    def test_aware_datetime_in_other_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    # This combination is no longer possible since timezone support
    # was removed from the SQLite backend -- it didn't work.
    @skipUnlessDBFeature('supports_timezones')
    @skipUnlessDBFeature('needs_datetime_string_cast')
    def test_aware_datetime_in_other_timezone_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # django.db.backend.utils.typecast_dt will just drop the
        # timezone, so a round-trip in the database alters the data (!)
        # interpret the naive datetime in local time and you get a wrong value
        self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
        # interpret the naive datetime in original time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=ICT), dt)

    @skipIfDBFeature('supports_timezones')
    def test_aware_datetime_unsupported(self):
        # BUG FIX: method name was misspelled "unspported".
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        with self.assertRaises(ValueError):
            Event.objects.create(dt=dt)

    def test_auto_now_and_auto_now_add(self):
        now = datetime.datetime.now()
        past = now - datetime.timedelta(seconds=2)
        future = now + datetime.timedelta(seconds=2)
        Timestamp.objects.create()
        ts = Timestamp.objects.get()
        self.assertLess(past, ts.created)
        self.assertLess(past, ts.updated)
        # BUG FIX: the assertion below duplicated ts.updated; the created
        # timestamp needs the same upper bound.
        self.assertGreater(future, ts.created)
        self.assertGreater(future, ts.updated)

    def test_query_filter(self):
        dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
        dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
        Event.objects.create(dt=dt1)
        Event.objects.create(dt=dt2)
        self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)

    def test_query_date_related_filters(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
        self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
        self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)

    def test_query_aggregation(self):
        # Only min and max make sense for datetimes.
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
        result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
        self.assertEqual(result, {
            'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
            'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
        })

    def test_query_annotation(self):
        # Only min and max make sense for datetimes.
        morning = Session.objects.create(name='morning')
        afternoon = Session.objects.create(name='afternoon')
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
        morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
            [morning_min_dt, afternoon_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
            [morning_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
            [afternoon_min_dt],
            transform=lambda d: d.dt)

    def test_query_dates(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
        self.assertQuerysetEqual(Event.objects.dates('dt', 'year'),
            [datetime.datetime(2011, 1, 1)], transform=lambda d: d)
        self.assertQuerysetEqual(Event.objects.dates('dt', 'month'),
            [datetime.datetime(2011, 1, 1)], transform=lambda d: d)
        self.assertQuerysetEqual(Event.objects.dates('dt', 'day'),
            [datetime.datetime(2011, 1, 1)], transform=lambda d: d)

    def test_raw_sql(self):
        # Regression test for #17755
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        event = Event.objects.create(dt=dt)
        self.assertQuerysetEqual(
            Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
            [event],
            transform=lambda d: d)

    def test_filter_date_field_with_aware_datetime(self):
        # Regression test for #17742
        day = datetime.date(2011, 9, 1)
        event = AllDayEvent.objects.create(day=day)
        # This is 2011-09-02T01:30:00+03:00 in EAT
        dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
        self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField received a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField received a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField received a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField received a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.updated)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@skipIf(pytz is None, "this test requires pytz")
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField received a naive datetime"))
def test_query_date_related_filters(self):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC, and aggregation is performed in UTC when
# time zone support is enabled. This test could be changed if the
# implementation is changed to perform the aggregation is local time.
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_dates(self):
# Same comment as in test_query_date_related_filters.
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(Event.objects.dates('dt', 'year'),
[datetime.datetime(2010, 1, 1, tzinfo=UTC),
datetime.datetime(2011, 1, 1, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(Event.objects.dates('dt', 'month'),
[datetime.datetime(2010, 12, 1, tzinfo=UTC),
datetime.datetime(2011, 1, 1, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(Event.objects.dates('dt', 'day'),
[datetime.datetime(2010, 12, 31, tzinfo=UTC),
datetime.datetime(2011, 1, 1, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
event = AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
    # Regression test for #17294: a nullable DateTimeField with no value
    # must come back as None instead of raising during conversion.
    e = MaybeEvent.objects.create()
    self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(TestCase):
    """Round-trip naive and aware Event datetimes through each serializer."""

    # Backend-specific notes:
    # - JSON supports only milliseconds, microseconds will be truncated.
    # - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
    #   but when it loads this representation, it subtracts the offset and
    #   returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
    # Tests are adapted to take these quirks into account.

    def test_naive_datetime(self):
        """A naive datetime survives every serializer unchanged."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)

        # Python serializer keeps the object as-is.
        data = serializers.serialize('python', [Event(dt=dt)])
        self.assertEqual(data[0]['fields']['dt'], dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assertIn('"fields": {"dt": "2011-09-01T13:20:30"}', data)
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assertIn('<field type="DateTimeField" name="dt">2011-09-01T13:20:30</field>', data)
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        # YAML support is optional.
        if 'yaml' in serializers.get_serializer_formats():
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assertIn("- fields: {dt: !!timestamp '2011-09-01 13:20:30'}", data)
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt, dt)

    def test_naive_datetime_with_microsecond(self):
        """Microseconds survive except in JSON, which keeps milliseconds."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assertEqual(data[0]['fields']['dt'], dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assertIn('"fields": {"dt": "2011-09-01T13:20:30.405"}', data)
        obj = next(serializers.deserialize('json', data)).object
        # JSON truncated 405060 us to 405 ms (see class notes).
        self.assertEqual(obj.dt, dt.replace(microsecond=405000))

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assertIn('<field type="DateTimeField" name="dt">2011-09-01T13:20:30.405060</field>', data)
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if 'yaml' in serializers.get_serializer_formats():
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assertIn("- fields: {dt: !!timestamp '2011-09-01 13:20:30.405060'}", data)
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt, dt)

    def test_aware_datetime_with_microsecond(self):
        """An aware datetime with microseconds keeps its UTC offset."""
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assertEqual(data[0]['fields']['dt'], dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assertIn('"fields": {"dt": "2011-09-01T17:20:30.405+07:00"}', data)
        obj = next(serializers.deserialize('json', data)).object
        # JSON truncated microseconds to milliseconds (see class notes).
        self.assertEqual(obj.dt, dt.replace(microsecond=405000))

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assertIn('<field type="DateTimeField" name="dt">2011-09-01T17:20:30.405060+07:00</field>', data)
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if 'yaml' in serializers.get_serializer_formats():
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assertIn("- fields: {dt: !!timestamp '2011-09-01 17:20:30.405060+07:00'}", data)
            obj = next(serializers.deserialize('yaml', data)).object
            # PyYAML returned a naive UTC datetime; reattach tzinfo to compare.
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_utc(self):
        """A UTC-aware datetime serializes with a Z / +00:00 offset."""
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assertEqual(data[0]['fields']['dt'], dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assertIn('"fields": {"dt": "2011-09-01T10:20:30Z"}', data)
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assertIn('<field type="DateTimeField" name="dt">2011-09-01T10:20:30+00:00</field>', data)
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if 'yaml' in serializers.get_serializer_formats():
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assertIn("- fields: {dt: !!timestamp '2011-09-01 10:20:30+00:00'}", data)
            obj = next(serializers.deserialize('yaml', data)).object
            # PyYAML returned a naive UTC datetime; reattach tzinfo to compare.
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_local_timezone(self):
        """An aware datetime in the local (EAT) timezone keeps +03:00."""
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assertEqual(data[0]['fields']['dt'], dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assertIn('"fields": {"dt": "2011-09-01T13:20:30+03:00"}', data)
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assertIn('<field type="DateTimeField" name="dt">2011-09-01T13:20:30+03:00</field>', data)
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if 'yaml' in serializers.get_serializer_formats():
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assertIn("- fields: {dt: !!timestamp '2011-09-01 13:20:30+03:00'}", data)
            obj = next(serializers.deserialize('yaml', data)).object
            # PyYAML returned a naive UTC datetime; reattach tzinfo to compare.
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_other_timezone(self):
        """An aware datetime in a non-local timezone (ICT) keeps +07:00."""
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)

        data = serializers.serialize('python', [Event(dt=dt)])
        self.assertEqual(data[0]['fields']['dt'], dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('json', [Event(dt=dt)])
        self.assertIn('"fields": {"dt": "2011-09-01T17:20:30+07:00"}', data)
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)

        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assertIn('<field type="DateTimeField" name="dt">2011-09-01T17:20:30+07:00</field>', data)
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)

        if 'yaml' in serializers.get_serializer_formats():
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assertIn("- fields: {dt: !!timestamp '2011-09-01 17:20:30+07:00'}", data)
            obj = next(serializers.deserialize('yaml', data)).object
            # PyYAML returned a naive UTC datetime; reattach tzinfo to compare.
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
    """Tests for the ``tz`` template library and timezone-aware rendering."""

    @requires_tz_support
    def test_localtime_templatetag_and_filters(self):
        """
        Test the {% localtime %} templatetag and related filters.
        """
        # One input datetime per flavor: UTC-aware, local-aware (EAT),
        # other-aware (ICT), and naive.
        datetimes = {
            'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
            'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
            'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
            'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
        }
        # Same filter pipeline rendered outside any tag, inside a bare
        # {% localtime %}, and with explicit on/off arguments.
        templates = {
            'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
            'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
            'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
            'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
        }

        # Transform a list of keys in 'datetimes' to the expected template
        # output. This makes the definition of 'results' more readable.
        def t(*result):
            return '|'.join(datetimes[key].isoformat() for key in result)

        # Results for USE_TZ = True
        results = {
            'utc': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('utc', 'eat', 'utc', 'ict'),
            },
            'eat': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('eat', 'eat', 'utc', 'ict'),
            },
            'ict': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('ict', 'eat', 'utc', 'ict'),
            },
            'naive': {
                'notag': t('naive', 'eat', 'utc', 'ict'),
                'noarg': t('naive', 'eat', 'utc', 'ict'),
                'on': t('naive', 'eat', 'utc', 'ict'),
                'off': t('naive', 'eat', 'utc', 'ict'),
            }
        }

        for k1, dt in datetimes.iteritems():
            for k2, tpl in templates.iteritems():
                ctx = Context({'dt': dt, 'ICT': ICT})
                actual = tpl.render(ctx)
                expected = results[k1][k2]
                self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))

        # Changes for USE_TZ = False
        # Only the bare {{ dt }} rendering differs: without USE_TZ aware
        # values are no longer converted to local time.
        results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
        results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')

        with self.settings(USE_TZ=False):
            for k1, dt in datetimes.iteritems():
                for k2, tpl in templates.iteritems():
                    ctx = Context({'dt': dt, 'ICT': ICT})
                    actual = tpl.render(ctx)
                    expected = results[k1][k2]
                    self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))

    @skipIf(pytz is None, "this test requires pytz")
    def test_localtime_filters_with_pytz(self):
        """
        Test the |localtime, |utc, and |timezone filters with pytz.
        """
        # Use a pytz timezone as local time
        tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})

        with self.settings(TIME_ZONE='Europe/Paris'):
            self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")

        # Use a pytz timezone as argument
        tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

        # Use a pytz timezone name as argument
        # NOTE(review): the template uses the literal 'Europe/Paris', so the
        # 'tz' context entry below is unused here.
        tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

    def test_localtime_templatetag_invalid_argument(self):
        # Anything other than on/off after {% localtime %} is a syntax error.
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()

    def test_localtime_filters_do_not_raise_exceptions(self):
        """
        Test the |localtime, |utc, and |timezone filters on bad inputs.
        """
        tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
        with self.settings(USE_TZ=True):
            # bad datetime value: filters render as empty strings.
            ctx = Context({'dt': None, 'tz': ICT})
            self.assertEqual(tpl.render(ctx), "None|||")
            ctx = Context({'dt': 'not a date', 'tz': ICT})
            self.assertEqual(tpl.render(ctx), "not a date|||")
            # bad timezone value: the whole expression renders empty.
            tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
            ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
            self.assertEqual(tpl.render(ctx), "")
            ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
            self.assertEqual(tpl.render(ctx), "")

    @requires_tz_support
    def test_timezone_templatetag(self):
        """
        Test the {% timezone %} templatetag.
        """
        # Nested tags: outer switches to ICT, inner with None restores the
        # default (EAT) conversion.
        tpl = Template("{% load tz %}"
                       "{{ dt }}|"
                       "{% timezone tz1 %}"
                       "{{ dt }}|"
                       "{% timezone tz2 %}"
                       "{{ dt }}"
                       "{% endtimezone %}"
                       "{% endtimezone %}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
                       'tz1': ICT, 'tz2': None})
        self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")

    @skipIf(pytz is None, "this test requires pytz")
    def test_timezone_templatetag_with_pytz(self):
        """
        Test the {% timezone %} templatetag with pytz.
        """
        tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")

        # Use a pytz timezone as argument
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

        # Use a pytz timezone name as argument
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
                       'tz': 'Europe/Paris'})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

    def test_timezone_templatetag_invalid_argument(self):
        # Missing argument is a syntax error; an unknown timezone name fails
        # at render time with a backend-dependent exception.
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
        with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
            Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))

    @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
    def test_get_current_timezone_templatetag(self):
        """
        Test the {% get_current_timezone %} templatetag.
        """
        tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
        # Without pytz the zone renders via its tzname() abbreviation.
        self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
        with timezone.override(UTC):
            self.assertEqual(tpl.render(Context()), "UTC")

        # The assignment is scoped to the {% timezone %} block, but the
        # variable remains set after {% endtimezone %}.
        tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
        self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
        with timezone.override(UTC):
            self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")

    @skipIf(pytz is None, "this test requires pytz")
    def test_get_current_timezone_templatetag_with_pytz(self):
        """
        Test the {% get_current_timezone %} templatetag with pytz.
        """
        tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
        with timezone.override(pytz.timezone('Europe/Paris')):
            self.assertEqual(tpl.render(Context()), "Europe/Paris")

        tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
        self.assertEqual(tpl.render(Context()), "Europe/Paris")

    def test_get_current_timezone_templatetag_invalid_argument(self):
        # {% get_current_timezone %} requires the "as variable" clause.
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% get_current_timezone %}").render()

    @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
    def test_tz_template_context_processor(self):
        """
        Test the django.core.context_processors.tz template context processor.
        """
        tpl = Template("{{ TIME_ZONE }}")
        # A plain Context has no TIME_ZONE; a RequestContext gets it from the
        # context processor.
        self.assertEqual(tpl.render(Context()), "")
        self.assertEqual(tpl.render(RequestContext(HttpRequest())), "Africa/Nairobi" if pytz else "EAT")

    @requires_tz_support
    def test_date_and_time_template_filters(self):
        # 20:20 UTC is 23:20 in EAT and 03:20 the next day in ICT.
        tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
        self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")

    def test_date_and_time_template_filters_honor_localtime(self):
        # Inside {% localtime off %} the UTC value is rendered unconverted,
        # regardless of the active timezone.
        tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
        self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")

    def test_localtime_with_time_zone_setting_set_to_none(self):
        # Regression for #17274
        tpl = Template("{% load tz %}{{ dt }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
        with self.settings(TIME_ZONE=None):
            # the actual value depends on the system time zone of the host
            self.assertTrue(tpl.render(ctx).startswith("2011"))

    @requires_tz_support
    def test_now_template_tag_uses_current_time_zone(self):
        # Regression for #17343
        tpl = Template("{% now \"O\" %}")
        self.assertEqual(tpl.render(Context({})), "+0300")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
    """Form datetime handling with USE_TZ off: values stay naive."""

    def test_form(self):
        form = EventForm({'dt': u'2011-09-01 13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))

    @skipIf(pytz is None, "this test requires pytz")
    def test_form_with_non_existent_time(self):
        # 2011-03-27 02:30 does not exist in Europe/Paris (DST gap).
        form = EventForm({'dt': u'2011-03-27 02:30:00'})
        with timezone.override(pytz.timezone('Europe/Paris')):
            # this is obviously a bug
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))

    @skipIf(pytz is None, "this test requires pytz")
    def test_form_with_ambiguous_time(self):
        # 2011-10-30 02:30 occurs twice in Europe/Paris (DST fall-back).
        form = EventForm({'dt': u'2011-10-30 02:30:00'})
        with timezone.override(pytz.timezone('Europe/Paris')):
            # this is obviously a bug
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))

    def test_split_form(self):
        form = EventSplitForm({'dt_0': u'2011-09-01', 'dt_1': u'13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))

    def test_model_form(self):
        EventModelForm({'dt': u'2011-09-01 13:20:30'}).save()
        e = Event.objects.get()
        self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
    """Form datetime handling with USE_TZ on: input is interpreted in the
    current timezone and cleaned to an aware UTC datetime."""

    @requires_tz_support
    def test_form(self):
        # 13:20:30 in the default zone (EAT, +03:00) is 10:20:30 UTC.
        form = EventForm({'dt': u'2011-09-01 13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))

    def test_form_with_other_timezone(self):
        # 17:20:30 in the overridden zone (ICT, +07:00) is 10:20:30 UTC.
        form = EventForm({'dt': u'2011-09-01 17:20:30'})
        with timezone.override(ICT):
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))

    @skipIf(pytz is None, "this test requires pytz")
    def test_form_with_non_existent_time(self):
        # A time in the DST gap must be rejected with a clear error.
        with timezone.override(pytz.timezone('Europe/Paris')):
            form = EventForm({'dt': u'2011-03-27 02:30:00'})
            self.assertFalse(form.is_valid())
            self.assertEqual(form.errors['dt'],
                [u"2011-03-27 02:30:00 couldn't be interpreted in time zone "
                 u"Europe/Paris; it may be ambiguous or it may not exist."])

    @skipIf(pytz is None, "this test requires pytz")
    def test_form_with_ambiguous_time(self):
        # A time that occurs twice (DST fall-back) must also be rejected.
        with timezone.override(pytz.timezone('Europe/Paris')):
            form = EventForm({'dt': u'2011-10-30 02:30:00'})
            self.assertFalse(form.is_valid())
            self.assertEqual(form.errors['dt'],
                [u"2011-10-30 02:30:00 couldn't be interpreted in time zone "
                 u"Europe/Paris; it may be ambiguous or it may not exist."])

    @requires_tz_support
    def test_split_form(self):
        form = EventSplitForm({'dt_0': u'2011-09-01', 'dt_1': u'13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))

    @requires_tz_support
    def test_model_form(self):
        EventModelForm({'dt': u'2011-09-01 13:20:30'}).save()
        e = Event.objects.get()
        self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
                   PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminTests(TestCase):
    """Admin views must display datetimes in the current timezone."""

    # Test URLconf and fixture with the admin user.
    urls = 'modeltests.timezones.urls'
    fixtures = ['tz_users.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    @requires_tz_support
    def test_changelist(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        response = self.client.get(reverse('admin:timezones_event_changelist'))
        self.assertContains(response, e.dt.astimezone(EAT).isoformat())

    def test_changelist_in_other_timezone(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        with timezone.override(ICT):
            response = self.client.get(reverse('admin:timezones_event_changelist'))
            self.assertContains(response, e.dt.astimezone(ICT).isoformat())

    @requires_tz_support
    def test_change_editable(self):
        # Editable widgets split the value into date and time parts.
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
        self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
        self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())

    def test_change_editable_in_other_timezone(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        with timezone.override(ICT):
            response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
            self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
            self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())

    @requires_tz_support
    def test_change_readonly(self):
        Timestamp.objects.create()
        # re-fetch the object for backends that lose microseconds (MySQL)
        t = Timestamp.objects.get()
        response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
        self.assertContains(response, t.created.astimezone(EAT).isoformat())

    def test_change_readonly_in_other_timezone(self):
        Timestamp.objects.create()
        # re-fetch the object for backends that lose microseconds (MySQL)
        t = Timestamp.objects.get()
        with timezone.override(ICT):
            response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
            self.assertContains(response, t.created.astimezone(ICT).isoformat())
@override_settings(TIME_ZONE='Africa/Nairobi')
class UtilitiesTests(TestCase):
    """Tests for timezone.make_aware and timezone.make_naive."""

    def test_make_aware(self):
        # Attaching a timezone to a naive datetime keeps the wall-clock value.
        self.assertEqual(
            timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT),
            datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        )
        self.assertEqual(
            timezone.make_aware(datetime.datetime(2011, 9, 1, 10, 20, 30), UTC),
            datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        )

    def test_make_naive(self):
        # Stripping to the same zone keeps the wall-clock value; stripping to
        # a different zone converts first (13:20 EAT == 10:20 UTC).
        self.assertEqual(
            timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT),
            datetime.datetime(2011, 9, 1, 13, 20, 30)
        )
        self.assertEqual(
            timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), UTC),
            datetime.datetime(2011, 9, 1, 10, 20, 30)
        )
        self.assertEqual(
            timezone.make_naive(datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), UTC),
            datetime.datetime(2011, 9, 1, 10, 20, 30)
        )
|
{
"content_hash": "9c442958b784669a4a22e73611623ccd",
"timestamp": "",
"source": "github",
"line_count": 1031,
"max_line_length": 146,
"avg_line_length": 47.99321047526673,
"alnum_prop": 0.6171257654453225,
"repo_name": "mitar/django",
"id": "bd9c7658f92492d4a1abe25ea7683e18814463fd",
"size": "49481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/modeltests/timezones/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "49825"
},
{
"name": "JavaScript",
"bytes": "89027"
},
{
"name": "Python",
"bytes": "8055913"
},
{
"name": "Shell",
"bytes": "11901"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from oslo_log import log as logging
# Import extensions to pull in osapi_compute_extension CONF option used below.
from jacket.tests.compute.functional import api_paste_fixture
from jacket.tests.compute.functional import integrated_helpers
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ExtensionsTest(integrated_helpers._IntegratedTestBase):
    """Functional test exercising a sample legacy v2 API extension."""

    api_major_version = 'v2'

    def setUp(self):
        # Install the legacy v2 api-paste pipeline before the base class
        # builds the test API service.
        self.useFixture(api_paste_fixture.ApiPasteLegacyV2Fixture())
        super(ExtensionsTest, self).setUp()

    def _get_flags(self):
        """Return base flags with the Foxinsocks sample extension enabled."""
        f = super(ExtensionsTest, self)._get_flags()
        # Copy the configured list so the append does not mutate CONF.
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'compute.tests.unit.api.openstack.compute.legacy_v2.extensions.'
            'foxinsocks.Foxinsocks')
        return f

    def test_get_foxnsocks(self):
        # Simple check that fox-n-socks works.
        response = self.api.api_request('/foxnsocks')
        foxnsocks = response.content
        # Use lazy %-args instead of eager string interpolation so the
        # message is only formatted when debug logging is enabled.
        LOG.debug("foxnsocks: %s", foxnsocks)
        self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
|
{
"content_hash": "5aa8beeb904e4f87c28a8aacd1db6814",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 36.46875,
"alnum_prop": 0.689802913453299,
"repo_name": "HybridF5/jacket",
"id": "723db993425f8f61a7d6208a444d371f849f2ab6",
"size": "1803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/tests/compute/functional/test_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
}
|
import numpy as np
def weighted_percentile(a, q, weights=None, sorter=None):
    """
    Returns the weighted percentile of a at q given weights.

    Parameters
    ----------
    a: array-like, shape=(n_samples,)
        samples at which the quantile is computed.

    q: int
        quantile, in [0, 100].

    weights: array-like, shape=(n_samples,)
        weights[i] is the weight given to point a[i] while computing the
        quantile. If weights[i] is zero, a[i] is simply ignored during the
        percentile computation.

    sorter: array-like, shape=(n_samples,)
        If provided, assume that a[sorter] is sorted.

    Returns
    -------
    percentile: float
        Weighted percentile of a at q.

    Raises
    ------
    ValueError
        If q is outside [0, 100], if a and weights have different lengths,
        or if a is empty / all weights are zero.

    References
    ----------
    1. https://en.wikipedia.org/wiki/Percentile#The_Weighted_Percentile_method

    Notes
    -----
    Note that weighted_percentile(a, q) is not equivalent to
    np.percentile(a, q). This is because in np.percentile
    sorted(a)[i] is assumed to be at quantile 0.0, while here we assume
    sorted(a)[i] is given a weight of 1.0 / len(a), hence it is at the
    1.0 / len(a)th quantile.
    """
    if q > 100 or q < 0:
        raise ValueError("q should be in-between 0 and 100, "
                         "got %d" % q)

    a = np.asarray(a, dtype=np.float32)
    if weights is None:
        weights = np.ones_like(a)
    else:
        weights = np.asarray(weights, dtype=np.float32)
    if len(a) != len(weights):
        raise ValueError("a and weights should have the same length.")

    if sorter is not None:
        a = a[sorter]
        weights = weights[sorter]

    # Zero-weight samples are dropped entirely.
    nz = weights != 0
    a = a[nz]
    weights = weights[nz]

    # Fail fast: previously an empty a (or all-zero weights) crashed later
    # with an opaque IndexError on sorted_cum_weights[-1].
    if len(a) == 0:
        raise ValueError("a is empty or all weights are zero.")

    if sorter is None:
        sorted_indices = np.argsort(a)
        sorted_a = a[sorted_indices]
        sorted_weights = weights[sorted_indices]
    else:
        sorted_a = a
        sorted_weights = weights

    # Step 1: cumulative weight up to and including each sorted sample.
    sorted_cum_weights = np.cumsum(sorted_weights)
    total = sorted_cum_weights[-1]

    # Step 2: each sample sits at the midpoint of its weight interval,
    # expressed as a percentage of the total weight.
    partial_sum = 100.0 / total * (sorted_cum_weights - sorted_weights / 2.0)
    start = np.searchsorted(partial_sum, q) - 1
    if start == len(sorted_cum_weights) - 1:
        return sorted_a[-1]
    if start == -1:
        return sorted_a[0]

    # Step 3: linear interpolation between the two bracketing samples.
    fraction = (q - partial_sum[start]) / (partial_sum[start + 1] - partial_sum[start])
    return sorted_a[start] + fraction * (sorted_a[start + 1] - sorted_a[start])
|
{
"content_hash": "a9682e9ff3b814bfd68726d340d77f34",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 87,
"avg_line_length": 29.703703703703702,
"alnum_prop": 0.5997506234413965,
"repo_name": "MechCoder/scikit-garden",
"id": "dae425f63419542ca1f72b7d691064737d1b9879",
"size": "2406",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "skgarden/quantile/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "270129"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
}
|
"""Tests for config_lib classes."""
import os
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import test_lib
class YamlConfigTest(test_lib.GRRBaseTest):
    """Test the Yaml config file support."""

    def testParsing(self):
        """Options, interpolation and context overlays parse from YAML."""
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_list("Section1.test_list", ["a", "b"], "A test integer.")
        conf.DEFINE_integer("Section1.test", 0, "An integer")
        # NOTE(review): the YAML literal below appears to have lost its
        # indentation in this copy of the file. For the context assertions
        # further down to hold, the options must be nested beneath their
        # "Client Context" / "Windows Context" keys (with the second
        # "Windows Context" nested inside "Client Context") -- confirm
        # against the upstream source.
        conf.Initialize(parser=config_lib.YamlParser, data="""
# Configuration options can be written as long hand, dot separated parameters.
Section1.test: 2
Section1.test_list: x,y
Section2.test: 3%(Section1.test)
Client Context:
Section1.test: 6
Section1.test2: 1
Windows Context:
Section1.test: 10
Windows Context:
Section1.test: 5
Section1.test2: 2
""")
        self.assertEquals(conf["Section1.test"], 2)

        # Test interpolation works.
        self.assertEquals(conf["Section2.test"], "32")
        self.assertEquals(conf["Section1.test_list"], ["x", "y"])
        self.assertEquals(conf.Get("Section1.test_list",
                                   context=["Client Context", "Windows Context"]),
                          ["x", "y"])

        # Test that contexts affect option selection.
        self.assertEquals(
            conf.Get("Section1.test", context=["Client Context"]), 6)
        self.assertEquals(
            conf.Get("Section1.test", context=["Windows Context"]), 5)
        context = ["Client Context", "Windows Context"]
        self.assertEquals(
            conf.Get("Section1.test", context=context), 10)
        context = ["Windows Context", "Client Context"]
        # Order of the context parameters should not matter.
        self.assertEquals(
            conf.Get("Section1.test", context=context), 10)

    def testConflictingContexts(self):
        """Test that conflicting contexts are resolved by precedence."""
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_integer("Section1.test", 0, "An integer")
        # NOTE(review): same caveat as in testParsing -- the context sections
        # below need their options indented beneath them.
        conf.Initialize(parser=config_lib.YamlParser, data="""
Section1.test: 2
Client Context:
Section1.test: 6
Platform:Windows:
Section1.test: 10
Extra Context:
Section1.test: 15
""")
        # Without contexts.
        self.assertEquals(conf.Get("Section1.test"), 2)

        # When running in the client context only.
        self.assertEquals(conf.Get("Section1.test", context=["Client Context"]), 6)

        # Later defined contexts (i.e. with later calls to AddContext()) are
        # stronger than earlier contexts. For example, contexts set the command line
        # --context option are stronger than contexts set by the running binary,
        # since they are added last.
        self.assertEquals(
            conf.Get("Section1.test",
                     context=["Client Context", "Platform:Windows"]),
            10)
        self.assertEquals(
            conf.Get("Section1.test",
                     context=["Platform:Windows", "Client Context"]),
            6)

    def testBackslashes(self):
        """Backslashes in YAML values survive parsing and interpolation."""
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_string("Section1.parameter", "", "A test.")
        conf.DEFINE_string("Section1.parameter2", "", "A test.")
        conf.DEFINE_string("Section1.parameter3", "", "A test.")
        # NOTE(review): YAML block scalars ("|") require their content to be
        # indented relative to the key; the indentation appears lost in this
        # copy -- confirm against the upstream source.
        conf.Initialize(parser=config_lib.YamlParser, data=r"""
Section1.parameter: |
a\\b\\c\\d
Section1.parameter2: |
%(parameter)\\e
Section1.parameter3: |
\%(a\\b\\c\\d\)
""")
        self.assertEqual(conf.Get("Section1.parameter"), "a\\b\\c\\d")
        self.assertEqual(conf.Get("Section1.parameter2"), "a\\b\\c\\d\\e")
        self.assertEqual(conf.Get("Section1.parameter3"), "%(a\\b\\c\\d)")
class ConfigLibTest(test_lib.GRRBaseTest):
"""Tests for config functionality."""
def testInit(self):
    """Testing initialization of a ConfigManager."""
    conf = config_lib.CONFIG
    # Check that the linux client have a different value from the windows
    # client.
    self.assertEquals(conf.Get("MemoryDriver.device_path",
                               context=("Client", "Platform:Linux")),
                      "/dev/pmem")
    self.assertEquals(conf.Get("MemoryDriver.device_path",
                               context=("Client", "Platform:Windows")),
                      r"\\.\pmem")
def testSet(self):
    """Test setting options."""
    # Test access methods.
    manager = config_lib.GrrConfigManager()
    manager.DEFINE_string("NewSection1.new_option1", "Default Value", "Help")
    manager.Set("NewSection1.new_option1", "New Value1")
    self.assertEquals(manager["NewSection1.new_option1"], "New Value1")
def testSave(self):
    """Save the config and ensure it still works."""
    conf = config_lib.GrrConfigManager()
    config_file = os.path.join(self.temp_dir, "writeback.yaml")
    conf.SetWriteBack(config_file)
    conf.DEFINE_string("NewSection1.new_option1", "Default Value", "Help")
    conf.Set("NewSection1.new_option1", "New Value1")
    conf.Write()

    # A fresh manager initialized from the written file must see the value.
    new_conf = config_lib.GrrConfigManager()
    new_conf.Initialize(config_file)
    self.assertEquals(new_conf["NewSection1.new_option1"], "New Value1")
def testErrorDetection(self):
    """Check that invalid config files are detected immediately."""
    test_conf = """
[Section1]
test = val2"""
    conf = config_lib.GrrConfigManager()
    # Define test as an integer.
    conf.DEFINE_integer("Section1.test", 54, "A test integer.")
    conf.Initialize(data=test_conf)

    # Validate() reports the bad value as an error (it does not raise here).
    errors = conf.Validate("Section1")
    self.assertTrue(
        "Invalid value val2 for Integer" in str(errors["Section1.test"]))
def testEmptyClientPrivateKey(self):
"""Check an empty client private_key passes."""
# Clone a test config object from the global config so it knows about Client
# options.
conf = config_lib.CONFIG.MakeNewConfig()
conf.Initialize(data="""
[Client]
private_key =
driver_signing_public_key = -----BEGIN PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALnfFW1FffeKPs5PLUhFOSkNrr9TDCOD
QAI3WluLh0sW7/ro93eoIZ0FbipnTpzGkPpriONbSOXmxWNTo0b9ma8CAwEAAQ==
-----END PUBLIC KEY-----
executable_signing_public_key = -----BEGIN PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALnfFW1FffeKPs5PLUhFOSkNrr9TDCOD
QAI3WluLh0sW7/ro93eoIZ0FbipnTpzGkPpriONbSOXmxWNTo0b9ma8CAwEAAQ==
-----END PUBLIC KEY-----
""")
errors = conf.Validate(["Client"])
self.assertItemsEqual(errors.keys(), [])
def testEmptyClientKeys(self):
"""Check that empty other keys fail."""
conf = config_lib.CONFIG.MakeNewConfig()
conf.Initialize(data="""
[Client]
private_key =
driver_signing_public_key =
executable_signing_public_key =
certificate =
""")
errors = conf.Validate(["Client"])
self.assertItemsEqual(errors.keys(),
["Client.driver_signing_public_key",
"Client.executable_signing_public_key"])
def testAddOption(self):
"""Test that we can add options."""
conf = config_lib.GrrConfigManager()
conf.DEFINE_string("Section1.foobar", "test", "A test string.")
conf.DEFINE_string("Section1.test", "test", "A test string.")
conf.DEFINE_string("Section1.interpolated", "", "An interpolated string.")
# This entry is not correct - the default is invalid.
conf.DEFINE_integer("Section1.test_int", "string", "A test integer.")
# The default value is invalid.
errors = conf.Validate("Section1")
self.assertTrue(
"Invalid value string for Integer" in str(errors["Section1.test_int"]))
conf.DEFINE_string("Section1.system", None, "The basic operating system.")
conf.DEFINE_integer("Section1.test_int", 54, "A test integer.")
conf.DEFINE_list("Section1.test_list", ["a", "b"], "A test integer.")
conf.DEFINE_list("Section1.test_list2", ["a", "b"], "A test integer.")
conf.Initialize(data="""
[Section1]
foobar = X
test_list = x,y
[Section2]
test_int = 34
interpolated = %(Section1.foobar)Y
[Section3]
test_int = 1
interpolated = %(%(Section1.foobar)|lower)Y
""")
# Section not specified:
self.assertRaises(config_lib.UnknownOption, conf.__getitem__, "a")
# Test direct access.
self.assertEquals(conf["Section1.foobar"], "X")
self.assertEquals(conf["Section1.test_list"], ["x", "y"])
self.assertEquals(conf["Section1.test_list2"], ["a", "b"])
# Test default access.
self.assertEquals(conf["Section1.test"], "test")
# Test interpolation with full section name.
self.assertEquals(conf["Section2.interpolated"], "XY")
# Check that default values are typed.
self.assertEquals(conf["Section1.test_int"], 54)
# Test filter functions.
self.assertEquals(conf["Section3.interpolated"], "xY")
def testUnbalancedParenthesis(self):
conf = config_lib.GrrConfigManager()
conf.Initialize(data=r"""
[Section1]
foobar = X
foo = %(Section1.foobar)
foo1 = %(foo
# Unbalanced parenthesis
foo2 = foo)
# Unbalanced parenthesis is ok if escaped.
foo3 = foo\)
# Or if enclosed in a literal block.
foo6 = %{foo)}
foo4 = %{%(hello)}
foo5 = %{hello
# Literal blocks can also appear inside filter interpolations to prevent
# automatic expansions.
# This pull the environment variable "sectionX"
interpolation1 = %(section%(Section1.foobar)|env)
# But this means literally section%(Section1.foo):
interpolation2 = %(section%{%(Section1.foo)}|env)
""")
# Test direct access.
self.assertEquals(conf["Section1.foo"], "X")
self.assertRaises(config_lib.ConfigFormatError,
conf.__getitem__, "Section1.foo1")
self.assertRaises(config_lib.ConfigFormatError,
conf.__getitem__, "Section1.foo2")
self.assertEquals(conf["Section1.foo3"], "foo)")
# Test literal expansion.
self.assertEquals(conf["Section1.foo4"], "%(hello)")
self.assertRaises(config_lib.ConfigFormatError,
conf.__getitem__, "Section1.foo5")
self.assertEquals(conf["Section1.foo6"], "foo)")
# The Env filter forces uppercase on args.
os.environ["sectionX".upper()] = "1"
os.environ["section%(Section1.foo)".upper()] = "2"
self.assertEquals(conf["Section1.interpolation1"], "1")
self.assertEquals(conf["Section1.interpolation2"], "2")
# Test that Set() escapes - i.e. reading the value back will return exactly
# the same as we wrote:
conf.Set("Section1.foo6", "%(Section1.foo3)")
self.assertEquals(conf["Section1.foo6"], "%(Section1.foo3)")
self.assertEquals(conf.GetRaw("Section1.foo6"), r"\%(Section1.foo3\)")
# OTOH when we write it raw, reading it back will interpolate:
conf.SetRaw("Section1.foo6", "%(Section1.foo3)")
self.assertEquals(conf["Section1.foo6"], "foo)")
def testDataTypes(self):
conf = config_lib.GrrConfigManager()
conf.DEFINE_float("Section1.float", 0, "A float")
conf.Initialize(parser=config_lib.YamlParser, data="Section1.float: abc")
errors = conf.Validate("Section1")
self.assertTrue(
"Invalid value abc for Float" in str(errors["Section1.float"]))
self.assertRaises(config_lib.ConfigFormatError, conf.Get, "Section1.float")
conf.Initialize(parser=config_lib.YamlParser, data="Section1.float: 2")
# Should have no errors now. Validate should normalize the value to a float.
self.assertEquals(conf.Validate("Section1"), {})
self.assertEquals(type(conf.Get("Section1.float")), float)
conf.DEFINE_integer("Section1.int", 0, "An integer")
conf.Initialize(parser=config_lib.YamlParser, data="Section1.int: 2.0")
errors = conf.Validate("Section1")
# Floats can not be coerced to an int because that will lose data.
self.assertTrue(
"Invalid value 2.0 for Integer" in str(errors["Section1.int"]))
# A string can be coerced to an int if it makes sense:
conf.Initialize(parser=config_lib.YamlParser, data="Section1.int: '2'")
errors = conf.Validate("Section1")
self.assertEquals(type(conf.Get("Section1.int")), long)
conf.DEFINE_list("Section1.list", default=[], help="A list")
self.assertEquals(type(conf.Get("Section1.list")), list)
self.assertEquals(conf.Get("Section1.list"), [])
conf.DEFINE_list("Section1.list2", default=["a", "2"], help="A list")
self.assertEquals(type(conf.Get("Section1.list2")), list)
self.assertEquals(conf.Get("Section1.list2"), ["a", "2"])
def main(argv):
  """Entry point: hand argv to the GRR test runner."""
  test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
  # flags.StartMain parses command-line flags before invoking main().
  flags.StartMain(main)
|
{
"content_hash": "1c54768a8b188e04c510975ad16e1512",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 80,
"avg_line_length": 31.765463917525775,
"alnum_prop": 0.6666125760649088,
"repo_name": "simsong/grr-insider",
"id": "c3f32f1f36197f163ccf306eceeb65d0150e5b1f",
"size": "12347",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/config_lib_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36308"
},
{
"name": "JavaScript",
"bytes": "679269"
},
{
"name": "Python",
"bytes": "3553249"
},
{
"name": "Shell",
"bytes": "30813"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Asado.estimated_price`` to ``estimated_cost``."""

    dependencies = [
        ('asados', '0005_auto_20171023_1311'),
    ]

    operations = [
        # Pure column rename: no data transformation, reversible.
        migrations.RenameField(
            model_name='asado',
            old_name='estimated_price',
            new_name='estimated_cost',
        ),
    ]
|
{
"content_hash": "180850b78eeb9a9631e92f0560dc0eb7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 46,
"avg_line_length": 20.555555555555557,
"alnum_prop": 0.5837837837837838,
"repo_name": "TenStrings/organizador-de-asados",
"id": "62d49eedff5eba5669af7b4aa987110e57b51d44",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asados/migrations/0006_auto_20171024_0214.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "977"
},
{
"name": "HTML",
"bytes": "7152"
},
{
"name": "JavaScript",
"bytes": "310"
},
{
"name": "Python",
"bytes": "34651"
}
],
"symlink_target": ""
}
|
"""Test for the database used fo tasks.
"""
from pytest import raises
from exopy.tasks.tasks.database import TaskDatabase
# TODO add tests checking that the notifiers did run properly
# =============================================================================
# --- Edition mode tests ------------------------------------------------------
# =============================================================================
def test_database_nodes():
    """Exercise node creation, renaming and deletion."""
    db = TaskDatabase()
    db.create_node('root', 'node1')
    db.create_node('root/node1', 'node2')
    db.rename_node('root', 'node1', 'n_node1')
    db.delete_node('root/n_node1', 'node2')
    # Deleting the same node twice must fail.
    with raises(KeyError):
        db.delete_node('root/n_node1', 'node2')
def test_database_values():
    """Check set/get semantics, including lookups falling back to the parent."""
    db = TaskDatabase()
    # First write of a key reports True, an overwrite reports False.
    assert db.set_value('root', 'val1', 1) is True
    assert db.get_value('root', 'val1') == 1
    assert db.set_value('root', 'val1', 2) is False
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    assert db.get_value('root/node1', 'val2') == 'a'
    # A value set on the root is visible from the child node.
    assert db.get_value('root/node1', 'val1') == 2
    # Unknown nodes raise.
    with raises(KeyError):
        db.get_value('root/rrtt', 'val')
    with raises(KeyError):
        db.get_value('root/node1/rr', 'val')
def test_database_delete_value():
    """Check that a value can be deleted exactly once."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    assert db.get_value('root', 'val1') == 1
    db.delete_value('root', 'val1')
    # Both reading and re-deleting the removed value must fail.
    with raises(KeyError):
        db.get_value('root', 'val1')
    with raises(KeyError):
        db.delete_value('root', 'val1')
def test_database_values3():
    """A value stored on a grandchild is not visible from its ancestor."""
    db = TaskDatabase()
    db.create_node('root', 'node1')
    db.create_node('root/node1', 'node2')
    db.set_value('root/node1/node2', 'val1', 1)
    assert db.get_value('root/node1/node2', 'val1') == 1
    with raises(KeyError):
        db.get_value('root/node1', 'val1')
def test_renaming_values():
    """Rename a value that no access exception refers to."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.rename_values('root', ['val1'], ['new_val'])
    # Old name is gone, new name holds the value.
    with raises(KeyError):
        db.get_value('root', 'val1')
    assert db.get_value('root', 'new_val') == 1
    # Renaming the now-missing entry fails.
    with raises(KeyError):
        db.rename_values('root', ['val1'], ['new_val'])
def test_database_listing():
    """Check the listing helpers, with and without excluded entries."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    assert db.list_all_entries() == sorted(['root/val1', 'root/node1/val2'])
    assert (db.list_all_entries(values=True) ==
            {'root/val1': 1, 'root/node1/val2': 'a'})
    assert db.list_accessible_entries('root') == ['val1']
    assert (db.list_accessible_entries('root/node1') ==
            sorted(['val1', 'val2']))
    # Excluded entries disappear from every listing.
    db.excluded = ['val1']
    assert db.list_all_entries() == sorted(['root/node1/val2'])
    assert db.list_all_entries(values=True) == {'root/node1/val2': 'a'}
    assert db.list_accessible_entries('root') == []
    assert db.list_accessible_entries('root/node1') == sorted(['val2'])
def test_access_exceptions():
    """Check adding and removing access exceptions on the root node."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    db.create_node('root', 'node2')
    db.set_value('root/node2', 'val3', 2.0)
    assert db.list_accessible_entries('root') == ['val1']
    # Each exception makes one child value visible from 'root'.
    db.add_access_exception('root', 'root/node1', 'val2')
    assert db.list_accessible_entries('root') == ['val1', 'val2']
    assert db.get_value('root', 'val2') == 'a'
    db.add_access_exception('root', 'root/node2', 'val3')
    assert db.list_accessible_entries('root') == ['val1', 'val2', 'val3']
    assert db.get_value('root', 'val3') == 2.0
    # Exceptions can be removed one by one ...
    db.remove_access_exception('root', 'val2')
    assert db.list_accessible_entries('root') == ['val1', 'val3']
    # ... or all at once when no value name is given.
    db.remove_access_exception('root')
    assert db.list_accessible_entries('root') == ['val1']
def test_access_exceptions_renaming_values():
    """Renaming a value tracked by an access exception keeps it reachable."""
    db = TaskDatabase()
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val1', 2.0)
    db.add_access_exception('root', 'root/node1', 'val1')
    db.rename_values('root/node1', ['val1'], ['new_val'], {'val1': 1})
    # The exception now exposes the value under its new name.
    assert db.get_value('root', 'new_val') == 2.0
def test_access_exceptions_renaming_node():
    """Renaming nodes along an exception path keeps the value reachable.

    Covers both the node named by the exception's relative path and an
    ancestor of it.
    """
    db = TaskDatabase()
    db.create_node('root', 'node1')
    db.create_node('root/node1', 'node2')
    db.set_value('root/node1/node2', 'val1', 2.0)
    db.add_access_exception('root/node1', 'root/node1/node2', 'val1')
    assert db.get_value('root/node1', 'val1') == 2.0
    # Rename the node the exception points into.
    db.rename_node('root/node1', 'node2', 'node22')
    assert db.get_value('root/node1', 'val1') == 2.0
    # Rename an ancestor of the exception.
    db.rename_node('root', 'node1', 'node11')
    assert db.get_value('root/node11', 'val1') == 2.0
def test_copy_node_values():
    """copy_node_values returns only the values stored on the given node."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    db.create_node('root', 'node2')
    db.set_value('root/node2', 'val3', 2.0)
    # Default node is the root; children are not included.
    assert db.copy_node_values() == {'val1': 1}
    assert db.copy_node_values('root/node1') == {'val2': 'a'}
def test_list_nodes():
    """All created nodes, including the root, appear in list_nodes."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    db.create_node('root', 'node2')
    db.set_value('root/node2', 'val3', 2.0)
    nodes = db.list_nodes()
    for expected in ('root', 'root/node1', 'root/node2'):
        assert expected in nodes
# =============================================================================
# --- Running mode tests ------------------------------------------------------
# =============================================================================
def test_forbidden_operations():
    """Structural edits are rejected once the database is in running mode."""
    db = TaskDatabase()
    db.prepare_to_run()
    # Every mutating/structural operation must raise RuntimeError.
    forbidden = [
        (db.rename_values, ('root', [], [])),
        (db.delete_value, ('root', '')),
        (db.create_node, ('root', '')),
        (db.rename_node, ('root', '', '')),
        (db.delete_node, ('root', '')),
    ]
    for operation, args in forbidden:
        with raises(RuntimeError):
            operation(*args)
def test_flattening_database():
    """prepare_to_run flattens a populated database without raising."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    db.prepare_to_run()
def test_index_op_on_flat_database1():
    """Index-based lookups work on a flattened database."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    db.create_node('root/node1', 'node2')
    db.prepare_to_run()
    assert db.get_entries_indexes('root', ['val1']) == {'val1': 0}
    assert (db.get_entries_indexes('root/node1', ['val1', 'val2']) ==
            {'val1': 0, 'val2': 1})
    assert (db.get_entries_indexes('root/node1/node2', ['val2']) ==
            {'val2': 1})
    assert db.get_values_by_index([0, 1]) == [1, 'a']
    # With a prefix the result is a dict keyed by prefixed indexes.
    assert db.get_values_by_index([0, 1], 'e_') == {'e_0': 1, 'e_1': 'a'}
    with raises(KeyError):
        db.get_entries_indexes('root/rr', [''])
def test_index_op_on_flat_database2():
    """Index lookups honour a simple access exception after flattening."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    db.add_access_exception('root', 'root/node1', 'val2')
    db.prepare_to_run()
    assert db.get_entries_indexes('root', ['val1']) == {'val1': 0}
    # 'val2' is reachable from 'root' through the exception.
    assert (db.get_entries_indexes('root', ['val1', 'val2']) ==
            {'val1': 0, 'val2': 1})
def test_index_op_on_flat_database3():
    """Index lookups honour chained access exceptions after flattening."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.create_node('root/node1', 'node2')
    db.set_value('root/node1/node2', 'val2', 'a')
    # Chain the exception from the grandchild up to the root.
    db.add_access_exception('root/node1', 'root/node1/node2', 'val2')
    db.add_access_exception('root', 'root/node1', 'val2')
    db.prepare_to_run()
    assert db.get_entries_indexes('root', ['val1']) == {'val1': 0}
    assert (db.get_entries_indexes('root', ['val1', 'val2']) ==
            {'val1': 0, 'val2': 1})
def test_get_set_on_flat_database1():
    """Name-based get/set works after flattening."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    db.prepare_to_run()
    # Overwriting an existing entry reports False.
    assert not db.set_value('root', 'val1', 2)
    assert db.get_value('root', 'val1') == 2
    # The value is also visible from the child node.
    assert db.get_value('root/node1', 'val1') == 2
def test_get_set_on_flat_database2():
    """Name-based get/set follows a simple access exception after flattening."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.set_value('root/node1', 'val2', 'a')
    db.add_access_exception('root', 'root/node1', 'val2')
    db.prepare_to_run()
    # 'val2' can be written and read through the exception on 'root'.
    assert not db.set_value('root', 'val2', 2)
    assert db.get_value('root', 'val2') == 2
def test_get_set_on_flat_database3():
    """Name-based get/set follows chained access exceptions after flattening."""
    db = TaskDatabase()
    db.set_value('root', 'val1', 1)
    db.create_node('root', 'node1')
    db.create_node('root/node1', 'node2')
    db.set_value('root/node1/node2', 'val2', 'a')
    # Chain the exception from the grandchild up to the root.
    db.add_access_exception('root/node1', 'root/node1/node2', 'val2')
    db.add_access_exception('root', 'root/node1', 'val2')
    db.prepare_to_run()
    # The value is writable/readable through both hops of the chain.
    assert not db.set_value('root', 'val2', 2)
    assert db.get_value('root', 'val2') == 2
    assert not db.set_value('root/node1', 'val2', 2)
    assert db.get_value('root/node1', 'val2') == 2
|
{
"content_hash": "a1bc4dcb52e4e54ebba72a95be873f83",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 79,
"avg_line_length": 33.48424068767908,
"alnum_prop": 0.609532774259798,
"repo_name": "Ecpy/ecpy",
"id": "f5ad6b76fde6f6a2bc121ebfb82fbe12360defda",
"size": "12069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tasks/tasks/test_database.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "162"
},
{
"name": "Python",
"bytes": "1344669"
},
{
"name": "Shell",
"bytes": "420"
}
],
"symlink_target": ""
}
|
"""
Support for the Netatmo cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.netatmo/.
"""
import logging
import requests
import voluptuous as vol
from homeassistant.const import CONF_VERIFY_SSL
from homeassistant.components.netatmo import CameraData
from homeassistant.components.camera import (Camera, PLATFORM_SCHEMA)
from homeassistant.helpers import config_validation as cv
DEPENDENCIES = ['netatmo']

_LOGGER = logging.getLogger(__name__)

# Platform configuration keys.
CONF_HOME = 'home'
CONF_CAMERAS = 'cameras'

# Extend the base camera schema: optional home name plus an optional
# whitelist of camera names (the empty-list default means "all cameras").
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
    vol.Optional(CONF_HOME): cv.string,
    vol.Optional(CONF_CAMERAS, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up access to Netatmo cameras.

    Queries the Netatmo API for the available cameras of the configured
    home and registers one entity per camera, honouring the optional
    ``cameras`` whitelist from the platform configuration.
    """
    netatmo = hass.components.netatmo
    home = config.get(CONF_HOME)
    verify_ssl = config.get(CONF_VERIFY_SSL, True)
    # Empty list (the schema default) means "expose every camera".
    requested_cameras = config.get(CONF_CAMERAS, [])

    import pyatmo
    try:
        data = CameraData(netatmo.NETATMO_AUTH, home)
        cameras = []
        for camera_name in data.get_camera_names():
            camera_type = data.get_camera_type(camera=camera_name, home=home)
            if requested_cameras and camera_name not in requested_cameras:
                continue
            cameras.append(
                NetatmoCamera(data, camera_name, home, camera_type,
                              verify_ssl))
        # Register all entities in one call instead of one add_entities call
        # per camera, avoiding a platform update cycle for each entity.
        add_entities(cameras)
    except pyatmo.NoDevice:
        return None
class NetatmoCamera(Camera):
    """Representation of the images published from a Netatmo camera."""

    def __init__(self, data, camera_name, home, camera_type, verify_ssl):
        """Set up for access to the Netatmo camera images."""
        super(NetatmoCamera, self).__init__()
        self._data = data
        self._camera_name = camera_name
        self._verify_ssl = verify_ssl
        if home:
            self._name = home + ' / ' + camera_name
        else:
            self._name = camera_name
        # Cameras are reachable either directly on the LAN (_localurl) or
        # through Netatmo's VPN relay (_vpnurl).
        self._vpnurl, self._localurl = self._data.camera_data.cameraUrls(
            camera=camera_name
        )
        self._cameratype = camera_type

    def camera_image(self):
        """Return a still image response from the camera.

        Prefers the local URL over the VPN relay. On a missing URL or a
        request failure the cached URLs are refreshed and None is returned
        so the next update cycle can retry.
        """
        try:
            if self._localurl:
                response = requests.get('{0}/live/snapshot_720.jpg'.format(
                    self._localurl), timeout=10)
            elif self._vpnurl:
                # SSL verification is only configurable for the VPN URL.
                response = requests.get('{0}/live/snapshot_720.jpg'.format(
                    self._vpnurl), timeout=10, verify=self._verify_ssl)
            else:
                _LOGGER.error("Welcome VPN URL is None")
                self._data.update()
                (self._vpnurl, self._localurl) = \
                    self._data.camera_data.cameraUrls(camera=self._camera_name)
                return None
        except requests.exceptions.RequestException as error:
            # The camera URLs can change; refresh them for the next attempt.
            _LOGGER.error("Welcome URL changed: %s", error)
            self._data.update()
            (self._vpnurl, self._localurl) = \
                self._data.camera_data.cameraUrls(camera=self._camera_name)
            return None
        return response.content

    @property
    def name(self):
        """Return the name of this Netatmo camera device."""
        return self._name

    @property
    def brand(self):
        """Return the camera brand."""
        return "Netatmo"

    @property
    def model(self):
        """Return the camera model."""
        if self._cameratype == "NOC":
            return "Presence"
        if self._cameratype == "NACamera":
            return "Welcome"
        return None
|
{
"content_hash": "014d2b5a714f8b44e8703ec2f3548caa",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 79,
"avg_line_length": 34.40909090909091,
"alnum_prop": 0.6015852047556143,
"repo_name": "PetePriority/home-assistant",
"id": "93ad2cd055b7cddd91ffc657985a25bff8cc1d97",
"size": "3785",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/netatmo/camera.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
"""
WSGI config for vales project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os

# The settings module must be configured before the WSGI application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vales.settings")

from django.core.wsgi import get_wsgi_application

# Module-level callable discovered by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
|
{
"content_hash": "754f317d5503138a6b9ca4ad8ebeb3af",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.5,
"alnum_prop": 0.7443609022556391,
"repo_name": "diego-d5000/MisValesMd",
"id": "b9786cde4849b58ea47cd15fe0d5a7056fd345eb",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vales/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115465"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "1415583"
},
{
"name": "JavaScript",
"bytes": "1381588"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "8107650"
},
{
"name": "Shell",
"bytes": "11786"
}
],
"symlink_target": ""
}
|
from connector import channel
from google3.cloud.graphite.mmv2.services.google.dataproc import autoscaling_policy_pb2
from google3.cloud.graphite.mmv2.services.google.dataproc import (
autoscaling_policy_pb2_grpc,
)
from typing import List
class AutoscalingPolicy(object):
    """Declarative client for a Dataproc (alpha) AutoscalingPolicy resource.

    Construct with the desired state, then call apply()/delete()/list().
    NOTE(review): generator-style code; ``Primitive`` and the nested config
    helper classes are expected to be defined elsewhere in this module.
    """

    def __init__(
        self,
        name: str = None,
        basic_algorithm: dict = None,
        worker_config: dict = None,
        secondary_worker_config: dict = None,
        project: str = None,
        location: str = None,
        service_account_file: str = "",
    ):
        # Ensure the shared gRPC channel is ready before any RPC is issued.
        channel.initialize()
        self.name = name
        self.basic_algorithm = basic_algorithm
        self.worker_config = worker_config
        self.secondary_worker_config = secondary_worker_config
        self.project = project
        self.location = location
        self.service_account_file = service_account_file

    def apply(self):
        """Create or update the remote policy and sync the response back.

        Fields set on this object are copied into the request; unset message
        fields are explicitly cleared. After the RPC, all local fields are
        overwritten with the server's view of the resource.
        """
        stub = autoscaling_policy_pb2_grpc.DataprocAlphaAutoscalingPolicyServiceStub(
            channel.Channel()
        )
        request = autoscaling_policy_pb2.ApplyDataprocAlphaAutoscalingPolicyRequest()
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if AutoscalingPolicyBasicAlgorithm.to_proto(self.basic_algorithm):
            request.resource.basic_algorithm.CopyFrom(
                AutoscalingPolicyBasicAlgorithm.to_proto(self.basic_algorithm)
            )
        else:
            request.resource.ClearField("basic_algorithm")
        if AutoscalingPolicyWorkerConfig.to_proto(self.worker_config):
            request.resource.worker_config.CopyFrom(
                AutoscalingPolicyWorkerConfig.to_proto(self.worker_config)
            )
        else:
            request.resource.ClearField("worker_config")
        if AutoscalingPolicySecondaryWorkerConfig.to_proto(
            self.secondary_worker_config
        ):
            request.resource.secondary_worker_config.CopyFrom(
                AutoscalingPolicySecondaryWorkerConfig.to_proto(
                    self.secondary_worker_config
                )
            )
        else:
            request.resource.ClearField("secondary_worker_config")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        request.service_account_file = self.service_account_file

        response = stub.ApplyDataprocAlphaAutoscalingPolicy(request)

        # Mirror the applied state back onto this object.
        self.name = Primitive.from_proto(response.name)
        self.basic_algorithm = AutoscalingPolicyBasicAlgorithm.from_proto(
            response.basic_algorithm
        )
        self.worker_config = AutoscalingPolicyWorkerConfig.from_proto(
            response.worker_config
        )
        self.secondary_worker_config = (
            AutoscalingPolicySecondaryWorkerConfig.from_proto(
                response.secondary_worker_config
            )
        )
        self.project = Primitive.from_proto(response.project)
        self.location = Primitive.from_proto(response.location)

    def delete(self):
        """Delete the remote policy identified by this object's fields."""
        stub = autoscaling_policy_pb2_grpc.DataprocAlphaAutoscalingPolicyServiceStub(
            channel.Channel()
        )
        request = autoscaling_policy_pb2.DeleteDataprocAlphaAutoscalingPolicyRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if AutoscalingPolicyBasicAlgorithm.to_proto(self.basic_algorithm):
            request.resource.basic_algorithm.CopyFrom(
                AutoscalingPolicyBasicAlgorithm.to_proto(self.basic_algorithm)
            )
        else:
            request.resource.ClearField("basic_algorithm")
        if AutoscalingPolicyWorkerConfig.to_proto(self.worker_config):
            request.resource.worker_config.CopyFrom(
                AutoscalingPolicyWorkerConfig.to_proto(self.worker_config)
            )
        else:
            request.resource.ClearField("worker_config")
        if AutoscalingPolicySecondaryWorkerConfig.to_proto(
            self.secondary_worker_config
        ):
            request.resource.secondary_worker_config.CopyFrom(
                AutoscalingPolicySecondaryWorkerConfig.to_proto(
                    self.secondary_worker_config
                )
            )
        else:
            request.resource.ClearField("secondary_worker_config")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        response = stub.DeleteDataprocAlphaAutoscalingPolicy(request)

    @classmethod
    def list(self, project, location, service_account_file=""):
        """Return the policies in *project*/*location*.

        NOTE(review): generated code names the classmethod argument ``self``
        (it receives the class) and shadows the ``list`` builtin.
        """
        stub = autoscaling_policy_pb2_grpc.DataprocAlphaAutoscalingPolicyServiceStub(
            channel.Channel()
        )
        request = autoscaling_policy_pb2.ListDataprocAlphaAutoscalingPolicyRequest()
        request.service_account_file = service_account_file
        request.Project = project

        request.Location = location

        return stub.ListDataprocAlphaAutoscalingPolicy(request).items

    def to_proto(self):
        """Serialize this object into its resource proto message."""
        resource = autoscaling_policy_pb2.DataprocAlphaAutoscalingPolicy()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if AutoscalingPolicyBasicAlgorithm.to_proto(self.basic_algorithm):
            resource.basic_algorithm.CopyFrom(
                AutoscalingPolicyBasicAlgorithm.to_proto(self.basic_algorithm)
            )
        else:
            resource.ClearField("basic_algorithm")
        if AutoscalingPolicyWorkerConfig.to_proto(self.worker_config):
            resource.worker_config.CopyFrom(
                AutoscalingPolicyWorkerConfig.to_proto(self.worker_config)
            )
        else:
            resource.ClearField("worker_config")
        if AutoscalingPolicySecondaryWorkerConfig.to_proto(
            self.secondary_worker_config
        ):
            resource.secondary_worker_config.CopyFrom(
                AutoscalingPolicySecondaryWorkerConfig.to_proto(
                    self.secondary_worker_config
                )
            )
        else:
            resource.ClearField("secondary_worker_config")
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            resource.location = Primitive.to_proto(self.location)
        return resource
class AutoscalingPolicyBasicAlgorithm(object):
    """Value object mirroring the BasicAlgorithm proto message."""

    def __init__(self, yarn_config: dict = None, cooldown_period: str = None):
        self.yarn_config = yarn_config
        self.cooldown_period = cooldown_period

    # NOTE(review): generated code names the classmethod arg `self`, not `cls`.
    @classmethod
    def to_proto(self, resource):
        """Convert *resource* to its proto form; falsy input maps to None."""
        if not resource:
            return None

        res = autoscaling_policy_pb2.DataprocAlphaAutoscalingPolicyBasicAlgorithm()
        if AutoscalingPolicyBasicAlgorithmYarnConfig.to_proto(resource.yarn_config):
            res.yarn_config.CopyFrom(
                AutoscalingPolicyBasicAlgorithmYarnConfig.to_proto(resource.yarn_config)
            )
        else:
            res.ClearField("yarn_config")
        if Primitive.to_proto(resource.cooldown_period):
            res.cooldown_period = Primitive.to_proto(resource.cooldown_period)
        return res

    @classmethod
    def from_proto(self, resource):
        """Build the Python wrapper from a proto message; falsy maps to None."""
        if not resource:
            return None

        return AutoscalingPolicyBasicAlgorithm(
            yarn_config=AutoscalingPolicyBasicAlgorithmYarnConfig.from_proto(
                resource.yarn_config
            ),
            cooldown_period=Primitive.from_proto(resource.cooldown_period),
        )
class AutoscalingPolicyBasicAlgorithmArray(object):
    """Element-wise proto conversion for lists of BasicAlgorithm objects."""

    @classmethod
    def to_proto(self, resources):
        # Falsy input (None / empty list) is passed through unchanged.
        if not resources:
            return resources
        return [AutoscalingPolicyBasicAlgorithm.to_proto(i) for i in resources]

    @classmethod
    def from_proto(self, resources):
        return [AutoscalingPolicyBasicAlgorithm.from_proto(i) for i in resources]
class AutoscalingPolicyBasicAlgorithmYarnConfig(object):
    """Value object mirroring the BasicAlgorithm YARN config proto message."""

    def __init__(
        self,
        graceful_decommission_timeout: str = None,
        scale_up_factor: float = None,
        scale_down_factor: float = None,
        scale_up_min_worker_fraction: float = None,
        scale_down_min_worker_fraction: float = None,
    ):
        self.graceful_decommission_timeout = graceful_decommission_timeout
        self.scale_up_factor = scale_up_factor
        self.scale_down_factor = scale_down_factor
        self.scale_up_min_worker_fraction = scale_up_min_worker_fraction
        self.scale_down_min_worker_fraction = scale_down_min_worker_fraction

    @classmethod
    def to_proto(self, resource):
        """Convert *resource* to its proto form; falsy input maps to None."""
        if not resource:
            return None

        res = (
            autoscaling_policy_pb2.DataprocAlphaAutoscalingPolicyBasicAlgorithmYarnConfig()
        )
        # Only truthy fields are copied; zero/None values are left unset.
        if Primitive.to_proto(resource.graceful_decommission_timeout):
            res.graceful_decommission_timeout = Primitive.to_proto(
                resource.graceful_decommission_timeout
            )
        if Primitive.to_proto(resource.scale_up_factor):
            res.scale_up_factor = Primitive.to_proto(resource.scale_up_factor)
        if Primitive.to_proto(resource.scale_down_factor):
            res.scale_down_factor = Primitive.to_proto(resource.scale_down_factor)
        if Primitive.to_proto(resource.scale_up_min_worker_fraction):
            res.scale_up_min_worker_fraction = Primitive.to_proto(
                resource.scale_up_min_worker_fraction
            )
        if Primitive.to_proto(resource.scale_down_min_worker_fraction):
            res.scale_down_min_worker_fraction = Primitive.to_proto(
                resource.scale_down_min_worker_fraction
            )
        return res

    @classmethod
    def from_proto(self, resource):
        """Build the Python wrapper from a proto message; falsy maps to None."""
        if not resource:
            return None

        return AutoscalingPolicyBasicAlgorithmYarnConfig(
            graceful_decommission_timeout=Primitive.from_proto(
                resource.graceful_decommission_timeout
            ),
            scale_up_factor=Primitive.from_proto(resource.scale_up_factor),
            scale_down_factor=Primitive.from_proto(resource.scale_down_factor),
            scale_up_min_worker_fraction=Primitive.from_proto(
                resource.scale_up_min_worker_fraction
            ),
            scale_down_min_worker_fraction=Primitive.from_proto(
                resource.scale_down_min_worker_fraction
            ),
        )
class AutoscalingPolicyBasicAlgorithmYarnConfigArray(object):
    """List converter for AutoscalingPolicyBasicAlgorithmYarnConfig elements."""

    @classmethod
    def to_proto(self, resources):
        """Convert each element to proto form; falsy input passes through."""
        if not resources:
            return resources
        return [
            AutoscalingPolicyBasicAlgorithmYarnConfig.to_proto(item)
            for item in resources
        ]

    @classmethod
    def from_proto(self, resources):
        """Convert each proto element back into its wrapper object."""
        return [
            AutoscalingPolicyBasicAlgorithmYarnConfig.from_proto(item)
            for item in resources
        ]
class AutoscalingPolicyWorkerConfig(object):
    """Wrapper for primary worker-group autoscaling bounds."""

    def __init__(
        self, min_instances: int = None, max_instances: int = None, weight: int = None
    ):
        self.min_instances = min_instances
        self.max_instances = max_instances
        self.weight = weight

    @classmethod
    def to_proto(self, resource):
        """Serialize *resource* into its proto message (None passes through)."""
        if not resource:
            return None
        res = autoscaling_policy_pb2.DataprocAlphaAutoscalingPolicyWorkerConfig()
        # Only truthy converted values are written, matching proto defaults.
        for field in ("min_instances", "max_instances", "weight"):
            converted = Primitive.to_proto(getattr(resource, field))
            if converted:
                setattr(res, field, converted)
        return res

    @classmethod
    def from_proto(self, resource):
        """Deserialize a proto message into a wrapper (None passes through)."""
        if not resource:
            return None
        return AutoscalingPolicyWorkerConfig(
            min_instances=Primitive.from_proto(resource.min_instances),
            max_instances=Primitive.from_proto(resource.max_instances),
            weight=Primitive.from_proto(resource.weight),
        )
class AutoscalingPolicyWorkerConfigArray(object):
    """List converter for AutoscalingPolicyWorkerConfig elements."""

    @classmethod
    def to_proto(self, resources):
        """Convert each element to proto form; falsy input passes through."""
        if resources:
            return [AutoscalingPolicyWorkerConfig.to_proto(item) for item in resources]
        return resources

    @classmethod
    def from_proto(self, resources):
        """Convert each proto element back into its wrapper object."""
        return [AutoscalingPolicyWorkerConfig.from_proto(item) for item in resources]
class AutoscalingPolicySecondaryWorkerConfig(object):
    """Wrapper for secondary worker-group autoscaling bounds."""

    def __init__(
        self, min_instances: int = None, max_instances: int = None, weight: int = None
    ):
        self.min_instances = min_instances
        self.max_instances = max_instances
        self.weight = weight

    @classmethod
    def to_proto(self, resource):
        """Serialize *resource* into its proto message (None passes through)."""
        if not resource:
            return None
        res = (
            autoscaling_policy_pb2.DataprocAlphaAutoscalingPolicySecondaryWorkerConfig()
        )
        # Only truthy converted values are written, matching proto defaults.
        for field in ("min_instances", "max_instances", "weight"):
            converted = Primitive.to_proto(getattr(resource, field))
            if converted:
                setattr(res, field, converted)
        return res

    @classmethod
    def from_proto(self, resource):
        """Deserialize a proto message into a wrapper (None passes through)."""
        if not resource:
            return None
        kwargs = {
            field: Primitive.from_proto(getattr(resource, field))
            for field in ("min_instances", "max_instances", "weight")
        }
        return AutoscalingPolicySecondaryWorkerConfig(**kwargs)
class AutoscalingPolicySecondaryWorkerConfigArray(object):
    """List converter for AutoscalingPolicySecondaryWorkerConfig elements."""

    @classmethod
    def to_proto(self, resources):
        """Convert each element to proto form; falsy input passes through."""
        if resources:
            return [
                AutoscalingPolicySecondaryWorkerConfig.to_proto(item)
                for item in resources
            ]
        return resources

    @classmethod
    def from_proto(self, resources):
        """Convert each proto element back into its wrapper object."""
        return [
            AutoscalingPolicySecondaryWorkerConfig.from_proto(item)
            for item in resources
        ]
class Primitive(object):
    """Converter for primitive proto fields."""

    @classmethod
    def to_proto(self, s):
        """Return *s*, coercing falsy values to the proto string default ""."""
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        """Primitive proto values need no conversion; return unchanged."""
        return s
|
{
"content_hash": "9e7a16676eabed0e645090884a02d11c",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 91,
"avg_line_length": 37.26767676767677,
"alnum_prop": 0.6503591272530154,
"repo_name": "GoogleCloudPlatform/declarative-resource-client-library",
"id": "29c3ce0cba3297742ffa1feac1f8620b14961acb",
"size": "15357",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/services/dataproc/alpha/autoscaling_policy.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2560"
},
{
"name": "C++",
"bytes": "3947"
},
{
"name": "Go",
"bytes": "116489733"
},
{
"name": "Python",
"bytes": "17240408"
},
{
"name": "Starlark",
"bytes": "319733"
}
],
"symlink_target": ""
}
|
"""
SendGrid notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.sendgrid/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import (CONF_API_KEY, CONF_SENDER, CONF_RECIPIENT)
import homeassistant.helpers.config_validation as cv
# Pinned client library, installed on demand by Home Assistant.
REQUIREMENTS = ['sendgrid==3.6.3']
_LOGGER = logging.getLogger(__name__)
# All three keys are mandatory; sender/recipient must be valid e-mail addresses.
# pylint: disable=no-value-for-parameter
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_SENDER): vol.Email(),
    vol.Required(CONF_RECIPIENT): vol.Email(),
})
def get_service(hass, config):
    """Get the SendGrid notification service."""
    return SendgridNotificationService(
        config.get(CONF_API_KEY),
        config.get(CONF_SENDER),
        config.get(CONF_RECIPIENT),
    )
class SendgridNotificationService(BaseNotificationService):
    """Implementation of the notification service for email via SendGrid."""

    def __init__(self, api_key, sender, recipient):
        """Initialize the service with API key plus fixed sender/recipient."""
        from sendgrid import SendGridAPIClient

        self.api_key = api_key
        self.sender = sender
        self.recipient = recipient
        self._sg = SendGridAPIClient(apikey=self.api_key)

    def send_message(self, message='', **kwargs):
        """Send an email to a user via SendGrid.

        ``message`` becomes the plain-text body; an optional title kwarg
        becomes the subject (falling back to the platform default).
        """
        subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
        data = {
            "personalizations": [
                {
                    "to": [
                        {
                            "email": self.recipient
                        }
                    ],
                    "subject": subject
                }
            ],
            "from": {
                "email": self.sender
            },
            "content": [
                {
                    "type": "text/plain",
                    "value": message
                }
            ]
        }
        response = self._sg.client.mail.send.post(request_body=data)
        # BUG FIX: `is not 202` compared object identity with an int literal,
        # which only "worked" by CPython's small-int interning accident.
        # Use value inequality; 202 Accepted is SendGrid's success status.
        if response.status_code != 202:
            _LOGGER.error('Unable to send notification with SendGrid')
|
{
"content_hash": "ffc685a63b0669ac006b71c8ea873cd9",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 30.03846153846154,
"alnum_prop": 0.5928297055057619,
"repo_name": "ma314smith/home-assistant",
"id": "54f0f4b8cb35335df97aaddc7b0e439372515c31",
"size": "2343",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/notify/sendgrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1436909"
},
{
"name": "Python",
"bytes": "4511947"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "4460"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
import os
from setuptools import setup, find_packages
# http://peak.telecommunity.com/DevCenter/setuptools#developer-s-guide
# Get version info
__version__ = None
__release__ = None
# Read the version file through a context manager so the file handle is
# closed deterministically (the previous bare open() leaked it until GC).
# Executing it defines __version__ and __release__ in this namespace.
with open('kajiki/version.py') as _version_file:
    exec(_version_file.read())
def content_of(*files):
    """Return the UTF-8 text of *files* joined with newlines.

    Paths are resolved relative to this file's directory so the build works
    regardless of the current working directory.
    """
    import codecs
    here = os.path.abspath(os.path.dirname(__file__))
    content = []
    for name in files:
        # Call codecs.open directly instead of shadowing the builtin `open`
        # with a lambda (builtin shadowing + E731 in the original).
        with codecs.open(os.path.join(here, name), encoding='utf-8') as stream:
            content.append(stream.read())
    return '\n'.join(content)
setup(name='Kajiki',
version=__release__,
description='Fast XML-based template engine with Genshi syntax and '
'Jinja blocks',
long_description=content_of('README.rst', 'CHANGES.rst'),
classifiers=[ # http://pypi.python.org/pypi?:action=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: XML',
],
keywords='templating engine template genshi jinja jinja2 mako '
'chameleon xml html xhtml',
author='Rick Copeland',
author_email='rick446@usa.net',
maintainer='Nando Florestan',
maintainer_email='nandoflorestan@gmail.com',
url='https://github.com/nandoflorestan/kajiki',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=['nine'],
test_suite='kajiki.tests',
entry_points="""
[babel.extractors]
kajiki = kajiki.i18n:extract
[python.templating.engines]
kajiki = kajiki.integration.turbogears1:XMLTemplateEnginePlugin
""",
)
|
{
"content_hash": "f34495ff192bfb52c0b6fd81ed0e5cc8",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 75,
"avg_line_length": 38.53731343283582,
"alnum_prop": 0.6134779240898528,
"repo_name": "ollyc/kajiki",
"id": "1c9f22d5151e512a30a087aa6b21ad5b3d2e19e4",
"size": "2629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "154"
},
{
"name": "Genshi",
"bytes": "949"
},
{
"name": "HTML",
"bytes": "7851"
},
{
"name": "Myghty",
"bytes": "774"
},
{
"name": "Python",
"bytes": "220835"
},
{
"name": "Smalltalk",
"bytes": "662"
}
],
"symlink_target": ""
}
|
"""Define tests for endpoints."""
|
{
"content_hash": "03e129129f78e5ff7fae5f97b56640b7",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.6764705882352942,
"repo_name": "bachya/regenmaschine",
"id": "45d4fecd64298dd7a27c41c29bc4ca464a012d60",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/endpoints/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116689"
},
{
"name": "Shell",
"bytes": "1472"
}
],
"symlink_target": ""
}
|
class Solution(object):
    def killProcess(self, pid, ppid, kill):
        """
        :type pid: List[int]
        :type ppid: List[int]
        :type kill: int
        :rtype: List[int]

        BFS over the parent -> children adjacency map: killing a process
        kills its entire subtree.  Returns the root first, then level order.
        """
        # BUG FIX: `collections` was used but never imported in this module,
        # raising NameError at runtime; import locally to stay self-contained.
        from collections import defaultdict

        children_of = defaultdict(list)
        for child, parent in zip(pid, ppid):
            children_of[parent].append(child)

        killed = [kill]
        level = children_of[kill]
        while level:
            next_level = []
            for proc in level:
                next_level.extend(children_of[proc])
                killed.append(proc)
            level = next_level
        return killed
|
{
"content_hash": "895e36045c817e8259d7eee4bde020b6",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 45,
"avg_line_length": 28,
"alnum_prop": 0.4714285714285714,
"repo_name": "Mlieou/lXXtcode",
"id": "676d219b731d7bf54fb6da3fa465af4451dc80ab",
"size": "560",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "leetcode/python/ex_582.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "44654"
},
{
"name": "Java",
"bytes": "46838"
},
{
"name": "Python",
"bytes": "186767"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
}
|
import coba
# Exercise the `coba` module's global-state helpers and print the results.
# NOTE(review): addGlobal/powerGlobal semantics are defined inside the coba
# module and are not visible here — presumably they mutate/read shared state.
print(coba.addGlobal(5))
print(coba.powerGlobal(2))
print(coba.addGlobal(12345))
print(coba.powerGlobal(-1))
|
{
"content_hash": "c92226cba267be2ca876d999aafe255c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 20.166666666666668,
"alnum_prop": 0.7768595041322314,
"repo_name": "yahya09/liga-badr",
"id": "9fe801332cc30903478f4763357236cbc5b74a91",
"size": "121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14912"
}
],
"symlink_target": ""
}
|
"""drop_user_and_chart
Revision ID: cf5dc11e79ad
Revises: 41f5f12752f8
Create Date: 2019-01-24 15:30:35.834740
"""
from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.engine.reflection import Inspector
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cf5dc11e79ad'
down_revision = '41f5f12752f8'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the legacy ``users`` and ``chart`` tables.

    A lingering ``known_event`` foreign key to ``users`` (from an earlier,
    partially-removed feature) must be dropped first.
    """
    # We previously had a KnownEvent's table, but we deleted the table without
    # a down migration to remove it (so we didn't delete anyone's data if they
    # were happening to use the feature).
    #
    # But before we can delete the users table we need to drop the FK
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    if 'known_event' in inspector.get_table_names():
        op.drop_constraint('known_event_user_id_fkey', 'known_event')
    # chart references users (see the FK recreated in downgrade()), so it
    # must be dropped before users.
    op.drop_table("chart")
    op.drop_table("users")
def downgrade():
    """Recreate the ``users`` and ``chart`` tables dropped by ``upgrade``."""
    conn = op.get_bind()
    op.create_table(
        'users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=250), nullable=True),
        sa.Column('email', sa.String(length=500), nullable=True),
        sa.Column('password', sa.String(255)),
        sa.Column('superuser', sa.Boolean(), default=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('username')
    )
    op.create_table(
        'chart',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('label', sa.String(length=200), nullable=True),
        sa.Column('conn_id', sa.String(length=250), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('chart_type', sa.String(length=100), nullable=True),
        sa.Column('sql_layout', sa.String(length=50), nullable=True),
        sa.Column('sql', sa.Text(), nullable=True),
        sa.Column('y_log_scale', sa.Boolean(), nullable=True),
        sa.Column('show_datatable', sa.Boolean(), nullable=True),
        sa.Column('show_sql', sa.Boolean(), nullable=True),
        sa.Column('height', sa.Integer(), nullable=True),
        sa.Column('default_params', sa.String(length=5000), nullable=True),
        sa.Column('x_is_date', sa.Boolean(), nullable=True),
        sa.Column('iteration_no', sa.Integer(), nullable=True),
        sa.Column('last_modified', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Upgrade chart.last_modified to a timezone-aware timestamp where the
    # dialect supports it; sqlite and mssql keep the plain DateTime above.
    if conn.dialect.name == 'mysql':
        conn.execute("SET time_zone = '+00:00'")
        op.alter_column(table_name='chart', column_name='last_modified', type_=mysql.TIMESTAMP(fsp=6))
    else:
        if conn.dialect.name in ('sqlite', 'mssql'):
            return
        if conn.dialect.name == 'postgresql':
            conn.execute("set timezone=UTC")
        op.alter_column(table_name='chart', column_name='last_modified', type_=sa.TIMESTAMP(timezone=True))
|
{
"content_hash": "dd68323b80b26b960547ee76c1f5dc37",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 107,
"avg_line_length": 35.795180722891565,
"alnum_prop": 0.6405250757320767,
"repo_name": "r39132/airflow",
"id": "52f20681697682de296b463c308cb5cb7e3f7fa7",
"size": "3757",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/migrations/versions/cf5dc11e79ad_drop_user_and_chart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Package metadata for the restpi distribution.
setup(
    name='restpi',
    version='0.1',
    description='Nifty Library written in Python for controlling Raspberry Pi based devices remotely.',
    url='http://github.com/andela-cnnadi/restpi',
    author='Chidiebere Nnadi',
    author_email='chidiebere.nnadi@gmail.com',
    license='MIT',
    packages=['restpi'],
    # Runtime dependencies: Flask REST layer, Raspberry Pi GPIO access,
    # CLI argument parsing (docopt) and YAML config loading.
    install_requires=[
        'flask',
        'flask_restful',
        'RPi.GPIO',
        'docopt',
        'PyYAML'
    ],
    test_suite='nose.collector',
    tests_require=['nose'],
    scripts=['bin/restpi'],
    zip_safe=False)
|
{
"content_hash": "2cae64ef5042dbcc904a61bf576fff7f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 103,
"avg_line_length": 26.681818181818183,
"alnum_prop": 0.616695059625213,
"repo_name": "andela-cnnadi/restpi",
"id": "943573b06bade825137d5667a2b20394204a1712",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7323"
}
],
"symlink_target": ""
}
|
""" This module describes system wide enums.
"""
class Enum(object):
    """Base class for enums.

    Each instance draws a unique, monotonically increasing ``value`` from a
    counter shared across ALL subclasses, so any two distinct instances
    compare unequal.
    """

    __global_increment = 1  # shared counter (name-mangled to _Enum__global_increment)

    def __init__(self, for_str):
        """Initialize base class for enumerates.

        :param for_str: return value for built-in str() function
        """
        self.value = Enum.__global_increment
        self._str = for_str
        Enum.__global_increment += 1

    def __eq__(self, other):
        # BUG FIX: the original dereferenced other.value unconditionally, so
        # comparing against any non-Enum object raised AttributeError.
        # Returning NotImplemented lets Python fall back to its default
        # handling (identity), so such comparisons are simply False.
        if not isinstance(other, Enum):
            return NotImplemented
        return self.value == other.value

    def __str__(self):
        return self._str

    def __hash__(self):
        # `value` is unique per instance, so it is a valid hash.
        return self.value
class Plane(Enum):
    """Enum for choosing the working plane for circular interpolation."""


PLANE_XY = Plane("XY")
PLANE_ZX = Plane("ZX")
PLANE_YZ = Plane("YZ")
class RotationDirection(Enum):
    """Enum for choosing rotation direction (clockwise / counter-clockwise)."""


CW = RotationDirection("CW")
CCW = RotationDirection("CCW")
class Heaters(Enum):
    """Enum for selecting a heater: the extruder or the bed."""


HEATER_EXTRUDER = Heaters("extruder")
HEATER_BED = Heaters("bed")
|
{
"content_hash": "652781819633b13cd033735cff655987",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 64,
"avg_line_length": 19.69811320754717,
"alnum_prop": 0.5967432950191571,
"repo_name": "Nikolay-Kha/PyCNC",
"id": "31a1833fc1a35649c7ce17753e87789d9e021783",
"size": "1044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cnc/enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176955"
},
{
"name": "Shell",
"bytes": "2289"
}
],
"symlink_target": ""
}
|
import logging
def classifiedCount(request):
    """Return the classified-trip count for ``request['user']`` — stub that
    always reports zero.  (Python 2 module: note the print statement.)"""
    print "testclient.classifiedCount called for user %s!" % request['user']
    return {'count': 0}
# These are copy/pasted from our first client, the carshare study
def getSectionFilter(uuid):
    """Return the mongo query filter list for sections shown to this user.

    Empty list (no filtering) for the first two weeks after the user's
    profile was last updated; afterwards, only sections whose auto-confirmed
    probability is below 0.9.
    """
    from dao.user import User
    from datetime import datetime, timedelta
    logging.info("testclient.getSectionFilter called for user %s" % uuid)
    # If this is the first two weeks, show everything
    user = User.fromUUID(uuid)
    # Note that this is the last time that the profile was updated. So if the
    # user goes to the "Auth" screen and signs in again, it will be updated, and
    # we will reset the clock. If this is not acceptable, we need to ensure that
    # we have a create ts that is never updated
    updateTS = user.getUpdateTS()
    if (datetime.now() - updateTS) < timedelta(days = 14):
        # In the first two weeks, don't do any filtering
        return []
    else:
        return [{'test_auto_confirmed.prob': {'$lt': 0.9}}]
def clientSpecificSetters(uuid, sectionId, predictedModeMap):
    """Build a mongo ``$set`` document recording the most probable mode.

    ``predictedModeMap`` maps mode name -> probability; the argmax is stored
    under ``test_auto_confirmed`` as a mode index plus its probability.
    (Python 2 module: uses dict.iteritems.)
    """
    from main import common
    from get_database import get_mode_db
    maxMode = None
    maxProb = 0
    # argmax over the predicted mode distribution
    for mode, prob in predictedModeMap.iteritems():
        if prob > maxProb:
            maxProb = prob
            maxMode = mode
    return {"$set":
            {"test_auto_confirmed": {
                "mode": common.convertModeNameToIndex(get_mode_db(), maxMode),
                "prob": maxProb,
            }
            }
          }
def getClientConfirmedModeField():
    """Return the mongo field path holding this client's auto-confirmed mode."""
    return "test_auto_confirmed.mode"
|
{
"content_hash": "ac9777a30b55f3c2944c22ccc2966877",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 33.75555555555555,
"alnum_prop": 0.6747860434496379,
"repo_name": "sdsingh/e-mission-server",
"id": "f36b46f146563929953e1391387e8d85dc517994",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CFC_DataCollector/clients/testclient/testclient.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12835"
},
{
"name": "JavaScript",
"bytes": "1586288"
},
{
"name": "Python",
"bytes": "682057"
}
],
"symlink_target": ""
}
|
from __future__ import division
"""Toyplot has been implemented to work equally well in Python 2 and Python 3,
without the use of code-modification tools like `2to3`. The
`toyplot.compatibility` module contains code to facilitate this.
"""
# Base string type: `basestring` on Python 2, `str` on Python 3.
# Narrowed the bare `except:` clauses to NameError — the only exception
# these name probes can raise — so unrelated errors are never swallowed.
try:
    string_type = basestring
except NameError:  # pragma: no cover
    string_type = str

try:
    basestring  # only defined on Python 2
    unicode_type = unicode
except NameError:  # pragma: no cover
    unicode_type = str

try:
    bytes_type = bytes
except NameError:  # pragma: no cover
    bytes_type = str
|
{
"content_hash": "39463ca53fb3365a27aef5a889e3ae78",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 78,
"avg_line_length": 23.952380952380953,
"alnum_prop": 0.7017892644135189,
"repo_name": "cmorgan/toyplot",
"id": "0b5a257cb52d7399c0bbaf9528057758a60640e0",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toyplot/compatibility.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "23"
},
{
"name": "Cucumber",
"bytes": "29282"
},
{
"name": "HTML",
"bytes": "124998"
},
{
"name": "Python",
"bytes": "741104"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ttpy'
copyright = '2013, I. Oseledets'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agogo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ttpydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ttpy.tex', 'ttpy Documentation',
'I. Oseledets', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ttpy', 'ttpy Documentation',
['I. Oseledets'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ttpy', 'ttpy Documentation',
'I. Oseledets', 'ttpy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "8d3aa58d78a858e5758ea5baec863896",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 145,
"avg_line_length": 32.35744680851064,
"alnum_prop": 0.7001578116780641,
"repo_name": "uranix/ttpy",
"id": "736fa4ff3104ae14abe151213e3d0d8c46d059de",
"size": "8019",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "tt/doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "FORTRAN",
"bytes": "19274"
},
{
"name": "Makefile",
"bytes": "6754"
},
{
"name": "Python",
"bytes": "203868"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
}
|
from google.cloud import retail_v2
def sample_add_catalog_attribute():
    """Synchronously add a catalog attribute via the Retail CatalogService.

    Generated sample: "key_value" and "attributes_config_value" are
    placeholders a real caller replaces with actual resource names.
    """
    # Create a client
    client = retail_v2.CatalogServiceClient()
    # Initialize request argument(s)
    catalog_attribute = retail_v2.CatalogAttribute()
    catalog_attribute.key = "key_value"
    request = retail_v2.AddCatalogAttributeRequest(
        attributes_config="attributes_config_value",
        catalog_attribute=catalog_attribute,
    )
    # Make the request
    response = client.add_catalog_attribute(request=request)
    # Handle the response
    print(response)
# [END retail_v2_generated_CatalogService_AddCatalogAttribute_sync]
|
{
"content_hash": "1b8fcb42009526f8d3a62c328f7f0dc8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 27.391304347826086,
"alnum_prop": 0.7222222222222222,
"repo_name": "googleapis/python-retail",
"id": "858c3e3114b441f4033a350b2ea971265bfbafed",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/retail_v2_generated_catalog_service_add_catalog_attribute_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
}
|
"""
Tests for F() query expression syntax.
"""
from django.db import models
class Employee(models.Model):
    """Simple person model referenced by ``Company`` FKs in the F() tests."""
    firstname = models.CharField(max_length=50)
    lastname = models.CharField(max_length=50)

    def __unicode__(self):
        # Python 2-era display hook: "firstname lastname".
        return u'%s %s' % (self.firstname, self.lastname)
class Company(models.Model):
    """Company with headcounts and two FKs to ``Employee`` for F() tests."""
    name = models.CharField(max_length=100)
    num_employees = models.PositiveIntegerField()
    num_chairs = models.PositiveIntegerField()
    # Distinct related_names keep the two Employee FKs from clashing.
    ceo = models.ForeignKey(
        Employee,
        related_name='company_ceo_set')
    # Nullable so companies can be created before a contact is assigned.
    point_of_contact = models.ForeignKey(
        Employee,
        related_name='company_point_of_contact_set',
        null=True)

    def __unicode__(self):
        return self.name
__test__ = {'API_TESTS': """
>>> from django.db.models import F
>>> Company(name='Example Inc.', num_employees=2300, num_chairs=5,
... ceo=Employee.objects.create(firstname='Joe', lastname='Smith')).save()
>>> Company(name='Foobar Ltd.', num_employees=3, num_chairs=3,
... ceo=Employee.objects.create(firstname='Frank', lastname='Meyer')).save()
>>> Company(name='Test GmbH', num_employees=32, num_chairs=1,
... ceo=Employee.objects.create(firstname='Max', lastname='Mustermann')).save()
# We can filter for companies where the number of employees is greater than the
# number of chairs.
>>> Company.objects.filter(num_employees__gt=F('num_chairs'))
[<Company: Example Inc.>, <Company: Test GmbH>]
# The relation of a foreign key can become copied over to an other foreign key.
>>> Company.objects.update(point_of_contact=F('ceo'))
3
>>> [c.point_of_contact for c in Company.objects.all()]
[<Employee: Joe Smith>, <Employee: Frank Meyer>, <Employee: Max Mustermann>]
>>> c = Company.objects.all()[0]
>>> c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
>>> c.save()
# F Expressions can also span joins
>>> Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')).distinct()
[<Company: Foobar Ltd.>, <Company: Test GmbH>]
>>> _ = Company.objects.exclude(ceo__firstname=F('point_of_contact__firstname')).update(name='foo')
>>> Company.objects.exclude(ceo__firstname=F('point_of_contact__firstname')).get().name
u'foo'
>>> _ = Company.objects.exclude(ceo__firstname=F('point_of_contact__firstname')).update(name=F('point_of_contact__lastname'))
Traceback (most recent call last):
...
FieldError: Joined field references are not permitted in this query
"""}
|
{
"content_hash": "5337f93d962304e9269cc21764b872a6",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 125,
"avg_line_length": 34.70422535211268,
"alnum_prop": 0.6866883116883117,
"repo_name": "chewable/django",
"id": "4043f5ec341515e246c074f6d4f03191c7fe57be",
"size": "2464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/modeltests/expressions/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys
sys.dont_write_bytecode = True
sys.path.append( 'devices' )
sys.path.append( 'libs' )
import getopt
# interface handler...
#from libPyArdyApp import PyArdyApp
import libPyArdyApp
# connection
from libArdySer import ArdySer
# support libraries
import GS_Timing
from GS_Timing import delay,micros,millis
from random import random,randint
from libGamma import LlamaGamma
# devices
class I2cScan( libPyArdyApp.PyArdyApp ):
    """Minimal PyArdyApp subclass; scaffold for an I2C bus scanner."""
################################################################################
# main run
    def run( self, ardy ):
        # Placeholder body (Python 2 print statement); the actual scan logic
        # talking to `ardy` (the ArdySer connection) would go here.
        print "..."
################################################################################
# Script entry point -- mirror this guard in your own app as well.
if __name__ == "__main__":
    I2cScan().main( ['-p'] )
|
{
"content_hash": "a304366befe89b738f16de0ae928ee9b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 80,
"avg_line_length": 21.25,
"alnum_prop": 0.5542483660130719,
"repo_name": "BleuLlama/LlamaPyArdy",
"id": "f9b02993f10a8f98101d3af4c31223b5201c532a",
"size": "1069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/I2CScan.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "21689"
},
{
"name": "C",
"bytes": "1106"
},
{
"name": "Makefile",
"bytes": "471"
},
{
"name": "Python",
"bytes": "48976"
}
],
"symlink_target": ""
}
|
$license
try:
sqlite_ok = True
import cyclone.sqlite
except ImportError as sqlite_err:
sqlite_ok = False
import cyclone.redis
from twisted.enterprise import adbapi
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log
class DatabaseMixin(object):
    """Mixin exposing shared, lazily-configured database connection pools.

    Connections are stored as *class* attributes, so every handler mixing
    this in shares the same pools.  ``setup()`` must be called once with the
    parsed configuration before any of them are used.
    """

    # Class-level singletons, populated by setup().
    mysql = None
    redis = None
    sqlite = None

    @classmethod
    def setup(cls, conf):
        # SQLite: only usable if cyclone.sqlite imported successfully at
        # module load time (sqlite_ok / sqlite_err are set up there).
        if "sqlite_settings" in conf:
            if sqlite_ok:
                DatabaseMixin.sqlite = \
                    cyclone.sqlite.InlineSQLite(conf["sqlite_settings"].database)
            else:
                log.err("SQLite is currently disabled: %s" % sqlite_err)
        # Redis: prefer a UNIX socket when configured, otherwise TCP.
        if "redis_settings" in conf:
            if conf["redis_settings"].get("unixsocket"):
                DatabaseMixin.redis = \
                    cyclone.redis.lazyUnixConnectionPool(
                        conf["redis_settings"].unixsocket,
                        conf["redis_settings"].dbid,
                        conf["redis_settings"].poolsize)
            else:
                DatabaseMixin.redis = \
                    cyclone.redis.lazyConnectionPool(
                        conf["redis_settings"].host,
                        conf["redis_settings"].port,
                        conf["redis_settings"].dbid,
                        conf["redis_settings"].poolsize)
        # MySQL: twisted adbapi pool with auto-reconnect.
        if "mysql_settings" in conf:
            DatabaseMixin.mysql = \
                adbapi.ConnectionPool("MySQLdb",
                                      host=conf["mysql_settings"].host,
                                      port=conf["mysql_settings"].port,
                                      db=conf["mysql_settings"].database,
                                      user=conf["mysql_settings"].username,
                                      passwd=conf["mysql_settings"].password,
                                      cp_min=1,
                                      cp_max=conf["mysql_settings"].poolsize,
                                      cp_reconnect=True,
                                      cp_noisy=conf["mysql_settings"].debug)
            # Ping MySQL to avoid timeouts. On timeouts, the first query
            # responds with the following error, before it reconnects:
            #   mysql.Error: (2006, 'MySQL server has gone away')
            #
            # There's no way to differentiate this from the server shutting
            # down and write() failing. To avoid the timeout, we ping.
            @defer.inlineCallbacks
            def _ping_mysql():
                try:
                    yield cls.mysql.runQuery("select 1")
                except Exception as e:
                    log.msg("MySQL ping error:", e)
                else:
                    if conf["mysql_settings"].debug:
                        log.msg("MySQL ping: OK")
                # Reschedule itself: makes _ping_mysql a self-perpetuating
                # periodic keepalive task.
                reactor.callLater(conf["mysql_settings"].ping, _ping_mysql)
            # A ping interval <= 1 disables the keepalive loop entirely.
            if conf["mysql_settings"].ping > 1:
                _ping_mysql()
|
{
"content_hash": "78feac73f27200f01174eb11ebfca059",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 38.55128205128205,
"alnum_prop": 0.4968407050216162,
"repo_name": "fiorix/cyclone",
"id": "826c4a25c75508a8c0f29fa129195495c6ddc0c6",
"size": "3025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appskel/default/modname/storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2386"
},
{
"name": "HTML",
"bytes": "32384"
},
{
"name": "Makefile",
"bytes": "642"
},
{
"name": "Python",
"bytes": "518718"
},
{
"name": "Shell",
"bytes": "9517"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, unicode_literals
from attr import attrib, attrs
from attr.validators import provides
from automat import MethodicalMachine
from zope.interface import implementer
from . import _interfaces
@attrs
@implementer(_interfaces.ILister)
class Lister(object):
    """Automat state machine that fetches the nameplate list from the
    rendezvous server and delivers it to the Input machine.
    """

    # attrs-injected constructor argument: timing/instrumentation provider.
    _timing = attrib(validator=provides(_interfaces.ITiming))
    m = MethodicalMachine()
    set_trace = getattr(m, "_setTrace",
                        lambda self, f: None) # pragma: no cover

    def wire(self, rendezvous_connector, input):
        # Late-bound collaborators (set after construction to avoid cycles).
        self._RC = _interfaces.IRendezvousConnector(rendezvous_connector)
        self._I = _interfaces.IInput(input)

    # Ideally, each API request would spawn a new "list_nameplates" message
    # to the server, so the response would be maximally fresh, but that would
    # require correlating server request+response messages, and the protocol
    # is intended to be less stateful than that. So we offer a weaker
    # freshness property: if no server requests are in flight, then a new API
    # request will provoke a new server request, and the result will be
    # fresh. But if a server request is already in flight when a second API
    # request arrives, both requests will be satisfied by the same response.

    # States: S0/S1 = idle / wanting a list; A/B = disconnected / connected.
    @m.state(initial=True)
    def S0A_idle_disconnected(self):
        pass # pragma: no cover
    @m.state()
    def S1A_wanting_disconnected(self):
        pass # pragma: no cover
    @m.state()
    def S0B_idle_connected(self):
        pass # pragma: no cover
    @m.state()
    def S1B_wanting_connected(self):
        pass # pragma: no cover

    # Inputs: events fed in from the connector, the API, and the server.
    @m.input()
    def connected(self):
        pass
    @m.input()
    def lost(self):
        pass
    @m.input()
    def refresh(self):
        pass
    @m.input()
    def rx_nameplates(self, all_nameplates):
        pass

    # Outputs: side effects executed on transitions.
    @m.output()
    def RC_tx_list(self):
        self._RC.tx_list()
    @m.output()
    def I_got_nameplates(self, all_nameplates):
        # We get a set of nameplate ids. There may be more attributes in the
        # future: change RendezvousConnector._response_handle_nameplates to
        # get them
        self._I.got_nameplates(all_nameplates)

    # Transition table: a refresh while connected (re)sends tx_list; incoming
    # nameplates always return the machine to the idle-connected state.
    S0A_idle_disconnected.upon(connected, enter=S0B_idle_connected, outputs=[])
    S0B_idle_connected.upon(lost, enter=S0A_idle_disconnected, outputs=[])
    S0A_idle_disconnected.upon(
        refresh, enter=S1A_wanting_disconnected, outputs=[])
    S1A_wanting_disconnected.upon(
        refresh, enter=S1A_wanting_disconnected, outputs=[])
    S1A_wanting_disconnected.upon(
        connected, enter=S1B_wanting_connected, outputs=[RC_tx_list])
    S0B_idle_connected.upon(
        refresh, enter=S1B_wanting_connected, outputs=[RC_tx_list])
    S0B_idle_connected.upon(
        rx_nameplates, enter=S0B_idle_connected, outputs=[I_got_nameplates])
    S1B_wanting_connected.upon(
        lost, enter=S1A_wanting_disconnected, outputs=[])
    S1B_wanting_connected.upon(
        refresh, enter=S1B_wanting_connected, outputs=[RC_tx_list])
    S1B_wanting_connected.upon(
        rx_nameplates, enter=S0B_idle_connected, outputs=[I_got_nameplates])
|
{
"content_hash": "afd12211c9dbcd92e2641f86d301189c",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 34.10752688172043,
"alnum_prop": 0.674968474148802,
"repo_name": "warner/magic-wormhole",
"id": "de085fac4039a1b8094df0605ae7f71f1feb1e43",
"size": "3172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wormhole/_lister.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "1229"
},
{
"name": "HTML",
"bytes": "478"
},
{
"name": "JavaScript",
"bytes": "38865"
},
{
"name": "Python",
"bytes": "867520"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
}
|
import capnp
import capnp.lib.capnp as cpl
capnp.remove_import_hook()
fs_capnp = capnp.load('schemas/fs.capnp')
def create_ffi(capnp_file):
    """Build a ``GenericFFI`` class exposing one ``Server`` subclass per
    interface found in the given Cap'n Proto schema file.

    :param capnp_file: path to a ``.capnp`` schema file.
    :returns: a class whose attributes are the generated interface types.
    """
    GenericFFI = type('GenericFFI', (object,), {})
    capnp_schema = capnp.load(capnp_file)
    for iface_name in dir(capnp_schema):
        iface = getattr(capnp_schema, iface_name)
        if not isinstance(iface, cpl._InterfaceModule):
            continue
        # type() requires a str name; the original passed the interface
        # module object itself here (and to setattr() below).
        InterfaceType = type(iface_name, (iface.Server,), {})
        # Attach the implementations for the ffi functions, we know what they are.
        for method_name, method in iface.schema.methods.items():
            # Bind the loop variables as defaults: otherwise every generated
            # method would close over the *last* (method_name, method) pair
            # once the loop finishes (Python late-binding closures).
            def ffi_method(self, _method=method, _method_name=method_name,
                           **kwargs):
                for field_name, field in _method.param_type.fields.items():
                    field = field.proto.slot
                    if field.hasExplicitDefault:
                        if field_name not in kwargs:
                            kwargs[field_name] = field.defaultValue
                    try:
                        # Check by *name*: the original asserted on the proto
                        # slot object, which can never be a kwargs key.
                        assert field_name in kwargs
                    except Exception:
                        raise KeyError('Missing required field "{}" to function "{}" with no default'.format(field_name, _method_name))
                # insert call to ffi library here and parse return type but what is it?
            ffi_method.__name__ = method_name
            setattr(InterfaceType, method_name, ffi_method)
        setattr(GenericFFI, iface_name, InterfaceType)
    return GenericFFI
# Smoke test: build the FFI class for the fs schema and check that the
# expected interface and method attributes were attached.
fs = create_ffi('schemas/fs.capnp')
assert hasattr(fs, 'Node')
assert hasattr(fs.Node, 'isDirectory')
|
{
"content_hash": "2a5259c617449a80ce016655caf08251",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 134,
"avg_line_length": 42.7027027027027,
"alnum_prop": 0.6056962025316456,
"repo_name": "waynenilsen/capnp-ffi",
"id": "e23b82739a10c615aa4121312bb52461952f8e70",
"size": "1580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-capnp-ffi/fs_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cap'n Proto",
"bytes": "767"
},
{
"name": "Python",
"bytes": "5143"
},
{
"name": "Rust",
"bytes": "6175"
}
],
"symlink_target": ""
}
|
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.operators.sensors import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class GoogleCloudStorageObjectSensor(BaseSensorOperator):
    """
    Checks for the existence of a file in Google Cloud Storage.
    """
    template_fields = ('bucket', 'object')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
            self,
            bucket,
            object,  # pylint:disable=redefined-builtin
            google_cloud_conn_id='google_cloud_storage_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        Create a new GoogleCloudStorageObjectSensor.

        :param bucket: The Google cloud storage bucket where the object is.
        :type bucket: string
        :param object: The name of the object to check in the Google cloud
            storage bucket.
        :type object: string
        :param google_cloud_conn_id: The connection ID to use when
            connecting to Google cloud storage.
        :type google_cloud_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(GoogleCloudStorageObjectSensor, self).__init__(*args, **kwargs)
        self.bucket = bucket
        self.object = object
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to

    def poke(self, context):
        # True (sensor succeeds) as soon as the object exists in the bucket.
        self.log.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to)
        return hook.exists(self.bucket, self.object)
def ts_function(context):
    """
    Default ``ts_func`` for the GoogleCloudStorageObjectUpdatedSensor: the
    object counts as updated once it changes after
    ``execution_date + schedule_interval``.
    """
    schedule_delta = context['dag'].schedule_interval
    return context['execution_date'] + schedule_delta
class GoogleCloudStorageObjectUpdatedSensor(BaseSensorOperator):
    """
    Checks if an object is updated in Google Cloud Storage.
    """
    template_fields = ('bucket', 'object')
    # NOTE(review): '.sql' as a templated extension looks copy-pasted from a
    # SQL operator -- confirm it is intentional for a GCS sensor.
    template_ext = ('.sql',)
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
            self,
            bucket,
            object,  # pylint:disable=redefined-builtin
            ts_func=ts_function,
            google_cloud_conn_id='google_cloud_storage_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        Create a new GoogleCloudStorageObjectUpdatedSensor.

        :param bucket: The Google cloud storage bucket where the object is.
        :type bucket: string
        :param object: The name of the object to download in the Google cloud
            storage bucket.
        :type object: string
        :param ts_func: Callback for defining the update condition. The default callback
            returns execution_date + schedule_interval. The callback takes the context
            as parameter.
        :type ts_func: function
        :param google_cloud_conn_id: The connection ID to use when
            connecting to Google cloud storage.
        :type google_cloud_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request must have domain-wide
            delegation enabled.
        :type delegate_to: string
        """
        super(GoogleCloudStorageObjectUpdatedSensor, self).__init__(*args, **kwargs)
        self.bucket = bucket
        self.object = object
        self.ts_func = ts_func
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to

    def poke(self, context):
        # True once the object's update time is later than ts_func(context).
        self.log.info('Sensor checks existence of : %s, %s', self.bucket, self.object)
        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to)
        return hook.is_updated_after(self.bucket, self.object, self.ts_func(context))
class GoogleCloudStoragePrefixSensor(BaseSensorOperator):
    """
    Checks for the existence of a files at prefix in Google Cloud Storage bucket.
    """
    template_fields = ('bucket', 'prefix')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(
            self,
            bucket,
            prefix,
            google_cloud_conn_id='google_cloud_storage_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        Create a new GoogleCloudStoragePrefixSensor.

        :param bucket: The Google cloud storage bucket where the object is.
        :type bucket: string
        :param prefix: The name of the prefix to check in the Google cloud
            storage bucket.
        :type prefix: string
        :param google_cloud_conn_id: The connection ID to use when
            connecting to Google cloud storage.
        :type google_cloud_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(GoogleCloudStoragePrefixSensor, self).__init__(*args, **kwargs)
        self.bucket = bucket
        self.prefix = prefix
        self.google_cloud_conn_id = google_cloud_conn_id
        self.delegate_to = delegate_to

    def poke(self, context):
        # True if list() returns at least one object under the prefix.
        self.log.info('Sensor checks existence of objects: %s, %s', self.bucket, self.prefix)
        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_conn_id,
            delegate_to=self.delegate_to)
        return bool(hook.list(self.bucket, prefix=self.prefix))
|
{
"content_hash": "50ab4374bf85b1c8bb8f24c317a3cf82",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 110,
"avg_line_length": 38.8,
"alnum_prop": 0.639008979048886,
"repo_name": "jfantom/incubator-airflow",
"id": "a45923a6b0d5d044ee1fdad273643868c115c05f",
"size": "6580",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/contrib/sensors/gcs_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152247"
},
{
"name": "JavaScript",
"bytes": "1364571"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "2665909"
},
{
"name": "Shell",
"bytes": "28054"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import wrap
from colors import blue, cyan, green, red
from pants.help.help_info_extracter import HelpInfoExtracter
class HelpFormatter(object):
  """Renders registered options as (optionally colorized) wrapped help text."""

  def __init__(self, scope, show_recursive, show_advanced, color):
    self._scope = scope
    self._show_recursive = show_recursive
    self._show_advanced = show_advanced
    self._color = color

  def _maybe_color(self, color, s):
    # Apply the given ansicolors function only when color output is enabled.
    return color(s) if self._color else s

  def _maybe_blue(self, s):
    return self._maybe_color(blue, s)

  def _maybe_cyan(self, s):
    return self._maybe_color(cyan, s)

  def _maybe_green(self, s):
    return self._maybe_color(green, s)

  def _maybe_red(self, s):
    return self._maybe_color(red, s)

  def format_options(self, scope, description, registration_args):
    """Return a help message for the specified options.

    :param registration_args: A list of (args, kwargs) pairs, as passed in to options registration.
    """
    oshi = HelpInfoExtracter(self._scope).get_option_scope_help_info(registration_args)
    lines = []

    def emit_category(category, ohis):
      # Append a heading plus the formatted options for one category.
      if not ohis:
        return
      lines.append('')
      display_scope = scope or 'Global'
      if category:
        heading = '{} {} options:'.format(display_scope, category)
      else:
        heading = '{} options:'.format(display_scope)
      lines.append(self._maybe_blue(heading))
      if description:
        lines.append(description)
        lines.append(' ')
      for ohi in ohis:
        lines.extend(self.format_option(ohi))

    emit_category('', oshi.basic)
    if self._show_recursive:
      emit_category('recursive', oshi.recursive)
    if self._show_advanced:
      emit_category('advanced', oshi.advanced)
    return lines

  def format_option(self, ohi):
    """Render a single option: its args/default line, wrapped help text, and
    any deprecation notes."""
    args_part = self._maybe_cyan(', '.join(ohi.display_args))
    fromfile_part = self._maybe_green('(@fromfile value supported) ' if ohi.fromfile
                                      else '')
    default_part = self._maybe_green('(default: {})'.format(ohi.default))
    lines = ['{args} {fromfile}{dflt}'.format(args=args_part,
                                              fromfile=fromfile_part,
                                              dflt=default_part)]
    indent = '  '
    lines.extend('{}{}'.format(indent, s) for s in wrap(ohi.help, 76))
    if ohi.deprecated_message:
      lines.append(self._maybe_red('{}{}.'.format(indent, ohi.deprecated_message)))
    if ohi.deprecated_hint:
      lines.append(self._maybe_red('{}{}'.format(indent, ohi.deprecated_hint)))
    return lines
|
{
"content_hash": "c4460b37423624a40259379b6fe6681d",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 99,
"avg_line_length": 35.346666666666664,
"alnum_prop": 0.6190116937004904,
"repo_name": "Gabriel439/pants",
"id": "d028a143a9fa75d0d7686a3815c1383866d97763",
"size": "2798",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/pants/help/help_formatter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "69479"
},
{
"name": "Java",
"bytes": "302900"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "3821134"
},
{
"name": "Scala",
"bytes": "76623"
},
{
"name": "Shell",
"bytes": "49689"
},
{
"name": "Thrift",
"bytes": "2583"
}
],
"symlink_target": ""
}
|
"""
Example of running an RLlib policy server, allowing connections from
external clients that run their own environments (a simple CartPole env
in this case) against this RLlib policy server, which listens on one or more
HTTP-speaking ports. See `cartpole_client.py` in this same directory for how
to start any number of clients (after this server has been started).
to start any number of clients (after this server has been started).
This script will not create any actual env to illustrate that RLlib can
run w/o needing an internalized environment.
Setup:
1) Start this server:
$ python cartpole_server.py --num-workers --[other options]
Use --help for help.
2) Run n policy clients:
See `cartpole_client.py` on how to do this.
The `num-workers` setting will allow you to distribute the incoming feed over n
listen sockets (in this example, between 9900 and 990n with n=worker_idx-1).
You may connect more than one policy client to any open listen port.
"""
import argparse
import gym
import os
import ray
from ray import air, tune
from ray.rllib.env.policy_server_input import PolicyServerInput
from ray.rllib.examples.custom_metrics_and_callbacks import MyCallbacks
from ray.tune.logger import pretty_print
from ray.tune.registry import get_trainable_cls
SERVER_ADDRESS = "localhost"
# In this example, the user can run the policy server with
# n workers, opening up listen ports 9900 - 990n (n = num_workers - 1)
# to each of which different clients may connect.
SERVER_BASE_PORT = 9900 # + worker-idx - 1
CHECKPOINT_FILE = "last_checkpoint_{}.out"
def get_cli_args() -> argparse.Namespace:
    """Create CLI parser and return parsed arguments.

    Reads ``sys.argv``; prints the parsed namespace before returning it.
    """
    parser = argparse.ArgumentParser()
    # Example-specific args.
    parser.add_argument(
        "--port",
        type=int,
        default=SERVER_BASE_PORT,
        help="The base-port to use (on localhost). " f"Default is {SERVER_BASE_PORT}.",
    )
    parser.add_argument(
        "--callbacks-verbose",
        action="store_true",
        help="Activates info-messages for different events on "
        "server/client (episode steps, postprocessing, etc..).",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=2,
        help="The number of workers to use. Each worker will create "
        "its own listening socket for incoming experiences.",
    )
    parser.add_argument(
        "--no-restore",
        action="store_true",
        help="Do not restore from a previously saved checkpoint (location of "
        "which is saved in `last_checkpoint_[algo-name].out`).",
    )
    # General args.
    parser.add_argument(
        "--run",
        default="PPO",
        choices=["APEX", "DQN", "IMPALA", "PPO", "R2D2"],
        help="The RLlib-registered algorithm to use.",
    )
    parser.add_argument("--num-cpus", type=int, default=3)
    parser.add_argument(
        "--framework",
        choices=["tf", "tf2", "torch"],
        default="tf",
        help="The DL framework specifier.",
    )
    parser.add_argument(
        "--use-lstm",
        action="store_true",
        help="Whether to auto-wrap the model with an LSTM. Only valid option for "
        "--run=[IMPALA|PPO|R2D2]",
    )
    parser.add_argument(
        "--stop-iters", type=int, default=200, help="Number of iterations to train."
    )
    parser.add_argument(
        "--stop-timesteps",
        type=int,
        default=500000,
        help="Number of timesteps to train.",
    )
    parser.add_argument(
        "--stop-reward",
        type=float,
        default=80.0,
        help="Reward at which we stop training.",
    )
    parser.add_argument(
        "--as-test",
        action="store_true",
        help="Whether this script should be run as a test: --stop-reward must "
        "be achieved within --stop-timesteps AND --stop-iters.",
    )
    parser.add_argument(
        "--no-tune",
        action="store_true",
        help="Run without Tune using a manual train loop instead. Here,"
        "there is no TensorBoard support.",
    )
    parser.add_argument(
        "--local-mode",
        action="store_true",
        help="Init Ray in local mode for easier debugging.",
    )
    args = parser.parse_args()
    print(f"Running with following CLI args: {args}")
    return args
if __name__ == "__main__":
    args = get_cli_args()
    ray.init()

    # `InputReader` generator (returns None if no input reader is needed on
    # the respective worker).
    def _input(ioctx):
        # We are remote worker or we are local worker with num_workers=0:
        # Create a PolicyServerInput.
        if ioctx.worker_index > 0 or ioctx.worker.num_workers == 0:
            return PolicyServerInput(
                ioctx,
                SERVER_ADDRESS,
                # Worker i listens on base port + i - 1 (worker 0 -> base).
                args.port + ioctx.worker_index - (1 if ioctx.worker_index > 0 else 0),
            )
        # No InputReader (PolicyServerInput) needed.
        else:
            return None

    # Algorithm config. Note that this config is sent to the client only in case
    # the client needs to create its own policy copy for local inference.
    config = (
        get_trainable_cls(args.run).get_default_config()
        # Indicate that the Algorithm we setup here doesn't need an actual env.
        # Allow spaces to be determined by user (see below).
        .environment(
            env=None,
            # TODO: (sven) make these settings unnecessary and get the information
            # about the env spaces from the client.
            observation_space=gym.spaces.Box(float("-inf"), float("inf"), (4,)),
            action_space=gym.spaces.Discrete(2),
        )
        # DL framework to use.
        .framework(args.framework)
        # Create a "chatty" client/server or not.
        .callbacks(MyCallbacks if args.callbacks_verbose else None)
        # Use the `PolicyServerInput` to generate experiences.
        .offline_data(input_=_input)
        # Use n worker processes to listen on different ports.
        .rollouts(num_rollout_workers=args.num_workers)
        # Disable OPE, since the rollouts are coming from online clients.
        .evaluation(off_policy_estimation_methods={})
        # Set to INFO so we'll see the server's actual address:port.
        .debugging(log_level="INFO")
    )

    # DQN.
    if args.run == "DQN" or args.run == "APEX" or args.run == "R2D2":
        # Example of using DQN (supports off-policy actions).
        config.update_from_dict(
            {
                "num_steps_sampled_before_learning_starts": 100,
                "min_sample_timesteps_per_iteration": 200,
                "n_step": 3,
                "rollout_fragment_length": 4,
                "train_batch_size": 8,
            }
        )
        config.model.update(
            {
                "fcnet_hiddens": [64],
                "fcnet_activation": "linear",
            }
        )
        if args.run == "R2D2":
            config.model["use_lstm"] = args.use_lstm
    elif args.run == "IMPALA":
        config.update_from_dict(
            {
                "num_gpus": 0,
                "model": {"use_lstm": args.use_lstm},
            }
        )
    # PPO.
    else:
        # Example of using PPO (does NOT support off-policy actions).
        config.update_from_dict(
            {
                "rollout_fragment_length": 1000,
                "train_batch_size": 4000,
                "model": {"use_lstm": args.use_lstm},
            }
        )

    # File that records WHERE the last checkpoint was saved.  Kept as a
    # separate variable from `checkpoint_path` (the checkpoint itself): the
    # original code reused one variable for both, which crashed with
    # `open(None, "w")` on a fresh run (no restore) and otherwise wrote the
    # new checkpoint location over the *restored checkpoint's* path.
    checkpoint_state_file = CHECKPOINT_FILE.format(args.run)
    # Attempt to restore from checkpoint, if possible.
    if not args.no_restore and os.path.exists(checkpoint_state_file):
        checkpoint_path = open(checkpoint_state_file).read()
    else:
        checkpoint_path = None

    # Manual training loop (no Ray tune).
    if args.no_tune:
        algo = config.build()

        if checkpoint_path:
            print("Restoring from checkpoint path", checkpoint_path)
            algo.restore(checkpoint_path)

        # Serving and training loop.
        ts = 0
        for _ in range(args.stop_iters):
            results = algo.train()
            print(pretty_print(results))
            checkpoint = algo.save()
            print("Last checkpoint", checkpoint)
            with open(checkpoint_state_file, "w") as f:
                f.write(checkpoint)
            if (
                results["episode_reward_mean"] >= args.stop_reward
                or ts >= args.stop_timesteps
            ):
                break
            # NOTE(review): `results["timesteps_total"]` is RLlib's cumulative
            # counter, so `+=` overcounts; `ts = results["timesteps_total"]`
            # is probably intended -- confirm before changing stop behavior.
            ts += results["timesteps_total"]
        algo.stop()

    # Run with Tune for auto env and algo creation and TensorBoard.
    else:
        print("Ignoring restore even if previous checkpoint is provided...")
        stop = {
            "training_iteration": args.stop_iters,
            "timesteps_total": args.stop_timesteps,
            "episode_reward_mean": args.stop_reward,
        }
        tune.Tuner(
            args.run, param_space=config, run_config=air.RunConfig(stop=stop, verbose=2)
        ).fit()
|
{
"content_hash": "5370ed6c21e4290a0f49185b281bb899",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 88,
"avg_line_length": 33.92424242424242,
"alnum_prop": 0.5969182670835195,
"repo_name": "ray-project/ray",
"id": "11c76f85536806282f4ad4f32f176de8a5556220",
"size": "8978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/examples/serving/cartpole_server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
from pyrep.robots.mobiles.nonholonomic_base import NonHolonomicBase
class LoCoBot(NonHolonomicBase):
    """LoCoBot mobile robot base (non-holonomic drive) for PyRep scenes."""

    def __init__(self, count: int = 0):
        # Presumably (count, wheel_count, scene_object_name) -- confirm
        # against NonHolonomicBase's constructor signature.
        super().__init__(count, 2, 'LoCoBot')
|
{
"content_hash": "92a3fe55db7ee429c559efc938967ae9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 67,
"avg_line_length": 31.5,
"alnum_prop": 0.6878306878306878,
"repo_name": "stepjam/PyRep",
"id": "04645debebc13549f125391d5af60107a638e9e7",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrep/robots/mobiles/locobot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "56307"
},
{
"name": "Lua",
"bytes": "16854"
},
{
"name": "Python",
"bytes": "428818"
}
],
"symlink_target": ""
}
|
"""Middleware for account-related functionality."""
import pytz
from django.conf import settings
from django.contrib import auth
from django.utils import timezone
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.accounts.backends import X509Backend
def timezone_middleware(get_response):
    """Middleware factory that activates the user's local timezone.

    Args:
        get_response (callable):
            The method to execute the view.
    """
    def middleware(request):
        """Activate the requesting user's selected timezone.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

        Returns:
            django.http.HttpResponse:
                The response object.
        """
        if request.user.is_authenticated:
            try:
                profile = request.user.get_profile()
                tz = pytz.timezone(profile.timezone)
            except pytz.UnknownTimeZoneError:
                # Unknown/invalid timezone name: fall back to the default.
                pass
            else:
                timezone.activate(tz)

        return get_response(request)

    return middleware
def update_last_login_middleware(get_response):
    """Middleware factory that refreshes a user's last login time.

    The stored login time is bumped whenever more than 30 minutes have
    passed since the last recorded login, turning ``last_login`` into a
    recent-activity timestamp that better reflects how often people are
    actively using Review Board.

    Args:
        get_response (callable):
            The method to execute the view.
    """
    #: The smallest period of time between login time updates.
    UPDATE_PERIOD_SECS = 30 * 60  # 30 minutes

    def middleware(request):
        """Update the login time for the requesting user, if stale.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

        Returns:
            django.http.HttpResponse:
                The response object.
        """
        user = request.user

        if user.is_authenticated:
            now = timezone.now()
            elapsed_secs = (now - user.last_login).total_seconds()

            if elapsed_secs >= UPDATE_PERIOD_SECS:
                user.last_login = now
                user.save(update_fields=('last_login',))

        return get_response(request)

    return middleware
def x509_auth_middleware(get_response):
    """Middleware factory that authenticates users via X.509 certificates.

    If Review Board is configured to use the X.509 authentication backend,
    the user is authenticated automatically from the environment variables
    set by mod_ssl.

    Apache needs to be configured with mod_ssl. For Review Board to be usable
    with X.509 client certificate authentication, the ``SSLVerifyClient``
    configuration directive should be set to ``optional``, so that basic
    authentication keeps working for clients using a username and password.

    Args:
        get_response (callable):
            The method to execute the view.
    """
    def middleware(request):
        """Log in the user named by a pre-verified X.509 field, if any.

        Only applies when the request arrived over HTTPS and the X.509
        backend is the active one; the username is read from the request
        environment (*not* the headers).

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

        Returns:
            django.http.HttpResponse:
                The response object.
        """
        siteconfig = SiteConfiguration.objects.get_current()

        # Bail out early unless this is a secure request handled by the
        # X.509 backend.
        if (not request.is_secure() or
            siteconfig.get('auth_backend') != X509Backend.backend_id):
            return get_response(request)

        field_name = getattr(settings, 'X509_USERNAME_FIELD', None)

        if field_name == 'CUSTOM':
            field_name = getattr(settings,
                                 'X509_CUSTOM_USERNAME_FIELD',
                                 None)

        if field_name:
            x509_field = request.environ.get(field_name)

            if x509_field:
                user = auth.authenticate(request=request,
                                         x509_field=x509_field)

                if user:
                    request.user = user
                    auth.login(request, user)

        return get_response(request)

    return middleware
|
{
"content_hash": "456cabef7ac38c19c550c0fcbf0412eb",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 32.5,
"alnum_prop": 0.605934065934066,
"repo_name": "reviewboard/reviewboard",
"id": "27323d050e5e56ae35587528c6fffd325295bc7b",
"size": "4550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/accounts/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
import os
import re
import argparse
import PyPDF2
def strip_punctuation(full_name):
    """Return *full_name* with apostrophes, hyphens, periods and spaces removed.

    Commas are intentionally preserved so the "Last, First" split done by the
    caller still works.
    """
    # str.translate removes all four characters in a single C-level pass
    # instead of chaining several str.replace() calls.
    return full_name.translate(str.maketrans('', '', "'-. "))
def get_oen_re():
    """Return a compiled pattern matching an 'OEN Number: ###-###-###' label."""
    pattern = 'OEN Number: +[0-9]{3}-[0-9]{3}-[0-9]{3}'
    return re.compile(pattern)
def get_ugcloud_re():
    """Return a compiled pattern for a UGCloud user name: 5 letters + 4 digits."""
    pattern = '^[a-zA-Z]{5}[0-9]{4}$'
    return re.compile(pattern)
def get_username(text, oen_match):
    """Build a UGCloud user name from a timetable page's header text.

    The result is the first two letters of the first name, the first three
    letters of the last name, and the last four digits of the student's OEN.
    """
    # The OEN digits follow the 11-character 'OEN Number:' label.
    oen = text[oen_match.start() + 11:oen_match.end()].strip()

    # The student name sits between column 35 and the OEN label, formatted
    # as "Last, First"; punctuation and spaces are stripped first.
    cleaned = strip_punctuation(text[35:oen_match.start()].strip())
    last_name, first_name = cleaned.split(',')

    first_part = first_name.strip().capitalize()[:2]
    last_part = last_name.strip().capitalize()[:3]
    digits = oen.replace('-', '')

    return '{0}{1}{2}'.format(first_part, last_part, digits[5:])
def close_document(writer, output_dir, current_ugcloud):
    """Write the accumulated pages to ``<output_dir>/<current_ugcloud>.pdf``.

    Args:
        writer: The PyPDF2 writer holding the current student's pages.
        output_dir: Directory where the per-student PDF is written.
        current_ugcloud: UGCloud user name used as the output file stem.

    Returns:
        A fresh ``PyPDF2.PdfFileWriter`` for the next student's pages.
    """
    outpath = os.path.join(output_dir, f"{current_ugcloud}.pdf")
    with open(outpath, "wb") as f:
        writer.write(f)
    # Completes the "processing <user>..." line printed by the caller.
    # (Was an f-string with no placeholders; plain literal is equivalent.)
    print(" Done!")
    return PyPDF2.PdfFileWriter()
def file_checker(args, value_type, message):
    """Return a validated filesystem path for *value_type*, prompting if needed.

    Args:
        args: Parsed ``argparse`` namespace.
        value_type: Attribute name on *args* that may hold a candidate path.
        message: Human-readable description used in the interactive prompt.

    Returns:
        A path string that exists on disk.
    """
    value = getattr(args, value_type)

    # Use the command-line value when it points at something real.
    if value is not None and os.path.exists(value):
        return value

    # Bug fix: previously a supplied-but-nonexistent path fell off the end
    # of the function and silently returned None. Now we always prompt
    # until we get a path that exists.
    while True:
        value = input(f"{message} location: ").strip()
        if not value:
            continue
        if os.path.exists(value):
            return value
        print(f"{value} is not a valid location")
def main(filename, output_dir):
    """Split a Maplewood timetable PDF into one PDF per student.

    Pages are scanned for an OEN label; each page carrying a new OEN starts
    a new output document named after the derived UGCloud user name, and
    pages without an OEN are appended to the current student's document.
    """
    oen_re = get_oen_re()
    ugcloud_re = get_ugcloud_re()
    reader = None
    with open(filename, 'rb') as f:
        reader = PyPDF2.PdfFileReader(f)
        writer = PyPDF2.PdfFileWriter()
        # Name of the student whose pages are currently being collected.
        current_ugcloud = None
        # True until the first student's pages have begun accumulating.
        new_document = True
        print("processing pages...\n")
        for page in reader.pages:
            text = page.extractText()
            oen = oen_re.search(text)
            if oen:
                email_address = get_username(text, oen)
                ugcloud = ugcloud_re.search(email_address)
                print(f"processing {email_address}...", end="")
                if not ugcloud:
                    # Invalid names are reported but still processed.
                    print(f"{email_address} is not a valid UGCloud address")
                if new_document:
                    # Very first OEN page: just remember whose it is.
                    current_ugcloud = email_address
                else:
                    # New student: flush the previous student's pages and
                    # start collecting under the new name.
                    writer = close_document(writer,
                                            output_dir,
                                            current_ugcloud)
                    # NOTE(review): new_document is set True here and then
                    # immediately reset to False after addPage below — it
                    # appears redundant; confirm before simplifying.
                    new_document = True
                    current_ugcloud = email_address
                writer.addPage(page)
                new_document = False
            else:
                # Continuation page for the current student.
                writer.addPage(page)
        # Flush the final student's document.
        close_document(writer, output_dir, current_ugcloud)
# Command-line entry point: resolve and validate the input PDF and output
# directory (prompting interactively when missing/invalid), then split.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=("Take a Maplewood timetable pdf "
                     "and split it into individual files "
                     " named with a student's ugcloud"
                     " user name"))
    parser.add_argument("-f", "--filename", dest="filename")
    parser.add_argument("-o", "--output", dest="output")
    args = parser.parse_args()
    filename = file_checker(args, "filename", "Maplewood Timetable")
    output = file_checker(args, "output", "Output")
    main(filename, output)
|
{
"content_hash": "ef95ee1d68ceb969a5fab7aaa3870efb",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 76,
"avg_line_length": 31.889908256880734,
"alnum_prop": 0.5253164556962026,
"repo_name": "gregorysenyshyn/timetable-mailer",
"id": "a47c07407f4b3e2ca4dd1a603582bf8060d83bc3",
"size": "3476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ttm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "622"
},
{
"name": "Python",
"bytes": "3476"
}
],
"symlink_target": ""
}
|
import boto3
import json

# Create an S3 client
s3 = boto3.client('s3')

bucket_name = 'my-bucket'

# Create the bucket policy granting public read access to the bucket's
# objects.
# Bug fixes: the 'Version' value was missing its closing quote (a syntax
# error), and 'Resource' must be a full S3 object ARN
# (arn:aws:s3:::<bucket>/*), not the bare bucket name, for s3:GetObject
# to apply to objects.
bucket_policy = {
    'Version': '2012-10-17',
    'Statement': [{
        'Sid': 'AddPerm',
        'Effect': 'Allow',
        'Principal': '*',
        'Action': ['s3:GetObject'],
        'Resource': ['arn:aws:s3:::{}/*'.format(bucket_name)]
    }]
}

# Convert the policy to a JSON string
bucket_policy = json.dumps(bucket_policy)

# Set the new policy on the given bucket
s3.put_bucket_policy(Bucket=bucket_name, Policy=bucket_policy)
|
{
"content_hash": "4d998bcfe07cd7e7b4685d96e97ad396",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 62,
"avg_line_length": 21.48,
"alnum_prop": 0.6256983240223464,
"repo_name": "imshashank/aws-doc-sdk-examples",
"id": "775be0ccef553c736b9c8bde4ecca0001d6b07e5",
"size": "1066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/example_code/s3/s3-python-example-put-bucket-policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "105917"
},
{
"name": "C++",
"bytes": "157148"
},
{
"name": "CMake",
"bytes": "5136"
},
{
"name": "Go",
"bytes": "136009"
},
{
"name": "Java",
"bytes": "216776"
},
{
"name": "JavaScript",
"bytes": "101864"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "PHP",
"bytes": "148580"
},
{
"name": "Python",
"bytes": "31842"
},
{
"name": "Ruby",
"bytes": "112274"
},
{
"name": "Shell",
"bytes": "1348"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true, assert_raises
from nose.plugins.skip import SkipTest
import warnings
from mne.fixes import tril_indices
from mne.connectivity import spectral_connectivity
from mne.connectivity.spectral import _CohEst
from mne import SourceEstimate
from mne.utils import run_tests_if_main, slow_test
from mne.filter import band_pass_filter
warnings.simplefilter('always')
def _stc_gen(data, sfreq, tmin, combo=False):
    """Simulate a SourceEstimate generator.

    Yields one SourceEstimate per epoch, or — when ``combo`` is set — a
    (plain array, SourceEstimate) pair to exercise mixed-input handling.
    """
    vertices = [np.arange(data.shape[1]), np.empty(0)]
    tstep = 1 / float(sfreq)
    for epoch in data:
        if combo:
            # simulate a combination of array and source estimate
            stc = SourceEstimate(data=epoch[1:], vertices=vertices,
                                 tmin=tmin, tstep=tstep)
            yield (epoch[0], stc)
        else:
            yield SourceEstimate(data=epoch, vertices=vertices,
                                 tmin=tmin, tstep=tstep)
@slow_test
def test_spectral_connectivity():
    """Test frequency-domain connectivity methods.

    Simulates band-limited (5-15 Hz) coupling between two of three random
    signals, then for every method/mode combination checks: invalid-parameter
    errors, expected in-band vs out-of-band connectivity values, agreement
    between the all-to-all and indexed/parallel code paths, and manual vs
    built-in frequency-band averaging.
    """
    # XXX For some reason on 14 Oct 2015 Travis started timing out on this
    # test, so for a quick workaround we will skip it:
    if os.getenv('TRAVIS', 'false') == 'true':
        raise SkipTest('Travis is broken')
    # Use a case known to have no spurious correlations (it would bad if
    # nosetests could randomly fail):
    np.random.seed(0)
    sfreq = 50.
    n_signals = 3
    n_epochs = 8
    n_times = 256
    tmin = 0.
    tmax = (n_times - 1) / sfreq
    data = np.random.randn(n_epochs, n_signals, n_times)
    times_data = np.linspace(tmin, tmax, n_times)
    # simulate connectivity from 5Hz..15Hz
    fstart, fend = 5.0, 15.0
    for i in range(n_epochs):
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            # signal 1 becomes a band-passed copy of signal 0, so these two
            # channels are strongly connected within [fstart, fend].
            data[i, 1, :] = band_pass_filter(data[i, 0, :],
                                             sfreq, fstart, fend)
            # add some noise, so the spectrum is not exactly zero
            data[i, 1, :] += 1e-2 * np.random.randn(n_times)
    # First we test some invalid parameters:
    assert_raises(ValueError, spectral_connectivity, data, method='notamethod')
    assert_raises(ValueError, spectral_connectivity, data,
                  mode='notamode')
    # test invalid fmin fmax settings
    assert_raises(ValueError, spectral_connectivity, data, fmin=10,
                  fmax=10 + 0.5 * (sfreq / float(n_times)))
    assert_raises(ValueError, spectral_connectivity, data, fmin=10, fmax=5)
    assert_raises(ValueError, spectral_connectivity, data, fmin=(0, 11),
                  fmax=(5, 10))
    assert_raises(ValueError, spectral_connectivity, data, fmin=(11,),
                  fmax=(12, 15))
    methods = ['coh', 'cohy', 'imcoh', ['plv', 'ppc', 'pli', 'pli2_unbiased',
               'wpli', 'wpli2_debiased', 'coh']]
    modes = ['multitaper', 'fourier', 'cwt_morlet']
    # define some frequencies for cwt
    cwt_frequencies = np.arange(3, 24.5, 1)
    for mode in modes:
        for method in methods:
            if method == 'coh' and mode == 'multitaper':
                # only check adaptive estimation for coh to reduce test time
                check_adaptive = [False, True]
            else:
                check_adaptive = [False]
            if method == 'coh' and mode == 'cwt_morlet':
                # so we also test using an array for num cycles
                cwt_n_cycles = 7. * np.ones(len(cwt_frequencies))
            else:
                cwt_n_cycles = 7.
            for adaptive in check_adaptive:
                if adaptive:
                    mt_bandwidth = 1.
                else:
                    mt_bandwidth = None
                con, freqs, times, n, _ = spectral_connectivity(
                    data, method=method, mode=mode, indices=None, sfreq=sfreq,
                    mt_adaptive=adaptive, mt_low_bias=True,
                    mt_bandwidth=mt_bandwidth, cwt_frequencies=cwt_frequencies,
                    cwt_n_cycles=cwt_n_cycles)
                assert_true(n == n_epochs)
                assert_array_almost_equal(times_data, times)
                if mode == 'multitaper':
                    upper_t = 0.95
                    lower_t = 0.5
                else:
                    # other estimates have higher variance
                    upper_t = 0.8
                    lower_t = 0.75
                # test the simulated signal
                if method == 'coh':
                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
                    # we see something for zero-lag
                    assert_true(np.all(con[1, 0, idx[0]:idx[1]] > upper_t))
                    if mode != 'cwt_morlet':
                        idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
                        assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
                        assert_true(np.all(con[1, 0, idx[1]:] < lower_t))
                elif method == 'cohy':
                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
                    # imaginary coh will be zero
                    assert_true(np.all(np.imag(con[1, 0, idx[0]:idx[1]]) <
                                lower_t))
                    # we see something for zero-lag
                    assert_true(np.all(np.abs(con[1, 0, idx[0]:idx[1]]) >
                                upper_t))
                    idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
                    if mode != 'cwt_morlet':
                        assert_true(np.all(np.abs(con[1, 0, :idx[0]]) <
                                    lower_t))
                        assert_true(np.all(np.abs(con[1, 0, idx[1]:]) <
                                    lower_t))
                elif method == 'imcoh':
                    idx = np.searchsorted(freqs, (fstart + 1, fend - 1))
                    # imaginary coh will be zero
                    assert_true(np.all(con[1, 0, idx[0]:idx[1]] < lower_t))
                    idx = np.searchsorted(freqs, (fstart - 1, fend + 1))
                    assert_true(np.all(con[1, 0, :idx[0]] < lower_t))
                    assert_true(np.all(con[1, 0, idx[1]:] < lower_t))
                # compute same connections using indices and 2 jobs
                indices = tril_indices(n_signals, -1)
                if not isinstance(method, list):
                    test_methods = (method, _CohEst)
                else:
                    test_methods = method
                stc_data = _stc_gen(data, sfreq, tmin)
                con2, freqs2, times2, n2, _ = spectral_connectivity(
                    stc_data, method=test_methods, mode=mode, indices=indices,
                    sfreq=sfreq, mt_adaptive=adaptive, mt_low_bias=True,
                    mt_bandwidth=mt_bandwidth, tmin=tmin, tmax=tmax,
                    cwt_frequencies=cwt_frequencies,
                    cwt_n_cycles=cwt_n_cycles, n_jobs=2)
                assert_true(isinstance(con2, list))
                assert_true(len(con2) == len(test_methods))
                if method == 'coh':
                    assert_array_almost_equal(con2[0], con2[1])
                if not isinstance(method, list):
                    con2 = con2[0]  # only keep the first method
                    # we get the same result for the probed connections
                    assert_array_almost_equal(freqs, freqs2)
                    assert_array_almost_equal(con[indices], con2)
                    assert_true(n == n2)
                    assert_array_almost_equal(times_data, times2)
                else:
                    # we get the same result for the probed connections
                    assert_true(len(con) == len(con2))
                    for c, c2 in zip(con, con2):
                        assert_array_almost_equal(freqs, freqs2)
                        assert_array_almost_equal(c[indices], c2)
                        assert_true(n == n2)
                        assert_array_almost_equal(times_data, times2)
                # compute same connections for two bands, fskip=1, and f. avg.
                fmin = (5., 15.)
                fmax = (15., 30.)
                con3, freqs3, times3, n3, _ = spectral_connectivity(
                    data, method=method, mode=mode, indices=indices,
                    sfreq=sfreq, fmin=fmin, fmax=fmax, fskip=1, faverage=True,
                    mt_adaptive=adaptive, mt_low_bias=True,
                    mt_bandwidth=mt_bandwidth, cwt_frequencies=cwt_frequencies,
                    cwt_n_cycles=cwt_n_cycles)
                assert_true(isinstance(freqs3, list))
                assert_true(len(freqs3) == len(fmin))
                for i in range(len(freqs3)):
                    assert_true(np.all((freqs3[i] >= fmin[i]) &
                                       (freqs3[i] <= fmax[i])))
                # average con2 "manually" and we get the same result
                if not isinstance(method, list):
                    for i in range(len(freqs3)):
                        freq_idx = np.searchsorted(freqs2, freqs3[i])
                        con2_avg = np.mean(con2[:, freq_idx], axis=1)
                        assert_array_almost_equal(con2_avg, con3[:, i])
                else:
                    for j in range(len(con2)):
                        for i in range(len(freqs3)):
                            freq_idx = np.searchsorted(freqs2, freqs3[i])
                            con2_avg = np.mean(con2[j][:, freq_idx], axis=1)
                            assert_array_almost_equal(con2_avg, con3[j][:, i])
# Run this module's tests when executed directly (helper from mne.utils).
run_tests_if_main()
|
{
"content_hash": "c9f23a6eb19d7b4754d04671e5da3411",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 79,
"avg_line_length": 43,
"alnum_prop": 0.5087593484274152,
"repo_name": "ARudiuk/mne-python",
"id": "8678f5b315c35ad3b3a7ce7c25333d92c5677974",
"size": "9761",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "mne/connectivity/tests/test_spectral.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3769"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "5086775"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from codecs import open as codecs_open
from setuptools import setup, find_packages
# Get the long description from the relevant file
# README.md is decoded explicitly as UTF-8 so the long description is read
# correctly regardless of the system locale.
with codecs_open('README.md', encoding='utf-8') as f:
    long_description = f.read()
# Package metadata; the console_scripts entry point exposes the CLI as the
# `mltools` command.
setup(name='mltools',
      version='0.0.1',
      description=u'Machine Learning Tools',
      long_description=long_description,
      classifiers=[],
      keywords='',
      author=u'Vidal Alcala',
      author_email='vidal.alcala@gmail.com',
      url='https://github.com/vidalalcala/mltools',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'click',
          'rpy2==2.8.4',
          'scikit-learn',
          'scipy',
          'pandas',
          'numpy'
      ],
      extras_require={
          'test': ['pytest'],
      },
      entry_points="""
      [console_scripts]
      mltools=mltools.scripts.cli:cli
      """
      )
|
{
"content_hash": "06d097971cfe6c3d45c030295500cfdc",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 26.236842105263158,
"alnum_prop": 0.5777331995987964,
"repo_name": "vidalalcala/ml-tools",
"id": "f6714271e7414153526564d9f74ae76d77aa9e86",
"size": "997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6704"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class TSVToJSON(Choreography):
    """Choreo wrapper for the Temboo TSV-to-JSON data-conversion endpoint."""

    def __init__(self, temboo_session):
        """Initialize the Choreo with a TembooSession carrying valid
        Temboo credentials.
        """
        super(TSVToJSON, self).__init__(
            temboo_session, '/Library/Utilities/DataConversions/TSVToJSON')

    def new_input_set(self):
        """Return an empty input set for configuring an execution."""
        return TSVToJSONInputSet()

    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution of this Choreo."""
        return TSVToJSONChoreographyExecution(session, exec_id, path)

    def _make_result_set(self, result, path):
        """Wrap raw execution results in this Choreo's result-set type."""
        return TSVToJSONResultSet(result, path)
class TSVToJSONInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the TSVToJSON
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_TSV(self, value):
        """
        Set the value of the TSV input for this Choreo. ((required, multiline) The TSV file to convert to XML. Your TSV data must contain column names.)
        """
        super(TSVToJSONInputSet, self)._set_input('TSV', value)
class TSVToJSONResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the TSVToJSON Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`;
        # renaming it would break any keyword callers, so it is left as-is.
        # Deserializes the given JSON text into a Python object.
        return json.loads(str)
    def get_JSON(self):
        """
        Retrieve the value for the "JSON" output from this Choreo execution. ((json) The JSON formatted data.)
        """
        return self._output.get('JSON', None)
class TSVToJSONChoreographyExecution(ChoreographyExecution):
    """Tracks an asynchronous execution of the TSVToJSON Choreo."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in this Choreo's result-set type.
        return TSVToJSONResultSet(response, path)
|
{
"content_hash": "b6e13475ef6241cc59e12c3499705aa8",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 152,
"avg_line_length": 35.964285714285715,
"alnum_prop": 0.702085402184707,
"repo_name": "jordanemedlock/psychtruths",
"id": "3b026090e55d0b1e4c5d387c2b374e09529a22c2",
"size": "2867",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/Library/Utilities/DataConversions/TSVToJSON.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
"Errors and warnings reported by the Tango framework."
class TangoException(Exception):
    """Base exception for Tango-specific errors."""


class ParseError(TangoException):
    """Raised when parsing markup or scraping screens fails."""


class NoSuchWriterException(TangoException):
    """Raised when looking up a response writer by an unregistered name."""


class HeaderException(TangoException):
    """Raised when a module's metadata docstring cannot be parsed."""


class ConfigurationError(TangoException):
    """Raised for an app.config value that is missing or wrongly set."""


class ModuleNotFound(TangoException):
    """Raised when a required Python module's filepath cannot be found."""


class TangoWarning(Warning):
    """Base warning for Tango-specific warnings."""


class DuplicateWarning(TangoWarning):
    """Base warning for reporting duplicates."""


class DuplicateRouteWarning(DuplicateWarning):
    """A route is declared multiple times in a module header."""


class DuplicateExportWarning(DuplicateWarning):
    """An export is declared multiple times in a module header."""


class DuplicateContextWarning(DuplicateWarning):
    """A route context item is replaced by a new route context in the same project."""
|
{
"content_hash": "fa6b897b9f81a6d44a16dde6187aca35",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.7746835443037975,
"repo_name": "willowtreeapps/tango-core",
"id": "8b8afcd46d4d15ce00d14e4f94fd591c2e5cd0fd",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tango/errors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "114588"
}
],
"symlink_target": ""
}
|
"""
testing_processer
-----------------
The testing function for test the processer utilities.
"""
import os
from processer import Processer
from ..Logger import Logger
from processer import check_subprocess, create_empty_list_from_hierarchylist,\
store_time_subprocess, initial_message_creation
## Code a dummy processer class
#class DummyProcesser(Processer):
# proc_name = "Dummy process"
#
# def __init__(self, logfile, lim_rows=0, prompt_inform=False):
# """Dummy process initialization."""
# self._initialization()
# # Logfile
# self.logfile = logfile
# # Other parameters
# self.lim_rows = lim_rows
# self.bool_inform = True if self.lim_rows != 0 else False
# self.prompt_inform = prompt_inform
# self.n_procs = 0
# self.proc_desc = "Computation dummy processer"
#
# def compute(self, n):
# t00 = self.setting_global_process()
# ## 1. Computation of the measure (parallel if)
# # Begin to track the process
# t0, bun = self.setting_loop(n)
# for i in xrange(n):
# ## Finish to track this process
# t0, bun = self.messaging_loop(i, t0, bun)
# # Stop tracking
# self.close_process(t00)
class TesterProcesserClass(Processer):
    """Concrete Processer used to exercise the tracking/messaging utilities."""
    proc_name = "Tester Processer"

    def __init__(self, logfile, lim_rows=0, prompt_inform=False):
        """Set up logging, progress-reporting options and one subprocess slot."""
        self._initialization()
        # Logfile
        self.logfile = logfile
        # Other parameters
        self.lim_rows = lim_rows
        self.bool_inform = True if self.lim_rows != 0 else False
        self.prompt_inform = prompt_inform
        self.n_procs = 0
        self.proc_desc = "Computation %s with %s"
        self._create_subprocess_hierharchy([['prueba']])
        self.check_subprocess()

    def compute(self, n):
        """Run a dummy loop of *n* iterations through all tracking hooks."""
        t00 = self.setting_global_process()
        # Begin to track the process
        t0, bun = self.setting_loop(n)
        t0_s = self.set_subprocess([0, 0])
        for i in range(n):
            # Finish to track this iteration. (A dead `i += 1` was removed:
            # the for statement rebinds `i` on every pass, so incrementing
            # it had no effect.)
            t0, bun = self.messaging_loop(i, t0, bun)
        self.close_subprocess([0, 0], t0_s)
        self.save_process_info('prueba')
        self.close_process(t00)
def _remove_quietly(*paths):
    """Best-effort removal of test artifact files; missing files are ignored."""
    for path in paths:
        try:
            os.remove(path)
        except OSError:
            # The file may never have been created; that's fine.
            pass


def test():
    """Exercise the Processer helper functions and TesterProcesserClass."""
    ## Parameters
    logfile = Logger('logfile.log')

    ##### Test the auxiliar functions
    initial_message_creation(proc_name='a', proc_desc='fd')
    subprocess_desc, t_expended_subproc =\
        create_empty_list_from_hierarchylist(['', ['', '']])
    check_subprocess(subprocess_desc, t_expended_subproc)
    ### WARNING: Correct!
    store_time_subprocess([0], subprocess_desc, t_expended_subproc, 0)
    store_time_subprocess([1, 0], subprocess_desc, t_expended_subproc, 0)

    ##### Test the whole class: default, row-limited, and prompting modes.
    # (The bare `except: pass` cleanup blocks were replaced by
    # _remove_quietly, which only swallows OSError.)
    for extra_args in ((), (1000,), (1000, True)):
        dummy_proc = TesterProcesserClass(logfile, *extra_args)
        dummy_proc.compute(10000)
        ## Remove the files created
        _remove_quietly('logfile.log', 'prueba')
|
{
"content_hash": "34f84d3885063e744fd23f5d2b2b16ba",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 30.44736842105263,
"alnum_prop": 0.6101987899740708,
"repo_name": "tgquintela/pythonUtils",
"id": "1b3952fd71bf08bf9195843ddc69ebb7af593b88",
"size": "3472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonUtils/ProcessTools/test_processtools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "425182"
},
{
"name": "Shell",
"bytes": "4096"
}
],
"symlink_target": ""
}
|
"""
Unit tests for MLlib Python DataFrame-based APIs.
"""
import sys
# Py2/Py3 compatibility aliases: on Python 3, provide the Python 2 names
# (xrange, basestring) used throughout these tests.
if sys.version > '3':
    xrange = range
    basestring = str
# xmlrunner (XML test reports) is optional; fall back to None if missing.
try:
    import xmlrunner
except ImportError:
    xmlrunner = None
# Python <= 2.6 needs the unittest2 backport; otherwise use stdlib unittest.
if sys.version_info[:2] <= (2, 6):
    try:
        import unittest2 as unittest
    except ImportError:
        sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
        sys.exit(1)
else:
    import unittest
from shutil import rmtree
import tempfile
import array as pyarray
import numpy as np
from numpy import abs, all, arange, array, array_equal, inf, ones, tile, zeros
import inspect
from pyspark import keyword_only, SparkContext
from pyspark.ml import Estimator, Model, Pipeline, PipelineModel, Transformer, UnaryTransformer
from pyspark.ml.classification import *
from pyspark.ml.clustering import *
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.evaluation import BinaryClassificationEvaluator, \
MulticlassClassificationEvaluator, RegressionEvaluator
from pyspark.ml.feature import *
from pyspark.ml.fpm import FPGrowth, FPGrowthModel
from pyspark.ml.linalg import DenseMatrix, DenseMatrix, DenseVector, Matrices, MatrixUDT, \
SparseMatrix, SparseVector, Vector, VectorUDT, Vectors
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasMaxIter, HasSeed
from pyspark.ml.recommendation import ALS
from pyspark.ml.regression import DecisionTreeRegressor, GeneralizedLinearRegression, \
LinearRegression
from pyspark.ml.stat import ChiSquareTest
from pyspark.ml.tuning import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams, JavaWrapper
from pyspark.serializers import PickleSerializer
from pyspark.sql import DataFrame, Row, SparkSession
from pyspark.sql.functions import rand
from pyspark.sql.types import DoubleType, IntegerType
from pyspark.storagelevel import *
from pyspark.tests import ReusedPySparkTestCase as PySparkTestCase
# Module-level serializer instance; presumably shared by serialization
# tests elsewhere in this module — not referenced in the visible portion.
ser = PickleSerializer()
class MLlibTestCase(unittest.TestCase):
    """Base test case giving each test a fresh local SparkContext/SparkSession."""
    def setUp(self):
        # Start a 4-core local context and wrap it in a SparkSession.
        self.sc = SparkContext('local[4]', "MLlib tests")
        self.spark = SparkSession(self.sc)
    def tearDown(self):
        # Stop the session created in setUp.
        self.spark.stop()
class SparkSessionTestCase(PySparkTestCase):
    """Base test case sharing one SparkSession across all tests in the class."""
    @classmethod
    def setUpClass(cls):
        # Build the shared session on top of the inherited SparkContext.
        PySparkTestCase.setUpClass()
        cls.spark = SparkSession(cls.sc)
    @classmethod
    def tearDownClass(cls):
        PySparkTestCase.tearDownClass()
        cls.spark.stop()
class MockDataset(DataFrame):
    """Minimal DataFrame stand-in; `index` counts how often it was transformed."""
    def __init__(self):
        # NOTE(review): deliberately does not call DataFrame.__init__ —
        # only the `index` counter is used by the mock pipeline stages.
        self.index = 0
class HasFake(Params):
    """Params mixin declaring a single 'fake' param for pipeline tests."""
    def __init__(self):
        super(HasFake, self).__init__()
        self.fake = Param(self, "fake", "fake param")
    def getFake(self):
        """Return the current (or default) value of the 'fake' param."""
        return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake):
    """Transformer mock that records the dataset's index and increments it."""
    def __init__(self):
        super(MockTransformer, self).__init__()
        # Index of the dataset version seen by the last _transform call.
        self.dataset_index = None
    def _transform(self, dataset):
        # Record which "version" of the dataset we saw, then bump it so
        # downstream stages observe a new index.
        self.dataset_index = dataset.index
        dataset.index += 1
        return dataset
class MockUnaryTransformer(UnaryTransformer, DefaultParamsReadable, DefaultParamsWritable):
    """UnaryTransformer mock that shifts double-typed input by a float param."""
    shift = Param(Params._dummy(), "shift", "The amount by which to shift " +
                  "data in a DataFrame",
                  typeConverter=TypeConverters.toFloat)
    def __init__(self, shiftVal=1):
        super(MockUnaryTransformer, self).__init__()
        self._setDefault(shift=1)
        self._set(shift=shiftVal)
    def getShift(self):
        """Return the configured shift amount."""
        return self.getOrDefault(self.shift)
    def setShift(self, shift):
        """Set the shift amount."""
        self._set(shift=shift)
    def createTransformFunc(self):
        """Return the per-value function: adds the configured shift."""
        shiftVal = self.getShift()
        return lambda x: x + shiftVal
    def outputDataType(self):
        """The output column type is always double."""
        return DoubleType()
    def validateInputType(self, inputType):
        """Reject any input column that is not of DoubleType."""
        if inputType != DoubleType():
            raise TypeError("Bad input type: {}. ".format(inputType) +
                            "Requires Double.")
class MockEstimator(Estimator, HasFake):
    """Estimator mock: records the dataset index and produces a MockModel."""
    def __init__(self):
        super(MockEstimator, self).__init__()
        # Index of the dataset version seen by the last _fit call.
        self.dataset_index = None
    def _fit(self, dataset):
        self.dataset_index = dataset.index
        model = MockModel()
        # Copy this estimator's param values onto the produced model.
        self._copyValues(model)
        return model
class MockModel(MockTransformer, Model, HasFake):
    """Model mock: a MockTransformer that also satisfies the Model API."""
    pass
class ParamTypeConversionTests(PySparkTestCase):
    """
    Test that param type conversion happens.
    """
    def test_int(self):
        """Whole floats coerce to int; non-numeric and fractional raise."""
        lr = LogisticRegression(maxIter=5.0)
        self.assertEqual(lr.getMaxIter(), 5)
        self.assertTrue(type(lr.getMaxIter()) == int)
        self.assertRaises(TypeError, lambda: LogisticRegression(maxIter="notAnInt"))
        self.assertRaises(TypeError, lambda: LogisticRegression(maxIter=5.1))
    def test_float(self):
        """Ints coerce to float; non-numeric strings raise."""
        lr = LogisticRegression(tol=1)
        self.assertEqual(lr.getTol(), 1.0)
        self.assertTrue(type(lr.getTol()) == float)
        self.assertRaises(TypeError, lambda: LogisticRegression(tol="notAFloat"))
    def test_vector(self):
        """Lists and numpy arrays coerce to DenseVector; non-numeric raise."""
        ewp = ElementwiseProduct(scalingVec=[1, 3])
        self.assertEqual(ewp.getScalingVec(), DenseVector([1.0, 3.0]))
        ewp = ElementwiseProduct(scalingVec=np.array([1.2, 3.4]))
        self.assertEqual(ewp.getScalingVec(), DenseVector([1.2, 3.4]))
        self.assertRaises(TypeError, lambda: ElementwiseProduct(scalingVec=["a", "b"]))
    def test_list(self):
        """Many sequence-like inputs all coerce to a plain Python list."""
        l = [0, 1]
        for lst_like in [l, np.array(l), DenseVector(l), SparseVector(len(l),
                         range(len(l)), l), pyarray.array('l', l), xrange(2), tuple(l)]:
            converted = TypeConverters.toList(lst_like)
            self.assertEqual(type(converted), list)
            self.assertListEqual(converted, l)
    def test_list_int(self):
        """Sequences of whole floats coerce to a list of ints; others raise."""
        for indices in [[1.0, 2.0], np.array([1.0, 2.0]), DenseVector([1.0, 2.0]),
                        SparseVector(2, {0: 1.0, 1: 2.0}), xrange(1, 3), (1.0, 2.0),
                        pyarray.array('d', [1.0, 2.0])]:
            vs = VectorSlicer(indices=indices)
            self.assertListEqual(vs.getIndices(), [1, 2])
            self.assertTrue(all([type(v) == int for v in vs.getIndices()]))
        self.assertRaises(TypeError, lambda: VectorSlicer(indices=["a", "b"]))
    def test_list_float(self):
        """Sequences of ints coerce to a list of floats; non-numeric raise."""
        b = Bucketizer(splits=[1, 4])
        self.assertEqual(b.getSplits(), [1.0, 4.0])
        self.assertTrue(all([type(v) == float for v in b.getSplits()]))
        self.assertRaises(TypeError, lambda: Bucketizer(splits=["a", 1.0]))
    def test_list_string(self):
        """Sequences of str/unicode/numpy strings coerce to a list of str."""
        for labels in [np.array(['a', u'b']), ['a', u'b'], np.array(['a', 'b'])]:
            idx_to_string = IndexToString(labels=labels)
            self.assertListEqual(idx_to_string.getLabels(), ['a', 'b'])
        self.assertRaises(TypeError, lambda: IndexToString(labels=['a', 2]))
    def test_string(self):
        """str, unicode and numpy string types all coerce to str."""
        lr = LogisticRegression()
        for col in ['features', u'features', np.str_('features')]:
            lr.setFeaturesCol(col)
            self.assertEqual(lr.getFeaturesCol(), 'features')
        self.assertRaises(TypeError, lambda: LogisticRegression(featuresCol=2.3))
    def test_bool(self):
        """Non-bool values for a boolean param raise rather than coerce."""
        self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept=1))
        self.assertRaises(TypeError, lambda: LogisticRegression(fitIntercept="false"))
class PipelineTests(PySparkTestCase):
    """Tests for Pipeline fit/transform staging using the mock stages above."""
    def test_pipeline(self):
        """Check the call order of mock stages via the dataset's index counter."""
        dataset = MockDataset()
        estimator0 = MockEstimator()
        transformer1 = MockTransformer()
        estimator2 = MockEstimator()
        transformer3 = MockTransformer()
        pipeline = Pipeline(stages=[estimator0, transformer1, estimator2, transformer3])
        pipeline_model = pipeline.fit(dataset, {estimator0.fake: 0, transformer1.fake: 1})
        model0, transformer1, model2, transformer3 = pipeline_model.stages
        # During fit, only stages before the last estimator transform data.
        self.assertEqual(0, model0.dataset_index)
        self.assertEqual(0, model0.getFake())
        self.assertEqual(1, transformer1.dataset_index)
        self.assertEqual(1, transformer1.getFake())
        self.assertEqual(2, dataset.index)
        self.assertIsNone(model2.dataset_index, "The last model shouldn't be called in fit.")
        self.assertIsNone(transformer3.dataset_index,
                          "The last transformer shouldn't be called in fit.")
        # transform() runs every fitted stage in order.
        dataset = pipeline_model.transform(dataset)
        self.assertEqual(2, model0.dataset_index)
        self.assertEqual(3, transformer1.dataset_index)
        self.assertEqual(4, model2.dataset_index)
        self.assertEqual(5, transformer3.dataset_index)
        self.assertEqual(6, dataset.index)
    def test_identity_pipeline(self):
        """An empty-stage Pipeline is the identity; unset stages raise KeyError."""
        dataset = MockDataset()
        def doTransform(pipeline):
            pipeline_model = pipeline.fit(dataset)
            return pipeline_model.transform(dataset)
        # check that empty pipeline did not perform any transformation
        self.assertEqual(dataset.index, doTransform(Pipeline(stages=[])).index)
        # check that failure to set stages param will raise KeyError for missing param
        self.assertRaises(KeyError, lambda: doTransform(Pipeline()))
class TestParams(HasMaxIter, HasInputCol, HasSeed):
    """
    A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
    """
    @keyword_only
    def __init__(self, seed=None):
        super(TestParams, self).__init__()
        # maxIter defaults to 10; inputCol/seed behavior comes from mixins.
        self._setDefault(maxIter=10)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, seed=None):
        """
        setParams(self, seed=None)
        Sets params for this test.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
class OtherTestParams(HasMaxIter, HasInputCol, HasSeed):
    """
    A subclass of Params mixed with HasMaxIter, HasInputCol and HasSeed.
    """
    # NOTE(review): intentionally mirrors TestParams — presumably kept as a
    # distinct class so tests can compare params across class boundaries;
    # confirm against the tests that use it.
    @keyword_only
    def __init__(self, seed=None):
        super(OtherTestParams, self).__init__()
        self._setDefault(maxIter=10)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, seed=None):
        """
        setParams(self, seed=None)
        Sets params for this test.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
class HasThrowableProperty(Params):
    """Params subclass whose `test_property` always raises, for error-path tests."""
    def __init__(self):
        super(HasThrowableProperty, self).__init__()
        self.p = Param(self, "none", "empty param")
    @property
    def test_property(self):
        # Accessing this property always fails, on purpose.
        raise RuntimeError("Test property to raise error when invoked")
class ParamTests(PySparkTestCase):
    """Unit tests for the Param/Params machinery: param identity and copying,
    defaults, explainParams output, and Java/Python param parity."""

    def test_copy_new_parent(self):
        """_copy_new_parent must reject instantiated params and re-parent dummies."""
        testParams = TestParams()
        # Copying an instantiated param should fail
        with self.assertRaises(ValueError):
            testParams.maxIter._copy_new_parent(testParams)
        # Copying a dummy param should succeed
        TestParams.maxIter._copy_new_parent(testParams)
        maxIter = testParams.maxIter
        self.assertEqual(maxIter.name, "maxIter")
        self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
        self.assertTrue(maxIter.parent == testParams.uid)

    def test_param(self):
        """A param exposes its name, doc, and parent uid."""
        testParams = TestParams()
        maxIter = testParams.maxIter
        self.assertEqual(maxIter.name, "maxIter")
        self.assertEqual(maxIter.doc, "max number of iterations (>= 0).")
        self.assertTrue(maxIter.parent == testParams.uid)

    def test_hasparam(self):
        """hasParam answers by name (str or unicode) and rejects unknowns."""
        testParams = TestParams()
        self.assertTrue(all([testParams.hasParam(p.name) for p in testParams.params]))
        self.assertFalse(testParams.hasParam("notAParameter"))
        self.assertTrue(testParams.hasParam(u"maxIter"))

    def test_resolveparam(self):
        """_resolveParam accepts a Param or a (unicode) name."""
        testParams = TestParams()
        self.assertEqual(testParams._resolveParam(testParams.maxIter), testParams.maxIter)
        self.assertEqual(testParams._resolveParam("maxIter"), testParams.maxIter)
        self.assertEqual(testParams._resolveParam(u"maxIter"), testParams.maxIter)
        if sys.version_info[0] >= 3:
            # In Python 3, it is allowed to get/set attributes with non-ascii characters.
            e_cls = AttributeError
        else:
            e_cls = UnicodeEncodeError
        self.assertRaises(e_cls, lambda: testParams._resolveParam(u"아"))

    def test_params(self):
        """Exercises params listing, defaults, isSet/isDefined, set errors,
        and the exact explainParams rendering."""
        testParams = TestParams()
        maxIter = testParams.maxIter
        inputCol = testParams.inputCol
        seed = testParams.seed
        # .params is sorted by name.
        params = testParams.params
        self.assertEqual(params, [inputCol, maxIter, seed])
        self.assertTrue(testParams.hasParam(maxIter.name))
        self.assertTrue(testParams.hasDefault(maxIter))
        self.assertFalse(testParams.isSet(maxIter))
        self.assertTrue(testParams.isDefined(maxIter))
        self.assertEqual(testParams.getMaxIter(), 10)
        testParams.setMaxIter(100)
        self.assertTrue(testParams.isSet(maxIter))
        self.assertEqual(testParams.getMaxIter(), 100)
        # inputCol has no default, so it is neither set nor defined.
        self.assertTrue(testParams.hasParam(inputCol.name))
        self.assertFalse(testParams.hasDefault(inputCol))
        self.assertFalse(testParams.isSet(inputCol))
        self.assertFalse(testParams.isDefined(inputCol))
        with self.assertRaises(KeyError):
            testParams.getInputCol()
        otherParam = Param(Params._dummy(), "otherParam", "Parameter used to test that " +
                           "set raises an error for a non-member parameter.",
                           typeConverter=TypeConverters.toString)
        with self.assertRaises(ValueError):
            testParams.set(otherParam, "value")
        # Since the default is normally random, set it to a known number for debug str
        testParams._setDefault(seed=41)
        testParams.setSeed(43)
        self.assertEqual(
            testParams.explainParams(),
            "\n".join(["inputCol: input column name. (undefined)",
                       "maxIter: max number of iterations (>= 0). (default: 10, current: 100)",
                       "seed: random seed. (default: 41, current: 43)"]))

    def test_kmeans_param(self):
        """KMeans getters reflect defaults and setter updates."""
        algo = KMeans()
        self.assertEqual(algo.getInitMode(), "k-means||")
        algo.setK(10)
        self.assertEqual(algo.getK(), 10)
        algo.setInitSteps(10)
        self.assertEqual(algo.getInitSteps(), 10)

    def test_hasseed(self):
        """Default seeds are deterministic per class and overridable."""
        noSeedSpecd = TestParams()
        withSeedSpecd = TestParams(seed=42)
        other = OtherTestParams()
        # Check that we no longer use 42 as the magic number
        self.assertNotEqual(noSeedSpecd.getSeed(), 42)
        origSeed = noSeedSpecd.getSeed()
        # Check that we only compute the seed once
        self.assertEqual(noSeedSpecd.getSeed(), origSeed)
        # Check that a specified seed is honored
        self.assertEqual(withSeedSpecd.getSeed(), 42)
        # Check that a different class has a different seed
        self.assertNotEqual(other.getSeed(), noSeedSpecd.getSeed())

    def test_param_property_error(self):
        """Params introspection must not evaluate unrelated properties."""
        param_store = HasThrowableProperty()
        self.assertRaises(RuntimeError, lambda: param_store.test_property)
        params = param_store.params  # should not invoke the property 'test_property'
        self.assertEqual(len(params), 1)

    def test_word2vec_param(self):
        """Word2Vec windowSize setter round-trips through the getter."""
        model = Word2Vec().setWindowSize(6)
        # Check windowSize is set properly
        self.assertEqual(model.getWindowSize(), 6)

    def test_copy_param_extras(self):
        """copy(extra=...) adds the extras to the copy without touching the original."""
        tp = TestParams(seed=42)
        extra = {tp.getParam(TestParams.inputCol.name): "copy_input"}
        tp_copy = tp.copy(extra=extra)
        self.assertEqual(tp.uid, tp_copy.uid)
        self.assertEqual(tp.params, tp_copy.params)
        for k, v in extra.items():
            self.assertTrue(tp_copy.isDefined(k))
            self.assertEqual(tp_copy.getOrDefault(k), v)
        # Removing the extras from the copy's param map must leave exactly the
        # original's explicitly-set params.
        copied_no_extra = {}
        for k, v in tp_copy._paramMap.items():
            if k not in extra:
                copied_no_extra[k] = v
        self.assertEqual(tp._paramMap, copied_no_extra)
        self.assertEqual(tp._defaultParamMap, tp_copy._defaultParamMap)

    def test_logistic_regression_check_thresholds(self):
        """Consistent threshold/thresholds pairs are accepted; others raise."""
        self.assertIsInstance(
            LogisticRegression(threshold=0.5, thresholds=[0.5, 0.5]),
            LogisticRegression
        )
        self.assertRaisesRegexp(
            ValueError,
            "Logistic Regression getThreshold found inconsistent.*$",
            LogisticRegression, threshold=0.42, thresholds=[0.5, 0.5]
        )

    @staticmethod
    def check_params(test_self, py_stage, check_params_exist=True):
        """
        Checks common requirements for Params.params:
        - set of params exist in Java and Python and are ordered by names
        - param parent has the same UID as the object's UID
        - default param value from Java matches value in Python
        - optionally check if all params from Java also exist in Python
        """
        py_stage_str = "%s %s" % (type(py_stage), py_stage)
        # Pure-Python stages have no Java counterpart; nothing to check.
        if not hasattr(py_stage, "_to_java"):
            return
        java_stage = py_stage._to_java()
        if java_stage is None:
            return
        test_self.assertEqual(py_stage.uid, java_stage.uid(), msg=py_stage_str)
        if check_params_exist:
            param_names = [p.name for p in py_stage.params]
            java_params = list(java_stage.params())
            java_param_names = [jp.name() for jp in java_params]
            test_self.assertEqual(
                param_names, sorted(java_param_names),
                "Param list in Python does not match Java for %s:\nJava = %s\nPython = %s"
                % (py_stage_str, java_param_names, param_names))
        for p in py_stage.params:
            test_self.assertEqual(p.parent, py_stage.uid)
            java_param = java_stage.getParam(p.name)
            py_has_default = py_stage.hasDefault(p)
            java_has_default = java_stage.hasDefault(java_param)
            test_self.assertEqual(py_has_default, java_has_default,
                                  "Default value mismatch of param %s for Params %s"
                                  % (p.name, str(py_stage)))
            if py_has_default:
                if p.name == "seed":
                    continue  # Random seeds between Spark and PySpark are different
                java_default = _java2py(test_self.sc,
                                        java_stage.clear(java_param).getOrDefault(java_param))
                py_stage._clear(p)
                py_default = py_stage.getOrDefault(p)
                # equality test for NaN is always False
                if isinstance(java_default, float) and np.isnan(java_default):
                    java_default = "NaN"
                    py_default = "NaN" if np.isnan(py_default) else "not NaN"
                test_self.assertEqual(
                    java_default, py_default,
                    "Java default %s != python default %s of param %s for Params %s"
                    % (str(java_default), str(py_default), p.name, str(py_stage)))
class EvaluatorTests(SparkSessionTestCase):
    """Tests for evaluator copy semantics vs. the backing Java object."""

    def test_java_params(self):
        """
        This tests a bug fixed by SPARK-18274 which causes multiple copies
        of a Params instance in Python to be linked to the same Java instance.
        """
        evaluator = RegressionEvaluator(metricName="r2")
        df = self.spark.createDataFrame([Row(label=1.0, prediction=1.1)])
        evaluator.evaluate(df)
        self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
        evaluatorCopy = evaluator.copy({evaluator.metricName: "mae"})
        evaluator.evaluate(df)
        evaluatorCopy.evaluate(df)
        # The copy must own its own Java object: metric names stay independent.
        self.assertEqual(evaluator._java_obj.getMetricName(), "r2")
        self.assertEqual(evaluatorCopy._java_obj.getMetricName(), "mae")
class FeatureTests(SparkSessionTestCase):
    """Tests for feature transformers and estimators: Binarizer, IDF, NGram,
    StopWordsRemover, CountVectorizer, RFormula and StringIndexer."""

    def test_binarizer(self):
        """Binarizer param defaults, setters, and copy-with-extra behavior."""
        b0 = Binarizer()
        self.assertListEqual(b0.params, [b0.inputCol, b0.outputCol, b0.threshold])
        # BUG FIX: the original asserted `all([~b0.isSet(p) ...])`. Bitwise NOT of
        # a bool yields a nonzero int (~True == -2, ~False == -1), both truthy, so
        # the assertion was vacuous. `not` performs the intended logical negation:
        # no param is explicitly set on a fresh Binarizer.
        self.assertTrue(all([not b0.isSet(p) for p in b0.params]))
        self.assertTrue(b0.hasDefault(b0.threshold))
        self.assertEqual(b0.getThreshold(), 0.0)
        b0.setParams(inputCol="input", outputCol="output").setThreshold(1.0)
        self.assertTrue(all([b0.isSet(p) for p in b0.params]))
        self.assertEqual(b0.getThreshold(), 1.0)
        self.assertEqual(b0.getInputCol(), "input")
        self.assertEqual(b0.getOutputCol(), "output")
        # A copy with an extra param map keeps uid and params but overrides values.
        b0c = b0.copy({b0.threshold: 2.0})
        self.assertEqual(b0c.uid, b0.uid)
        self.assertListEqual(b0c.params, b0.params)
        self.assertEqual(b0c.getThreshold(), 2.0)
        b1 = Binarizer(threshold=2.0, inputCol="input", outputCol="output")
        self.assertNotEqual(b1.uid, b0.uid)
        self.assertEqual(b1.getThreshold(), 2.0)
        self.assertEqual(b1.getInputCol(), "input")
        self.assertEqual(b1.getOutputCol(), "output")

    def test_idf(self):
        """IDF fitting: model inherits the estimator uid and honors fit-time params."""
        dataset = self.spark.createDataFrame([
            (DenseVector([1.0, 2.0]),),
            (DenseVector([0.0, 1.0]),),
            (DenseVector([3.0, 0.2]),)], ["tf"])
        idf0 = IDF(inputCol="tf")
        self.assertListEqual(idf0.params, [idf0.inputCol, idf0.minDocFreq, idf0.outputCol])
        idf0m = idf0.fit(dataset, {idf0.outputCol: "idf"})
        self.assertEqual(idf0m.uid, idf0.uid,
                         "Model should inherit the UID from its parent estimator.")
        output = idf0m.transform(dataset)
        self.assertIsNotNone(output.head().idf)
        # Test that parameters transferred to Python Model
        ParamTests.check_params(self, idf0m)

    def test_ngram(self):
        """NGram produces the expected n-gram strings for n=4."""
        dataset = self.spark.createDataFrame([
            Row(input=["a", "b", "c", "d", "e"])])
        ngram0 = NGram(n=4, inputCol="input", outputCol="output")
        self.assertEqual(ngram0.getN(), 4)
        self.assertEqual(ngram0.getInputCol(), "input")
        self.assertEqual(ngram0.getOutputCol(), "output")
        transformedDF = ngram0.transform(dataset)
        self.assertEqual(transformedDF.head().output, ["a b c d", "b c d e"])

    def test_stopwordsremover(self):
        """StopWordsRemover with default, custom, and language-loaded stop words."""
        dataset = self.spark.createDataFrame([Row(input=["a", "panda"])])
        stopWordRemover = StopWordsRemover(inputCol="input", outputCol="output")
        # Default
        self.assertEqual(stopWordRemover.getInputCol(), "input")
        transformedDF = stopWordRemover.transform(dataset)
        self.assertEqual(transformedDF.head().output, ["panda"])
        self.assertEqual(type(stopWordRemover.getStopWords()), list)
        self.assertTrue(isinstance(stopWordRemover.getStopWords()[0], basestring))
        # Custom
        stopwords = ["panda"]
        stopWordRemover.setStopWords(stopwords)
        self.assertEqual(stopWordRemover.getInputCol(), "input")
        self.assertEqual(stopWordRemover.getStopWords(), stopwords)
        transformedDF = stopWordRemover.transform(dataset)
        self.assertEqual(transformedDF.head().output, ["a"])
        # with language selection
        stopwords = StopWordsRemover.loadDefaultStopWords("turkish")
        dataset = self.spark.createDataFrame([Row(input=["acaba", "ama", "biri"])])
        stopWordRemover.setStopWords(stopwords)
        self.assertEqual(stopWordRemover.getStopWords(), stopwords)
        transformedDF = stopWordRemover.transform(dataset)
        self.assertEqual(transformedDF.head().output, [])

    def test_count_vectorizer_with_binary(self):
        """CountVectorizer with binary=True caps every term count at 1.0."""
        dataset = self.spark.createDataFrame([
            (0, "a a a b b c".split(' '), SparseVector(3, {0: 1.0, 1: 1.0, 2: 1.0}),),
            (1, "a a".split(' '), SparseVector(3, {0: 1.0}),),
            (2, "a b".split(' '), SparseVector(3, {0: 1.0, 1: 1.0}),),
            (3, "c".split(' '), SparseVector(3, {2: 1.0}),)], ["id", "words", "expected"])
        cv = CountVectorizer(binary=True, inputCol="words", outputCol="features")
        model = cv.fit(dataset)
        transformedList = model.transform(dataset).select("features", "expected").collect()
        for r in transformedList:
            feature, expected = r
            self.assertEqual(feature, expected)

    def test_rformula_force_index_label(self):
        """RFormula leaves a numeric label alone unless forceIndexLabel is set."""
        df = self.spark.createDataFrame([
            (1.0, 1.0, "a"),
            (0.0, 2.0, "b"),
            (1.0, 0.0, "a")], ["y", "x", "s"])
        # Does not index label by default since it's numeric type.
        rf = RFormula(formula="y ~ x + s")
        model = rf.fit(df)
        transformedDF = model.transform(df)
        self.assertEqual(transformedDF.head().label, 1.0)
        # Force to index label.
        rf2 = RFormula(formula="y ~ x + s").setForceIndexLabel(True)
        model2 = rf2.fit(df)
        transformedDF2 = model2.transform(df)
        self.assertEqual(transformedDF2.head().label, 0.0)

    def test_rformula_string_indexer_order_type(self):
        """stringIndexerOrderType=alphabetDesc controls the string-feature encoding."""
        df = self.spark.createDataFrame([
            (1.0, 1.0, "a"),
            (0.0, 2.0, "b"),
            (1.0, 0.0, "a")], ["y", "x", "s"])
        rf = RFormula(formula="y ~ x + s", stringIndexerOrderType="alphabetDesc")
        self.assertEqual(rf.getStringIndexerOrderType(), 'alphabetDesc')
        transformedDF = rf.fit(df).transform(df)
        observed = transformedDF.select("features").collect()
        expected = [[1.0, 0.0], [2.0, 1.0], [0.0, 0.0]]
        for i in range(0, len(expected)):
            self.assertTrue(all(observed[i]["features"].toArray() == expected[i]))

    def test_string_indexer_handle_invalid(self):
        """handleInvalid='keep' maps nulls to an extra index; 'skip' drops them."""
        df = self.spark.createDataFrame([
            (0, "a"),
            (1, "d"),
            (2, None)], ["id", "label"])
        si1 = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="keep",
                            stringOrderType="alphabetAsc")
        model1 = si1.fit(df)
        td1 = model1.transform(df)
        actual1 = td1.select("id", "indexed").collect()
        expected1 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0), Row(id=2, indexed=2.0)]
        self.assertEqual(actual1, expected1)
        si2 = si1.setHandleInvalid("skip")
        model2 = si2.fit(df)
        td2 = model2.transform(df)
        actual2 = td2.select("id", "indexed").collect()
        expected2 = [Row(id=0, indexed=0.0), Row(id=1, indexed=1.0)]
        self.assertEqual(actual2, expected2)
class HasInducedError(Params):
    """Mixin declaring an ``inducedError`` param: the scale of
    uniformly-distributed noise added to the prediction."""

    def __init__(self):
        super(HasInducedError, self).__init__()
        self.inducedError = Param(self, "inducedError",
                                  "Uniformly-distributed error added to feature")

    def getInducedError(self):
        """Return the current (or default) value of ``inducedError``."""
        return self.getOrDefault(self.inducedError)
class InducedErrorModel(Model, HasInducedError):
    """Model whose prediction is ``feature`` plus random noise scaled by
    ``inducedError`` (zero error means a perfect prediction)."""

    def __init__(self):
        super(InducedErrorModel, self).__init__()

    def _transform(self, dataset):
        # rand(0) uses a fixed seed, so transforms are reproducible across runs.
        return dataset.withColumn("prediction",
                                  dataset.feature + (rand(0) * self.getInducedError()))
class InducedErrorEstimator(Estimator, HasInducedError):
    """Estimator that ignores the data and emits an InducedErrorModel
    carrying the configured ``inducedError`` value."""

    def __init__(self, inducedError=1.0):
        super(InducedErrorEstimator, self).__init__()
        self._set(inducedError=inducedError)

    def _fit(self, dataset):
        # No actual training: just propagate param values onto the model.
        model = InducedErrorModel()
        self._copyValues(model)
        return model
class CrossValidatorTests(SparkSessionTestCase):
    """Tests for CrossValidator/CrossValidatorModel: copy semantics, metric
    optimization direction, persistence, and parallel evaluation."""

    def test_copy(self):
        """Copies keep the estimator uid and (approximately) the avg metrics."""
        dataset = self.spark.createDataFrame([
            (10, 10.0),
            (50, 50.0),
            (100, 100.0),
            (500, 500.0)] * 10,
            ["feature", "label"])
        iee = InducedErrorEstimator()
        evaluator = RegressionEvaluator(metricName="rmse")
        grid = (ParamGridBuilder()
                .addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
                .build())
        cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
        cvCopied = cv.copy()
        self.assertEqual(cv.getEstimator().uid, cvCopied.getEstimator().uid)
        cvModel = cv.fit(dataset)
        cvModelCopied = cvModel.copy()
        for index in range(len(cvModel.avgMetrics)):
            self.assertTrue(abs(cvModel.avgMetrics[index] - cvModelCopied.avgMetrics[index])
                            < 0.0001)

    def test_fit_minimize_metric(self):
        """With rmse (smaller is better) the zero-induced-error model must win."""
        dataset = self.spark.createDataFrame([
            (10, 10.0),
            (50, 50.0),
            (100, 100.0),
            (500, 500.0)] * 10,
            ["feature", "label"])
        iee = InducedErrorEstimator()
        evaluator = RegressionEvaluator(metricName="rmse")
        grid = (ParamGridBuilder()
                .addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
                .build())
        cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
        cvModel = cv.fit(dataset)
        bestModel = cvModel.bestModel
        bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
        self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
                         "Best model should have zero induced error")
        self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")

    def test_fit_maximize_metric(self):
        """With r2 (larger is better) the zero-induced-error model must win."""
        dataset = self.spark.createDataFrame([
            (10, 10.0),
            (50, 50.0),
            (100, 100.0),
            (500, 500.0)] * 10,
            ["feature", "label"])
        iee = InducedErrorEstimator()
        evaluator = RegressionEvaluator(metricName="r2")
        grid = (ParamGridBuilder()
                .addGrid(iee.inducedError, [100.0, 0.0, 10000.0])
                .build())
        cv = CrossValidator(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
        cvModel = cv.fit(dataset)
        bestModel = cvModel.bestModel
        bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
        self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
                         "Best model should have zero induced error")
        self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")

    def test_save_load_trained_model(self):
        """The best model selected by CV can be saved and reloaded directly."""
        # This tests saving and loading the trained model only.
        # Save/load for CrossValidator will be added later: SPARK-13786
        temp_path = tempfile.mkdtemp()
        dataset = self.spark.createDataFrame(
            [(Vectors.dense([0.0]), 0.0),
             (Vectors.dense([0.4]), 1.0),
             (Vectors.dense([0.5]), 0.0),
             (Vectors.dense([0.6]), 1.0),
             (Vectors.dense([1.0]), 1.0)] * 10,
            ["features", "label"])
        lr = LogisticRegression()
        grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
        evaluator = BinaryClassificationEvaluator()
        cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
        cvModel = cv.fit(dataset)
        lrModel = cvModel.bestModel
        cvModelPath = temp_path + "/cvModel"
        lrModel.save(cvModelPath)
        loadedLrModel = LogisticRegressionModel.load(cvModelPath)
        self.assertEqual(loadedLrModel.uid, lrModel.uid)
        self.assertEqual(loadedLrModel.intercept, lrModel.intercept)

    def test_save_load_simple_estimator(self):
        """CrossValidator and CrossValidatorModel round-trip through save/load."""
        temp_path = tempfile.mkdtemp()
        dataset = self.spark.createDataFrame(
            [(Vectors.dense([0.0]), 0.0),
             (Vectors.dense([0.4]), 1.0),
             (Vectors.dense([0.5]), 0.0),
             (Vectors.dense([0.6]), 1.0),
             (Vectors.dense([1.0]), 1.0)] * 10,
            ["features", "label"])
        lr = LogisticRegression()
        grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
        evaluator = BinaryClassificationEvaluator()
        # test save/load of CrossValidator
        cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
        cvModel = cv.fit(dataset)
        cvPath = temp_path + "/cv"
        cv.save(cvPath)
        loadedCV = CrossValidator.load(cvPath)
        self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
        self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
        self.assertEqual(loadedCV.getEstimatorParamMaps(), cv.getEstimatorParamMaps())
        # test save/load of CrossValidatorModel
        cvModelPath = temp_path + "/cvModel"
        cvModel.save(cvModelPath)
        loadedModel = CrossValidatorModel.load(cvModelPath)
        self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)

    def test_parallel_evaluation(self):
        """Serial and parallel fits must yield identical avg metrics."""
        dataset = self.spark.createDataFrame(
            [(Vectors.dense([0.0]), 0.0),
             (Vectors.dense([0.4]), 1.0),
             (Vectors.dense([0.5]), 0.0),
             (Vectors.dense([0.6]), 1.0),
             (Vectors.dense([1.0]), 1.0)] * 10,
            ["features", "label"])
        lr = LogisticRegression()
        grid = ParamGridBuilder().addGrid(lr.maxIter, [5, 6]).build()
        evaluator = BinaryClassificationEvaluator()
        # fit the same CrossValidator serially and in parallel, then compare
        cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
        cv.setParallelism(1)
        cvSerialModel = cv.fit(dataset)
        cv.setParallelism(2)
        cvParallelModel = cv.fit(dataset)
        self.assertEqual(cvSerialModel.avgMetrics, cvParallelModel.avgMetrics)

    def test_save_load_nested_estimator(self):
        """Persistence when the tuned estimator itself holds estimator params."""
        temp_path = tempfile.mkdtemp()
        dataset = self.spark.createDataFrame(
            [(Vectors.dense([0.0]), 0.0),
             (Vectors.dense([0.4]), 1.0),
             (Vectors.dense([0.5]), 0.0),
             (Vectors.dense([0.6]), 1.0),
             (Vectors.dense([1.0]), 1.0)] * 10,
            ["features", "label"])
        ova = OneVsRest(classifier=LogisticRegression())
        lr1 = LogisticRegression().setMaxIter(100)
        lr2 = LogisticRegression().setMaxIter(150)
        grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
        evaluator = MulticlassClassificationEvaluator()
        # test save/load of CrossValidator
        cv = CrossValidator(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
        cvModel = cv.fit(dataset)
        cvPath = temp_path + "/cv"
        cv.save(cvPath)
        loadedCV = CrossValidator.load(cvPath)
        self.assertEqual(loadedCV.getEstimator().uid, cv.getEstimator().uid)
        self.assertEqual(loadedCV.getEvaluator().uid, cv.getEvaluator().uid)
        originalParamMap = cv.getEstimatorParamMaps()
        loadedParamMap = loadedCV.getEstimatorParamMaps()
        for i, param in enumerate(loadedParamMap):
            for p in param:
                # Nested estimators are compared by uid, other values by equality.
                if p.name == "classifier":
                    self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
                else:
                    self.assertEqual(param[p], originalParamMap[i][p])
        # test save/load of CrossValidatorModel
        cvModelPath = temp_path + "/cvModel"
        cvModel.save(cvModelPath)
        loadedModel = CrossValidatorModel.load(cvModelPath)
        self.assertEqual(loadedModel.bestModel.uid, cvModel.bestModel.uid)
class TrainValidationSplitTests(SparkSessionTestCase):
    """Tests for TrainValidationSplit(Model): metric optimization direction,
    persistence, parallel evaluation, and copy semantics."""

    def test_fit_minimize_metric(self):
        """With rmse (smaller is better) the zero-induced-error model must win."""
        dataset = self.spark.createDataFrame([
            (10, 10.0),
            (50, 50.0),
            (100, 100.0),
            (500, 500.0)] * 10,
            ["feature", "label"])
        iee = InducedErrorEstimator()
        evaluator = RegressionEvaluator(metricName="rmse")
        grid = ParamGridBuilder() \
            .addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
            .build()
        tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
        tvsModel = tvs.fit(dataset)
        bestModel = tvsModel.bestModel
        bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
        validationMetrics = tvsModel.validationMetrics
        self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
                         "Best model should have zero induced error")
        self.assertEqual(0.0, bestModelMetric, "Best model has RMSE of 0")
        self.assertEqual(len(grid), len(validationMetrics),
                         "validationMetrics has the same size of grid parameter")
        self.assertEqual(0.0, min(validationMetrics))

    def test_fit_maximize_metric(self):
        """With r2 (larger is better) the zero-induced-error model must win."""
        dataset = self.spark.createDataFrame([
            (10, 10.0),
            (50, 50.0),
            (100, 100.0),
            (500, 500.0)] * 10,
            ["feature", "label"])
        iee = InducedErrorEstimator()
        evaluator = RegressionEvaluator(metricName="r2")
        grid = ParamGridBuilder() \
            .addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
            .build()
        tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
        tvsModel = tvs.fit(dataset)
        bestModel = tvsModel.bestModel
        bestModelMetric = evaluator.evaluate(bestModel.transform(dataset))
        validationMetrics = tvsModel.validationMetrics
        self.assertEqual(0.0, bestModel.getOrDefault('inducedError'),
                         "Best model should have zero induced error")
        self.assertEqual(1.0, bestModelMetric, "Best model has R-squared of 1")
        self.assertEqual(len(grid), len(validationMetrics),
                         "validationMetrics has the same size of grid parameter")
        self.assertEqual(1.0, max(validationMetrics))

    def test_save_load_trained_model(self):
        """The best model selected by TVS can be saved and reloaded directly."""
        # This tests saving and loading the trained model only.
        # Save/load for TrainValidationSplit will be added later: SPARK-13786
        temp_path = tempfile.mkdtemp()
        dataset = self.spark.createDataFrame(
            [(Vectors.dense([0.0]), 0.0),
             (Vectors.dense([0.4]), 1.0),
             (Vectors.dense([0.5]), 0.0),
             (Vectors.dense([0.6]), 1.0),
             (Vectors.dense([1.0]), 1.0)] * 10,
            ["features", "label"])
        lr = LogisticRegression()
        grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
        evaluator = BinaryClassificationEvaluator()
        tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
        tvsModel = tvs.fit(dataset)
        lrModel = tvsModel.bestModel
        tvsModelPath = temp_path + "/tvsModel"
        lrModel.save(tvsModelPath)
        loadedLrModel = LogisticRegressionModel.load(tvsModelPath)
        self.assertEqual(loadedLrModel.uid, lrModel.uid)
        self.assertEqual(loadedLrModel.intercept, lrModel.intercept)

    def test_save_load_simple_estimator(self):
        """TrainValidationSplit and its model round-trip through save/load."""
        # This tests saving and loading the trained model only.
        # Save/load for TrainValidationSplit will be added later: SPARK-13786
        temp_path = tempfile.mkdtemp()
        dataset = self.spark.createDataFrame(
            [(Vectors.dense([0.0]), 0.0),
             (Vectors.dense([0.4]), 1.0),
             (Vectors.dense([0.5]), 0.0),
             (Vectors.dense([0.6]), 1.0),
             (Vectors.dense([1.0]), 1.0)] * 10,
            ["features", "label"])
        lr = LogisticRegression()
        grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
        evaluator = BinaryClassificationEvaluator()
        tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
        tvsModel = tvs.fit(dataset)
        tvsPath = temp_path + "/tvs"
        tvs.save(tvsPath)
        loadedTvs = TrainValidationSplit.load(tvsPath)
        self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
        self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
        self.assertEqual(loadedTvs.getEstimatorParamMaps(), tvs.getEstimatorParamMaps())
        tvsModelPath = temp_path + "/tvsModel"
        tvsModel.save(tvsModelPath)
        loadedModel = TrainValidationSplitModel.load(tvsModelPath)
        self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)

    def test_parallel_evaluation(self):
        """Serial and parallel fits must yield identical validation metrics."""
        dataset = self.spark.createDataFrame(
            [(Vectors.dense([0.0]), 0.0),
             (Vectors.dense([0.4]), 1.0),
             (Vectors.dense([0.5]), 0.0),
             (Vectors.dense([0.6]), 1.0),
             (Vectors.dense([1.0]), 1.0)] * 10,
            ["features", "label"])
        lr = LogisticRegression()
        grid = ParamGridBuilder().addGrid(lr.maxIter, [5, 6]).build()
        evaluator = BinaryClassificationEvaluator()
        tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator)
        tvs.setParallelism(1)
        tvsSerialModel = tvs.fit(dataset)
        tvs.setParallelism(2)
        tvsParallelModel = tvs.fit(dataset)
        self.assertEqual(tvsSerialModel.validationMetrics, tvsParallelModel.validationMetrics)

    def test_save_load_nested_estimator(self):
        """Persistence when the tuned estimator itself holds estimator params."""
        # This tests saving and loading the trained model only.
        # Save/load for TrainValidationSplit will be added later: SPARK-13786
        temp_path = tempfile.mkdtemp()
        dataset = self.spark.createDataFrame(
            [(Vectors.dense([0.0]), 0.0),
             (Vectors.dense([0.4]), 1.0),
             (Vectors.dense([0.5]), 0.0),
             (Vectors.dense([0.6]), 1.0),
             (Vectors.dense([1.0]), 1.0)] * 10,
            ["features", "label"])
        ova = OneVsRest(classifier=LogisticRegression())
        lr1 = LogisticRegression().setMaxIter(100)
        lr2 = LogisticRegression().setMaxIter(150)
        grid = ParamGridBuilder().addGrid(ova.classifier, [lr1, lr2]).build()
        evaluator = MulticlassClassificationEvaluator()
        tvs = TrainValidationSplit(estimator=ova, estimatorParamMaps=grid, evaluator=evaluator)
        tvsModel = tvs.fit(dataset)
        tvsPath = temp_path + "/tvs"
        tvs.save(tvsPath)
        loadedTvs = TrainValidationSplit.load(tvsPath)
        self.assertEqual(loadedTvs.getEstimator().uid, tvs.getEstimator().uid)
        self.assertEqual(loadedTvs.getEvaluator().uid, tvs.getEvaluator().uid)
        originalParamMap = tvs.getEstimatorParamMaps()
        loadedParamMap = loadedTvs.getEstimatorParamMaps()
        for i, param in enumerate(loadedParamMap):
            for p in param:
                # Nested estimators are compared by uid, other values by equality.
                if p.name == "classifier":
                    self.assertEqual(param[p].uid, originalParamMap[i][p].uid)
                else:
                    self.assertEqual(param[p], originalParamMap[i][p])
        tvsModelPath = temp_path + "/tvsModel"
        tvsModel.save(tvsModelPath)
        loadedModel = TrainValidationSplitModel.load(tvsModelPath)
        self.assertEqual(loadedModel.bestModel.uid, tvsModel.bestModel.uid)

    def test_copy(self):
        """Copies keep estimator/best-model uids and the validation metrics."""
        dataset = self.spark.createDataFrame([
            (10, 10.0),
            (50, 50.0),
            (100, 100.0),
            (500, 500.0)] * 10,
            ["feature", "label"])
        iee = InducedErrorEstimator()
        evaluator = RegressionEvaluator(metricName="r2")
        grid = ParamGridBuilder() \
            .addGrid(iee.inducedError, [100.0, 0.0, 10000.0]) \
            .build()
        tvs = TrainValidationSplit(estimator=iee, estimatorParamMaps=grid, evaluator=evaluator)
        tvsModel = tvs.fit(dataset)
        tvsCopied = tvs.copy()
        tvsModelCopied = tvsModel.copy()
        self.assertEqual(tvs.getEstimator().uid, tvsCopied.getEstimator().uid,
                         "Copied TrainValidationSplit has the same uid of Estimator")
        self.assertEqual(tvsModel.bestModel.uid, tvsModelCopied.bestModel.uid)
        self.assertEqual(len(tvsModel.validationMetrics),
                         len(tvsModelCopied.validationMetrics),
                         "Copied validationMetrics has the same size of the original")
        for index in range(len(tvsModel.validationMetrics)):
            self.assertEqual(tvsModel.validationMetrics[index],
                             tvsModelCopied.validationMetrics[index])
class PersistenceTest(SparkSessionTestCase):
def test_linear_regression(self):
lr = LinearRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/lr"
lr.save(lr_path)
lr2 = LinearRegression.load(lr_path)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(type(lr.uid), type(lr2.uid))
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LinearRegression instance uid (%s) did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LinearRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_logistic_regression(self):
lr = LogisticRegression(maxIter=1)
path = tempfile.mkdtemp()
lr_path = path + "/logreg"
lr.save(lr_path)
lr2 = LogisticRegression.load(lr_path)
self.assertEqual(lr2.uid, lr2.maxIter.parent,
"Loaded LogisticRegression instance uid (%s) "
"did not match Param's uid (%s)"
% (lr2.uid, lr2.maxIter.parent))
self.assertEqual(lr._defaultParamMap[lr.maxIter], lr2._defaultParamMap[lr2.maxIter],
"Loaded LogisticRegression instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
    def _compare_params(self, m1, m2, param):
        """
        Compare 2 ML Params instances for the given param, and assert both have the same param value
        and parent. The param must be a parameter of m1.
        """
        # Prevent key not found error in case of some param in neither paramMap nor defaultParamMap.
        if m1.isDefined(param):
            paramValue1 = m1.getOrDefault(param)
            paramValue2 = m2.getOrDefault(m2.getParam(param.name))
            if isinstance(paramValue1, Params):
                # A param value can itself be a Params (e.g. OneVsRest.classifier);
                # recurse instead of comparing by plain equality.
                self._compare_pipelines(paramValue1, paramValue2)
            else:
                self.assertEqual(paramValue1, paramValue2)  # for general types param
            # Assert parents are equal
            self.assertEqual(param.parent, m2.getParam(param.name).parent)
        else:
            # If m1 is not defined param, then m2 should not, too. See SPARK-14931.
            self.assertFalse(m2.isDefined(m2.getParam(param.name)))
    def _compare_pipelines(self, m1, m2):
        """
        Compare 2 ML types, asserting that they are equivalent.
        This currently supports:
        - basic types
        - Pipeline, PipelineModel
        - OneVsRest, OneVsRestModel
        This checks:
        - uid
        - type
        - Param values and parents
        """
        self.assertEqual(m1.uid, m2.uid)
        self.assertEqual(type(m1), type(m2))
        if isinstance(m1, JavaParams) or isinstance(m1, Transformer):
            # Leaf stage: compare every param directly.
            self.assertEqual(len(m1.params), len(m2.params))
            for p in m1.params:
                self._compare_params(m1, m2, p)
        elif isinstance(m1, Pipeline):
            # Recurse into the (unfitted) stages.
            self.assertEqual(len(m1.getStages()), len(m2.getStages()))
            for s1, s2 in zip(m1.getStages(), m2.getStages()):
                self._compare_pipelines(s1, s2)
        elif isinstance(m1, PipelineModel):
            # Recurse into the fitted stages.
            self.assertEqual(len(m1.stages), len(m2.stages))
            for s1, s2 in zip(m1.stages, m2.stages):
                self._compare_pipelines(s1, s2)
        elif isinstance(m1, OneVsRest) or isinstance(m1, OneVsRestModel):
            for p in m1.params:
                self._compare_params(m1, m2, p)
            if isinstance(m1, OneVsRestModel):
                # Also compare the per-class sub-models.
                self.assertEqual(len(m1.models), len(m2.models))
                for x, y in zip(m1.models, m2.models):
                    self._compare_pipelines(x, y)
        else:
            raise RuntimeError("_compare_pipelines does not yet support type: %s" % type(m1))
def test_pipeline_persistence(self):
"""
Pipeline[HashingTF, PCA]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
pca = PCA(k=2, inputCol="features", outputCol="pca_features")
pl = Pipeline(stages=[tf, pca])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
    def test_nested_pipeline_persistence(self):
        """
        Pipeline[HashingTF, Pipeline[PCA]]

        Verifies that a pipeline containing another pipeline as a stage
        survives save/load for both the estimator and the fitted model.
        """
        temp_path = tempfile.mkdtemp()
        try:
            df = self.spark.createDataFrame([(["a", "b", "c"],), (["c", "d", "e"],)], ["words"])
            tf = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
            pca = PCA(k=2, inputCol="features", outputCol="pca_features")
            p0 = Pipeline(stages=[pca])
            pl = Pipeline(stages=[tf, p0])
            model = pl.fit(df)
            pipeline_path = temp_path + "/pipeline"
            pl.save(pipeline_path)
            loaded_pipeline = Pipeline.load(pipeline_path)
            self._compare_pipelines(pl, loaded_pipeline)
            model_path = temp_path + "/pipeline-model"
            model.save(model_path)
            loaded_model = PipelineModel.load(model_path)
            self._compare_pipelines(model, loaded_model)
        finally:
            # Best-effort cleanup of the scratch directory.
            try:
                rmtree(temp_path)
            except OSError:
                pass
def test_python_transformer_pipeline_persistence(self):
"""
Pipeline[MockUnaryTransformer, Binarizer]
"""
temp_path = tempfile.mkdtemp()
try:
df = self.spark.range(0, 10).toDF('input')
tf = MockUnaryTransformer(shiftVal=2)\
.setInputCol("input").setOutputCol("shiftedInput")
tf2 = Binarizer(threshold=6, inputCol="shiftedInput", outputCol="binarized")
pl = Pipeline(stages=[tf, tf2])
model = pl.fit(df)
pipeline_path = temp_path + "/pipeline"
pl.save(pipeline_path)
loaded_pipeline = Pipeline.load(pipeline_path)
self._compare_pipelines(pl, loaded_pipeline)
model_path = temp_path + "/pipeline-model"
model.save(model_path)
loaded_model = PipelineModel.load(model_path)
self._compare_pipelines(model, loaded_model)
finally:
try:
rmtree(temp_path)
except OSError:
pass
def test_onevsrest(self):
temp_path = tempfile.mkdtemp()
df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
(1.0, Vectors.sparse(2, [], [])),
(2.0, Vectors.dense(0.5, 0.5))] * 10,
["label", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01)
ovr = OneVsRest(classifier=lr)
model = ovr.fit(df)
ovrPath = temp_path + "/ovr"
ovr.save(ovrPath)
loadedOvr = OneVsRest.load(ovrPath)
self._compare_pipelines(ovr, loadedOvr)
modelPath = temp_path + "/ovrModel"
model.save(modelPath)
loadedModel = OneVsRestModel.load(modelPath)
self._compare_pipelines(model, loadedModel)
def test_decisiontree_classifier(self):
dt = DecisionTreeClassifier(maxDepth=1)
path = tempfile.mkdtemp()
dtc_path = path + "/dtc"
dt.save(dtc_path)
dt2 = DecisionTreeClassifier.load(dtc_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeClassifier instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeClassifier instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_decisiontree_regressor(self):
dt = DecisionTreeRegressor(maxDepth=1)
path = tempfile.mkdtemp()
dtr_path = path + "/dtr"
dt.save(dtr_path)
dt2 = DecisionTreeClassifier.load(dtr_path)
self.assertEqual(dt2.uid, dt2.maxDepth.parent,
"Loaded DecisionTreeRegressor instance uid (%s) "
"did not match Param's uid (%s)"
% (dt2.uid, dt2.maxDepth.parent))
self.assertEqual(dt._defaultParamMap[dt.maxDepth], dt2._defaultParamMap[dt2.maxDepth],
"Loaded DecisionTreeRegressor instance default params did not match " +
"original defaults")
try:
rmtree(path)
except OSError:
pass
def test_default_read_write(self):
temp_path = tempfile.mkdtemp()
lr = LogisticRegression()
lr.setMaxIter(50)
lr.setThreshold(.75)
writer = DefaultParamsWriter(lr)
savePath = temp_path + "/lr"
writer.save(savePath)
reader = DefaultParamsReadable.read()
lr2 = reader.load(savePath)
self.assertEqual(lr.uid, lr2.uid)
self.assertEqual(lr.extractParamMap(), lr2.extractParamMap())
# test overwrite
lr.setThreshold(.8)
writer.overwrite().save(savePath)
reader = DefaultParamsReadable.read()
lr3 = reader.load(savePath)
self.assertEqual(lr.uid, lr3.uid)
self.assertEqual(lr.extractParamMap(), lr3.extractParamMap())
class LDATest(SparkSessionTestCase):
    """Persistence tests for LDA and its local/distributed model variants."""
    def _compare(self, m1, m2):
        """
        Temp method for comparing instances.
        TODO: Replace with generic implementation once SPARK-14706 is merged.
        """
        self.assertEqual(m1.uid, m2.uid)
        self.assertEqual(type(m1), type(m2))
        self.assertEqual(len(m1.params), len(m2.params))
        for p in m1.params:
            # Only compare params that were explicitly set or defaulted on m1.
            if m1.isDefined(p):
                self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))
                self.assertEqual(p.parent, m2.getParam(p.name).parent)
        if isinstance(m1, LDAModel):
            self.assertEqual(m1.vocabSize(), m2.vocabSize())
            self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())
    def test_persistence(self):
        # Test save/load for LDA, LocalLDAModel, DistributedLDAModel.
        df = self.spark.createDataFrame([
            [1, Vectors.dense([0.0, 1.0])],
            [2, Vectors.sparse(2, {0: 1.0})],
        ], ["id", "features"])
        # Fit model; optimizer="em" yields a DistributedLDAModel.
        lda = LDA(k=2, seed=1, optimizer="em")
        distributedModel = lda.fit(df)
        self.assertTrue(distributedModel.isDistributed())
        localModel = distributedModel.toLocal()
        self.assertFalse(localModel.isDistributed())
        # Define paths
        path = tempfile.mkdtemp()
        lda_path = path + "/lda"
        dist_model_path = path + "/distLDAModel"
        local_model_path = path + "/localLDAModel"
        # Test LDA
        lda.save(lda_path)
        lda2 = LDA.load(lda_path)
        self._compare(lda, lda2)
        # Test DistributedLDAModel
        distributedModel.save(dist_model_path)
        distributedModel2 = DistributedLDAModel.load(dist_model_path)
        self._compare(distributedModel, distributedModel2)
        # Test LocalLDAModel
        localModel.save(local_model_path)
        localModel2 = LocalLDAModel.load(local_model_path)
        self._compare(localModel, localModel2)
        # Clean up
        try:
            rmtree(path)
        except OSError:
            pass
class TrainingSummaryTest(SparkSessionTestCase):
    """Smoke-tests for the Python training-summary wrappers: each test fits a
    tiny model, then touches every summary accessor to verify it is callable
    and returns the expected type (and, where stable, the expected value)."""
    def test_linear_regression_summary(self):
        df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
                                         (0.0, 2.0, Vectors.sparse(1, [], []))],
                                        ["label", "weight", "features"])
        lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight",
                              fitIntercept=False)
        model = lr.fit(df)
        self.assertTrue(model.hasSummary)
        s = model.summary
        # test that api is callable and returns expected types
        self.assertGreater(s.totalIterations, 0)
        self.assertTrue(isinstance(s.predictions, DataFrame))
        self.assertEqual(s.predictionCol, "prediction")
        self.assertEqual(s.labelCol, "label")
        self.assertEqual(s.featuresCol, "features")
        objHist = s.objectiveHistory
        self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
        self.assertAlmostEqual(s.explainedVariance, 0.25, 2)
        self.assertAlmostEqual(s.meanAbsoluteError, 0.0)
        self.assertAlmostEqual(s.meanSquaredError, 0.0)
        self.assertAlmostEqual(s.rootMeanSquaredError, 0.0)
        self.assertAlmostEqual(s.r2, 1.0, 2)
        self.assertTrue(isinstance(s.residuals, DataFrame))
        self.assertEqual(s.numInstances, 2)
        self.assertEqual(s.degreesOfFreedom, 1)
        devResiduals = s.devianceResiduals
        self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float))
        coefStdErr = s.coefficientStandardErrors
        self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
        tValues = s.tValues
        self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
        pValues = s.pValues
        self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
        # test evaluation (with training dataset) produces a summary with same values
        # one check is enough to verify a summary is returned
        # The child class LinearRegressionTrainingSummary runs full test
        sameSummary = model.evaluate(df)
        self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance)
    def test_glr_summary(self):
        from pyspark.ml.linalg import Vectors
        df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
                                         (0.0, 2.0, Vectors.sparse(1, [], []))],
                                        ["label", "weight", "features"])
        glr = GeneralizedLinearRegression(family="gaussian", link="identity", weightCol="weight",
                                          fitIntercept=False)
        model = glr.fit(df)
        self.assertTrue(model.hasSummary)
        s = model.summary
        # test that api is callable and returns expected types
        self.assertEqual(s.numIterations, 1)  # this should default to a single iteration of WLS
        self.assertTrue(isinstance(s.predictions, DataFrame))
        self.assertEqual(s.predictionCol, "prediction")
        self.assertEqual(s.numInstances, 2)
        self.assertTrue(isinstance(s.residuals(), DataFrame))
        self.assertTrue(isinstance(s.residuals("pearson"), DataFrame))
        coefStdErr = s.coefficientStandardErrors
        self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
        tValues = s.tValues
        self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
        pValues = s.pValues
        self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
        self.assertEqual(s.degreesOfFreedom, 1)
        self.assertEqual(s.residualDegreeOfFreedom, 1)
        self.assertEqual(s.residualDegreeOfFreedomNull, 2)
        self.assertEqual(s.rank, 1)
        # NOTE(review): `basestring` is a Python-2-only name -- presumably
        # aliased to `str` for Python 3 near the top of this file; confirm.
        self.assertTrue(isinstance(s.solver, basestring))
        self.assertTrue(isinstance(s.aic, float))
        self.assertTrue(isinstance(s.deviance, float))
        self.assertTrue(isinstance(s.nullDeviance, float))
        self.assertTrue(isinstance(s.dispersion, float))
        # test evaluation (with training dataset) produces a summary with same values
        # one check is enough to verify a summary is returned
        # The child class GeneralizedLinearRegressionTrainingSummary runs full test
        sameSummary = model.evaluate(df)
        self.assertAlmostEqual(sameSummary.deviance, s.deviance)
    def test_binary_logistic_regression_summary(self):
        df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
                                         (0.0, 2.0, Vectors.sparse(1, [], []))],
                                        ["label", "weight", "features"])
        lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
        model = lr.fit(df)
        self.assertTrue(model.hasSummary)
        s = model.summary
        # test that api is callable and returns expected types
        self.assertTrue(isinstance(s.predictions, DataFrame))
        self.assertEqual(s.probabilityCol, "probability")
        self.assertEqual(s.labelCol, "label")
        self.assertEqual(s.featuresCol, "features")
        self.assertEqual(s.predictionCol, "prediction")
        objHist = s.objectiveHistory
        self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
        self.assertGreater(s.totalIterations, 0)
        self.assertTrue(isinstance(s.labels, list))
        self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
        self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
        self.assertTrue(isinstance(s.precisionByLabel, list))
        self.assertTrue(isinstance(s.recallByLabel, list))
        self.assertTrue(isinstance(s.fMeasureByLabel(), list))
        self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
        # ROC/PR accessors are only available on the *binary* summary.
        self.assertTrue(isinstance(s.roc, DataFrame))
        self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
        self.assertTrue(isinstance(s.pr, DataFrame))
        self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
        self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
        self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
        self.assertAlmostEqual(s.accuracy, 1.0, 2)
        self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
        self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
        self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
        self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
        self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
        self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
        # test evaluation (with training dataset) produces a summary with same values
        # one check is enough to verify a summary is returned, Scala version runs full test
        sameSummary = model.evaluate(df)
        self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
    def test_multiclass_logistic_regression_summary(self):
        df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
                                         (0.0, 2.0, Vectors.sparse(1, [], [])),
                                         (2.0, 2.0, Vectors.dense(2.0)),
                                         (2.0, 2.0, Vectors.dense(1.9))],
                                        ["label", "weight", "features"])
        lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
        model = lr.fit(df)
        self.assertTrue(model.hasSummary)
        s = model.summary
        # test that api is callable and returns expected types
        self.assertTrue(isinstance(s.predictions, DataFrame))
        self.assertEqual(s.probabilityCol, "probability")
        self.assertEqual(s.labelCol, "label")
        self.assertEqual(s.featuresCol, "features")
        self.assertEqual(s.predictionCol, "prediction")
        objHist = s.objectiveHistory
        self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
        self.assertGreater(s.totalIterations, 0)
        self.assertTrue(isinstance(s.labels, list))
        self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
        self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
        self.assertTrue(isinstance(s.precisionByLabel, list))
        self.assertTrue(isinstance(s.recallByLabel, list))
        self.assertTrue(isinstance(s.fMeasureByLabel(), list))
        self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
        self.assertAlmostEqual(s.accuracy, 0.75, 2)
        self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2)
        self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2)
        self.assertAlmostEqual(s.weightedRecall, 0.75, 2)
        self.assertAlmostEqual(s.weightedPrecision, 0.583, 2)
        self.assertAlmostEqual(s.weightedFMeasure(), 0.65, 2)
        self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.65, 2)
        # test evaluation (with training dataset) produces a summary with same values
        # one check is enough to verify a summary is returned, Scala version runs full test
        sameSummary = model.evaluate(df)
        self.assertAlmostEqual(sameSummary.accuracy, s.accuracy)
    def test_gaussian_mixture_summary(self):
        data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
                (Vectors.sparse(1, [], []),)]
        df = self.spark.createDataFrame(data, ["features"])
        gmm = GaussianMixture(k=2)
        model = gmm.fit(df)
        self.assertTrue(model.hasSummary)
        s = model.summary
        self.assertTrue(isinstance(s.predictions, DataFrame))
        self.assertEqual(s.probabilityCol, "probability")
        self.assertTrue(isinstance(s.probability, DataFrame))
        self.assertEqual(s.featuresCol, "features")
        self.assertEqual(s.predictionCol, "prediction")
        self.assertTrue(isinstance(s.cluster, DataFrame))
        self.assertEqual(len(s.clusterSizes), 2)
        self.assertEqual(s.k, 2)
    def test_bisecting_kmeans_summary(self):
        data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
                (Vectors.sparse(1, [], []),)]
        df = self.spark.createDataFrame(data, ["features"])
        bkm = BisectingKMeans(k=2)
        model = bkm.fit(df)
        self.assertTrue(model.hasSummary)
        s = model.summary
        self.assertTrue(isinstance(s.predictions, DataFrame))
        self.assertEqual(s.featuresCol, "features")
        self.assertEqual(s.predictionCol, "prediction")
        self.assertTrue(isinstance(s.cluster, DataFrame))
        self.assertEqual(len(s.clusterSizes), 2)
        self.assertEqual(s.k, 2)
    def test_kmeans_summary(self):
        data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
                (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
        df = self.spark.createDataFrame(data, ["features"])
        kmeans = KMeans(k=2, seed=1)
        model = kmeans.fit(df)
        self.assertTrue(model.hasSummary)
        s = model.summary
        self.assertTrue(isinstance(s.predictions, DataFrame))
        self.assertEqual(s.featuresCol, "features")
        self.assertEqual(s.predictionCol, "prediction")
        self.assertTrue(isinstance(s.cluster, DataFrame))
        self.assertEqual(len(s.clusterSizes), 2)
        self.assertEqual(s.k, 2)
class OneVsRestTests(SparkSessionTestCase):
    """Behavioral tests for OneVsRest: copy semantics, transform output
    columns, parallelism determinism, and weightCol support."""
    def test_copy(self):
        df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
                                         (1.0, Vectors.sparse(2, [], [])),
                                         (2.0, Vectors.dense(0.5, 0.5))],
                                        ["label", "features"])
        lr = LogisticRegression(maxIter=5, regParam=0.01)
        ovr = OneVsRest(classifier=lr)
        # copy() with an extra param map must not mutate the original estimator.
        ovr1 = ovr.copy({lr.maxIter: 10})
        self.assertEqual(ovr.getClassifier().getMaxIter(), 5)
        self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)
        model = ovr.fit(df)
        model1 = model.copy({model.predictionCol: "indexed"})
        self.assertEqual(model1.getPredictionCol(), "indexed")
    def test_output_columns(self):
        df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
                                         (1.0, Vectors.sparse(2, [], [])),
                                         (2.0, Vectors.dense(0.5, 0.5))],
                                        ["label", "features"])
        lr = LogisticRegression(maxIter=5, regParam=0.01)
        ovr = OneVsRest(classifier=lr, parallelism=1)
        model = ovr.fit(df)
        output = model.transform(df)
        # transform appends only the prediction column to the input columns.
        self.assertEqual(output.columns, ["label", "features", "prediction"])
    def test_parallelism_doesnt_change_output(self):
        df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8)),
                                         (1.0, Vectors.sparse(2, [], [])),
                                         (2.0, Vectors.dense(0.5, 0.5))],
                                        ["label", "features"])
        ovrPar1 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=1)
        modelPar1 = ovrPar1.fit(df)
        ovrPar2 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=.01), parallelism=2)
        modelPar2 = ovrPar2.fit(df)
        # Per-class sub-models must be numerically identical regardless of
        # the training parallelism level.
        for i, model in enumerate(modelPar1.models):
            self.assertTrue(np.allclose(model.coefficients.toArray(),
                                        modelPar2.models[i].coefficients.toArray(), atol=1E-4))
            self.assertTrue(np.allclose(model.intercept, modelPar2.models[i].intercept, atol=1E-4))
    def test_support_for_weightCol(self):
        df = self.spark.createDataFrame([(0.0, Vectors.dense(1.0, 0.8), 1.0),
                                         (1.0, Vectors.sparse(2, [], []), 1.0),
                                         (2.0, Vectors.dense(0.5, 0.5), 1.0)],
                                        ["label", "features", "weight"])
        # classifier inherits hasWeightCol
        lr = LogisticRegression(maxIter=5, regParam=0.01)
        ovr = OneVsRest(classifier=lr, weightCol="weight")
        self.assertIsNotNone(ovr.fit(df))
        # classifier doesn't inherit hasWeightCol
        dt = DecisionTreeClassifier()
        ovr2 = OneVsRest(classifier=dt, weightCol="weight")
        self.assertIsNotNone(ovr2.fit(df))
class HashingTFTest(SparkSessionTestCase):
    """Tests for HashingTF behavior not covered by doctests."""
    def test_apply_binary_term_freqs(self):
        """With binary=True every present term maps to a frequency of exactly 1.0."""
        df = self.spark.createDataFrame([(0, ["a", "a", "b", "c", "c", "c"])], ["id", "words"])
        n = 10
        hashing_tf = HashingTF()
        hashing_tf.setInputCol("words").setOutputCol("features") \
            .setNumFeatures(n).setBinary(True)
        features = hashing_tf.transform(df).select("features").first().features.toArray()
        expected = Vectors.dense([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).toArray()
        for i in range(0, n):
            self.assertAlmostEqual(features[i], expected[i], 14, "Error at " + str(i) +
                                   ": expected " + str(expected[i]) + ", got " + str(features[i]))
class GeneralizedLinearRegressionTest(SparkSessionTestCase):
    """Numeric regression tests for GLR features (tweedie family, offsetCol);
    expected values presumably cross-checked against the Scala/R reference."""
    def test_tweedie_distribution(self):
        df = self.spark.createDataFrame(
            [(1.0, Vectors.dense(0.0, 0.0)),
             (1.0, Vectors.dense(1.0, 2.0)),
             (2.0, Vectors.dense(0.0, 0.0)),
             (2.0, Vectors.dense(1.0, 1.0)), ], ["label", "features"])
        glr = GeneralizedLinearRegression(family="tweedie", variancePower=1.6)
        model = glr.fit(df)
        self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1E-4))
        self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1E-4))
        # Refitting with an explicit link power changes the solution.
        model2 = glr.setLinkPower(-1.0).fit(df)
        self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1E-4))
        self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1E-4))
    def test_offset(self):
        df = self.spark.createDataFrame(
            [(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),
             (0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),
             (0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),
             (0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))], ["label", "weight", "offset", "features"])
        glr = GeneralizedLinearRegression(family="poisson", weightCol="weight", offsetCol="offset")
        model = glr.fit(df)
        self.assertTrue(np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581],
                                    atol=1E-4))
        self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1E-4))
class LogisticRegressionTest(SparkSessionTestCase):
    """Numeric tests for box-constrained logistic regression (bounds on
    coefficients/intercepts) in the binomial and multinomial cases."""
    def test_binomial_logistic_regression_with_bound(self):
        df = self.spark.createDataFrame(
            [(1.0, 1.0, Vectors.dense(0.0, 5.0)),
             (0.0, 2.0, Vectors.dense(1.0, 2.0)),
             (1.0, 3.0, Vectors.dense(2.0, 1.0)),
             (0.0, 4.0, Vectors.dense(3.0, 3.0)), ], ["label", "weight", "features"])
        # Coefficients bounded below by -1, intercept bounded above by 0.
        lor = LogisticRegression(regParam=0.01, weightCol="weight",
                                 lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),
                                 upperBoundsOnIntercepts=Vectors.dense(0.0))
        model = lor.fit(df)
        self.assertTrue(
            np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1E-4))
        self.assertTrue(np.isclose(model.intercept, 0.0, atol=1E-4))
    def test_multinomial_logistic_regression_with_bound(self):
        data_path = "data/mllib/sample_multiclass_classification_data.txt"
        df = self.spark.read.format("libsvm").load(data_path)
        # 3 classes x 4 features: coefficient (i, j) bounded below by i*4+j.
        lor = LogisticRegression(regParam=0.01,
                                 lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),
                                 upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0))
        model = lor.fit(df)
        expected = [[4.593, 4.5516, 9.0099, 12.2904],
                    [1.0, 8.1093, 7.0, 10.0],
                    [3.041, 5.0, 8.0, 11.0]]
        for i in range(0, len(expected)):
            self.assertTrue(
                np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1E-4))
        self.assertTrue(
            np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1E-4))
class MultilayerPerceptronClassifierTest(SparkSessionTestCase):
    """Checks that an MLP model's transform emits prediction, probability and
    rawPrediction columns with the expected values for a known input."""
    def test_raw_and_probability_prediction(self):
        data_path = "data/mllib/sample_multiclass_classification_data.txt"
        df = self.spark.read.format("libsvm").load(data_path)
        mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[4, 5, 4, 3],
                                             blockSize=128, seed=123)
        model = mlp.fit(df)
        test = self.sc.parallelize([Row(features=Vectors.dense(0.1, 0.1, 0.25, 0.25))]).toDF()
        result = model.transform(test).head()
        expected_prediction = 2.0
        expected_probability = [0.0, 0.0, 1.0]
        expected_rawPrediction = [57.3955, -124.5462, 67.9943]
        # Fix: assertTrue(x, msg) treats its second argument as the failure
        # message and only checks truthiness of x, so the prediction was never
        # actually compared against the expected value.
        self.assertEqual(result.prediction, expected_prediction)
        self.assertTrue(np.allclose(result.probability, expected_probability, atol=1E-4))
        self.assertTrue(np.allclose(result.rawPrediction, expected_rawPrediction, atol=1E-4))
class FPGrowthTests(SparkSessionTestCase):
    """Tests FP-Growth frequent-itemset mining and association-rule output."""
    def setUp(self):
        super(FPGrowthTests, self).setUp()
        self.data = self.spark.createDataFrame(
            [([1, 2], ), ([1, 2, 3], ), ([1, 3], )],
            ["items"])
    def test_association_rules(self):
        fp = FPGrowth()
        fpm = fp.fit(self.data)
        expected_association_rules = self.spark.createDataFrame(
            [([3], [1], 1.0), ([2], [1], 1.0)],
            ["antecedent", "consequent", "confidence"]
        )
        actual_association_rules = fpm.associationRules
        # Set equality via symmetric difference (row order is unspecified).
        self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)
        self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)
    def test_freq_itemsets(self):
        fp = FPGrowth()
        fpm = fp.fit(self.data)
        expected_freq_itemsets = self.spark.createDataFrame(
            [([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)],
            ["items", "freq"]
        )
        actual_freq_itemsets = fpm.freqItemsets
        # Set equality via symmetric difference (row order is unspecified).
        self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)
        self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)
    def tearDown(self):
        del self.data
class ALSTest(SparkSessionTestCase):
    """Checks ALS storage-level params are honored on both the Python wrapper
    and the underlying JVM object, for default and non-default values."""
    def test_storage_levels(self):
        df = self.spark.createDataFrame(
            [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
            ["user", "item", "rating"])
        als = ALS().setMaxIter(1).setRank(1)
        # test default params
        als.fit(df)
        self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
        self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_AND_DISK")
        self.assertEqual(als.getFinalStorageLevel(), "MEMORY_AND_DISK")
        self.assertEqual(als._java_obj.getFinalStorageLevel(), "MEMORY_AND_DISK")
        # test non-default params
        als.setIntermediateStorageLevel("MEMORY_ONLY_2")
        als.setFinalStorageLevel("DISK_ONLY")
        als.fit(df)
        self.assertEqual(als.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
        self.assertEqual(als._java_obj.getIntermediateStorageLevel(), "MEMORY_ONLY_2")
        self.assertEqual(als.getFinalStorageLevel(), "DISK_ONLY")
        self.assertEqual(als._java_obj.getFinalStorageLevel(), "DISK_ONLY")
class DefaultValuesTests(PySparkTestCase):
    """
    Test :py:class:`JavaParams` classes to see if their default Param values match
    those in their Scala counterparts.
    """
    def test_java_params(self):
        import pyspark.ml.feature
        import pyspark.ml.classification
        import pyspark.ml.clustering
        import pyspark.ml.pipeline
        import pyspark.ml.recommendation
        import pyspark.ml.regression
        modules = [pyspark.ml.feature, pyspark.ml.classification, pyspark.ml.clustering,
                   pyspark.ml.pipeline, pyspark.ml.recommendation, pyspark.ml.regression]
        for module in modules:
            # Instantiate every concrete JavaParams estimator/transformer
            # (models are skipped -- they cannot be constructed directly).
            for name, cls in inspect.getmembers(module, inspect.isclass):
                if not name.endswith('Model') and issubclass(cls, JavaParams)\
                        and not inspect.isabstract(cls):
                    # NOTE: disable check_params_exist until there is parity with Scala API
                    ParamTests.check_params(self, cls(), check_params_exist=False)
def _squared_distance(a, b):
    """Dispatch squared_distance to whichever argument is an ml Vector.

    At least one of *a*, *b* must be a Vector; the other may be any sequence
    type that Vector.squared_distance accepts.
    """
    receiver, other = (a, b) if isinstance(a, Vector) else (b, a)
    return receiver.squared_distance(other)
class VectorTests(MLlibTestCase):
    """Unit tests for the pyspark.ml.linalg vector/matrix types: SerDe,
    arithmetic, equality/hashing, indexing, repr, and norms."""
    def _test_serialize(self, v):
        # Python-side pickle round-trip.
        self.assertEqual(v, ser.loads(ser.dumps(v)))
        # Python -> JVM -> Python round-trip via MLSerDe.
        jvec = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(v)))
        nv = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvec)))
        self.assertEqual(v, nv)
        # Same round-trips for a list of vectors.
        vs = [v] * 100
        jvecs = self.sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(bytearray(ser.dumps(vs)))
        nvs = ser.loads(bytes(self.sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(jvecs)))
        self.assertEqual(vs, nvs)
    def test_serialize(self):
        self._test_serialize(DenseVector(range(10)))
        self._test_serialize(DenseVector(array([1., 2., 3., 4.])))
        self._test_serialize(DenseVector(pyarray.array('d', range(10))))
        self._test_serialize(SparseVector(4, {1: 1, 3: 2}))
        self._test_serialize(SparseVector(3, {}))
        self._test_serialize(DenseMatrix(2, 3, range(6)))
        sm1 = SparseMatrix(
            3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
        self._test_serialize(sm1)
    def test_dot(self):
        sv = SparseVector(4, {1: 1, 3: 2})
        dv = DenseVector(array([1., 2., 3., 4.]))
        lst = DenseVector([1, 2, 3, 4])
        mat = array([[1., 2., 3., 4.],
                     [1., 2., 3., 4.],
                     [1., 2., 3., 4.],
                     [1., 2., 3., 4.]])
        arr = pyarray.array('d', [0, 1, 2, 3])
        self.assertEqual(10.0, sv.dot(dv))
        self.assertTrue(array_equal(array([3., 6., 9., 12.]), sv.dot(mat)))
        self.assertEqual(30.0, dv.dot(dv))
        self.assertTrue(array_equal(array([10., 20., 30., 40.]), dv.dot(mat)))
        self.assertEqual(30.0, lst.dot(dv))
        self.assertTrue(array_equal(array([10., 20., 30., 40.]), lst.dot(mat)))
        self.assertEqual(7.0, sv.dot(arr))
    def test_squared_distance(self):
        sv = SparseVector(4, {1: 1, 3: 2})
        dv = DenseVector(array([1., 2., 3., 4.]))
        lst = DenseVector([4, 3, 2, 1])
        lst1 = [4, 3, 2, 1]
        arr = pyarray.array('d', [0, 2, 1, 3])
        narr = array([0, 2, 1, 3])
        self.assertEqual(15.0, _squared_distance(sv, dv))
        self.assertEqual(25.0, _squared_distance(sv, lst))
        self.assertEqual(20.0, _squared_distance(dv, lst))
        self.assertEqual(15.0, _squared_distance(dv, sv))
        self.assertEqual(25.0, _squared_distance(lst, sv))
        self.assertEqual(20.0, _squared_distance(lst, dv))
        self.assertEqual(0.0, _squared_distance(sv, sv))
        self.assertEqual(0.0, _squared_distance(dv, dv))
        self.assertEqual(0.0, _squared_distance(lst, lst))
        self.assertEqual(25.0, _squared_distance(sv, lst1))
        self.assertEqual(3.0, _squared_distance(sv, arr))
        self.assertEqual(3.0, _squared_distance(sv, narr))
    def test_hash(self):
        # Vectors with equal values must hash equal across dense/sparse forms.
        v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
        v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
        v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
        v4 = SparseVector(4, [(1, 1.0), (3, 2.5)])
        self.assertEqual(hash(v1), hash(v2))
        self.assertEqual(hash(v1), hash(v3))
        self.assertEqual(hash(v2), hash(v3))
        self.assertFalse(hash(v1) == hash(v4))
        self.assertFalse(hash(v2) == hash(v4))
    def test_eq(self):
        v1 = DenseVector([0.0, 1.0, 0.0, 5.5])
        v2 = SparseVector(4, [(1, 1.0), (3, 5.5)])
        v3 = DenseVector([0.0, 1.0, 0.0, 5.5])
        v4 = SparseVector(6, [(1, 1.0), (3, 5.5)])
        v5 = DenseVector([0.0, 1.0, 0.0, 2.5])
        v6 = SparseVector(4, [(1, 1.0), (3, 2.5)])
        self.assertEqual(v1, v2)
        self.assertEqual(v1, v3)
        self.assertFalse(v2 == v4)
        self.assertFalse(v1 == v5)
        self.assertFalse(v1 == v6)
    def test_equals(self):
        indices = [1, 2, 4]
        values = [1., 3., 2.]
        self.assertTrue(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 0., 2.]))
        self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 1., 0., 2.]))
        self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 3., 0., 2.]))
        self.assertFalse(Vectors._equals(indices, values, list(range(5)), [0., 1., 3., 2., 2.]))
    def test_conversion(self):
        # numpy arrays should be automatically upcast to float64
        # tests for fix of [SPARK-5089]
        v = array([1, 2, 3, 4], dtype='float64')
        dv = DenseVector(v)
        self.assertTrue(dv.array.dtype == 'float64')
        v = array([1, 2, 3, 4], dtype='float32')
        dv = DenseVector(v)
        self.assertTrue(dv.array.dtype == 'float64')
    def test_sparse_vector_indexing(self):
        sv = SparseVector(5, {1: 1, 3: 2})
        self.assertEqual(sv[0], 0.)
        self.assertEqual(sv[3], 2.)
        self.assertEqual(sv[1], 1.)
        self.assertEqual(sv[2], 0.)
        self.assertEqual(sv[4], 0.)
        # Negative indices count from the end, like regular sequences.
        self.assertEqual(sv[-1], 0.)
        self.assertEqual(sv[-2], 2.)
        self.assertEqual(sv[-3], 0.)
        self.assertEqual(sv[-5], 0.)
        for ind in [5, -6]:
            self.assertRaises(IndexError, sv.__getitem__, ind)
        for ind in [7.8, '1']:
            self.assertRaises(TypeError, sv.__getitem__, ind)
        zeros = SparseVector(4, {})
        self.assertEqual(zeros[0], 0.0)
        self.assertEqual(zeros[3], 0.0)
        for ind in [4, -5]:
            self.assertRaises(IndexError, zeros.__getitem__, ind)
        empty = SparseVector(0, {})
        for ind in [-1, 0, 1]:
            self.assertRaises(IndexError, empty.__getitem__, ind)
    def test_sparse_vector_iteration(self):
        self.assertListEqual(list(SparseVector(3, [], [])), [0.0, 0.0, 0.0])
        self.assertListEqual(list(SparseVector(5, [0, 3], [1.0, 2.0])), [1.0, 0.0, 0.0, 2.0, 0.0])
    def test_matrix_indexing(self):
        # Values are supplied in column-major order.
        mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
        expected = [[0, 6], [1, 8], [4, 10]]
        for i in range(3):
            for j in range(2):
                self.assertEqual(mat[i, j], expected[i][j])
        for i, j in [(-1, 0), (4, 1), (3, 4)]:
            self.assertRaises(IndexError, mat.__getitem__, (i, j))
    def test_repr_dense_matrix(self):
        # NOTE(review): assertTrue(x, msg) treats the second argument as the
        # failure *message*, so these calls never compare repr(mat) against
        # the expected string -- they only check repr(mat) is truthy.  Left
        # unchanged here; a proper fix should use assertEqual (and correct the
        # expected literals, whose backslash-continuations embed indentation).
        mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10])
        self.assertTrue(
            repr(mat),
            'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
        mat = DenseMatrix(3, 2, [0, 1, 4, 6, 8, 10], True)
        self.assertTrue(
            repr(mat),
            'DenseMatrix(3, 2, [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], False)')
        mat = DenseMatrix(6, 3, zeros(18))
        self.assertTrue(
            repr(mat),
            'DenseMatrix(6, 3, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ..., \
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], False)')
    def test_repr_sparse_matrix(self):
        # NOTE(review): same assertTrue(x, msg) issue as test_repr_dense_matrix
        # -- none of these assertions compare against the expected string.
        sm1t = SparseMatrix(
            3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
            isTransposed=True)
        self.assertTrue(
            repr(sm1t),
            'SparseMatrix(3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0], True)')
        indices = tile(arange(6), 3)
        values = ones(18)
        sm = SparseMatrix(6, 3, [0, 6, 12, 18], indices, values)
        self.assertTrue(
            repr(sm), "SparseMatrix(6, 3, [0, 6, 12, 18], \
                [0, 1, 2, 3, 4, 5, 0, 1, ..., 4, 5, 0, 1, 2, 3, 4, 5], \
                [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, ..., \
                1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], False)")
        self.assertTrue(
            str(sm),
            "6 X 3 CSCMatrix\n\
            (0,0) 1.0\n(1,0) 1.0\n(2,0) 1.0\n(3,0) 1.0\n(4,0) 1.0\n(5,0) 1.0\n\
            (0,1) 1.0\n(1,1) 1.0\n(2,1) 1.0\n(3,1) 1.0\n(4,1) 1.0\n(5,1) 1.0\n\
            (0,2) 1.0\n(1,2) 1.0\n(2,2) 1.0\n(3,2) 1.0\n..\n..")
        sm = SparseMatrix(1, 18, zeros(19), [], [])
        self.assertTrue(
            repr(sm),
            'SparseMatrix(1, 18, \
                [0, 0, 0, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0], [], [], False)')
    def test_sparse_matrix(self):
        # Test sparse matrix creation.
        sm1 = SparseMatrix(
            3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0])
        self.assertEqual(sm1.numRows, 3)
        self.assertEqual(sm1.numCols, 4)
        self.assertEqual(sm1.colPtrs.tolist(), [0, 2, 2, 4, 4])
        self.assertEqual(sm1.rowIndices.tolist(), [1, 2, 1, 2])
        self.assertEqual(sm1.values.tolist(), [1.0, 2.0, 4.0, 5.0])
        # NOTE(review): assertTrue(x, msg) -- this does not compare the repr.
        self.assertTrue(
            repr(sm1),
            'SparseMatrix(3, 4, [0, 2, 2, 4, 4], [1, 2, 1, 2], [1.0, 2.0, 4.0, 5.0], False)')
        # Test indexing
        expected = [
            [0, 0, 0, 0],
            [1, 0, 4, 0],
            [2, 0, 5, 0]]
        for i in range(3):
            for j in range(4):
                self.assertEqual(expected[i][j], sm1[i, j])
        self.assertTrue(array_equal(sm1.toArray(), expected))
        for i, j in [(-1, 1), (4, 3), (3, 5)]:
            self.assertRaises(IndexError, sm1.__getitem__, (i, j))
        # Test conversion to dense and sparse.
        smnew = sm1.toDense().toSparse()
        self.assertEqual(sm1.numRows, smnew.numRows)
        self.assertEqual(sm1.numCols, smnew.numCols)
        self.assertTrue(array_equal(sm1.colPtrs, smnew.colPtrs))
        self.assertTrue(array_equal(sm1.rowIndices, smnew.rowIndices))
        self.assertTrue(array_equal(sm1.values, smnew.values))
        sm1t = SparseMatrix(
            3, 4, [0, 2, 3, 5], [0, 1, 2, 0, 2], [3.0, 2.0, 4.0, 9.0, 8.0],
            isTransposed=True)
        self.assertEqual(sm1t.numRows, 3)
        self.assertEqual(sm1t.numCols, 4)
        self.assertEqual(sm1t.colPtrs.tolist(), [0, 2, 3, 5])
        self.assertEqual(sm1t.rowIndices.tolist(), [0, 1, 2, 0, 2])
        self.assertEqual(sm1t.values.tolist(), [3.0, 2.0, 4.0, 9.0, 8.0])
        expected = [
            [3, 2, 0, 0],
            [0, 0, 4, 0],
            [9, 0, 8, 0]]
        for i in range(3):
            for j in range(4):
                self.assertEqual(expected[i][j], sm1t[i, j])
        self.assertTrue(array_equal(sm1t.toArray(), expected))
    def test_dense_matrix_is_transposed(self):
        # Row-major data with isTransposed=True equals column-major data.
        mat1 = DenseMatrix(3, 2, [0, 4, 1, 6, 3, 9], isTransposed=True)
        mat = DenseMatrix(3, 2, [0, 1, 3, 4, 6, 9])
        self.assertEqual(mat1, mat)
        expected = [[0, 4], [1, 6], [3, 9]]
        for i in range(3):
            for j in range(2):
                self.assertEqual(mat1[i, j], expected[i][j])
        self.assertTrue(array_equal(mat1.toArray(), expected))
        sm = mat1.toSparse()
        self.assertTrue(array_equal(sm.rowIndices, [1, 2, 0, 1, 2]))
        self.assertTrue(array_equal(sm.colPtrs, [0, 2, 5]))
        self.assertTrue(array_equal(sm.values, [1, 3, 4, 6, 9]))
    def test_norms(self):
        a = DenseVector([0, 2, 3, -1])
        self.assertAlmostEqual(a.norm(2), 3.742, 3)
        # NOTE(review): assertTrue(a.norm(1), 6) only checks truthiness of
        # norm(1); the second argument is a message.  assertEqual (or
        # assertAlmostEqual) was presumably intended here and below.
        self.assertTrue(a.norm(1), 6)
        self.assertTrue(a.norm(inf), 3)
        a = SparseVector(4, [0, 2], [3, -4])
        self.assertAlmostEqual(a.norm(2), 5)
        self.assertTrue(a.norm(1), 7)
        self.assertTrue(a.norm(inf), 4)
        tmp = SparseVector(4, [0, 2], [3, 0])
        # Explicitly-stored zeros do not count as nonzeros.
        self.assertEqual(tmp.numNonzeros(), 1)
class VectorUDTTests(MLlibTestCase):
    """Round-trip and schema-inference tests for VectorUDT."""

    dv0 = DenseVector([])
    dv1 = DenseVector([1.0, 2.0])
    sv0 = SparseVector(2, [], [])
    sv1 = SparseVector(2, [1], [2.0])
    udt = VectorUDT()

    def test_json_schema(self):
        round_tripped = VectorUDT.fromJson(self.udt.jsonValue())
        self.assertEqual(round_tripped, self.udt)

    def test_serialization(self):
        for vec in (self.dv0, self.dv1, self.sv0, self.sv1):
            restored = self.udt.deserialize(self.udt.serialize(vec))
            self.assertEqual(vec, restored)

    def test_infer_schema(self):
        rows = [Row(label=1.0, features=self.dv1),
                Row(label=0.0, features=self.sv1)]
        df = self.sc.parallelize(rows).toDF()
        feature_field = [f for f in df.schema.fields if f.name == "features"][0]
        self.assertEqual(feature_field.dataType, self.udt)
        collected = df.rdd.map(lambda p: p.features).collect()
        self.assertEqual(len(collected), 2)
        for v in collected:
            if isinstance(v, SparseVector):
                self.assertEqual(v, self.sv1)
            elif isinstance(v, DenseVector):
                self.assertEqual(v, self.dv1)
            else:
                raise TypeError("expecting a vector but got %r of type %r" % (v, type(v)))
class MatrixUDTTests(MLlibTestCase):
    """Round-trip and schema-inference tests for MatrixUDT."""

    dm1 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10])
    dm2 = DenseMatrix(3, 2, [0, 1, 4, 5, 9, 10], isTransposed=True)
    sm1 = SparseMatrix(1, 1, [0, 1], [0], [2.0])
    sm2 = SparseMatrix(2, 1, [0, 0, 1], [0], [5.0], isTransposed=True)
    udt = MatrixUDT()

    def test_json_schema(self):
        self.assertEqual(MatrixUDT.fromJson(self.udt.jsonValue()), self.udt)

    def test_serialization(self):
        for m in [self.dm1, self.dm2, self.sm1, self.sm2]:
            self.assertEqual(m, self.udt.deserialize(self.udt.serialize(m)))

    def test_infer_schema(self):
        rdd = self.sc.parallelize([("dense", self.dm1), ("sparse", self.sm1)])
        df = rdd.toDF()
        schema = df.schema
        # BUG FIX: assertTrue(x, y) ignores its second argument (it is only
        # a failure message), so these checks always passed; assertEqual
        # performs the intended comparisons.
        self.assertEqual(schema.fields[1].dataType, self.udt)
        matrices = df.rdd.map(lambda x: x._2).collect()
        self.assertEqual(len(matrices), 2)
        for m in matrices:
            if isinstance(m, DenseMatrix):
                self.assertEqual(m, self.dm1)
            elif isinstance(m, SparseMatrix):
                self.assertEqual(m, self.sm1)
            else:
                raise ValueError("Expected a matrix but got type %r" % type(m))
class WrapperTests(MLlibTestCase):
    """Round-trip Python lists through JavaWrapper._new_java_array."""

    def test_new_java_array(self):
        jvm = self.sc._gateway.jvm
        # Primitive wrapper types: strings, integers, floats and booleans.
        primitive_cases = [
            (["a", "b", "c"], jvm.java.lang.String),
            ([1, 2, 3], jvm.java.lang.Integer),
            ([0.1, 0.2, 0.3], jvm.java.lang.Double),
            ([False, True, True], jvm.java.lang.Boolean),
        ]
        for py_list, java_cls in primitive_cases:
            java_array = JavaWrapper._new_java_array(py_list, java_cls)
            self.assertEqual(_java2py(self.sc, java_array), py_list)
        # Arrays of Java DenseVector objects round-trip as well.
        dv_a = DenseVector([0.0, 1.0])
        dv_b = DenseVector([1.0, 0.0])
        vec_java_list = [_py2java(self.sc, dv_a), _py2java(self.sc, dv_b)]
        vec_cls = jvm.org.apache.spark.ml.linalg.DenseVector
        java_array = JavaWrapper._new_java_array(vec_java_list, vec_cls)
        self.assertEqual(_java2py(self.sc, java_array), [dv_a, dv_b])
        # An empty Python list yields an empty Java array.
        java_array = JavaWrapper._new_java_array([], jvm.java.lang.Integer)
        self.assertEqual(_java2py(self.sc, java_array), [])
class ChiSquareTestTests(SparkSessionTestCase):
    """Smoke test for ChiSquareTest.test on a tiny labelled dataset."""

    def test_chisquaretest(self):
        rows = [[0, Vectors.dense([0, 1, 2])],
                [1, Vectors.dense([1, 1, 1])],
                [2, Vectors.dense([2, 1, 0])]]
        dataset = self.spark.createDataFrame(rows, ['label', 'feat'])
        result = ChiSquareTest.test(dataset, 'feat', 'label')
        # Collecting the result still hits the bug described in #17218,
        # so only the returned schema is verified here.
        # pValues = res.select("degreesOfFreedom").collect())
        self.assertIsInstance(result, DataFrame)
        present = set(field.name for field in result.schema.fields)
        expectedFields = ["pValues", "degreesOfFreedom", "statistics"]
        self.assertTrue(all(name in present for name in expectedFields))
class UnaryTransformerTests(SparkSessionTestCase):
    """Behavioral tests for the MockUnaryTransformer helper."""

    def _make_transformer(self, shift):
        # One-line helper: configured transformer with standard column names.
        return (MockUnaryTransformer(shiftVal=shift)
                .setInputCol("input").setOutputCol("output"))

    def test_unary_transformer_validate_input_type(self):
        transformer = self._make_transformer(3)
        # A double input column is accepted without raising.
        transformer.validateInputType(DoubleType())
        with self.assertRaises(TypeError):
            # passing the wrong input type should raise an error
            transformer.validateInputType(IntegerType())

    def test_unary_transformer_transform(self):
        shift = 3
        transformer = self._make_transformer(shift)
        frame = self.spark.range(0, 10).toDF('input')
        frame = frame.withColumn("input", frame.input.cast(dataType="double"))
        collected = transformer.transform(frame).select("input", "output").collect()
        for row in collected:
            self.assertEqual(row.input + shift, row.output)
if __name__ == "__main__":
    from pyspark.ml.tests import *
    # Prefer xmlrunner (XML reports for CI) when it is installed; otherwise
    # fall back to the default text test runner.
    if xmlrunner:
        unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
    else:
        unittest.main()
|
{
"content_hash": "d896e64e7e83418488ad088d121143d5",
"timestamp": "",
"source": "github",
"line_count": 2279,
"max_line_length": 100,
"avg_line_length": 42.70206230802984,
"alnum_prop": 0.5995910314638607,
"repo_name": "cin/spark",
"id": "2f1f3af957e4dbc0c5de7e4a22e6b5d41ecb9052",
"size": "98129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/ml/tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33781"
},
{
"name": "Batchfile",
"bytes": "25219"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "10056"
},
{
"name": "Java",
"bytes": "3135901"
},
{
"name": "JavaScript",
"bytes": "141001"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "8788"
},
{
"name": "PowerShell",
"bytes": "3756"
},
{
"name": "Python",
"bytes": "2399587"
},
{
"name": "R",
"bytes": "1087862"
},
{
"name": "Roff",
"bytes": "14774"
},
{
"name": "SQLPL",
"bytes": "6233"
},
{
"name": "Scala",
"bytes": "24132257"
},
{
"name": "Shell",
"bytes": "157325"
},
{
"name": "Thrift",
"bytes": "33605"
}
],
"symlink_target": ""
}
|
import logging
from programy.clients.clients import BotClient
class ConsoleBotClient(BotClient):
    """Interactive console (stdin/stdout) client for a program-y bot."""

    def __init__(self):
        BotClient.__init__(self)
        self.clientid = "Console"

    def set_environment(self):
        """Record on the brain that this session runs in the console client."""
        self.bot.brain.predicates.pairs.append(["env", "Console"])

    def run(self):
        """Question/answer loop; ends on KeyboardInterrupt or when --noloop is set."""
        if self.arguments.noloop is False:
            logging.info("Entering conversation loop...")
            running = True
            self.display_response(self.bot.get_version_string)
            self.display_response(self.bot.brain.post_process_response(self.bot, self.clientid, self.bot.initial_question))
            # BUG FIX: define question before the loop so the generic except
            # handler below cannot raise UnboundLocalError when
            # get_question() itself fails on the first iteration.
            question = None
            while running is True:
                try:
                    question = self.get_question()
                    response = self.bot.ask_question(self.clientid, question)
                    if response is None:
                        self.display_response(self.bot.default_response)
                        self.log_unknown_response(question)
                    else:
                        self.display_response(response)
                        self.log_response(question, response)
                except KeyboardInterrupt:
                    running = False
                    self.display_response(self.bot.exit_response)
                except Exception as excep:
                    logging.exception(excep)
                    logging.error("Oops something bad happened !")
                    self.display_response(self.bot.default_response)
                    self.log_unknown_response(question)

    def get_question(self, input_func=input):
        """Read one question from the user via input_func (injectable for tests)."""
        ask = "%s "%self.bot.prompt
        return input_func(ask)

    def display_response(self, response, output_func=print):
        """Write one bot response via output_func (injectable for tests)."""
        output_func(response)
if __name__ == '__main__':
    # Entry point: build the console client and start the interactive loop.
    def run():
        print("Loading, please wait...")
        console_app = ConsoleBotClient()
        console_app.run()
    run()
|
{
"content_hash": "ca522c086f1e6dda78708d3e8c6d6eeb",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 123,
"avg_line_length": 36.24528301886792,
"alnum_prop": 0.5642894325871942,
"repo_name": "dkamotsky/program-y",
"id": "56065a254fdf823849c4be4f2de426c1bb46186f",
"size": "1921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/programy/clients/console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1583"
},
{
"name": "Python",
"bytes": "1131157"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
}
|
import math
# This is a fairly naive implementation of the algorithm described in
# "Effective Computation of Biased Quantiles over Data Streams"
class Quantiles(object):
    """Streaming quantile summary over a sorted singly linked list of _point
    nodes, following "Effective Computation of Biased Quantiles over Data
    Streams" (naive list-based implementation).
    """

    def __init__(self, f=None):
        self.points = None                 # head of the sorted linked list
        self.n = 0                         # number of observations inserted
        self.f = f or biased_quantiles_f   # invariant / error-bound function

    def insert(self, val):
        """Insert one observation, compressing periodically.

        BUG FIX: the original incremented self.n (and hence triggered
        compression) only on the middle-of-list path; inserts at the head
        or past the tail returned early without counting the observation.
        All three paths now go through _record_insert().
        """
        if self.points is None or val < self.points.val:
            # New minimum: becomes the head of the list.
            self.points = _point(val, 1, 0, self.points)
            self._record_insert()
            return
        prev = self.points
        cur = self.points.next
        r = 0  # rank accumulated while walking the list
        while cur and cur.next:
            if val < cur.val:
                break
            r += cur.delta
            prev = cur
            cur = cur.next
        else:  # ran off the end: new maximum, append after the last node
            (cur or prev).next = _point(val, 1, 0, None)
            self._record_insert()
            return
        new = _point(val, 1, max(int(self.f(r, self.n)) - 1, 0), cur)
        prev.next = new
        self._record_insert()

    def _record_insert(self):
        # Count the observation and compress every COMPRESS_INTERVAL inserts.
        self.n += 1
        if self.n % COMPRESS_INTERVAL == 0:
            self.compress()

    def compress(self):
        """One backward pass merging adjacent points that fit within f(r, n)."""
        pointlist = []
        cur = self.points
        r = [0]  # r[k] = rank of pointlist[k] (prefix sums of delta)
        while cur:
            pointlist.append(cur)
            r.append(r[-1] + cur.delta)
            cur = cur.next
        # Walk backwards so merges do not disturb unvisited indices.
        for i in range(len(pointlist) - 2, 0, -1):
            if pointlist[i].delta + pointlist[i + 1].delta + pointlist[i + 1].width <= self.f(r[i], self.n):
                # merge point i+1 into point i
                pointlist[i].next = pointlist[i + 1].next
                pointlist[i].val = pointlist[i + 1].val
                pointlist[i].delta += pointlist[i + 1].delta
                pointlist[i].width = pointlist[i + 1].width

    def query(self, p):
        """Return the point covering quantile p plus its (low, high) rank bounds.

        NOTE(review): assumes cur_point.next exists while scanning; querying
        near the maximum may raise AttributeError -- confirm before relying
        on it at the tail.
        """
        cur_r = 0
        cur_point = self.points
        while cur_point:
            cur_r += cur_point.delta
            if (cur_r + cur_point.next.delta + cur_point.next.width >
                    self.n * p + self.f(self.n * p, self.n) / 2):
                return cur_point, (cur_r + cur_point.delta, cur_r + cur_point.delta + cur_point.width)
            cur_point = cur_point.next

    def get_pointlist(self):
        """Return the linked list as a Python list (debugging helper)."""
        if not self.points:
            return []
        p = [self.points]
        while p[-1].next:
            p.append(p[-1].next)
        return p
import math
class DistributedQuantiles(object):
    # Mergeable epsilon-approximate quantile summary (buffer + merge design).
    # NOTE(review): this class looks unfinished -- several branches cannot
    # execute as written; see inline notes before using it.
    def __init__(self, error):
        # Buffer up to ceil(1/error) values before merging into the summary.
        self.size = math.ceil(1 / error)
        self.inbuf = []  # unsorted incoming values
        self.qs = []     # summary tuples, apparently (value, rmin, rmax)
        self.n = 0
    def insert(self, val):
        self.inbuf.append(val)
        if len(self.inbuf) >= self.size:
            self.inbuf.sort()
            i, j = 0, 0
            new_qs = []
            # NOTE(review): self.qs is empty on the very first merge, so
            # self.qs[0] raises IndexError -- bootstrap path missing?
            if self.inbuf[0] < self.qs[0][0]:
                # NOTE(review): i never advances inside this loop, so it
                # cannot terminate once entered -- looks unfinished.
                while self.inbuf[i] < self.qs[j][0]:
                    new_qs.append((self.inbuf[i], i, i))
            while i < len(self.inbuf) and j < len(self.qs):
                nxtmin = i + self.qs[j][1]
                nxtmax = i + self.qs[j][2] - 1
                if self.inbuf[i] < self.qs[j][0]:
                    nxtval = self.inbuf[i]
                    i += 1
                # NOTE(review): compares the whole tuple self.qs[j] against a
                # scalar; presumably self.qs[j][0] was intended -- confirm.
                elif self.qs[j] < self.inbuf[i]:
                    nxtval = self.qs[j]
                    j += 1
                else:
                    val = self.inbuf[i]
                    # NOTE(review): 'inbuf' is missing the self. prefix and
                    # would raise NameError if this branch ever ran.
                    if len(inbuf) > i + 1 and inbuf[i + 1] == val:
                        if len(self.qs) > j + 1 and self.qs[j + 1] == val:
                            new_qs.append((val, ))
                            # the paper does not cover this case so a supplemental
                            # proof is provided
                            pass
                new_qs.append((nxtval, nxtmin, nxtmax))
            self.qs = new_qs
            self.prune()
    def prune(self):
        # NOTE(review): compression step not implemented yet.
        pass
'''
Supplemental proof of correctness of equal value element case.
(notation is kept as close as possible to the original paper
within the limitations of ASCII)
MOTIVATION:
The error bound as given in the paper is dependent that
when merging any Q' and Q'', the next-lowest y_s of Q'' and
next-highest y_t of Q'' to any x of Q' must be consecutive.
However, in the general case there may be many elements whose
value are equal. Consider
Q' = [...,-1,0,1,...]
Q'' = [...,-1,0,0,1,...]
For x of Q' = 0, y_s = -1 and y_t = 1.
y_s and y_t are not consecutive.
The merge operation is no longer correct, because inductively
rmax_Q''(y_s) - rmin_Q''(y_t) <= 3 eps n'' ; which is not <= 2 eps n''
We therefore define a new merge operation for elements of Q' and Q''
which are equal.
This merge operation will also inductively guarantee that only
two consecutive elements may share the same value.
INTUITION:
The intuitive explanation of the meaning of multiple elements
with the same value is that it represents an unbroken
sequence of observations with that same value. The first
element represents the rank of the beginning of this sequence.
The second element represents the rank of the end of this
sequence.
Because of this, there should never be more than two elements
with the same value. (If there is a third element, it can be
discarded keeping only the maximum and minimum.)
This also means that if there are two elements with the same
value, rmin may be set to rmax for the lower ranked element,
and rmax may be set to rmin for the higher ranked element.
Because every rank between the two elements of the same
value is known to also be of that value, the top-most rank
in the range of the first value and bottom-most rank in the
range of the second value are known to be the exact locations
of an element of the given value.
NEW MERGE OPERATION:
The proposed new merge operation considers 4 points at a time:
x_r, x_r+1, y_t, y_t+1
where x_r is the minimum un-merged element of Q'
x_r+1 is the next consecutive element of Q'
y_t is the minimum un-merged element of Q''
y_t+1 is the next consecutive element of Q''
in order to generate z_i of Q
all points whose value are equal are consumed in a single step
there are four cases:
for notation convenience,
define the ADD operation of x of Q' and y of Q''
be z of Q:
rmax_Q(z) = rmax_Q'(x) + rmax_Q''(y)
rmin_Q(z) = rmin_Q'(x) + rmin_Q''(y)
CASE I:
x_r != y_t:
apply the merge operation as in the original paper
CASE II:
x_r = y_t, and x_r != x_r+1, and y_t != y_t+1:
z_i = ADD(x_r, y_t)
move on to x_r+1, y_t+1
case III:
x_r = y_t = x_r+1 != y_t+1:
generate two points, z_i and z_i'
z_i = ADD(x_r, y_t)
z_i' = ADD(x_r+1, y_t)
apply SHRINK to the pair of points,
append the result (either one or two points) to Q,
move on to x_r+2 and y_t+1
case IV:
x_r = y_t = y_t+1 != x_r+1:
generate two points z_i and z_i'
z_i = ADD(x_r, y_t)
z_i' = ADD(x_r, y_t+1)
apply SHRINK to the pair of points,
append the result (either one or two points) to Q,
move on to x_r+1, y_t+2
case V:
x_r = y_t = x_r+1 = y_t+1:
this means that there are two runs of the same value
in each of the merging sequences.
generate two points z_i, z_i':
z_i = ADD(x_r, y_t)
z_i' = ADD(x_r+1, y_t+1)
append both to Q
move on to x_r+2, y_t+2
PROOF:
Let eps be the error bound.
Let Q' and Q'' be two quantile summaries being merged, each of
which inductively have error <= eps.
Let x be an element of Q' and y be an element of Q''.
Let n' be the number of observations covered by Q' and
n'' be the number of observations covered by Q''.
The proposed merge operation is:
rmax_Q(z_i) = rmax_Q'(x) + rmax_Q''(y)
rmin_Q(z_i) = rmin_Q'(x) + rmin_Q''(y)
We wish to show that rmax_Q(z_i+1) - rmin_Q(z_i) <= 2 eps (n' + n'')
let r be the index of x in Q'
let s by the index of y in Q''
Several things must be proven about the new MERGE:
1- that z_i and z_i+1 are within the error bounds when z_i is the
result of CASE II, III, IV, or V, and z_i+1 is the result of CASE I
2- that z_i and z_i+1 are within the error bounds when z_i is the
result of CASE I, and z_i+1 is the result of CASE II, III, IV, or V
3- that z_i and z_i+1 are within the error bound
There are 3 cases:
case I:
case I: z_i+1 was merged using the new merge operation
case II: z_i+1 was merged using the merge operation defined in the paper
case IIA: z_i+1 came from x_r+1 of Q'
case IIAi: z_i+1 = z_i
case IIAii: z_i+1 > z_i
case IIB: z_i+1 came from y_s+1 of Q''
case IIBi: z_i+1 = z_i
case IIBii: z_i+1 > z_i
(Note, if x_r+1 = x_r and y_s+1 = y_s, this is back to case I)
case I:
rmax_Q(z_i+1) - rmin_Q(z_i) = rmax_Q'(x_r+1) + rmax_Q''(y_s+1)
- rmin_Q'(x_r) - rmin_Q''(y_s)
= rmax_Q'(x_r+1) - rmin_Q'(x_r)
+ rmax_Q''(y_s+1) - rmin_Q''(y_s)
by inductive property of Q' and Q''
<= 2 n' eps + 2 n'' eps
<= 2 eps (n' + n'')
case IIAi:
if x_r = x_r+1:
by consecutive element contraction lemma:
if x_r and x_r+1 overlap, combine them
and there is no z_i, only z_i+1
otherwise, rmin_S(x_r) = rmax_S(x_r)
and rmin_S(x_r+1) = rmax_S(x_r+1)
rmax_Q(z_i+1) - rmin_Q(z_i)
=
consecutive element contraction lemma:
let s and s' be consecutive elements of summary S,
such that s = s'
CASE I:
if rmin_S(s') < rmax_S(s):
maximum rank of s' = rmax_S(s)
minimum rank of s = rmin_S(s')
[s and s' are now identical, discard one]
CASE II:
if rmin_S(s') >= rmax_S(s):
maximum rank of s' = rmin_S(s')
minimum rank of s = rmax_S(s)
let '[' represent rmin_S, and ']' represent rmax_S:
[ x_r ]
[ x_r+1 ]
let a = the value of x_r and x_r+1;
We know that somewhere between rmax_S(x_r) and rmin_S(x_r+1)
there is a sequence of consecutive 'a' values in the data
set. Therefore, there is at least one a in that interval.
-----------------------
Notation:
for readability, everything after a "_" should be read as a subscript
For example, z_i+1 should be read z sub(i+1), not z sub(i) + 1
x of Q, should be read "x element of Q"
Also, the original paper assumes that every element is unique.
Therefore, rmin(v) is a function that takes value v and returns
the minimum rank. Because we allow two elements to have the same
value, v must be considered to be a tuple. rmin(v) is selecting
one item from that tuple, rmax(v) is selecting another element.
This does not affect the correctness of any of the proofs from the
original paper.
'''
class _point(object):
def __init__(self, val, delta, width, next):
self.val, self.delta, self.width, self.next = val, delta, width, next
def __repr__(self):
return "_point(val={0}, delta={1}, width={2})".format(self.val, self.delta, self.width)
def biased_quantiles_f(r_i, n):
    # Invariant for the biased-quantiles condition: the allowable width grows
    # linearly with the rank r_i.  n is unused here but kept so this shares
    # the f(r_i, n) interface with targeted_quantiles_f.
    return 2 * ERROR_RATE * r_i
def targeted_quantiles_f(percentiles_errors):
def f(r_i, n):
bounds = []
for p, e in percentiles_errors:
if r_i < p * n:
bounds.append(2 * e * (n - r_i) / (1 - e))
else:
bounds.append(2 * e * r_i / p)
return min(bounds)
return f
ERROR_RATE = 0.001
COMPRESS_INTERVAL = 10 # int(1 / ERROR_RATE)
# val is the current (approximate) value
# delta is the difference between the lowest possible rank of the current
# point/value and the previous point
# width is the difference between the lowest and highest possible rank
# of the current point/value
# this data structure ensures that new points can be inserted into
# the middle of the linked list
# performance of naive algorithm is very bad -- 300 - 700 microseconds
# (0.3 to 0.7 ms). this is about 20-40x slower than python piece-wise
# parabolic algorithm; ~300x slower than C piece-wise parabolic
def test(q=None):
    # Micro-benchmark: insert 10k normally distributed samples into q (a
    # fresh Quantiles by default) and report the mean insert latency.
    # NOTE: Python 2 print statement -- this file is Python 2 only.
    import random, time
    data = [random.normalvariate(1.0, 1.0) for i in range(int(1e4))]
    q = q or Quantiles()
    start = time.time()
    for d in data:
        q.insert(d)
    print (time.time() - start) * 1e6 / len(data), "microseconds per point"
    return q
# about 400 microseconds per point
def test_targeted():
    """Benchmark insertion under the targeted-quantiles invariant."""
    targets = tuple((pct, 0.001)
                    for pct in (0.25, 0.5, 0.75, 0.9, 0.95, 0.99))
    invariant = targeted_quantiles_f(targets)
    return test(q=Quantiles(invariant))
if __name__ == "__main__":
    # Run both benchmark configurations (Python 2 print statements).
    print 'biased quantile condition'
    test()
    print 'targeted quantile condition'
    test_targeted()
|
{
"content_hash": "acd23e28cf61e145716b208d3b8d4e5a",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 108,
"avg_line_length": 32.05343511450382,
"alnum_prop": 0.5780741446376121,
"repo_name": "doublereedkurt/faststat",
"id": "90a3f1a377a77d9f6a520d972519b591cf44bfd0",
"size": "12597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/biased_quantile_stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "31525"
},
{
"name": "Python",
"bytes": "42951"
}
],
"symlink_target": ""
}
|
import functools
import httplib as http
import itertools
import waffle
from operator import itemgetter
from dateutil.parser import parse as parse_date
from django.utils import timezone
from flask import request, redirect
import pytz
from framework.database import autoload
from framework.exceptions import HTTPError
from framework.status import push_status_message
from osf import features
from osf.utils.sanitize import strip_html
from osf.utils.permissions import ADMIN
from osf.utils.functional import rapply
from osf.models import NodeLog, RegistrationSchema, DraftRegistration, Sanction
from website.project.decorators import (
must_be_valid_project,
must_be_contributor_and_not_group_member,
must_have_permission,
)
from website import language, settings
from website.ember_osf_web.decorators import ember_flag_is_active
from website.prereg import utils as prereg_utils
from website.project import utils as project_utils
from website.project.metadata.schemas import METASCHEMA_ORDERING
from website.project.metadata.utils import serialize_meta_schema, serialize_draft_registration
from website.project.utils import serialize_node
# Decorator that loads the DraftRegistration identified by the 'draft_id'
# URL kwarg into kwargs['draft'] before the view runs.
autoload_draft = functools.partial(autoload, DraftRegistration, 'draft_id', 'draft')
def get_schema_or_fail(schema_name, schema_version):
    """Return the RegistrationSchema matching name/version, or raise 404."""
    try:
        return RegistrationSchema.objects.get(
            name=schema_name, schema_version=schema_version)
    except RegistrationSchema.DoesNotExist:
        raise HTTPError(http.NOT_FOUND, data=dict(
            message_long='No RegistrationSchema record matching that query could be found'
        ))
def must_be_branched_from_node(func):
    """Decorator: load the draft and node, then require that the draft was
    branched from that node.

    :raises: HTTPError 410 if the draft is deleted; 400 if the draft does
        not belong to the node.
    """
    @autoload_draft
    @must_be_valid_project
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        node = kwargs['node']
        draft = kwargs['draft']
        if draft.deleted:
            raise HTTPError(http.GONE)
        # Idiom fix: `!=` instead of the harder-to-read `not a == b`.
        if draft.branched_from._id != node._id:
            raise HTTPError(
                http.BAD_REQUEST,
                data={
                    'message_short': 'Not a draft of this node',
                    'message_long': 'This draft registration is not created from the given node.'
                }
            )
        return func(*args, **kwargs)
    return wrapper
def validate_embargo_end_date(end_date_string, node):
    """
    Our reviewers have a window of time in which to review a draft reg. submission.
    If an embargo end_date that is within that window is at risk of causing
    validation errors down the line if the draft is approved and registered.

    The draft registration approval window is always greater than the time span
    for disallowed embargo end dates.

    :param end_date_string: ISO-ish date string supplied by the client
    :param node: node whose embargo policy is consulted
    :raises: HTTPError if end_date is less than the approval window or greater than the
    max embargo end date
    """
    # Treat the incoming date as UTC regardless of any timezone suffix.
    end_date = parse_date(end_date_string, ignoretz=True).replace(tzinfo=pytz.utc)
    today = timezone.now()
    if (end_date - today) <= settings.DRAFT_REGISTRATION_APPROVAL_PERIOD:
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Invalid embargo end date',
            'message_long': 'Embargo end date for this submission must be at least {0} days in the future.'.format(settings.DRAFT_REGISTRATION_APPROVAL_PERIOD)
        })
    elif not node._is_embargo_date_valid(end_date):
        max_end_date = today + settings.DRAFT_REGISTRATION_APPROVAL_PERIOD
        # BUG FIX: user-facing message was missing the verb ("must on or
        # before" -> "must be on or before").
        raise HTTPError(http.BAD_REQUEST, data={
            'message_short': 'Invalid embargo end date',
            'message_long': 'Embargo end date must be on or before {0}.'.format(max_end_date.isoformat())
        })
def validate_registration_choice(registration_choice):
    """Raise 400 unless registration_choice is 'embargo' or 'immediate'."""
    if registration_choice in ('embargo', 'immediate'):
        return
    raise HTTPError(
        http.BAD_REQUEST,
        data={
            'message_short': "Invalid 'registrationChoice'",
            'message_long': "Values for 'registrationChoice' must be either 'embargo' or 'immediate'."
        }
    )
def check_draft_state(draft):
    """Forbid (403) modifying drafts that are registered, in review, or approved."""
    registered_node = draft.registered_node
    registered_and_deleted = registered_node and registered_node.is_deleted
    if registered_node and not registered_and_deleted:
        raise HTTPError(http.FORBIDDEN, data={
            'message_short': 'This draft has already been registered',
            'message_long': 'This draft has already been registered and cannot be modified.'
        })
    if draft.is_pending_review:
        raise HTTPError(http.FORBIDDEN, data={
            'message_short': 'This draft is pending review',
            'message_long': 'This draft is pending review and cannot be modified.'
        })
    if draft.requires_approval and draft.is_approved and (not registered_and_deleted):
        raise HTTPError(http.FORBIDDEN, data={
            'message_short': 'This draft has already been approved',
            'message_long': 'This draft has already been approved and cannot be modified.'
        })
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
@must_be_branched_from_node
def submit_draft_for_review(auth, node, draft, *args, **kwargs):
    """Submit for approvals and/or notifications

    :return: serialized registration
    :rtype: dict
    :raises: HTTPError if embargo end date is invalid
    """
    # The Prereg Challenge is closed: reject all new submissions outright.
    if waffle.switch_is_active(features.OSF_PREREGISTRATION):
        raise HTTPError(http.GONE, data={
            'message_short': 'The Prereg Challenge has ended',
            'message_long': 'The Prereg Challenge has ended. No new submissions are accepted at this time.'
        })
    # Expect a JSON-API style payload: {"data": {"attributes": {...}}}.
    json_data = request.get_json()
    if 'data' not in json_data:
        raise HTTPError(http.BAD_REQUEST, data=dict(message_long='Payload must include "data".'))
    data = json_data['data']
    if 'attributes' not in data:
        raise HTTPError(http.BAD_REQUEST, data=dict(message_long='Payload must include "data/attributes".'))
    attributes = data['attributes']
    meta = {}
    registration_choice = attributes['registration_choice']
    validate_registration_choice(registration_choice)
    if registration_choice == 'embargo':
        # Initiate embargo
        end_date_string = attributes['lift_embargo']
        validate_embargo_end_date(end_date_string, node)
        meta['embargo_end_date'] = end_date_string
    meta['registration_choice'] = registration_choice
    # A live (non-deleted) registration already exists for this draft.
    if draft.registered_node and not draft.registered_node.is_deleted:
        raise HTTPError(http.BAD_REQUEST, data=dict(message_long='This draft has already been registered, if you wish to '
                                                    'register it again or submit it for review please create '
                                                    'a new draft.'))
    # Don't allow resubmission unless submission was rejected
    if draft.approval and draft.approval.state != Sanction.REJECTED:
        raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
    draft.submit_for_review(
        initiated_by=auth.user,
        meta=meta,
        save=True
    )
    # Prereg drafts additionally record a node log entry.
    if prereg_utils.get_prereg_schema() == draft.registration_schema:
        node.add_log(
            action=NodeLog.PREREG_REGISTRATION_INITIATED,
            params={'node': node._primary_key},
            auth=auth,
            save=False
        )
        node.save()
    push_status_message(language.AFTER_SUBMIT_FOR_REVIEW,
                        kind='info',
                        trust=False,
                        id='registration_submitted')
    return {
        'data': {
            'links': {
                'html': node.web_url_for('node_registrations', _guid=True)
            }
        },
        'status': 'initiated',
    }, http.ACCEPTED
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
@must_be_branched_from_node
def draft_before_register_page(auth, node, draft, *args, **kwargs):
    """Allow the user to select an embargo period and confirm registration

    :return: serialized Node + DraftRegistration
    :rtype: dict
    """
    serialized = serialize_node(node, auth, primary=True)
    serialized['draft'] = serialize_draft_registration(draft, auth)
    return serialized
@must_have_permission(ADMIN)
@must_be_branched_from_node
def get_draft_registration(auth, node, draft, *args, **kwargs):
    """Return a single draft registration

    :return: serialized draft registration
    :rtype: dict
    """
    serialized = serialize_draft_registration(draft, auth)
    return serialized, http.OK
@must_have_permission(ADMIN)
@must_be_valid_project
def get_draft_registrations(auth, node, *args, **kwargs):
    """List draft registrations for a node, newest first by 'updated'.

    :return: serialized draft registrations
    :rtype: dict
    """
    # BUG FIX: query-string values arrive as strings; itertools.islice
    # requires an integer stop, so coerce before slicing.
    count = int(request.args.get('count', 100))
    drafts = itertools.islice(node.draft_registrations_active, 0, count)
    serialized_drafts = [serialize_draft_registration(d, auth) for d in drafts]
    # 'updated' is an ISO timestamp, e.g. '2016-08-03T14:24:12Z'.
    sorted_serialized_drafts = sorted(serialized_drafts, key=itemgetter('updated'), reverse=True)
    return {
        'drafts': sorted_serialized_drafts
    }, http.OK
@must_have_permission(ADMIN)
@must_be_valid_project
@must_be_contributor_and_not_group_member
@ember_flag_is_active(features.EMBER_CREATE_DRAFT_REGISTRATION)
def new_draft_registration(auth, node, *args, **kwargs):
    """Create a new draft registration for the node

    :return: Redirect to the new draft's edit page
    :rtype: flask.redirect
    :raises: HTTPError
    """
    if node.is_registration:
        raise HTTPError(http.FORBIDDEN, data={
            'message_short': "Can't create draft",
            'message_long': 'Creating draft registrations on registered projects is not allowed.'
        })
    params = request.values
    schema_name = params.get('schema_name')
    if not schema_name:
        raise HTTPError(
            http.BAD_REQUEST,
            data={
                'message_short': 'Must specify a schema_name',
                'message_long': 'Please specify a schema_name'
            }
        )
    schema_version = int(params.get('schema_version', 2))
    meta_schema = get_schema_or_fail(schema_name, schema_version)
    draft = DraftRegistration.create_from_node(
        node,
        user=auth.user,
        schema=meta_schema,
        data={}
    )
    return redirect(node.web_url_for('edit_draft_registration_page', draft_id=draft._id, _guid=True))
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
@ember_flag_is_active(features.EMBER_EDIT_DRAFT_REGISTRATION)
@must_be_branched_from_node
def edit_draft_registration_page(auth, node, draft, **kwargs):
    """Draft registration editor

    :return: serialized DraftRegistration
    :rtype: dict
    """
    check_draft_state(draft)
    serialized = project_utils.serialize_node(node, auth, primary=True)
    serialized['draft'] = serialize_draft_registration(draft, auth)
    return serialized
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
@must_be_branched_from_node
def update_draft_registration(auth, node, draft, *args, **kwargs):
    """Update an existing draft registration

    :return: serialized draft registration
    :rtype: dict
    :raises: HTTPError
    """
    check_draft_state(draft)
    payload = request.get_json()
    # Strip HTML from every value in the (possibly nested) metadata payload.
    schema_data = rapply(payload.get('schema_data', {}), strip_html)
    schema_name = payload.get('schema_name')
    schema_version = payload.get('schema_version', 1)
    if schema_name:
        requested_schema = get_schema_or_fail(schema_name, schema_version)
        current_schema = draft.registration_schema
        if (current_schema.name, current_schema.schema_version) != (requested_schema.name, requested_schema.schema_version):
            draft.registration_schema = requested_schema
    draft.update_metadata(schema_data)
    draft.save()
    return serialize_draft_registration(draft, auth), http.OK
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
@must_be_branched_from_node
def delete_draft_registration(auth, node, draft, *args, **kwargs):
    """Permanently delete a draft registration

    :return: None
    :rtype: NoneType
    """
    registered = draft.registered_node
    if registered and not registered.is_deleted:
        raise HTTPError(
            http.FORBIDDEN,
            data={
                'message_short': 'Can\'t delete draft',
                'message_long': 'This draft has already been registered and cannot be deleted.'
            }
        )
    # Soft delete: stamp the deletion time rather than removing the row.
    draft.deleted = timezone.now()
    draft.save(update_fields=['deleted'])
    return None, http.NO_CONTENT
def get_metaschemas(*args, **kwargs):
    """
    List metaschemas with which a draft registration may be created. Only fetch the newest version for each schema.

    :return: serialized metaschemas
    :rtype: dict
    """
    # BUG FIX: query-string values arrive as strings, and slicing with a str
    # `count` raised TypeError on `meta_schemas[:count]`.  Coerce to int,
    # keeping 100 as the default when the parameter is absent.
    count = int(request.args.get('count', 100))
    include = request.args.get('include', 'latest')

    meta_schemas = RegistrationSchema.objects.filter(active=True)
    if include == 'latest':
        meta_schemas = RegistrationSchema.objects.get_latest_versions()

    # NOTE(review): assumes every active schema name appears in
    # METASCHEMA_ORDERING; .index() raises ValueError otherwise — confirm.
    meta_schemas = sorted(meta_schemas, key=lambda x: METASCHEMA_ORDERING.index(x.name))
    return {
        'meta_schemas': [
            serialize_meta_schema(ms) for ms in meta_schemas[:count]
        ]
    }, http.OK
|
{
"content_hash": "63ace23db4e763d514ca5c6780df518d",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 159,
"avg_line_length": 37.20670391061452,
"alnum_prop": 0.6653903903903904,
"repo_name": "mattclark/osf.io",
"id": "06bae1bf0da354748eca54cf34658f404778ef7e",
"size": "13320",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/project/views/drafts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "317371"
},
{
"name": "JavaScript",
"bytes": "1792241"
},
{
"name": "Mako",
"bytes": "654772"
},
{
"name": "Python",
"bytes": "10166997"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from . import managers
class RelatedType(models.Model):
    """A kind of relationship between two content objects (e.g. "see also")."""
    # Human-readable label for this relationship type.
    title = models.CharField(max_length=100)

    def __unicode__(self):
        return self.title
class RelatedContent(models.Model):
    """A typed, ordered link from one arbitrary model instance to another.

    Both endpoints use generic foreign keys, so any two content objects can
    be related regardless of their concrete model classes.
    """
    related_type = models.ForeignKey(RelatedType)
    # Lower values sort first (see Meta.ordering).
    order = models.IntegerField(default=0)

    # NOTE(review): related_name="from" — "from" is a Python keyword, so the
    # reverse accessor on ContentType is only reachable via getattr(); confirm
    # these reverse relations are actually used anywhere.
    source_type = models.ForeignKey(ContentType, related_name="from")
    source_id = models.PositiveIntegerField()
    source_object = generic.GenericForeignKey('source_type', 'source_id')

    destination_type = models.ForeignKey(ContentType, related_name="to")
    destination_id = models.PositiveIntegerField()
    destination_object = generic.GenericForeignKey('destination_type',
            'destination_id')

    objects = managers.RelatedContentManager()

    class Meta:
        ordering = ["order"]

    def __unicode__(self):
        return u"%s (%d): %s" % (self.related_type, self.order,
                self.destination_object)
|
{
"content_hash": "b1e589ead830d7951278e80f4445c16e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 31.91176470588235,
"alnum_prop": 0.6940092165898617,
"repo_name": "armstrong/armstrong.apps.related_content",
"id": "aff0641b697977318a8a9a82ff72da16fb5cba95",
"size": "1085",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "armstrong/apps/related_content/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32907"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
}
|
from datetime import date
from sphinx_readable_theme import get_html_theme_path
from topology import __version__
# Sphinx extensions used to build the Topology Framework documentation.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.inheritance_diagram',
    'sphinx.ext.intersphinx',
    'autoapi.sphinx',
    'plantweb.directive',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Topology Framework'
author = 'Hewlett Packard Enterprise Development LP'
# Copyright year tracks the build date automatically.
copyright = '2015-{}, {}'.format(date.today().year, author)

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'monokai'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output --------------------------------------------------

html_theme = 'readable'

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [get_html_theme_path()]

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'

# -- Custom Options -----------------------------------------------------------

# Add style overrides
def setup(app):
app.add_stylesheet('styles/custom.css')
# autoapi configuration
# Shared options for every module AutoAPI documents below.
autoapi_common_options = {
    'output': 'reference',
    'prune': True,
}
autoapi_modules = {
    'topology': autoapi_common_options,
    'topology_docker': autoapi_common_options,
    'topology_connect': autoapi_common_options,
    'topology_lib_ping': autoapi_common_options,
    'topology_lib_ip': autoapi_common_options,
}

# Configure Graphviz
graphviz_output_format = 'svg'

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None)
}
|
{
"content_hash": "8276377fdb710bd2817b89fb1cebb650",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 30.83941605839416,
"alnum_prop": 0.7053254437869823,
"repo_name": "HPENetworking/topology",
"id": "a626bdf65ffd7568d4b090b8f18a3d5bdc27b845",
"size": "4867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "205921"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import RequestException
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, clean_symbols
from flexget.utils.tools import parse_filesize
log = logging.getLogger('limetorrents')
class Limetorrents(object):
    """
    Limetorrents search plugin.

    Accepts either a bare boolean/string (treated as the category) or a
    config dict with 'category' and 'order_by' keys.
    """

    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'category': {'type': 'string', 'enum': ['all', 'anime', 'applications', 'games', 'movies', 'music',
                                                            'tv', 'other'], 'default': 'all'},
                    'order_by': {'type': 'string', 'enum': ['date', 'seeds'], 'default': 'date'}
                },
                'additionalProperties': False
            }
        ]
    }

    base_url = 'https://www.limetorrents.cc/'
    errors = False

    @plugin.internet(log)
    def search(self, task, entry, config):
        """
        Search for entries on Limetorrents.

        :param task: current FlexGet task (used for its requests session)
        :param entry: entry providing 'search_strings' / 'title'
        :param config: plugin config (dict or shorthand category value)
        :return: set of result entries with torrent metadata
        """
        # Shorthand config: a non-dict value is the category.
        if not isinstance(config, dict):
            config = {'category': config}

        order_by = ''
        if isinstance(config.get('order_by'), str):
            if config['order_by'] != 'date':
                # The site encodes non-default ordering as '<field>/1' in the URL.
                order_by = '{0}/1'.format(config['order_by'])

        category = 'all'
        if isinstance(config.get('category'), str):
            category = '{0}'.format(config['category'])

        entries = set()

        for search_string in entry.get('search_strings', [entry['title']]):
            # No special characters - use dashes instead of %20
            cleaned_search_string = clean_symbols(search_string).replace(' ', '-')

            query = 'search/{0}/{1}/{2}'.format(category, cleaned_search_string.encode('utf8'), order_by)
            log.debug('Using search: %s; category: %s; ordering: %s', cleaned_search_string, category, order_by or 'default')
            try:
                page = task.requests.get(self.base_url + query)
                log.debug('requesting: %s', page.url)
            except RequestException as e:
                # Best-effort: skip this search string, keep the rest.
                log.error('Limetorrents request failed: %s', e)
                continue

            soup = get_soup(page.content)
            # 'csprite_dl14' anchors mark the download links in result rows.
            if soup.find('a', attrs={'class': 'csprite_dl14'}) is not None:
                for link in soup.findAll('a', attrs={'class': 'csprite_dl14'}):
                    row = link.find_parent('tr')
                    info_url = str(link.get('href'))

                    # Get the title from the URL as it's complete versus the actual Title text which gets cut off
                    title = str(link.next_sibling.get('href'))
                    title = title[:title.rfind('-torrent')].replace('-', ' ')
                    # Drop the leading '/' from the URL path.
                    title = title[1:]

                    data = row.findAll('td', attrs={'class': 'tdnormal'})
                    size = str(data[1].text).replace(',', '')

                    seeds = int(row.find('td', attrs={'class': 'tdseed'}).text.replace(',', ''))
                    leeches = int(row.find('td', attrs={'class': 'tdleech'}).text.replace(',', ''))

                    size = parse_filesize(size)

                    e = Entry()

                    e['url'] = info_url
                    e['title'] = title
                    e['torrent_seeds'] = seeds
                    e['torrent_leeches'] = leeches
                    e['search_sort'] = torrent_availability(e['torrent_seeds'], e['torrent_leeches'])
                    e['content_size'] = size

                    entries.add(e)

        return entries
@event('plugin.register')
def register_plugin():
    # Register the class above as the 'limetorrents' search plugin.
    plugin.register(Limetorrents, 'limetorrents', interfaces=['search'], api_ver=2)
|
{
"content_hash": "a316bcbfc526b21f9fed71f42f29f8a6",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 125,
"avg_line_length": 37.2962962962963,
"alnum_prop": 0.5240814299900695,
"repo_name": "jawilson/Flexget",
"id": "e162352f0cca435caad169c39c82985a12efdb9a",
"size": "4028",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flexget/plugins/sites/limetorrents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "1988"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3364620"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
from django.db import models
import pickle
class RestModelBase(models.Model):
    """A base model with a schema-free extension field called ``rest``.

    Attributes that have no fixed column are pickled into the ``rest`` text
    field; fixed attributes live on the model as usual.
    """
    # NOTE(review): unpickling is unsafe on untrusted data — assumes the
    # ``rest`` column is only ever written by this application; confirm.
    rest = models.TextField(verbose_name="JSON data.", default=pickle.dumps({}, 0),
                            help_text="Additional fields without fixed schema")

    class Meta:
        abstract = True

    def getRest(self):
        """Return the deserialized ``rest`` payload as a dict."""
        return pickle.loads(self.rest)

    def setRest(self, d):
        """Serialize dict *d* into the ``rest`` column."""
        self.rest = pickle.dumps(d, 1)

    def asJSON(self):
        """Return a repr of the merged fixed fields and ``rest`` payload."""
        d = self.getRest()
        d.update(self.__dict__)
        return repr(d)

    def updateFromJSON(self, newData, replaceRest=False):
        """Apply *newData* to this instance.

        Keys matching existing attributes are set directly; unknown keys go
        into the ``rest`` payload.  When *replaceRest* is true, the previous
        ``rest`` payload is discarded first.
        """
        if replaceRest:
            d = {}
        else:
            d = self.getRest()
        # BUG FIX: the loop header was missing its colon (SyntaxError), and
        # the membership test was `if k in hasattr(self, k)` — `in` against a
        # bool raises TypeError.  The intent is a plain hasattr() check.
        for k, v in newData.items():
            if hasattr(self, k):
                setattr(self, k, v)
            else:
                d[k] = v
        self.setRest(d)
class DataChannel(RestModelBase):
    "Information about a single data source"
    chanID = models.IntegerField(primary_key=True, unique=True, verbose_name="Channel ID",
                                 help_text="Unique integer primary key for the channel, you can specify but it must be unique.")
    name = models.CharField(max_length=255, verbose_name="Name of the data channel",
                            help_text="Human readable name of the data channel")
    # True when this channel's values are computed from other channels
    # rather than sampled directly.
    derived = models.BooleanField(help_text="Channel data is derived from other channels",
                                  default=False)
    source = models.CharField(max_length=255, verbose_name="Source code URL", blank=True,
                              help_text="Where to get source code for this channel's data generator")
class GeoZone(models.Model):
    """A geographic zone which has been calculated from specific GeoLocations"""
    lat = models.FloatField(verbose_name="Latitude", help_text="Latitude of GeoZone average in degrees",
                            null=False, editable=True)
    lon = models.FloatField(verbose_name="Longitude", help_text="Longitude of GeoZone average in degrees",
                            null=False, editable=True)
    radius = models.FloatField(help_text="Radius of the zone in meters",
                               null=False, editable=True)

    def __repr__(self):
        # BUG FIX: __repr__ must return a str; returning self.__dict__ (a
        # dict) makes repr() raise TypeError.  Render the dict as a string.
        return '%s(%r)' % (type(self).__name__, self.__dict__)
class GeoLocation(models.Model):
    """Store information about the location of a data entry"""
    lat = models.FloatField(verbose_name="Latitude", help_text="Latitude of data point in degrees",
                            editable=False, null=False)
    lon = models.FloatField(verbose_name="Longitude", help_text="Longitude of data point in degrees",
                            editable=False, null=False)
    # Optional back-reference to the aggregated zone this point belongs to.
    zone = models.ForeignKey('GeoZone', blank=True, null=True, editable=True)

    def __repr__(self):
        # BUG FIX: __repr__ must return a str; returning self.__dict__ (a
        # dict) makes repr() raise TypeError.  Render the dict as a string.
        return '%s(%r)' % (type(self).__name__, self.__dict__)
class DataEntry(RestModelBase):
    """Table for storing context data, either raw or derrived.

    This table is designed for flexibility so the schema is not complete,
    most of the channel data is stored as JSON.
    """
    time = models.DateTimeField(auto_now_add=True, editable=False,
                                verbose_name="Timestamp for this data entry")
    # NOTE(review): models.TimeDeltaField is not a stock Django field —
    # presumably supplied by a patched or third-party Django; confirm it
    # exists in the deployed environment.
    interval = models.TimeDeltaField(default=0, editable=False,
                                     verbose_name="Time interval width for this data entry.")
    geoloc = models.ForeignKey('GeoLocation', blank=True, null=True, editable=False,
                               verbose_name="Geo-location for this data entry.")
    channel = models.ForeignKey('DataChannel', editable=False, verbose_name="Data channel")
|
{
"content_hash": "f5af1a1807d90949e5ccfd333c580a3b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 116,
"avg_line_length": 46.794871794871796,
"alnum_prop": 0.6249315068493151,
"repo_name": "DanielCasner/Context-Server",
"id": "4e5d35677f26302700da0286d3980c578ce7e005",
"size": "3650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4698"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem051.py
#
# Prime digit replacements
# ========================
# Published on Friday, 29th August 2003, 06:00 pm
#
# By replacing the 1st digit of the 2-digit number *3, it turns out that six of
# the nine possible values: 13, 23, 43, 53, 73, and 83, are all prime. By
# replacing the 3rd and 4th digits of 56**3 with the same digit, this 5-digit
# number is the first example having seven primes among the ten generated
# numbers, yielding the family: 56003, 56113, 56333, 56443, 56663, 56773, and
# 56993. Consequently 56003, being the first member of this family, is the
# smallest prime with this property. Find the smallest prime which, by
# replacing part of the number (not necessarily adjacent digits) with the same
# digit, is part of an eight prime value family.
import projecteuler as pe
def prime_flags(limit):
    """Return a bytearray where index i is 1 iff i is prime (for i < limit)."""
    flags = bytearray([1]) * limit
    flags[0:2] = b'\x00\x00'
    for i in range(2, int(limit ** 0.5) + 1):
        if flags[i]:
            # Knock out every multiple of i starting at i*i.
            flags[i * i::i] = bytearray(len(range(i * i, limit, i)))
    return flags


def smallest_prime_family(family_size=8, max_digits=7):
    """Return the smallest prime in a *family_size*-prime family.

    A family is formed by fixing a set of digit positions (which must hold
    the same digit, since the prime itself is a member) and substituting
    every digit 0-9; members keep the same digit count (no leading zeros).
    Searches candidates with up to *max_digits* digits; returns None if no
    qualifying family exists in that range.
    """
    from collections import defaultdict
    from itertools import combinations

    for ndigits in range(1, max_digits + 1):
        limit = 10 ** ndigits
        flags = prime_flags(limit)
        lo = 10 ** (ndigits - 1) if ndigits > 1 else 2
        families = defaultdict(list)
        for candidate in range(lo, limit):
            if not flags[candidate]:
                continue
            text = str(candidate)
            # Positions of each distinct digit; only equal digits can be
            # replaced together (the candidate must belong to its own family).
            positions = defaultdict(list)
            for idx, ch in enumerate(text):
                positions[ch].append(idx)
            for idxs in positions.values():
                for r in range(1, len(idxs) + 1):
                    for combo in combinations(idxs, r):
                        pattern = list(text)
                        for idx in combo:
                            pattern[idx] = '*'
                        families[''.join(pattern)].append(candidate)
        winners = [min(members) for members in families.values()
                   if len(members) >= family_size]
        if winners:
            return min(winners)
    return None


def main():
    """Solve Project Euler problem 51 and print the answer."""
    print(smallest_prime_family(8))


if __name__ == "__main__":
    main()
|
{
"content_hash": "84d1051ee26414562d72ebf40e06d574",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 38.291666666666664,
"alnum_prop": 0.7018498367791077,
"repo_name": "olduvaihand/ProjectEuler",
"id": "318438ec7acd4d940a3d47006f3871fcf8acb506",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/problem051.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "0"
},
{
"name": "Python",
"bytes": "422751"
}
],
"symlink_target": ""
}
|
import logging
from itertools import chain
from unittest import mock
from django.contrib.auth.models import Group, Permission
from django.contrib.messages import constants as message_constants
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.test import TestCase, override_settings
from django.urls import reverse
from wagtail.core.models import Page, PageRevision
from wagtail.core.signals import page_published
from wagtail.tests.testapp.models import SimplePage
from wagtail.tests.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestApproveRejectModeration(TestCase, WagtailTestUtils):
    """Tests for the admin approve/reject moderation views."""

    def setUp(self):
        # The user whose revision will be moderated.
        self.submitter = self.create_superuser(
            username='submitter',
            email='submitter@email.com',
            password='password',
        )

        self.user = self.login()

        # Create a page and submit it for moderation
        root_page = Page.objects.get(id=2)
        self.page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            content="hello",
            live=False,
            has_unpublished_changes=True,
        )
        root_page.add_child(instance=self.page)

        self.page.save_revision(user=self.submitter, submitted_for_moderation=True)
        self.revision = self.page.get_latest_revision()

    def test_approve_moderation_view(self):
        """
        This posts to the approve moderation view and checks that the page was approved
        """
        # Connect a mock signal handler to page_published signal
        mock_handler = mock.MagicMock()
        page_published.connect(mock_handler)

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(self.revision.id, )))

        # Check that the user was redirected to the dashboard
        self.assertRedirects(response, reverse('wagtailadmin_home'))

        page = Page.objects.get(id=self.page.id)
        # Page must be live
        self.assertTrue(page.live, "Approving moderation failed to set live=True")
        # Page should now have no unpublished changes
        self.assertFalse(
            page.has_unpublished_changes,
            "Approving moderation failed to set has_unpublished_changes=False"
        )

        # Check that the page_published signal was fired
        self.assertEqual(mock_handler.call_count, 1)

        mock_call = mock_handler.mock_calls[0][2]
        self.assertEqual(mock_call['sender'], self.page.specific_class)
        self.assertEqual(mock_call['instance'], self.page)
        self.assertIsInstance(mock_call['instance'], self.page.specific_class)

    def test_approve_moderation_when_later_revision_exists(self):
        """Approving an older revision must not clobber newer draft edits."""
        self.page.title = "Goodbye world!"
        self.page.save_revision(user=self.submitter, submitted_for_moderation=False)

        response = self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(self.revision.id, )))

        # Check that the user was redirected to the dashboard
        self.assertRedirects(response, reverse('wagtailadmin_home'))

        page = Page.objects.get(id=self.page.id)
        # Page must be live
        self.assertTrue(page.live, "Approving moderation failed to set live=True")
        # Page content should be the submitted version, not the published one
        self.assertEqual(page.title, "Hello world!")
        # Page should still have unpublished changes
        self.assertTrue(
            page.has_unpublished_changes,
            "has_unpublished_changes incorrectly cleared on approve_moderation when a later revision exists"
        )

    def test_approve_moderation_view_bad_revision_id(self):
        """
        This tests that the approve moderation view handles invalid revision ids correctly
        """
        # Post
        response = self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(12345, )))

        # Check that the user received a 404 response
        self.assertEqual(response.status_code, 404)

    def test_approve_moderation_view_bad_permissions(self):
        """
        This tests that the approve moderation view doesn't allow users without moderation permissions
        """
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(self.revision.id, )))

        # Check that the user received a 302 redirected response
        self.assertEqual(response.status_code, 302)

    def test_reject_moderation_view(self):
        """
        This posts to the reject moderation view and checks that the page was rejected
        """
        # Post
        response = self.client.post(reverse('wagtailadmin_pages:reject_moderation', args=(self.revision.id, )))

        # Check that the user was redirected to the dashboard
        self.assertRedirects(response, reverse('wagtailadmin_home'))

        # Page must not be live
        self.assertFalse(Page.objects.get(id=self.page.id).live)

        # Revision must no longer be submitted for moderation
        self.assertFalse(PageRevision.objects.get(id=self.revision.id).submitted_for_moderation)

    def test_reject_moderation_view_bad_revision_id(self):
        """
        This tests that the reject moderation view handles invalid revision ids correctly
        """
        # Post
        response = self.client.post(reverse('wagtailadmin_pages:reject_moderation', args=(12345, )))

        # Check that the user received a 404 response
        self.assertEqual(response.status_code, 404)

    def test_reject_moderation_view_bad_permissions(self):
        """
        This tests that the reject moderation view doesn't allow users without moderation permissions
        """
        # Remove privileges from user
        self.user.is_superuser = False
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
        )
        self.user.save()

        # Post
        response = self.client.post(reverse('wagtailadmin_pages:reject_moderation', args=(self.revision.id, )))

        # Check that the user received a 302 redirected response
        self.assertEqual(response.status_code, 302)

    def test_preview_for_moderation(self):
        """A revision awaiting moderation can be previewed by a moderator."""
        response = self.client.get(reverse('wagtailadmin_pages:preview_for_moderation', args=(self.revision.id, )))

        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'tests/simple_page.html')
        self.assertContains(response, "Hello world!")
class TestNotificationPreferences(TestCase, WagtailTestUtils):
    """Tests for moderation email notifications and per-user preferences."""

    def setUp(self):
        # Find root page
        self.root_page = Page.objects.get(id=2)

        # Login
        self.user = self.login()

        # Create two moderator users for testing 'submitted' email
        self.moderator = self.create_superuser('moderator', 'moderator@email.com', 'password')
        self.moderator2 = self.create_superuser('moderator2', 'moderator2@email.com', 'password')

        # Create a submitter for testing 'rejected' and 'approved' emails
        self.submitter = self.create_user('submitter', 'submitter@email.com', 'password')

        # User profiles for moderator2 and the submitter
        self.moderator2_profile = UserProfile.get_for_user(self.moderator2)
        self.submitter_profile = UserProfile.get_for_user(self.submitter)

        # Create a page and submit it for moderation
        self.child_page = SimplePage(
            title="Hello world!",
            slug='hello-world',
            content="hello",
            live=False,
        )
        self.root_page.add_child(instance=self.child_page)

        # POST data to edit the page
        self.post_data = {
            'title': "I've been edited!",
            'content': "Some content",
            'slug': 'hello-world',
            'action-submit': "Submit",
        }

    def submit(self):
        # Submit the page edit through the admin, triggering 'submitted' emails.
        return self.client.post(reverse('wagtailadmin_pages:edit', args=(self.child_page.id, )), self.post_data)

    def silent_submit(self):
        """
        Sets up the child_page as needing moderation, without making a request
        """
        self.child_page.save_revision(user=self.submitter, submitted_for_moderation=True)
        self.revision = self.child_page.get_latest_revision()

    def approve(self):
        # Approve the pending revision through the admin view.
        return self.client.post(reverse('wagtailadmin_pages:approve_moderation', args=(self.revision.id, )))

    def reject(self):
        # Reject the pending revision through the admin view.
        return self.client.post(reverse('wagtailadmin_pages:reject_moderation', args=(self.revision.id, )))

    def test_vanilla_profile(self):
        # Check that the vanilla profile has rejected notifications on
        self.assertEqual(self.submitter_profile.rejected_notifications, True)

        # Check that the vanilla profile has approved notifications on
        self.assertEqual(self.submitter_profile.approved_notifications, True)

    def test_approved_notifications(self):
        # Set up the page version
        self.silent_submit()
        # Approve
        self.approve()

        # Submitter must receive an approved email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['submitter@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been approved')

    def test_approved_notifications_preferences_respected(self):
        # Submitter doesn't want 'approved' emails
        self.submitter_profile.approved_notifications = False
        self.submitter_profile.save()

        # Set up the page version
        self.silent_submit()
        # Approve
        self.approve()

        # No email to send
        self.assertEqual(len(mail.outbox), 0)

    def test_rejected_notifications(self):
        # Set up the page version
        self.silent_submit()
        # Reject
        self.reject()

        # Submitter must receive a rejected email
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['submitter@email.com'])
        self.assertEqual(mail.outbox[0].subject, 'The page "Hello world!" has been rejected')

    def test_rejected_notification_preferences_respected(self):
        # Submitter doesn't want 'rejected' emails
        self.submitter_profile.rejected_notifications = False
        self.submitter_profile.save()

        # Set up the page version
        self.silent_submit()
        # Reject
        self.reject()

        # No email to send
        self.assertEqual(len(mail.outbox), 0)

    @override_settings(WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS=False)
    def test_disable_superuser_notification(self):
        # Add one of the superusers to the moderator group
        self.moderator.groups.add(Group.objects.get(name='Moderators'))

        response = self.submit()

        # Should be redirected to explorer page
        self.assertEqual(response.status_code, 302)

        # Check that the non-moderator superuser is not being notified
        expected_emails = 1
        self.assertEqual(len(mail.outbox), expected_emails)
        # Use chain as the 'to' field is a list of recipients
        email_to = list(chain.from_iterable([m.to for m in mail.outbox]))
        self.assertIn(self.moderator.email, email_to)
        self.assertNotIn(self.moderator2.email, email_to)

    @mock.patch.object(EmailMultiAlternatives, 'send', side_effect=IOError('Server down'))
    def test_email_send_error(self, mock_fn):
        # Silence expected error logging while the send is failing.
        logging.disable(logging.CRITICAL)
        # Approve
        self.silent_submit()
        response = self.approve()
        logging.disable(logging.NOTSET)

        # An email that fails to send should return a message rather than crash the page
        self.assertEqual(response.status_code, 302)
        response = self.client.get(reverse('wagtailadmin_home'))

        # There should be one "approved" message and one "failed to send notifications"
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 2)
        self.assertEqual(messages[0].level, message_constants.SUCCESS)
        self.assertEqual(messages[1].level, message_constants.ERROR)

    def test_email_headers(self):
        # Submit
        self.submit()

        msg_headers = set(mail.outbox[0].message().items())
        headers = {('Auto-Submitted', 'auto-generated')}
        self.assertTrue(headers.issubset(msg_headers), msg='Message is missing the Auto-Submitted header.',)
|
{
"content_hash": "4fd77133927c834f19cf7d6f80839030",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 115,
"avg_line_length": 39.56346749226006,
"alnum_prop": 0.6638234603646608,
"repo_name": "kaedroho/wagtail",
"id": "becbc58660017821ed0ced240f47ad6fc7dd425d",
"size": "12779",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "wagtail/admin/tests/pages/test_moderation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3323"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "505436"
},
{
"name": "JavaScript",
"bytes": "279901"
},
{
"name": "Makefile",
"bytes": "977"
},
{
"name": "Python",
"bytes": "4671883"
},
{
"name": "SCSS",
"bytes": "201389"
},
{
"name": "Shell",
"bytes": "7662"
},
{
"name": "TypeScript",
"bytes": "30266"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import json
import logging
import warnings
from pip._vendor import six
from pip._vendor.six.moves import zip_longest
from pip._internal.basecommand import Command
from pip._internal.cmdoptions import index_group, make_option_group
from pip._internal.exceptions import CommandError
from pip._internal.index import PackageFinder
from pip._internal.utils.deprecation import RemovedInPip11Warning
from pip._internal.utils.misc import (
dist_is_editable, get_installed_distributions,
)
from pip._internal.utils.packaging import get_installer
logger = logging.getLogger(__name__)
class ListCommand(Command):
    """
    List installed packages, including editables.

    Packages are listed in a case-insensitive sorted order.
    """
    # Command name and help metadata consumed by pip's command registry.
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'
def __init__(self, *args, **kw):
    """Register the command-line options for ``pip list``."""
    super(ListCommand, self).__init__(*args, **kw)

    cmd_opts = self.cmd_opts

    cmd_opts.add_option(
        '-o', '--outdated',
        action='store_true',
        default=False,
        help='List outdated packages')
    cmd_opts.add_option(
        '-u', '--uptodate',
        action='store_true',
        default=False,
        help='List uptodate packages')
    cmd_opts.add_option(
        '-e', '--editable',
        action='store_true',
        default=False,
        help='List editable projects.')
    cmd_opts.add_option(
        '-l', '--local',
        action='store_true',
        default=False,
        help=('If in a virtualenv that has global access, do not list '
              'globally-installed packages.'),
    )
    self.cmd_opts.add_option(
        '--user',
        dest='user',
        action='store_true',
        default=False,
        help='Only output packages installed in user-site.')

    cmd_opts.add_option(
        '--pre',
        action='store_true',
        default=False,
        help=("Include pre-release and development versions. By default, "
              "pip only finds stable versions."),
    )

    cmd_opts.add_option(
        '--format',
        action='store',
        dest='list_format',
        default="columns",
        choices=('legacy', 'columns', 'freeze', 'json'),
        help="Select the output format among: columns (default), freeze, "
             "json, or legacy.",
    )

    cmd_opts.add_option(
        '--not-required',
        action='store_true',
        dest='not_required',
        help="List packages that are not dependencies of "
             "installed packages.",
    )

    # --exclude-editable / --include-editable share one destination flag;
    # editables are included by default.
    cmd_opts.add_option(
        '--exclude-editable',
        action='store_false',
        dest='include_editable',
        help='Exclude editable package from output.',
    )
    cmd_opts.add_option(
        '--include-editable',
        action='store_true',
        dest='include_editable',
        help='Include editable package from output.',
        default=True,
    )
    index_opts = make_option_group(index_group, self.parser)

    self.parser.insert_option_group(0, index_opts)
    self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
    """
    Create a package finder appropriate to this list command.
    """
    finder_kwargs = {
        'find_links': options.find_links,
        'index_urls': index_urls,
        'allow_all_prereleases': options.pre,
        'trusted_hosts': options.trusted_hosts,
        'process_dependency_links': options.process_dependency_links,
        'session': session,
    }
    return PackageFinder(**finder_kwargs)
def run(self, options, args):
    """Entry point: gather installed distributions per the CLI options and
    print them in the selected format."""
    if options.list_format == "legacy":
        warnings.warn(
            "The legacy format has been deprecated and will be removed "
            "in the future.",
            RemovedInPip11Warning,
        )

    # --outdated and --uptodate are mutually exclusive filters.
    if options.outdated and options.uptodate:
        raise CommandError(
            "Options --outdated and --uptodate cannot be combined.")

    packages = get_installed_distributions(
        local_only=options.local,
        user_only=options.user,
        editables_only=options.editable,
        include_editables=options.include_editable,
    )

    if options.outdated:
        packages = self.get_outdated(packages, options)
    elif options.uptodate:
        packages = self.get_uptodate(packages, options)

    if options.not_required:
        packages = self.get_not_required(packages, options)

    self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
    """Return only the distributions whose latest available version is
    newer than the installed one."""
    outdated = []
    for dist in self.iter_packages_latest_infos(packages, options):
        if dist.latest_version > dist.parsed_version:
            outdated.append(dist)
    return outdated
def get_uptodate(self, packages, options):
    """Return only the distributions already at their latest version."""
    uptodate = []
    for dist in self.iter_packages_latest_infos(packages, options):
        if dist.latest_version == dist.parsed_version:
            uptodate.append(dist)
    return uptodate
def get_not_required(self, packages, options):
dep_keys = set()
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
return {pkg for pkg in packages if pkg.key not in dep_keys}
    def iter_packages_latest_infos(self, packages, options):
        """Yield each dist in *packages* annotated with index information.

        Sets ``dist.latest_version`` and ``dist.latest_filetype`` ('wheel'
        or 'sdist') on every yielded distribution.  Distributions with no
        candidate on the configured indexes are silently skipped.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        # Collect dependency links advertised by the installed packages so
        # the finder can consider them as candidate sources too.
        dependency_links = []
        for dist in packages:
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)
            for dist in packages:
                typ = 'unknown'
                all_candidates = finder.find_all_candidates(dist.key)
                if not options.pre:
                    # Remove prereleases
                    all_candidates = [candidate for candidate in all_candidates
                                      if not candidate.version.is_prerelease]
                if not all_candidates:
                    continue
                # NOTE(review): relies on the finder's private sort key to
                # pick the preferred candidate -- same ordering pip uses at
                # install time.
                best_candidate = max(all_candidates,
                                     key=finder._candidate_sort_key)
                remote_version = best_candidate.version
                if best_candidate.location.is_wheel:
                    typ = 'wheel'
                else:
                    typ = 'sdist'
                # This is dirty but makes the rest of the code much cleaner
                dist.latest_version = remote_version
                dist.latest_filetype = typ
                yield dist
def output_legacy(self, dist, options):
if options.verbose >= 1:
return '%s (%s, %s, %s)' % (
dist.project_name,
dist.version,
dist.location,
get_installer(dist),
)
elif dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
return '%s (%s)' % (dist.project_name, dist.version)
def output_legacy_latest(self, dist, options):
return '%s - Latest: %s [%s]' % (
self.output_legacy(dist, options),
dist.latest_version,
dist.latest_filetype,
)
    def output_package_listing(self, packages, options):
        """Sort *packages* by name and emit them in the selected format.

        Supported ``options.list_format`` values: 'columns', 'freeze',
        'json' and the deprecated 'legacy'.  Note the columns format prints
        nothing at all when the package list is empty.
        """
        packages = sorted(
            packages,
            key=lambda dist: dist.project_name.lower(),
        )
        if options.list_format == 'columns' and packages:
            data, header = format_for_columns(packages, options)
            self.output_package_listing_columns(data, header)
        elif options.list_format == 'freeze':
            for dist in packages:
                if options.verbose >= 1:
                    logger.info("%s==%s (%s)", dist.project_name,
                                dist.version, dist.location)
                else:
                    logger.info("%s==%s", dist.project_name, dist.version)
        elif options.list_format == 'json':
            logger.info(format_for_json(packages, options))
        elif options.list_format == "legacy":
            for dist in packages:
                if options.outdated:
                    logger.info(self.output_legacy_latest(dist, options))
                else:
                    logger.info(self.output_legacy(dist, options))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
    """Left-justify *vals* (a non-empty list of rows) into aligned columns.

    Returns ``(lines, sizes)`` where *lines* are the formatted rows and
    *sizes* the computed column widths.  Rows may be ragged; missing cells
    render as empty strings.

    Fix: missing cells used to be measured as ``len(str(None)) == 4``,
    artificially forcing every partially-filled column to width >= 4.  They
    now contribute no width at all, matching how they are displayed.
    """
    # From pfmoore on GitHub:
    # https://github.com/pypa/pip/issues/3651#issuecomment-216932564
    assert len(vals) > 0
    sizes = [0] * max(len(x) for x in vals)
    for row in vals:
        sizes = [max(s, len(str(c)) if c is not None else 0)
                 for s, c in zip_longest(sizes, row)]
    result = []
    for row in vals:
        display = " ".join([str(c).ljust(s) if c is not None else ''
                            for s, c in zip_longest(sizes, row)])
        result.append(display)
    return result, sizes
def format_for_columns(pkgs, options):
    """Convert the package data into something usable
    by output_package_listing_columns.

    Returns ``(data, header)``: one row per package plus the matching
    column titles.
    """
    running_outdated = options.outdated
    # Adjust the header for the `pip list --outdated` case.
    header = ["Package", "Version"]
    if running_outdated:
        header = ["Package", "Version", "Latest", "Type"]
    # Location column appears for verbose output or when any package is
    # editable (short-circuit keeps dist_is_editable calls lazy).
    show_location = options.verbose >= 1 or any(
        dist_is_editable(x) for x in pkgs)
    if show_location:
        header.append("Location")
    if options.verbose >= 1:
        header.append("Installer")
    data = []
    for proj in pkgs:
        # if we're working on the 'outdated' list, separate out the
        # latest_version and type
        row = [proj.project_name, proj.version]
        if running_outdated:
            row.extend([proj.latest_version, proj.latest_filetype])
        if options.verbose >= 1 or dist_is_editable(proj):
            row.append(proj.location)
        if options.verbose >= 1:
            row.append(get_installer(proj))
        data.append(row)
    return data, header
def format_for_json(packages, options):
    """Serialise *packages* to a JSON array of objects (one per package).

    Verbose adds location/installer; --outdated adds the latest version
    and its file type.
    """
    def _record(dist):
        # One JSON object per installed distribution.
        info = {
            'name': dist.project_name,
            'version': six.text_type(dist.version),
        }
        if options.verbose >= 1:
            info['location'] = dist.location
            info['installer'] = get_installer(dist)
        if options.outdated:
            info['latest_version'] = six.text_type(dist.latest_version)
            info['latest_filetype'] = dist.latest_filetype
        return info
    return json.dumps([_record(dist) for dist in packages])
|
{
"content_hash": "ad311edecff2e6d78deedb4e9cfdbf3a",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 79,
"avg_line_length": 33.8600583090379,
"alnum_prop": 0.5559669364560014,
"repo_name": "kushalbhola/MyStuff",
"id": "09f633f392d1489f65da21a7391539e42c4f1f21",
"size": "11614",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/pip/_internal/commands/list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
}
|
'''The script reads alignments in PSL format and prints out
unmapped sequences in FASTA format.
'''
import sys
from Bio import SeqIO
def parse_mapped(pslfile):
    """Return the set of mapped query names from a PSL alignment file.

    The query name is column 10 (index 9) of each PSL record.

    Fix: the original left the file handle open; a ``with`` block now
    guarantees it is closed.

    NOTE(review): assumes a headerless PSL (every line has >= 10 columns);
    a psLayout header would raise IndexError -- confirm input format.
    """
    mapped = set()
    with open(pslfile) as alignments:
        for line in alignments:
            name = line.strip().split()[9]
            mapped.add(name)
    return mapped
def select_seq(mapped_seqs, fasta_file):
    """Yield records from *fasta_file* whose IDs are not in *mapped_seqs*.

    Each unmapped record ID is also echoed to stderr as progress output.
    (Python 2 print syntax; the whole script targets Python 2.)
    """
    for record in SeqIO.parse(fasta_file, 'fasta'):
        if record.id not in mapped_seqs:
            print >> sys.stderr, record.id
            yield record
if __name__=='__main__':
    # Command line: unmapped_seq.py <psl file> <fasta file>
    try:
        pslfile = sys.argv[1]
        fastafile = sys.argv[2]
    except IndexError:
        print >> sys.stderr, 'unmapped_seq.py <psl file> <fasta file>'
        # Fix: the original fell through after printing the usage message
        # and then crashed with NameError on the unset variables.
        sys.exit(1)
    mapped_seqs = parse_mapped(pslfile)
    print >> sys.stderr, 'Writing unmapped sequences ...'
    # Stream the unmapped records straight to stdout in FASTA format.
    SeqIO.write(select_seq(mapped_seqs, fastafile), sys.stdout, 'fasta')
|
{
"content_hash": "fd918468168e8083d1c0835c2e5855c0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 72,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.6325581395348837,
"repo_name": "likit/BioUtils",
"id": "8ccc043fb083396171f8c3a79fce7e19cf95a1ae",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unmapped_seq.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "55417"
},
{
"name": "Shell",
"bytes": "305"
}
],
"symlink_target": ""
}
|
from networktables.util import ChooserControl
autonomous_seconds = 15  # seconds each autonomous mode is allowed to run
def test_all_autonomous(control, fake_time, robot):
    '''
    This test runs all possible autonomous modes that can be selected
    by the autonomous switcher.
    This should work for most robots. If it doesn't work for yours,
    and it's not a code issue with your robot, please file a bug on
    github.
    '''
    # Simulation controller: alternates 1 second of 'disabled' with
    # `autonomous_seconds` of autonomous mode, selecting the next entry of
    # the 'Autonomous Mode' chooser on each cycle until all are exercised.
    class AutonomousTester:
        def __init__(self):
            self.initialized = False   # chooser discovered yet?
            self.init_time = None      # sim time when the chooser appeared
            self.chooser = None        # ChooserControl handle
            self.state = 'auto'        # current phase: 'auto' or 'disabled'
            self.currentChoice = None  # index into self.choices
            self.until = None          # sim time of the next phase switch
        def initialize_chooser(self, tm):
            # Poll until the robot code has published its chooser entries;
            # returns False while the chooser is still empty.
            if self.chooser is None:
                self.chooser = ChooserControl('Autonomous Mode')
            self.choices = self.chooser.getChoices()
            if len(self.choices) == 0:
                return False
            # Start in 'disabled' with an immediately-expired timer so the
            # first on_step switches straight into the first mode.
            self.state = 'disabled'
            self.currentChoice = -1
            self.until = tm
            self.init_time = tm
            self.initialized = True
            return True
        def on_step(self, tm):
            # Called once per simulation step; returning False ends the test.
            if not self.initialized:
                if not self.initialize_chooser(tm):
                    assert tm < 10, "Robot didn't create a chooser within 10 seconds, probably an error"
                    return True
            if self.state == 'auto':
                # Autonomous period finished: disable the robot for 1 second.
                if tm >= self.until:
                    self.until = tm + 1
                    self.state = 'disabled'
                    control.set_operator_control(enabled=False)
            elif self.state == 'disabled':
                # Cooldown over: select the next mode and run autonomous.
                if tm >= self.until:
                    control.set_autonomous()
                    self.state = 'auto'
                    self.until = tm + autonomous_seconds
                    self.currentChoice += 1
                    if self.currentChoice >= len(self.choices):
                        # Every chooser entry has been exercised -- stop.
                        return False
                    self.chooser.setSelected(self.choices[self.currentChoice])
            return True
    controller = control.run_test(AutonomousTester)
    # Make sure they ran for the correct amount of time
    assert int(fake_time.get()) == int(len(controller.choices)*(autonomous_seconds+1) + controller.init_time)
|
{
"content_hash": "b52e7004f6ada760d7e5d649dbf50f5e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 109,
"avg_line_length": 34.027027027027025,
"alnum_prop": 0.49682287529785546,
"repo_name": "Twinters007/robotpy-wpilib-utilities",
"id": "c7109459238d82ee1309ff81362c79b2ce288f2e",
"size": "2519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robotpy_ext/autonomous/selector_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "250056"
},
{
"name": "Shell",
"bytes": "957"
}
],
"symlink_target": ""
}
|
import json
import sys
import six
from keystone.common import dependency
from keystone import config
from keystone import exception
from keystone import token
from keystone.token import provider
from keystone import trust
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
LOG = log.getLogger(__name__)
CONF = config.CONF
class V2TokenDataHelper(object):
    """Creates V2 token data."""
    @classmethod
    def format_token(cls, token_ref, roles_ref=None, catalog_ref=None):
        """Build the v2 ``{'access': ...}`` response body from a token ref.

        :param token_ref: dict with at least 'id', 'user' and 'metadata';
            may carry 'expires', 'bind' and 'tenant'
        :param roles_ref: list of role dicts for the token (default: [])
        :param catalog_ref: internal-format catalog, converted via
            :meth:`format_catalog` when not None
        """
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        if roles_ref is None:
            roles_ref = []
        # NOTE: the default is evaluated on every call, so a missing
        # 'expires' key yields a fresh default expiry time.
        expires = token_ref.get('expires', token.default_expire_time())
        if expires is not None:
            # Normalize datetime objects to an ISO-8601 string.
            if not isinstance(expires, six.text_type):
                expires = timeutils.isotime(expires)
        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  'issued_at': timeutils.strtime()
                                  },
                        'user': {'id': user_ref['id'],
                                 'name': user_ref['name'],
                                 'username': user_ref['name'],
                                 'roles': roles_ref,
                                 'roles_links': metadata_ref.get('roles_links',
                                                                 [])
                                 }
                        }
             }
        if 'bind' in token_ref:
            o['access']['token']['bind'] = token_ref['bind']
        if 'tenant' in token_ref and token_ref['tenant']:
            # v2 only ever returns enabled tenants.
            token_ref['tenant']['enabled'] = True
            o['access']['token']['tenant'] = token_ref['tenant']
        if catalog_ref is not None:
            o['access']['serviceCatalog'] = V2TokenDataHelper.format_catalog(
                catalog_ref)
        if metadata_ref:
            if 'is_admin' in metadata_ref:
                o['access']['metadata'] = {'is_admin':
                                           metadata_ref['is_admin']}
            else:
                o['access']['metadata'] = {'is_admin': 0}
            if 'roles' in metadata_ref:
                o['access']['metadata']['roles'] = metadata_ref['roles']
        if CONF.trust.enabled and 'trust_id' in metadata_ref:
            o['access']['trust'] = {'trustee_user_id':
                                    metadata_ref['trustee_user_id'],
                                    'id': metadata_ref['trust_id']
                                    }
        return o
    @classmethod
    def format_catalog(cls, catalog_ref):
        """Munge catalogs from internal to output format.

        Internal catalogs look like::
          {$REGION: {
              {$SERVICE: {
                  $key1: $value1,
                  ...
                  }
              }
          }
        The legacy api wants them to look like::
          [{'name': $SERVICE[name],
            'type': $SERVICE,
            'endpoints': [{
                'tenantId': $tenant_id,
                ...
                'region': $REGION,
                }],
            'endpoints_links': [],
           }]
        """
        if not catalog_ref:
            return []
        services = {}
        for region, region_ref in six.iteritems(catalog_ref):
            for service, service_ref in six.iteritems(region_ref):
                # Merge same-named services from different regions into one
                # entry whose endpoint list carries the region per endpoint.
                new_service_ref = services.get(service, {})
                new_service_ref['name'] = service_ref.pop('name')
                new_service_ref['type'] = service
                new_service_ref['endpoints_links'] = []
                service_ref['region'] = region
                endpoints_ref = new_service_ref.get('endpoints', [])
                endpoints_ref.append(service_ref)
                new_service_ref['endpoints'] = endpoints_ref
                services[service] = new_service_ref
        # NOTE(review): on Python 2 this is a list; on Python 3 it would be
        # a view object -- callers appear to only iterate it.
        return services.values()
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
                     'trust_api')
class V3TokenDataHelper(object):
    """Token data helper: assembles the v3 ``{'token': ...}`` body."""
    def __init__(self):
        # When trusts are enabled, override the injected trust_api with a
        # concrete manager instance.
        if CONF.trust.enabled:
            self.trust_api = trust.Manager()
    def _get_filtered_domain(self, domain_id):
        """Return just the id/name of a domain, hiding other fields."""
        domain_ref = self.identity_api.get_domain(domain_id)
        return {'id': domain_ref['id'], 'name': domain_ref['name']}
    def _get_filtered_project(self, project_id):
        """Return id/name of a project plus its filtered parent domain."""
        project_ref = self.assignment_api.get_project(project_id)
        filtered_project = {
            'id': project_ref['id'],
            'name': project_ref['name']}
        filtered_project['domain'] = self._get_filtered_domain(
            project_ref['domain_id'])
        return filtered_project
    def _populate_scope(self, token_data, domain_id, project_id):
        """Attach domain or project scope to *token_data* (idempotent)."""
        if 'domain' in token_data or 'project' in token_data:
            # scope already exist, no need to populate it again
            return
        if domain_id:
            token_data['domain'] = self._get_filtered_domain(domain_id)
        if project_id:
            token_data['project'] = self._get_filtered_project(project_id)
    def _get_roles_for_user(self, user_id, domain_id, project_id):
        """Return full role dicts for the user's domain or project roles.

        NOTE(review): when both ids are given, the project roles silently
        replace the domain roles -- confirm that precedence is intended.
        """
        roles = []
        if domain_id:
            roles = self.assignment_api.get_roles_for_user_and_domain(
                user_id, domain_id)
        if project_id:
            roles = self.assignment_api.get_roles_for_user_and_project(
                user_id, project_id)
        return [self.assignment_api.get_role(role_id) for role_id in roles]
    def _populate_user(self, token_data, user_id, trust):
        """Attach the (possibly impersonated) user section to *token_data*.

        :raises exception.Forbidden: if the trustor account is disabled
        """
        if 'user' in token_data:
            # no need to repopulate user if it already exists
            return
        user_ref = self.identity_api.get_user(user_id)
        if CONF.trust.enabled and trust and 'OS-TRUST:trust' not in token_data:
            trustor_user_ref = (self.identity_api.get_user(
                trust['trustor_user_id']))
            if not trustor_user_ref['enabled']:
                raise exception.Forbidden(_('Trustor is disabled.'))
            if trust['impersonation']:
                # Impersonation: the token appears to belong to the trustor.
                user_ref = trustor_user_ref
            token_data['OS-TRUST:trust'] = (
                {
                    'id': trust['id'],
                    'trustor_user': {'id': trust['trustor_user_id']},
                    'trustee_user': {'id': trust['trustee_user_id']},
                    'impersonation': trust['impersonation']
                })
        filtered_user = {
            'id': user_ref['id'],
            'name': user_ref['name'],
            'domain': self._get_filtered_domain(user_ref['domain_id'])}
        token_data['user'] = filtered_user
    def _populate_oauth_section(self, token_data, access_token):
        """Attach the OS-OAUTH1 section when an OAuth access token is used."""
        if access_token:
            access_token_id = access_token['id']
            consumer_id = access_token['consumer_id']
            token_data['OS-OAUTH1'] = ({'access_token_id': access_token_id,
                                        'consumer_id': consumer_id})
    def _populate_roles(self, token_data, user_id, domain_id, project_id,
                        trust, access_token):
        """Attach the effective roles, honouring OAuth and trust filtering.

        :raises exception.Forbidden: trustee has none of the delegated roles
        :raises exception.Unauthorized: user has no roles on the scope
        """
        if 'roles' in token_data:
            # no need to repopulate roles
            return
        if access_token:
            # OAuth path: only roles that were delegated to the access
            # token are exposed.
            filtered_roles = []
            authed_role_ids = json.loads(access_token['role_ids'])
            all_roles = self.assignment_api.list_roles()
            for role in all_roles:
                for authed_role in authed_role_ids:
                    if authed_role == role['id']:
                        filtered_roles.append({'id': role['id'],
                                               'name': role['name']})
            token_data['roles'] = filtered_roles
            return
        if CONF.trust.enabled and trust:
            token_user_id = trust['trustor_user_id']
            token_project_id = trust['project_id']
            #trusts do not support domains yet
            token_domain_id = None
        else:
            token_user_id = user_id
            token_project_id = project_id
            token_domain_id = domain_id
        if token_domain_id or token_project_id:
            roles = self._get_roles_for_user(token_user_id,
                                             token_domain_id,
                                             token_project_id)
            filtered_roles = []
            if CONF.trust.enabled and trust:
                # Keep only the trustor roles actually delegated by the
                # trust; every delegated role must still be held.
                for trust_role in trust['roles']:
                    match_roles = [x for x in roles
                                   if x['id'] == trust_role['id']]
                    if match_roles:
                        filtered_roles.append(match_roles[0])
                    else:
                        raise exception.Forbidden(
                            _('Trustee has no delegated roles.'))
            else:
                for role in roles:
                    filtered_roles.append({'id': role['id'],
                                           'name': role['name']})
            # user has no project or domain roles, therefore access denied
            if not filtered_roles:
                if token_project_id:
                    msg = _('User %(user_id)s has no access '
                            'to project %(project_id)s') % {
                                'user_id': user_id,
                                'project_id': token_project_id}
                else:
                    msg = _('User %(user_id)s has no access '
                            'to domain %(domain_id)s') % {
                                'user_id': user_id,
                                'domain_id': token_domain_id}
                LOG.debug(msg)
                raise exception.Unauthorized(msg)
            token_data['roles'] = filtered_roles
    def _populate_service_catalog(self, token_data, user_id,
                                  domain_id, project_id, trust):
        """Attach the v3 service catalog for scoped tokens (idempotent)."""
        if 'catalog' in token_data:
            # no need to repopulate service catalog
            return
        if CONF.trust.enabled and trust:
            # Catalog is computed for the trustor, not the trustee.
            user_id = trust['trustor_user_id']
        if project_id or domain_id:
            try:
                service_catalog = self.catalog_api.get_v3_catalog(
                    user_id, project_id)
            except exception.NotImplemented:
                service_catalog = {}
            # TODO(gyee): v3 service catalog is not quite completed yet
            # TODO(ayoung): Enforce Endpoints for trust
            token_data['catalog'] = service_catalog
    def _populate_token_dates(self, token_data, expires=None, trust=None):
        """Stamp ISO-8601 'expires_at' and 'issued_at' onto *token_data*."""
        if not expires:
            expires = token.default_expire_time()
        if not isinstance(expires, six.string_types):
            expires = timeutils.isotime(expires, subsecond=True)
        token_data['expires_at'] = expires
        token_data['issued_at'] = timeutils.isotime(subsecond=True)
    def get_token_data(self, user_id, method_names, extras,
                       domain_id=None, project_id=None, expires=None,
                       trust=None, token=None, include_catalog=True,
                       bind=None, access_token=None):
        """Assemble and return the full ``{'token': ...}`` response body.

        :param token: an existing token body whose already-populated
            sections (roles/user/catalog/project/domain) are reused
        :raises exception.Forbidden: trust user mismatch or OAuth disabled
            paths raised by the populate helpers
        """
        token_data = {'methods': method_names,
                      'extras': extras}
        # We've probably already written these to the token
        if token:
            for x in ('roles', 'user', 'catalog', 'project', 'domain'):
                if x in token:
                    token_data[x] = token[x]
        if CONF.trust.enabled and trust:
            if user_id != trust['trustee_user_id']:
                raise exception.Forbidden(_('User is not a trustee.'))
        if bind:
            token_data['bind'] = bind
        self._populate_scope(token_data, domain_id, project_id)
        self._populate_user(token_data, user_id, trust)
        self._populate_roles(token_data, user_id, domain_id, project_id, trust,
                             access_token)
        if include_catalog:
            self._populate_service_catalog(token_data, user_id, domain_id,
                                           project_id, trust)
        self._populate_token_dates(token_data, expires=expires, trust=trust)
        self._populate_oauth_section(token_data, access_token)
        return {'token': token_data}
@dependency.optional('oauth_api')
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
                     'token_api', 'trust_api')
class BaseProvider(provider.Provider):
    """Common issue/validate/revoke logic shared by token providers.

    Subclasses supply ``_get_token_id``.  Note: this module uses Python 2
    three-expression ``raise`` syntax to re-raise with the original
    traceback, so it is Python 2 only.
    """
    def __init__(self, *args, **kwargs):
        super(BaseProvider, self).__init__(*args, **kwargs)
        if CONF.trust.enabled:
            self.trust_api = trust.Manager()
        self.v3_token_data_helper = V3TokenDataHelper()
        self.v2_token_data_helper = V2TokenDataHelper()
    def get_token_version(self, token_data):
        """Return token.provider.V2/V3 for *token_data* or raise.

        :raises token.provider.UnsupportedTokenVersionException: when the
            version cannot be determined
        """
        if token_data and isinstance(token_data, dict):
            if 'token_version' in token_data:
                if token_data['token_version'] in token.provider.VERSIONS:
                    return token_data['token_version']
            # FIXME(morganfainberg): deprecate the following logic in future
            # revisions. It is better to just specify the token_version in
            # the token_data itself. This way we can support future versions
            # that might have the same fields.
            if 'access' in token_data:
                return token.provider.V2
            if 'token' in token_data and 'methods' in token_data['token']:
                return token.provider.V3
        raise token.provider.UnsupportedTokenVersionException()
    def issue_v2_token(self, token_ref, roles_ref=None,
                       catalog_ref=None):
        """Format, id and persist a v2 token; returns (token_id, token_data)."""
        token_data = self.v2_token_data_helper.format_token(
            token_ref, roles_ref, catalog_ref)
        token_id = self._get_token_id(token_data)
        token_data['access']['token']['id'] = token_id
        try:
            expiry = token_data['access']['token']['expires']
            if isinstance(expiry, six.string_types):
                expiry = timeutils.normalize_time(
                    timeutils.parse_isotime(expiry))
            data = dict(key=token_id,
                        id=token_id,
                        expires=expiry,
                        user=token_ref['user'],
                        tenant=token_ref['tenant'],
                        metadata=token_ref['metadata'],
                        token_data=token_data,
                        bind=token_ref.get('bind'),
                        trust_id=token_ref['metadata'].get('trust_id'),
                        token_version=token.provider.V2)
            self.token_api.create_token(token_id, data)
        except Exception:
            exc_info = sys.exc_info()
            # an identical token may have been created already.
            # if so, return the token_data as it is also identical
            try:
                self.token_api.get_token(token_id)
            except exception.TokenNotFound:
                # Python 2 re-raise preserving the original traceback.
                raise exc_info[0], exc_info[1], exc_info[2]
        return (token_id, token_data)
    def issue_v3_token(self, user_id, method_names, expires_at=None,
                       project_id=None, domain_id=None, auth_context=None,
                       trust=None, metadata_ref=None, include_catalog=True):
        """Build, id and persist a v3 token; returns (token_id, token_data).

        :raises exception.Forbidden: oauth1 auth requested while OAuth
            support is disabled
        """
        # for V2, trust is stashed in metadata_ref
        if (CONF.trust.enabled and not trust and metadata_ref and
                'trust_id' in metadata_ref):
            trust = self.trust_api.get_trust(metadata_ref['trust_id'])
        access_token = None
        if 'oauth1' in method_names:
            if self.oauth_api:
                access_token_id = auth_context['access_token_id']
                access_token = self.oauth_api.get_access_token(access_token_id)
            else:
                raise exception.Forbidden(_('Oauth is disabled.'))
        token_data = self.v3_token_data_helper.get_token_data(
            user_id,
            method_names,
            auth_context.get('extras') if auth_context else None,
            domain_id=domain_id,
            project_id=project_id,
            expires=expires_at,
            trust=trust,
            bind=auth_context.get('bind') if auth_context else None,
            include_catalog=include_catalog,
            access_token=access_token)
        token_id = self._get_token_id(token_data)
        try:
            expiry = token_data['token']['expires_at']
            if isinstance(expiry, six.string_types):
                expiry = timeutils.normalize_time(
                    timeutils.parse_isotime(expiry))
            # FIXME(gyee): is there really a need to store roles in metadata?
            role_ids = []
            if metadata_ref is None:
                metadata_ref = {}
            if 'project' in token_data['token']:
                # project-scoped token, fill in the v2 token data
                # all we care are the role IDs
                role_ids = [r['id'] for r in token_data['token']['roles']]
                metadata_ref = {'roles': role_ids}
            if trust:
                metadata_ref.setdefault('trust_id', trust['id'])
                metadata_ref.setdefault('trustee_user_id',
                                        trust['trustee_user_id'])
            data = dict(key=token_id,
                        id=token_id,
                        expires=expiry,
                        user=token_data['token']['user'],
                        tenant=token_data['token'].get('project'),
                        metadata=metadata_ref,
                        token_data=token_data,
                        trust_id=trust['id'] if trust else None,
                        token_version=token.provider.V3)
            self.token_api.create_token(token_id, data)
        except Exception:
            exc_info = sys.exc_info()
            # an identical token may have been created already.
            # if so, return the token_data as it is also identical
            try:
                self.token_api.get_token(token_id)
            except exception.TokenNotFound:
                # Python 2 re-raise preserving the original traceback.
                raise exc_info[0], exc_info[1], exc_info[2]
        return (token_id, token_data)
    def _verify_token(self, token_id):
        """Verify the given token and return the token_ref."""
        token_ref = self.token_api.get_token(token_id)
        return self._verify_token_ref(token_ref)
    def _verify_token_ref(self, token_ref):
        """Verify and return the given token_ref."""
        if not token_ref:
            raise exception.Unauthorized()
        return token_ref
    def revoke_token(self, token_id):
        """Delete *token_id* from the token backend."""
        self.token_api.delete_token(token_id=token_id)
    def _assert_default_domain(self, token_ref):
        """Make sure we are operating on default domain only.

        :raises exception.Unauthorized: any user/project/trust party of a
            V3 token lives outside the default domain
        """
        if (token_ref.get('token_data') and
                self.get_token_version(token_ref.get('token_data')) ==
                token.provider.V3):
            # this is a V3 token
            msg = _('Non-default domain is not supported')
            # user in a non-default is prohibited
            if (token_ref['token_data']['token']['user']['domain']['id'] !=
                    CONF.identity.default_domain_id):
                raise exception.Unauthorized(msg)
            # domain scoping is prohibited
            if token_ref['token_data']['token'].get('domain'):
                raise exception.Unauthorized(
                    _('Domain scoped token is not supported'))
            # project in non-default domain is prohibited
            if token_ref['token_data']['token'].get('project'):
                project = token_ref['token_data']['token']['project']
                project_domain_id = project['domain']['id']
                # scoped to project in non-default domain is prohibited
                if project_domain_id != CONF.identity.default_domain_id:
                    raise exception.Unauthorized(msg)
            # if token is scoped to trust, both trustor and trustee must
            # be in the default domain. Furthermore, the delegated project
            # must also be in the default domain
            metadata_ref = token_ref['metadata']
            if CONF.trust.enabled and 'trust_id' in metadata_ref:
                trust_ref = self.trust_api.get_trust(metadata_ref['trust_id'])
                trustee_user_ref = self.identity_api.get_user(
                    trust_ref['trustee_user_id'])
                if (trustee_user_ref['domain_id'] !=
                        CONF.identity.default_domain_id):
                    raise exception.Unauthorized(msg)
                trustor_user_ref = self.identity_api.get_user(
                    trust_ref['trustor_user_id'])
                if (trustor_user_ref['domain_id'] !=
                        CONF.identity.default_domain_id):
                    raise exception.Unauthorized(msg)
                project_ref = self.assignment_api.get_project(
                    trust_ref['project_id'])
                if (project_ref['domain_id'] !=
                        CONF.identity.default_domain_id):
                    raise exception.Unauthorized(msg)
    def validate_v2_token(self, token_id):
        """Verify *token_id* and return its v2-formatted body."""
        token_ref = self._verify_token(token_id)
        return self._validate_v2_token_ref(token_ref)
    def _validate_v2_token_ref(self, token_ref):
        """Return v2 token data for *token_ref*, rebuilding it if needed.

        :raises exception.TokenNotFound: wraps any ValidationError
        """
        try:
            self._assert_default_domain(token_ref)
            # FIXME(gyee): performance or correctness? Should we return the
            # cached token or reconstruct it? Obviously if we are going with
            # the cached token, any role, project, or domain name changes
            # will not be reflected. One may argue that with PKI tokens,
            # we are essentially doing cached token validation anyway.
            # Lets go with the cached token strategy. Since token
            # management layer is now pluggable, one can always provide
            # their own implementation to suit their needs.
            token_data = token_ref.get('token_data')
            if (not token_data or
                    self.get_token_version(token_data) !=
                    token.provider.V2):
                # token is created by old v2 logic
                metadata_ref = token_ref['metadata']
                roles_ref = []
                for role_id in metadata_ref.get('roles', []):
                    roles_ref.append(self.assignment_api.get_role(role_id))
                # Get a service catalog if possible
                # This is needed for on-behalf-of requests
                catalog_ref = None
                if token_ref.get('tenant'):
                    catalog_ref = self.catalog_api.get_catalog(
                        token_ref['user']['id'],
                        token_ref['tenant']['id'],
                        metadata_ref)
                token_data = self.v2_token_data_helper.format_token(
                    token_ref, roles_ref, catalog_ref)
            return token_data
        except exception.ValidationError as e:
            LOG.exception(_('Failed to validate token'))
            raise exception.TokenNotFound(e)
    def validate_v3_token(self, token_id):
        """Verify *token_id* and return its v3-formatted body."""
        try:
            token_ref = self._verify_token(token_id)
            token_data = self._validate_v3_token_ref(token_ref)
            return token_data
        except (exception.ValidationError,
                exception.UserNotFound):
            # NOTE(review): this handler only logs and falls through, so
            # the method returns None on failure while the v2 path raises
            # TokenNotFound -- probably should re-raise; confirm intent.
            LOG.exception(_('Failed to validate token'))
    def _validate_v3_token_ref(self, token_ref):
        """Return v3 token data for *token_ref*, upgrading v2 refs."""
        # FIXME(gyee): performance or correctness? Should we return the
        # cached token or reconstruct it? Obviously if we are going with
        # the cached token, any role, project, or domain name changes
        # will not be reflected. One may argue that with PKI tokens,
        # we are essentially doing cached token validation anyway.
        # Lets go with the cached token strategy. Since token
        # management layer is now pluggable, one can always provide
        # their own implementation to suit their needs.
        token_data = token_ref.get('token_data')
        if not token_data or 'token' not in token_data:
            # token ref is created by V2 API
            project_id = None
            project_ref = token_ref.get('tenant')
            if project_ref:
                project_id = project_ref['id']
            token_data = self.v3_token_data_helper.get_token_data(
                token_ref['user']['id'],
                ['password', 'token'],
                {},
                project_id=project_id,
                bind=token_ref.get('bind'),
                expires=token_ref['expires'])
        return token_data
    def validate_token(self, token_id):
        """Dispatch validation by detected token version.

        :raises token.provider.UnsupportedTokenVersionException: unknown
            token version
        """
        token_ref = self._verify_token(token_id)
        version = self.get_token_version(token_ref)
        if version == token.provider.V3:
            return self._validate_v3_token_ref(token_ref)
        elif version == token.provider.V2:
            return self._validate_v2_token_ref(token_ref)
        raise token.provider.UnsupportedTokenVersionException()
|
{
"content_hash": "becbf4aa3313a6276d7c7d9a0b1e756a",
"timestamp": "",
"source": "github",
"line_count": 579,
"max_line_length": 79,
"avg_line_length": 43.59240069084629,
"alnum_prop": 0.5265055467511885,
"repo_name": "derekchiang/keystone",
"id": "48925ac6d03b261454c8a7aeca895d6e04b3771a",
"size": "25871",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/token/providers/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2833790"
},
{
"name": "Shell",
"bytes": "10512"
}
],
"symlink_target": ""
}
|
import json
import pycurl
import re
import sys
# Pick an in-memory buffer type matching the interpreter.  Fix: the original
# did `from StringIO import BytesIO` on Python 3, which can never work -- the
# py2-only `StringIO` module was removed in Python 3; the byte buffer lives
# in `io` there.  The Python 2 branch is unchanged.
if sys.version_info >= (3,):
    from io import BytesIO
else:
    from StringIO import StringIO
import occi
# for curl helper callback function
header = []
def get_header2(buff):
    """pycurl HEADERFUNCTION callback for Python 2: collect raw header lines
    into the module-level ``header`` list (not thread-safe)."""
    global header
    header.append(buff)
def get_header3(buff):
    """pycurl HEADERFUNCTION callback for Python 3: decode the bytes header
    line (HTTP headers are latin-1 per spec) and collect it into the
    module-level ``header`` list (not thread-safe)."""
    global header
    header.append(buff.decode('iso-8859-1'))
class Transport:
    """Transport base class. Curl is used, keystone authentication supported.
    Available methods: delete(), get(), post(), put().
    """
    # charset=... parameter inside a Content-Type header value
    reEncoding = re.compile(r';\s*charset=(\S+)')
    # "Name: value" header line -> (name, value) groups
    reHeader = re.compile(r'([^:]*)\s*:\s*(.*)')
    # status line, e.g. "HTTP/1.1 200 OK"
    reStatus = re.compile(r'^HTTP')
    def dprint(self, s):
        """Print a tagged debug message when verbose mode is enabled
        (Python 2 print statement; the module targets Python 2)."""
        if self.verbose:
            print '[pOCCI.curl] %s' % s
    def __init__(self, config):
        """Create and configure the shared curl handle from *config*.

        :param config: dict with at least 'authtype' ('basic' or 'x509');
            optional keys: user/passwd, cert/key/passphrase, capath,
            cachain, ignoressl, timeouts, curlverbose
        :raises occi.TransportError: required credentials missing for the
            selected auth type
        """
        self.auth = {}
        self.authtype = config['authtype']
        self.config = config
        self.retry = False
        self.verbose = False
        if 'curlverbose' in config:
            self.verbose = config['curlverbose']
        # Validate up front that the selected auth type has its inputs.
        if self.authtype == 'basic':
            if 'user' not in config:
                raise occi.TransportError('User and password is required for "basic" authentication')
        elif self.authtype == 'x509':
            if 'cert' not in config:
                raise occi.TransportError('SSL certificate and key is required for "x509" authentication')
        self.curl = pycurl.Curl()
        curl = self.curl
        curl.setopt(pycurl.VERBOSE, self.verbose)
        if 'connectiontimeout' in config:
            curl.setopt(pycurl.CONNECTTIMEOUT, config['connectiontimeout'])
        if 'timeout' in config:
            curl.setopt(pycurl.TIMEOUT, config['timeout'])
        if 'capath' in config and config['capath']:
            curl.setopt(pycurl.CAPATH, config['capath'])
        if 'cachain' in config and config['cachain']:
            curl.setopt(pycurl.CAINFO, config['cachain'])
        # Disable check of SSL certificate
        if 'ignoressl' in config and config['ignoressl']:
            curl.setopt(pycurl.SSL_VERIFYPEER, 0)
            curl.setopt(pycurl.SSL_VERIFYHOST, 0)
        self.dprint('authtype: %s' % self.authtype)
        if self.authtype == 'basic':
            user = self.config['user']
            password = ''
            if 'passwd' in self.config:
                password = self.config['passwd']
            curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
            curl.setopt(pycurl.USERPWD, '%s:%s' % (user, password))
            self.dprint('user: %s' % user)
        elif self.authtype == 'x509':
            if 'cert' in config and config['cert']:
                curl.setopt(pycurl.SSLCERT, config['cert'])
                self.dprint('cert: %s' % config['cert'])
            if 'key' in config and config['key']:
                curl.setopt(pycurl.SSLKEY, config['key'])
                self.dprint('key: %s' % config['key'])
            if 'passphrase' in config and config['passphrase']:
                curl.setopt(pycurl.SSLCERTPASSWD, config['passphrase'])
        # HTTP header response
        if sys.version_info >= (3,):
            curl.setopt(pycurl.HEADERFUNCTION, get_header3)
        else:
            curl.setopt(pycurl.HEADERFUNCTION, get_header2)
    def clean(self):
        """Reset per-request curl state before issuing a new request.

        Clears any custom HTTP verb, extra headers and POST flag left over
        from the previous call, and drops the retry flag.
        """
        self.retry = False
        curl = self.curl
        curl.unsetopt(pycurl.CUSTOMREQUEST)
        curl.setopt(pycurl.HTTPHEADER, [])
        curl.setopt(pycurl.POST, False)
    def perform(self):
        """Execute the prepared curl request and parse the response.

        :returns: list ``[body, header_lines, http_status, content_type,
            header_dict]`` -- body is text on Python 3 (decoded using the
            Content-Type charset, default iso-8859-1), header keys are
            lower-cased
        :raises occi.TransportError: the curl transfer itself failed

        NOTE(review): response headers are accumulated through the
        module-level ``header`` global, so this is not thread-safe.
        """
        global header
        curl = self.curl
        if sys.version_info >= (3,):
            buffer = BytesIO()
        else:
            buffer = StringIO()
        curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
        header = []
        try:
            curl.perform()
        except pycurl.error as pe:
            raise occi.TransportError(pe)
        # 'Server: Apache/2.2.22 (Debian)\r\n'
        h = {}
        http_status = None
        for item in header:
            m = Transport.reHeader.match(item.rstrip())
            if m and len(m.groups()) >= 2:
                key = m.group(1)
                value = m.group(2)
                h[key.lower()] = value
            else:
                # Not a "name: value" pair -- keep the last HTTP status line.
                if Transport.reStatus.match(item):
                    http_status = item.rstrip()
        content_type = None
        if 'content-type' in h:
            content_type = re.split(';', h['content-type'])[0]
        body = buffer.getvalue()
        buffer.close()
        if sys.version_info >= (3,):
            # Decode the byte body using the declared charset, if any.
            encoding = 'iso-8859-1'
            if content_type:
                match = Transport.reEncoding.search(h['content-type'])
                if match:
                    encoding = match.group(1)
            body = body.decode(encoding)
        return [body, header, http_status, content_type, h]
def auth_keystone(self, url, tenants=True):
    """Authenticate against a Keystone v2.0 service and store the token.

    :param string url: Keystone endpoint URL (from WWW-Authenticate)
    :param bool tenants: also discover tenants when not already known

    Fills self.auth['token'] and, when *tenants* is requested,
    self.auth['tenants'] / self.auth['tenants_list'].

    :raises occi.TransportError: on HTTP failure or unexpected payload.
    """
    if not url.endswith('/'):
        url += '/'
    self.auth['url'] = url
    self.dprint('Keystone URL: %s' % self.auth['url'])
    version = 'v2.0'
    url += version
    # build the credentials payload according to the transport auth type
    if self.authtype == 'basic':
        user = self.config['user']
        password = ''
        if 'passwd' in self.config:
            password = self.config['passwd']
        body = {
            'auth': {
                'passwordCredentials': {
                    'username': user,
                    'password': password,
                },
            },
        }
    elif self.authtype == 'x509':
        body = {
            'auth': {
                'voms': True,
            },
        }
    if 'keystone' in self.config:
        body['auth']['tenantName'] = self.config['keystone']
    curl = self.curl
    self.clean()
    self.retry = True
    curl.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json'])
    curl.setopt(pycurl.URL, url + '/tokens')
    curl.setopt(pycurl.POST, 1)
    body = json.dumps(body, indent=4)
    curl.setopt(pycurl.POSTFIELDS, body)
    self.dprint('Keystone sending: %s' % body)
    body, header_list, http_status, content_type, header = self.perform()
    if self.verbose:
        self.dprint('Keystone result: %s' % http_status)
    #self.dprint(' headers: ' + str(header))
    #self.dprint(' body: ' + body)
    # BUG FIX: the original tested re.match(r'200 OK', ...) is not None,
    # which (a) can never match -- status lines start with 'HTTP/...'
    # (see request()) -- and (b) was inverted, so failures were silently
    # accepted.  Raise on anything that is not a 200 response.
    if http_status is None or re.match(r'HTTP/.* 200 OK', http_status) is None:
        raise occi.TransportError('Keystone failed: %s' % http_status)
    keystone = json.loads(body)
    if 'access' not in keystone or 'token' not in keystone['access'] or 'id' not in keystone['access']['token']:
        raise occi.TransportError("Can't get keystone token from: %s" % body)
    self.auth['token'] = keystone['access']['token']['id']
    if tenants and 'tenants' not in self.auth:
        # request tenants, if not already in the response
        if 'tenant' not in keystone['access']['token']:
            self.clean()
            self.retry = True
            curl.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json', 'X-Auth-Token: %s' % self.auth['token']])
            curl.setopt(pycurl.URL, url + '/tenants')
            self.dprint('Keystone exploring tenants')
            body, header_list, http_status, content_type, header = self.perform()
            #self.dprint(' ==> body: %s' % body)
            tenants = json.loads(body)
            #self.dprint(' ==> json: %s' % tenants['tenants'])
            self.auth['tenants'] = tenants['tenants']
        else:
            self.auth['tenants'] = [keystone['access']['token']['tenant']]
        self.auth['tenants_list'] = []
        for t in self.auth['tenants']:
            # a missing 'enabled' flag counts as enabled
            if 'enabled' not in t or t['enabled']:
                self.auth['tenants_list'] += [t['name']]
        self.dprint(' ==> tenants: %s' % ','.join(self.auth['tenants_list']))
def request(self, url=None, mimetype=None, headers=None):
    """Perform the already-configured HTTP request on the OCCI server.

    :param string url: URL (default: discovery URL '-/' from config)
    :param string mimetype: accepted mimetype (None = config value/'*/*')
    :param string headers[]: additional HTTP headers
    :return: [body lines, header, HTTP status, content type];
        on a 401 + Keystone hint the request authenticates instead and
        returns [None, None, status, content type] with self.retry True,
        signalling the caller to repeat the request.
    :rtype: [string[], string[], string, string]
    """
    # FIX: replace the shared mutable default argument ([]) with a
    # None sentinel and copy the caller's list before prepending
    headers = list(headers) if headers else []
    if url is None:
        url = self.config['url']
        if not url.endswith('/'):
            url += '/'
        url += '-/'
    if mimetype is None and 'mimetype' in self.config:
        mimetype = self.config['mimetype']
    curl = self.curl
    curl.setopt(pycurl.URL, url)
    # Keystone
    if 'token' in self.auth:
        headers = ['X-Auth-Token: %s' % self.auth['token']] + headers
    # Set appropriate mime type
    if mimetype:
        headers = ['Accept: %s' % mimetype] + headers
    else:
        headers = ['Accept: */*'] + headers
    # Set requested HTTP headers
    if headers:
        curl.setopt(pycurl.HTTPHEADER, headers)
    body, header_list, http_status, content_type, header = self.perform()
    self.dprint('First request status: %s' % http_status)
    # FIX: guard against a missing status line (http_status may be None)
    if http_status and re.match(r'HTTP/.* 401 .*', http_status) and 'www-authenticate' in header:
        self.dprint('WWW-Authenticate extension detected')
        m = re.match(r'Keystone uri=\'([^\']*)\'', header['www-authenticate'])
        if m and m.group(1):
            self.dprint('Keystone detected')
            self.auth_keystone(url=m.group(1))
            return [None, None, http_status, content_type]
    else:
        return [body.splitlines(), header_list, http_status, content_type]
def delete(self, url=None, mimetype=None, headers=[], body=None):
    """Send HTTP DELETE request
    :param string base_url: OCCI server URL (default: from config)
    :param string url: URL
    :param string mimetype: accepted mimetype (empty string='\*/\*')
    :param string headers[]: HTTP Headers
    :param string body: optional request body
    :return: [body, header, HTTP status, content type]
    :rtype: [string[], string[], string, string]
    """
    self.clean()
    curl = self.curl
    if body:
        curl.setopt(pycurl.POST, 1)
        curl.setopt(pycurl.POSTFIELDS, body)
        if self.verbose:
            # FIX: use print() calls -- the module advertises Python 3
            # support (sys.version_info checks) but Python 2 'print'
            # statements are a SyntaxError on Python 3
            print("[pOCCI.curl] === DELETE ===")
            print(body)
            print("[pOCCI.curl] ==============")
    curl.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
    l = self.request(url=url, mimetype=mimetype, headers=headers)
    if self.retry:
        # first attempt triggered Keystone authentication -- repeat once
        self.dprint('repeating the DELETE request...')
        self.clean()
        curl.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
        l = self.request(url=url, mimetype=mimetype, headers=headers)
    return l
def get(self, url=None, mimetype=None, headers=[]):
    """Send HTTP GET request

    :param string url: URL (default: from config)
    :param string mimetype: accepted mimetype (empty string='\*/\*')
    :param string headers[]: HTTP Headers
    :return: [body, header, HTTP status, content type]
    :rtype: [string[], string[], string, string]
    """
    self.clean()
    result = self.request(url=url, mimetype=mimetype, headers=headers)
    if not self.retry:
        return result
    # the first attempt only performed (Keystone) authentication;
    # run the real request with the fresh token
    self.dprint('repeating the GET request...')
    self.clean()
    return self.request(url=url, mimetype=mimetype, headers=headers)
def post(self, url=None, mimetype=None, headers=[], body='OK'):
    """Send HTTP POST request
    :param string base_url: OCCI server URL (default: from config)
    :param string url: URL
    :param string mimetype: accepted mimetype (empty string='\*/\*')
    :param string headers[]: HTTP Headers
    :param string post: HTTP Body
    :return: [body, header, HTTP status, content type]
    :rtype: [string[], string[], string, string]
    """
    self.clean()
    curl = self.curl
    curl.setopt(pycurl.POST, 1)
    curl.setopt(pycurl.POSTFIELDS, body)
    if self.verbose:
        # FIX: use print() calls -- Python 2 'print' statements are a
        # SyntaxError on Python 3, which this module otherwise supports
        print("[pOCCI.curl] ==== POST ====")
        print(body)
        print("[pOCCI.curl] ==============")
    l = self.request(url=url, mimetype=mimetype, headers=headers)
    if self.retry:
        # first attempt triggered Keystone authentication -- repeat once
        self.dprint('repeating the POST request...')
        self.clean()
        curl.setopt(pycurl.POST, 1)
        curl.setopt(pycurl.POSTFIELDS, body)
        l = self.request(url=url, mimetype=mimetype, headers=headers)
    return l
def put(self, url=None, mimetype=None, headers=[], body='OK'):
    """Send HTTP PUT request
    :param string base_url: OCCI server URL (default: from config)
    :param string url: URL
    :param string mimetype: accepted mimetype (empty string='\*/\*')
    :param string headers[]: HTTP Headers
    :param string post: HTTP Body
    :return: [body, header, HTTP status, content type]
    :rtype: [string[], string[], string, string]
    """
    # (docstring fixed: this sends PUT, not POST)
    self.clean()
    curl = self.curl
    curl.setopt(pycurl.CUSTOMREQUEST, 'PUT')
    curl.setopt(pycurl.POST, 1)
    curl.setopt(pycurl.POSTFIELDS, body)
    if self.verbose:
        # FIX: use print() calls -- Python 2 'print' statements are a
        # SyntaxError on Python 3, which this module otherwise supports
        print("[pOCCI.curl] ==== PUT ====")
        print(body)
        print("[pOCCI.curl] =============")
    l = self.request(url=url, mimetype=mimetype, headers=headers)
    if self.retry:
        # first attempt triggered Keystone authentication -- repeat once
        self.dprint('repeating the PUT request...')
        self.clean()
        curl.setopt(pycurl.CUSTOMREQUEST, 'PUT')
        curl.setopt(pycurl.POST, 1)
        curl.setopt(pycurl.POSTFIELDS, body)
        l = self.request(url=url, mimetype=mimetype, headers=headers)
    return l
|
{
"content_hash": "ad0404064e6223570872b575c0e33399",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 123,
"avg_line_length": 34.19799498746867,
"alnum_prop": 0.5419567607182117,
"repo_name": "CESNET/pOCCI",
"id": "1b9258ee2025c9450a0d65c53ce742de0f584333",
"size": "13645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pOCCI/transport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6785"
},
{
"name": "Python",
"bytes": "136411"
},
{
"name": "Roff",
"bytes": "4881"
},
{
"name": "Shell",
"bytes": "387"
}
],
"symlink_target": ""
}
|
from filter import FilterException
from common import PluginType
from yapsy.IPlugin import IPlugin
import logging
class CanonicalFilter(IPlugin):
    """Header filter that follows rel="canonical" Link headers.

    When a response carries 'Link: <url>; rel="canonical"', the
    transaction's primary URI is rewritten to the canonical URL.
    """

    category = PluginType.HEADER
    id = "canonical"

    def __init__(self):
        self.__log = logging.getLogger(__name__)
        self.__conf = None

    def setConf(self, conf):
        """Store the runtime configuration object."""
        self.__conf = conf

    def setJournal(self, journal):
        """Journal is not used by this filter."""
        pass

    def filter(self, transaction, headers):
        """Rewrite the transaction URI if a canonical Link header exists.

        Expected form: Link: <http://example.com/page.html>; rel="canonical"
        """
        if 'Link' not in headers:
            return
        parts = headers['Link'].split(';')
        # BUG FIX: a Link header without a ';'-separated parameter used
        # to raise IndexError on parts[1]; guard the length first
        if len(parts) < 2 or parts[1].strip() != 'rel="canonical"':
            return
        # strip the surrounding '<' and '>' from the URL part
        canonical = parts[0][1:-1]
        transaction.changePrimaryUri(canonical)
|
{
"content_hash": "0fde75c31c7e59310a8189e32460f2cd",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 71,
"avg_line_length": 27.035714285714285,
"alnum_prop": 0.5970937912813739,
"repo_name": "eghuro/crawlcheck",
"id": "a06e9562ff5fb8f682107b78e99a6914535fde5c",
"size": "757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/checker/plugin/headers/canonical.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113212"
}
],
"symlink_target": ""
}
|
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
# WARNING!
# This example measures the round-trip time by fetching the orderbook from an exchange
# In order to measure the speed of requests it disables the rate-limiting
# Disabling the rate-limiter is required to do an accurate measurement
# If you keep running without a rate limiter for a long time the exchange will ban you
# In a live production system always use either the built-in rate limiter or make your own
def main():
    """Measure fetch_order_book round-trip times against Binance.

    Performs ``num_iterations`` order-book fetches on one reused
    exchange instance, prints each latency and the average in ms.
    """
    # the exchange instance has to be reused
    # do not recreate the exchange before each call!
    exchange = ccxt.binance({
        # if you do not rate-limit your requests the exchange can ban you!
        'enableRateLimit': False,  # https://github.com/ccxt/ccxt/wiki/Manual#rate-limit
    })
    exchange.load_markets()  # https://github.com/ccxt/ccxt/wiki/Manual#loading-markets
    # exchange.verbose = True  # uncomment for debugging purposes if needed
    symbol = 'BTC/USDT'
    results = []
    num_iterations = 50
    # idiom: range(n) instead of range(0, n); the index itself is unused
    for _ in range(num_iterations):
        started = exchange.milliseconds()
        exchange.fetch_order_book(symbol)  # result intentionally discarded
        ended = exchange.milliseconds()
        elapsed = ended - started
        print(elapsed, 'ms')
        results.append(elapsed)
    pprint(results)
    rtt = int(sum(results) / len(results))
    print('Successfully tested', num_iterations, 'calls, the average round-trip time per call is', rtt, 'milliseconds')
# only run the benchmark when executed as a script, not on import
if __name__ == '__main__':
    main()
|
{
"content_hash": "53e76d63d609502a03f38bc743348593",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 119,
"avg_line_length": 30.203703703703702,
"alnum_prop": 0.6971183323114654,
"repo_name": "ccxt/ccxt",
"id": "13d51adffc99574cb5d8fa6eedfa466c045e2b6d",
"size": "1656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/py/fetch-order-book-rtt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
}
|
from twisted.trial import unittest
class AppTests(unittest.TestCase):
    """Placeholder application test suite."""

    def test_something(self):
        """Intentionally empty smoke test."""
|
{
"content_hash": "a04a2c2a6db1cd7362614a4fe6837fd8",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 34,
"avg_line_length": 19,
"alnum_prop": 0.7280701754385965,
"repo_name": "fiorix/cyclone",
"id": "fa5c841623261200f908d9c199bebda0b0737375",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyclone/tests/test_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2386"
},
{
"name": "HTML",
"bytes": "32384"
},
{
"name": "Makefile",
"bytes": "642"
},
{
"name": "Python",
"bytes": "518718"
},
{
"name": "Shell",
"bytes": "9517"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, g, url_for
from service import get_flashes
class APIBlueprint(Blueprint):
    """Blueprint that registers each route under versioned URL prefixes."""

    def route(self, rule, versions, **options):
        '''
        Works just like the original route decorator but registers the
        rule once per entry in *versions*, under a /v<version> prefix.
        Versioned endpoints get a _<version> suffix; the plain endpoint
        name references the highest version's URI.
        '''
        def decorator(f):
            endpoint = options.pop("endpoint", f.__name__)
            # rules are assumed to start with a '/'
            for version in versions:
                versioned_rule = '/v%s%s' % (version, rule)
                versioned_endpoint = '%s_%s' % (endpoint, version)
                self.add_url_rule(versioned_rule, versioned_endpoint,
                                  f, **options)
            # the highest versioned url prefix backs the vanilla endpoint
            top_rule = '/v%s%s' % (max(versions), rule)
            self.add_url_rule(top_rule, endpoint, f, **options)
            return f
        return decorator
# non-public blueprints

# 'admin' blueprint on the fixed 'nerv' subdomain, with its own
# template and static folders
admin = Blueprint(
    'admin', 'application', subdomain='nerv',
    template_folder='template',
    static_folder='static/admin',
    static_url_path='/static',
)

# 'manager' blueprint; '<application>' is a subdomain URL variable
manager = Blueprint(
    'manager', 'application', subdomain='<application>.manager',
    template_folder='template',
    static_folder='static/manager',
    static_url_path='/static',
)

# 'cms' shares the manager subdomain and static assets, mounted
# below the /cms URL prefix
cms = Blueprint(
    'cms', 'application', subdomain='<application>.manager',
    template_folder='template',
    static_folder='static/manager',
    static_url_path='/static',
    url_prefix='/cms',
)

# the per-application 'app' blueprint (no templates/statics of its own)
app = Blueprint(
    'app', 'application', subdomain='<application>.app'
)

# versioned API blueprint on the shared 'api' subdomain
api = APIBlueprint('api', 'application', subdomain='api')
# context processors
@admin.context_processor
def admin_context():
    """Expose flash retrieval and url_for to admin templates."""
    return dict(get_flashes=get_flashes, u=url_for)
@manager.context_processor
@cms.context_processor
def manager_context():
    """Expose request globals, the current account, flash retrieval
    and url_for to manager/cms templates."""
    return dict(g=g,
                account=g.account,
                get_flashes=get_flashes,
                u=url_for)
|
{
"content_hash": "c9e93807a48bea4aeac07787ad009ef4",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 75,
"avg_line_length": 28.11111111111111,
"alnum_prop": 0.5893719806763285,
"repo_name": "glennyonemitsu/MarkupHiveServer",
"id": "75c85c33b1059daa870c1adf9862a00849ec8f84",
"size": "2277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/application/blueprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "119550"
},
{
"name": "JavaScript",
"bytes": "558423"
},
{
"name": "PHP",
"bytes": "2278"
},
{
"name": "Python",
"bytes": "311722"
},
{
"name": "Ruby",
"bytes": "2284"
}
],
"symlink_target": ""
}
|
import logging
from struct import pack, unpack
import androguard.decompiler.dad.util as util
from androguard.decompiler.dad.instruction import (ArrayLengthExpression,
ArrayLoadExpression, ArrayStoreInstruction,
AssignExpression, BaseClass, BinaryCompExpression,
BinaryExpression, BinaryExpression2Addr,
BinaryExpressionLit, CastExpression,
CheckCastExpression, ConditionalExpression,
ConditionalZExpression, Constant,
FillArrayExpression, FilledArrayExpression,
InstanceExpression, InstanceInstruction,
InvokeInstruction, InvokeDirectInstruction,
InvokeRangeInstruction, InvokeStaticInstruction,
MonitorEnterExpression, MonitorExitExpression,
MoveExceptionExpression, MoveExpression,
MoveResultExpression, NewArrayExpression,
NewInstance, NopExpression, ThrowExpression,
Variable, ReturnInstruction, StaticExpression,
StaticInstruction, SwitchExpression, ThisParam,
UnaryExpression)
# module-level logger for the opcode translation routines
logger = logging.getLogger('dad.opcode_ins')
class Op(object):
    """String mnemonics for the operators used in the decompiler IR."""
    CMP = 'cmp'
    ADD = '+'
    SUB = '-'
    MUL = '*'
    DIV = '/'
    MOD = '%'
    AND = '&'
    OR = '|'
    XOR = '^'
    EQUAL = '=='
    NEQUAL = '!='
    GREATER = '>'
    LOWER = '<'
    GEQUAL = '>='
    LEQUAL = '<='
    NEG = '-'
    NOT = '~'
    INTSHL = '<<'  # '(%s << ( %s & 0x1f ))'
    INTSHR = '>>'  # '(%s >> ( %s & 0x1f ))'
    LONGSHL = '<<'  # '(%s << ( %s & 0x3f ))'
    LONGSHR = '>>'  # '(%s >> ( %s & 0x3f ))'
def get_variables(vmap, *variables):
    """Map register identifiers to their Variable objects, creating
    missing entries on first use.

    :param vmap: dict caching register -> Variable
    :param variables: one or more register identifiers
    :return: a single mapped value when one register is given,
        otherwise a list in argument order
    """
    res = []
    for register in variables:
        # PERF FIX: the original called vmap.setdefault(register,
        # Variable(register)), constructing a throwaway Variable on
        # every lookup even when the register was already mapped
        if register in vmap:
            res.append(vmap[register])
        else:
            res.append(vmap.setdefault(register, Variable(register)))
    if len(res) == 1:
        return res[0]
    return res
def assign_const(dest_reg, cst, vmap):
    """Assign the constant *cst* to register *dest_reg*."""
    dest = get_variables(vmap, dest_reg)
    return AssignExpression(dest, cst)


def assign_cmp(val_a, val_b, val_c, cmp_type, vmap):
    """vA := cmp(vB, vC) for operands of type *cmp_type*."""
    dest, lhs, rhs = get_variables(vmap, val_a, val_b, val_c)
    cmp_exp = BinaryCompExpression(Op.CMP, lhs, rhs, cmp_type)
    return AssignExpression(dest, cmp_exp)


def load_array_exp(val_a, val_b, val_c, ar_type, vmap):
    """vA := vB[vC] with element type *ar_type*."""
    dest, array, index = get_variables(vmap, val_a, val_b, val_c)
    return AssignExpression(dest, ArrayLoadExpression(array, index, ar_type))


def store_array_inst(val_a, val_b, val_c, ar_type, vmap):
    """vB[vC] := vA with element type *ar_type*."""
    source, array, index = get_variables(vmap, val_a, val_b, val_c)
    return ArrayStoreInstruction(source, array, index, ar_type)


def assign_cast_exp(val_a, val_b, val_op, op_type, vmap):
    """vA := (cast) vB using cast operator *val_op*."""
    dest, src = get_variables(vmap, val_a, val_b)
    return AssignExpression(dest, CastExpression(val_op, op_type, src))


def assign_binary_exp(ins, val_op, op_type, vmap):
    """vAA := vBB <op> vCC."""
    dest, lhs, rhs = get_variables(vmap, ins.AA, ins.BB, ins.CC)
    return AssignExpression(
        dest, BinaryExpression(val_op, lhs, rhs, op_type))


def assign_binary_2addr_exp(ins, val_op, op_type, vmap):
    """vA := vA <op> vB (two-address form)."""
    dest, rhs = get_variables(vmap, ins.A, ins.B)
    return AssignExpression(
        dest, BinaryExpression2Addr(val_op, dest, rhs, op_type))


def assign_lit(op_type, val_cst, val_a, val_b, vmap):
    """vA := vB <op> integer-literal."""
    literal = Constant(val_cst, 'I')
    dest, src = get_variables(vmap, val_a, val_b)
    return AssignExpression(
        dest, BinaryExpressionLit(op_type, src, literal))
# nop
def nop(ins, vmap):
    """No operation; translated to an empty expression."""
    return NopExpression()


# move vA, vB ( 4b, 4b )
def move(ins, vmap):
    """vA := vB."""
    logger.debug('Move %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return MoveExpression(reg_a, reg_b)


# move/from16 vAA, vBBBB ( 8b, 16b )
def movefrom16(ins, vmap):
    """vAA := vBBBB."""
    logger.debug('MoveFrom16 %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move/16 vAAAA, vBBBB ( 16b, 16b )
def move16(ins, vmap):
    """vAAAA := vBBBB."""
    logger.debug('Move16 %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move-wide vA, vB ( 4b, 4b )
def movewide(ins, vmap):
    """Wide (register-pair) move vA := vB."""
    logger.debug('MoveWide %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return MoveExpression(reg_a, reg_b)


# move-wide/from16 vAA, vBBBB ( 8b, 16b )
def movewidefrom16(ins, vmap):
    """Wide move vAA := vBBBB."""
    logger.debug('MoveWideFrom16 : %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move-wide/16 vAAAA, vBBBB ( 16b, 16b )
def movewide16(ins, vmap):
    """Wide move vAAAA := vBBBB."""
    logger.debug('MoveWide16 %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move-object vA, vB ( 4b, 4b )
def moveobject(ins, vmap):
    """Object-reference move vA := vB."""
    logger.debug('MoveObject %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return MoveExpression(reg_a, reg_b)


# move-object/from16 vAA, vBBBB ( 8b, 16b )
def moveobjectfrom16(ins, vmap):
    """Object-reference move vAA := vBBBB."""
    logger.debug('MoveObjectFrom16 : %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)


# move-object/16 vAAAA, vBBBB ( 16b, 16b )
def moveobject16(ins, vmap):
    """Object-reference move vAAAA := vBBBB."""
    logger.debug('MoveObject16 : %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.AAAA, ins.BBBB)
    return MoveExpression(reg_a, reg_b)
# move-result vAA ( 8b )
def moveresult(ins, vmap, ret):
    """vAA := *ret*, the result of the preceding invoke."""
    logger.debug('MoveResult : %s', ins.get_output())
    return MoveResultExpression(get_variables(vmap, ins.AA), ret)


# move-result-wide vAA ( 8b )
def moveresultwide(ins, vmap, ret):
    """Wide variant of move-result."""
    logger.debug('MoveResultWide : %s', ins.get_output())
    return MoveResultExpression(get_variables(vmap, ins.AA), ret)


# move-result-object vAA ( 8b )
def moveresultobject(ins, vmap, ret):
    """Object variant of move-result."""
    logger.debug('MoveResultObject : %s', ins.get_output())
    return MoveResultExpression(get_variables(vmap, ins.AA), ret)


# move-exception vAA ( 8b )
def moveexception(ins, vmap, _type):
    """vAA := the caught exception, of type *_type*."""
    logger.debug('MoveException : %s', ins.get_output())
    return MoveExceptionExpression(get_variables(vmap, ins.AA), _type)
# return-void
def returnvoid(ins, vmap):
    """Return with no value."""
    logger.debug('ReturnVoid')
    return ReturnInstruction(None)


# return vAA ( 8b )
def return_reg(ins, vmap):
    """Return the value held in vAA."""
    logger.debug('Return : %s', ins.get_output())
    return ReturnInstruction(get_variables(vmap, ins.AA))


# return-wide vAA ( 8b )
def returnwide(ins, vmap):
    """Return the wide value held in vAA."""
    logger.debug('ReturnWide : %s', ins.get_output())
    return ReturnInstruction(get_variables(vmap, ins.AA))


# return-object vAA ( 8b )
def returnobject(ins, vmap):
    """Return the object reference held in vAA."""
    logger.debug('ReturnObject : %s', ins.get_output())
    return ReturnInstruction(get_variables(vmap, ins.AA))
# const/4 vA, #+B ( 4b, 4b )
def const4(ins, vmap):
    """vA := 4-bit integer literal."""
    logger.debug('Const4 : %s', ins.get_output())
    cst = Constant(ins.B, 'I')
    return assign_const(ins.A, cst, vmap)


# const/16 vAA, #+BBBB ( 8b, 16b )
def const16(ins, vmap):
    """vAA := 16-bit integer literal."""
    logger.debug('Const16 : %s', ins.get_output())
    cst = Constant(ins.BBBB, 'I')
    return assign_const(ins.AA, cst, vmap)


# const vAA, #+BBBBBBBB ( 8b, 32b )
def const(ins, vmap):
    """vAA := 32-bit literal."""
    logger.debug('Const : %s', ins.get_output())
    # reinterpret the raw 32 bits as an IEEE float; the original
    # integer is kept as the Constant's third argument
    value = unpack("=f", pack("=i", ins.BBBBBBBB))[0]
    cst = Constant(value, 'I', ins.BBBBBBBB)
    return assign_const(ins.AA, cst, vmap)


# const/high16 vAA, #+BBBB0000 ( 8b, 16b )
def consthigh16(ins, vmap):
    """vAA := literal with BBBB placed in the high 16 bits."""
    logger.debug('ConstHigh16 : %s', ins.get_output())
    value = unpack('=f', pack('=i', ins.BBBB<<16))[0]
    cst = Constant(value, 'I', ins.BBBB<<16)
    return assign_const(ins.AA, cst, vmap)


# const-wide/16 vAA, #+BBBB ( 8b, 16b )
def constwide16(ins, vmap):
    """vAA := 16-bit wide (64-bit register pair) literal."""
    logger.debug('ConstWide16 : %s', ins.get_output())
    # NOTE(review): packing and unpacking with '=d' on both sides merely
    # converts the int to float, unlike constwide's '=q' -> '=d' bit
    # reinterpretation -- confirm this asymmetry is intended
    value = unpack('=d', pack('=d', ins.BBBB))[0]
    cst = Constant(value, 'J', ins.BBBB)
    return assign_const(ins.AA, cst, vmap)


# const-wide/32 vAA, #+BBBBBBBB ( 8b, 32b )
def constwide32(ins, vmap):
    """vAA := 32-bit wide literal."""
    logger.debug('ConstWide32 : %s', ins.get_output())
    # NOTE(review): same '=d'/'=d' round-trip as constwide16 -- confirm
    value = unpack('=d', pack('=d', ins.BBBBBBBB))[0]
    cst = Constant(value, 'J', ins.BBBBBBBB)
    return assign_const(ins.AA, cst, vmap)


# const-wide vAA, #+BBBBBBBBBBBBBBBB ( 8b, 64b )
def constwide(ins, vmap):
    """vAA := 64-bit literal, bits reinterpreted as a double."""
    logger.debug('ConstWide : %s', ins.get_output())
    value = unpack('=d', pack('=q', ins.BBBBBBBBBBBBBBBB))[0]
    cst = Constant(value, 'D', ins.BBBBBBBBBBBBBBBB)
    return assign_const(ins.AA, cst, vmap)


# const-wide/high16 vAA, #+BBBB000000000000 ( 8b, 16b )
def constwidehigh16(ins, vmap):
    """vAA := wide literal with BBBB in the top 16 bits."""
    logger.debug('ConstWideHigh16 : %s', ins.get_output())
    # NOTE(review): concatenating a str literal with pack()'s result is
    # Python-2 only; on Python 3 this raises TypeError -- confirm the
    # module is py2-only before relying on this path
    value = unpack('=d',
                   '\x00\x00\x00\x00\x00\x00' + pack('=h', ins.BBBB))[0]
    cst = Constant(value, 'D', ins.BBBB)
    return assign_const(ins.AA, cst, vmap)


# const-string vAA ( 8b )
def conststring(ins, vmap):
    """vAA := string constant."""
    logger.debug('ConstString : %s', ins.get_output())
    cst = Constant(ins.get_raw_string(), 'Ljava/lang/String;')
    return assign_const(ins.AA, cst, vmap)


# const-string/jumbo vAA ( 8b )
def conststringjumbo(ins, vmap):
    """vAA := string constant (jumbo string index)."""
    logger.debug('ConstStringJumbo %s', ins.get_output())
    cst = Constant(ins.get_raw_string(), 'Ljava/lang/String;')
    return assign_const(ins.AA, cst, vmap)


# const-class vAA, type@BBBB ( 8b )
def constclass(ins, vmap):
    """vAA := java.lang.Class constant for the referenced type."""
    logger.debug('ConstClass : %s', ins.get_output())
    cst = Constant(util.get_type(ins.get_string()), 'Ljava/lang/Class;',
                   descriptor=ins.get_string())
    return assign_const(ins.AA, cst, vmap)
# monitor-enter vAA ( 8b )
def monitorenter(ins, vmap):
    """Acquire the monitor of the object in vAA."""
    logger.debug('MonitorEnter : %s', ins.get_output())
    return MonitorEnterExpression(get_variables(vmap, ins.AA))


# monitor-exit vAA ( 8b )
def monitorexit(ins, vmap):
    """Release the monitor of the object in vAA."""
    logger.debug('MonitorExit : %s', ins.get_output())
    a = get_variables(vmap, ins.AA)
    return MonitorExitExpression(a)


# check-cast vAA ( 8b )
def checkcast(ins, vmap):
    """vAA := (T) vAA -- checked cast to the referenced type."""
    logger.debug('CheckCast: %s', ins.get_output())
    cast_type = util.get_type(ins.get_translated_kind())
    cast_var = get_variables(vmap, ins.AA)
    cast_expr = CheckCastExpression(cast_var, cast_type,
                                    descriptor=ins.get_translated_kind())
    return AssignExpression(cast_var, cast_expr)


# instance-of vA, vB ( 4b, 4b )
def instanceof(ins, vmap):
    """vA := vB instanceof <type> (boolean result)."""
    logger.debug('InstanceOf : %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    reg_c = BaseClass(util.get_type(ins.get_translated_kind()),
                      descriptor=ins.get_translated_kind())
    exp = BinaryExpression('instanceof', reg_b, reg_c, 'Z')
    return AssignExpression(reg_a, exp)


# array-length vA, vB ( 4b, 4b )
def arraylength(ins, vmap):
    """vA := vB.length."""
    logger.debug('ArrayLength: %s', ins.get_output())
    reg_a, reg_b = get_variables(vmap, ins.A, ins.B)
    return AssignExpression(reg_a, ArrayLengthExpression(reg_b))


# new-instance vAA ( 8b )
def newinstance(ins, vmap):
    """vAA := new <type> (allocation only)."""
    logger.debug('NewInstance : %s', ins.get_output())
    reg_a = get_variables(vmap, ins.AA)
    ins_type = ins.cm.get_type(ins.BBBB)
    return AssignExpression(reg_a, NewInstance(ins_type))


# new-array vA, vB ( 8b, size )
def newarray(ins, vmap):
    """vA := new <type>[vB]."""
    logger.debug('NewArray : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = NewArrayExpression(b, ins.cm.get_type(ins.CCCC))
    return AssignExpression(a, exp)


# filled-new-array {vD, vE, vF, vG, vA} ( 4b each )
def fillednewarray(ins, vmap, ret):
    """*ret* := new array initialized from up to 5 registers (count ins.A)."""
    logger.debug('FilledNewArray : %s', ins.get_output())
    c, d, e, f, g = get_variables(vmap, ins.C, ins.D,
                                  ins.E, ins.F, ins.G)
    array_type = ins.cm.get_type(ins.BBBB)
    exp = FilledArrayExpression(ins.A, array_type, [c, d, e, f, g][:ins.A])
    return AssignExpression(ret, exp)


# filled-new-array/range {vCCCC..vNNNN} ( 16b )
def fillednewarrayrange(ins, vmap, ret):
    """*ret* := new array from the register range vCCCC..vNNNN."""
    logger.debug('FilledNewArrayRange : %s', ins.get_output())
    a, c, n = get_variables(vmap, ins.AA, ins.CCCC, ins.NNNN)
    array_type = ins.cm.get_type(ins.BBBB)
    # NOTE(review): only the first and last registers of the range are
    # forwarded -- presumably a known simplification; confirm
    exp = FilledArrayExpression(a, array_type, [c, n])
    return AssignExpression(ret, exp)


# fill-array-data vAA, +BBBBBBBB ( 8b, 32b )
def fillarraydata(ins, vmap, value):
    """Fill the array in vAA with the payload *value*."""
    logger.debug('FillArrayData : %s', ins.get_output())
    return FillArrayExpression(get_variables(vmap, ins.AA), value)


# fill-array-data-payload vAA, +BBBBBBBB ( 8b, 32b )
def fillarraydatapayload(ins, vmap):
    """Payload pseudo-instruction; yields an empty fill expression."""
    logger.debug('FillArrayDataPayload : %s', ins.get_output())
    # NOTE(review): called with a single argument while fillarraydata
    # passes two -- confirm FillArrayExpression's signature allows this
    return FillArrayExpression(None)


# throw vAA ( 8b )
def throw(ins, vmap):
    """throw vAA."""
    logger.debug('Throw : %s', ins.get_output())
    return ThrowExpression(get_variables(vmap, ins.AA))
# goto +AA ( 8b )
def goto(ins, vmap):
    """Unconditional branch: no expression is produced here (branch
    targets are presumably encoded in the basic-block graph -- confirm)."""
    return NopExpression()


# goto/16 +AAAA ( 16b )
def goto16(ins, vmap):
    """16-bit variant of goto; see goto()."""
    return NopExpression()


# goto/32 +AAAAAAAA ( 32b )
def goto32(ins, vmap):
    """32-bit variant of goto; see goto()."""
    return NopExpression()


# packed-switch vAA, +BBBBBBBB ( reg to test, 32b )
def packedswitch(ins, vmap):
    """Switch on vAA with a packed (contiguous-key) case table."""
    logger.debug('PackedSwitch : %s', ins.get_output())
    reg_a = get_variables(vmap, ins.AA)
    return SwitchExpression(reg_a, ins.BBBBBBBB)


# sparse-switch vAA, +BBBBBBBB ( reg to test, 32b )
def sparseswitch(ins, vmap):
    """Switch on vAA with a sparse case table."""
    logger.debug('SparseSwitch : %s', ins.get_output())
    reg_a = get_variables(vmap, ins.AA)
    return SwitchExpression(reg_a, ins.BBBBBBBB)
# cmpl-float vAA, vBB, vCC ( 8b, 8b, 8b )
def cmplfloat(ins, vmap):
    """vAA := cmp(vBB, vCC) on floats."""
    logger.debug('CmpglFloat : %s', ins.get_output())
    # NOTE(review): the cmpl/cmpg NaN-bias distinction is not encoded --
    # both variants build the same expression; confirm this is intended
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'F', vmap)


# cmpg-float vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpgfloat(ins, vmap):
    """vAA := cmp(vBB, vCC) on floats (same expression as cmplfloat)."""
    logger.debug('CmpgFloat : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'F', vmap)


# cmpl-double vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpldouble(ins, vmap):
    """vAA := cmp(vBB, vCC) on doubles."""
    logger.debug('CmplDouble : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'D', vmap)


# cmpg-double vAA, vBB, vCC ( 8b, 8b, 8b )
def cmpgdouble(ins, vmap):
    """vAA := cmp(vBB, vCC) on doubles (same expression as cmpldouble)."""
    logger.debug('CmpgDouble : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'D', vmap)


# cmp-long vAA, vBB, vCC ( 8b, 8b, 8b )
def cmplong(ins, vmap):
    """vAA := cmp(vBB, vCC) on longs."""
    logger.debug('CmpLong : %s', ins.get_output())
    return assign_cmp(ins.AA, ins.BB, ins.CC, 'J', vmap)
# if-eq vA, vB, +CCCC ( 4b, 4b, 16b )
def ifeq(ins, vmap):
    """Branch condition vA == vB."""
    logger.debug('IfEq : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.EQUAL, a, b)


# if-ne vA, vB, +CCCC ( 4b, 4b, 16b )
def ifne(ins, vmap):
    """Branch condition vA != vB."""
    logger.debug('IfNe : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.NEQUAL, a, b)


# if-lt vA, vB, +CCCC ( 4b, 4b, 16b )
def iflt(ins, vmap):
    """Branch condition vA < vB."""
    logger.debug('IfLt : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.LOWER, a, b)


# if-ge vA, vB, +CCCC ( 4b, 4b, 16b )
def ifge(ins, vmap):
    """Branch condition vA >= vB."""
    logger.debug('IfGe : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.GEQUAL, a, b)


# if-gt vA, vB, +CCCC ( 4b, 4b, 16b )
def ifgt(ins, vmap):
    """Branch condition vA > vB."""
    logger.debug('IfGt : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.GREATER, a, b)


# if-le vA, vB, +CCCC ( 4b, 4b, 16b )
def ifle(ins, vmap):
    """Branch condition vA <= vB."""
    logger.debug('IfLe : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    return ConditionalExpression(Op.LEQUAL, a, b)


# if-eqz vAA, +BBBB ( 8b, 16b )
def ifeqz(ins, vmap):
    """Branch condition vAA == 0."""
    logger.debug('IfEqz : %s', ins.get_output())
    return ConditionalZExpression(Op.EQUAL, get_variables(vmap, ins.AA))


# if-nez vAA, +BBBB ( 8b, 16b )
def ifnez(ins, vmap):
    """Branch condition vAA != 0."""
    logger.debug('IfNez : %s', ins.get_output())
    return ConditionalZExpression(Op.NEQUAL, get_variables(vmap, ins.AA))


# if-ltz vAA, +BBBB ( 8b, 16b )
def ifltz(ins, vmap):
    """Branch condition vAA < 0."""
    logger.debug('IfLtz : %s', ins.get_output())
    return ConditionalZExpression(Op.LOWER, get_variables(vmap, ins.AA))


# if-gez vAA, +BBBB ( 8b, 16b )
def ifgez(ins, vmap):
    """Branch condition vAA >= 0."""
    logger.debug('IfGez : %s', ins.get_output())
    return ConditionalZExpression(Op.GEQUAL, get_variables(vmap, ins.AA))


# if-gtz vAA, +BBBB ( 8b, 16b )
def ifgtz(ins, vmap):
    """Branch condition vAA > 0."""
    logger.debug('IfGtz : %s', ins.get_output())
    return ConditionalZExpression(Op.GREATER, get_variables(vmap, ins.AA))


# if-lez vAA, +BBBB (8b, 16b )
def iflez(ins, vmap):
    """Branch condition vAA <= 0."""
    logger.debug('IfLez : %s', ins.get_output())
    return ConditionalZExpression(Op.LEQUAL, get_variables(vmap, ins.AA))
#TODO: check type for all aget
# aget vAA, vBB, vCC ( 8b, 8b, 8b )
def aget(ins, vmap):
    """vAA := vBB[vCC] (untyped element load)."""
    logger.debug('AGet : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, None, vmap)


# aget-wide vAA, vBB, vCC ( 8b, 8b, 8b )
def agetwide(ins, vmap):
    """vAA := vBB[vCC], wide element."""
    logger.debug('AGetWide : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'W', vmap)


# aget-object vAA, vBB, vCC ( 8b, 8b, 8b )
def agetobject(ins, vmap):
    """vAA := vBB[vCC], object element."""
    logger.debug('AGetObject : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'O', vmap)


# aget-boolean vAA, vBB, vCC ( 8b, 8b, 8b )
def agetboolean(ins, vmap):
    """vAA := vBB[vCC], boolean element."""
    logger.debug('AGetBoolean : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'Z', vmap)


# aget-byte vAA, vBB, vCC ( 8b, 8b, 8b )
def agetbyte(ins, vmap):
    """vAA := vBB[vCC], byte element."""
    logger.debug('AGetByte : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'B', vmap)


# aget-char vAA, vBB, vCC ( 8b, 8b, 8b )
def agetchar(ins, vmap):
    """vAA := vBB[vCC], char element."""
    logger.debug('AGetChar : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'C', vmap)


# aget-short vAA, vBB, vCC ( 8b, 8b, 8b )
def agetshort(ins, vmap):
    """vAA := vBB[vCC], short element."""
    logger.debug('AGetShort : %s', ins.get_output())
    return load_array_exp(ins.AA, ins.BB, ins.CC, 'S', vmap)
# aput vAA, vBB, vCC
def aput(ins, vmap):
    """vBB[vCC] := vAA (untyped element store)."""
    logger.debug('APut : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, None, vmap)


# aput-wide vAA, vBB, vCC ( 8b, 8b, 8b )
def aputwide(ins, vmap):
    """vBB[vCC] := vAA, wide element."""
    logger.debug('APutWide : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'W', vmap)


# aput-object vAA, vBB, vCC ( 8b, 8b, 8b )
def aputobject(ins, vmap):
    """vBB[vCC] := vAA, object element."""
    logger.debug('APutObject : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'O', vmap)


# aput-boolean vAA, vBB, vCC ( 8b, 8b, 8b )
def aputboolean(ins, vmap):
    """vBB[vCC] := vAA, boolean element."""
    logger.debug('APutBoolean : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'Z', vmap)


# aput-byte vAA, vBB, vCC ( 8b, 8b, 8b )
def aputbyte(ins, vmap):
    """vBB[vCC] := vAA, byte element."""
    logger.debug('APutByte : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'B', vmap)


# aput-char vAA, vBB, vCC ( 8b, 8b, 8b )
def aputchar(ins, vmap):
    """vBB[vCC] := vAA, char element."""
    logger.debug('APutChar : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'C', vmap)


# aput-short vAA, vBB, vCC ( 8b, 8b, 8b )
def aputshort(ins, vmap):
    """vBB[vCC] := vAA, short element."""
    logger.debug('APutShort : %s', ins.get_output())
    return store_array_inst(ins.AA, ins.BB, ins.CC, 'S', vmap)
# iget vA, vB ( 4b, 4b )
def iget(ins, vmap):
    """vA := vB.<field CCCC> (instance field load; the field's type
    comes from the resolved descriptor, so all iget-* share one body)."""
    logger.debug('IGet : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-wide vA, vB ( 4b, 4b )
def igetwide(ins, vmap):
    """Wide variant of iget."""
    logger.debug('IGetWide : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-object vA, vB ( 4b, 4b )
def igetobject(ins, vmap):
    """Object variant of iget."""
    logger.debug('IGetObject : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-boolean vA, vB ( 4b, 4b )
def igetboolean(ins, vmap):
    """Boolean variant of iget."""
    logger.debug('IGetBoolean : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-byte vA, vB ( 4b, 4b )
def igetbyte(ins, vmap):
    """Byte variant of iget."""
    logger.debug('IGetByte : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-char vA, vB ( 4b, 4b )
def igetchar(ins, vmap):
    """Char variant of iget."""
    logger.debug('IGetChar : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)


# iget-short vA, vB ( 4b, 4b )
def igetshort(ins, vmap):
    """Short variant of iget."""
    logger.debug('IGetShort : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = InstanceExpression(b, klass, ftype, name)
    return AssignExpression(a, exp)
# --- Instance field writes (iput-*) -------------------------------------
# Mirror of the iget-* family: resolve the field from ins.CCCC, then emit
# an InstanceInstruction that stores register A into the named field of
# the object held in register B.  All variants share one body.
# iput vA, vB ( 4b, 4b )
def iput(ins, vmap):
    logger.debug('IPut %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)
# iput-wide vA, vB ( 4b, 4b )
def iputwide(ins, vmap):
    logger.debug('IPutWide %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)
# iput-object vA, vB ( 4b, 4b )
def iputobject(ins, vmap):
    logger.debug('IPutObject %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)
# iput-boolean vA, vB ( 4b, 4b )
def iputboolean(ins, vmap):
    logger.debug('IPutBoolean %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)
# iput-byte vA, vB ( 4b, 4b )
def iputbyte(ins, vmap):
    logger.debug('IPutByte %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)
# iput-char vA, vB ( 4b, 4b )
def iputchar(ins, vmap):
    logger.debug('IPutChar %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)
# iput-short vA, vB ( 4b, 4b )
def iputshort(ins, vmap):
    logger.debug('IPutShort %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.CCCC)
    a, b = get_variables(vmap, ins.A, ins.B)
    return InstanceInstruction(a, b, klass, atype, name)
# --- Static field reads (sget-*) ----------------------------------------
# The field reference lives in ins.BBBB here (vs ins.CCCC for iget-*).
# Each variant assigns a StaticExpression for the resolved field to
# register AA; all bodies are identical apart from the log label.
# sget vAA ( 8b )
def sget(ins, vmap):
    logger.debug('SGet : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)
# sget-wide vAA ( 8b )
def sgetwide(ins, vmap):
    logger.debug('SGetWide : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)
# sget-object vAA ( 8b )
def sgetobject(ins, vmap):
    logger.debug('SGetObject : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)
# sget-boolean vAA ( 8b )
def sgetboolean(ins, vmap):
    logger.debug('SGetBoolean : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)
# sget-byte vAA ( 8b )
def sgetbyte(ins, vmap):
    logger.debug('SGetByte : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)
# sget-char vAA ( 8b )
def sgetchar(ins, vmap):
    logger.debug('SGetChar : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)
# sget-short vAA ( 8b )
def sgetshort(ins, vmap):
    logger.debug('SGetShort : %s', ins.get_output())
    klass, atype, name = ins.cm.get_field(ins.BBBB)
    exp = StaticExpression(klass, atype, name)
    a = get_variables(vmap, ins.AA)
    return AssignExpression(a, exp)
# --- Static field writes (sput-*) ---------------------------------------
# Mirror of sget-*: resolve the field from ins.BBBB and emit a
# StaticInstruction storing register AA into it.
# sput vAA ( 8b )
def sput(ins, vmap):
    logger.debug('SPut : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)
# sput-wide vAA ( 8b )
def sputwide(ins, vmap):
    logger.debug('SPutWide : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)
# sput-object vAA ( 8b )
def sputobject(ins, vmap):
    logger.debug('SPutObject : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)
# sput-boolean vAA ( 8b )
def sputboolean(ins, vmap):
    logger.debug('SPutBoolean : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)
# sput-byte vAA ( 8b )  (comment fixed: previously mislabelled "sput-wide")
def sputbyte(ins, vmap):
    logger.debug('SPutByte : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)
# sput-char vAA ( 8b )
def sputchar(ins, vmap):
    logger.debug('SPutChar : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)
# sput-short vAA ( 8b )
def sputshort(ins, vmap):
    logger.debug('SPutShort : %s', ins.get_output())
    klass, ftype, name = ins.cm.get_field(ins.BBBB)
    a = get_variables(vmap, ins.AA)
    return StaticInstruction(a, klass, ftype, name)
def get_args(vmap, param_type, largs):
    """Map declared parameter types onto the raw argument registers.

    Wide types (long/double) occupy two registers, so the register index
    advances by util.get_type_size() per parameter.  Returns a list of
    variables; a single-parameter call is wrapped in a list so callers
    always receive a sequence.  On a malformed register list (fewer
    registers than parameters) a warning is logged and [] is returned.
    """
    if len(param_type) > len(largs):
        logger.warning('len(param_type) > len(largs) !')
        return []
    regs = []
    reg_idx = 0
    for ptype in param_type:
        regs.append(largs[reg_idx])
        reg_idx += util.get_type_size(ptype)
    variables = get_variables(vmap, *regs)
    # get_variables returns a bare variable for a single register; keep
    # the return shape uniform for callers.
    return [variables] if len(param_type) == 1 else variables
# --- invoke-* opcodes ---------------------------------------------------
# Common shape: resolve the method reference from ins.BBBB, split its
# prototype into parameter types and return type, map the argument
# registers to variables with get_args(), and wrap the call in an
# Invoke*Instruction assigned to a fresh variable from ret.new() --
# unless the method returns void ('V'), in which case no result variable
# is allocated (with the constructor-like exceptions noted below).
# invoke-virtual {vD, vE, vF, vG, vA} ( 4b each )
def invokevirtual(ins, vmap, ret):
    logger.debug('InvokeVirtual : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    # Register C holds the receiver; D..G hold the arguments.
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    c = get_variables(vmap, ins.C)
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeInstruction(cls_name, name, c, ret_type,
                            param_type, args, method.get_triple())
    return AssignExpression(returned, exp)
# invoke-super {vD, vE, vF, vG, vA} ( 4b each )
def invokesuper(ins, vmap, ret):
    logger.debug('InvokeSuper : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    # The receiver is replaced by a synthetic 'super' pseudo-class.
    superclass = BaseClass('super')
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeInstruction(cls_name, name, superclass, ret_type,
                            param_type, args, method.get_triple())
    return AssignExpression(returned, exp)
# invoke-direct {vD, vE, vF, vG, vA} ( 4b each )
def invokedirect(ins, vmap, ret):
    logger.debug('InvokeDirect : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    base = get_variables(vmap, ins.C)
    if ret_type == 'V':
        if isinstance(base, ThisParam):
            returned = None
        else:
            # Void call on a non-'this' receiver: record the receiver as
            # the value produced by this call (constructor idiom).
            # NOTE(review): presumably so subsequent uses see the newly
            # constructed object -- confirm against ret's set_to handling.
            returned = base
            ret.set_to(base)
    else:
        returned = ret.new()
    exp = InvokeDirectInstruction(cls_name, name, base, ret_type,
                                  param_type, args, method.get_triple())
    return AssignExpression(returned, exp)
# invoke-static {vD, vE, vF, vG, vA} ( 4b each )
def invokestatic(ins, vmap, ret):
    logger.debug('InvokeStatic : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    # No receiver: register C is an ordinary argument here.
    largs = [ins.C, ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    base = BaseClass(cls_name, descriptor=method.get_class_name())
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeStaticInstruction(cls_name, name, base, ret_type,
                                  param_type, args, method.get_triple())
    return AssignExpression(returned, exp)
# invoke-interface {vD, vE, vF, vG, vA} ( 4b each )
def invokeinterface(ins, vmap, ret):
    logger.debug('InvokeInterface : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = [ins.D, ins.E, ins.F, ins.G]
    args = get_args(vmap, param_type, largs)
    c = get_variables(vmap, ins.C)
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeInstruction(cls_name, name, c, ret_type,
                            param_type, args, method.get_triple())
    return AssignExpression(returned, exp)
# invoke-virtual/range {vCCCC..vNNNN} ( 16b each )
def invokevirtualrange(ins, vmap, ret):
    logger.debug('InvokeVirtualRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    # Range form: a contiguous register window; the first register is
    # the receiver, the rest are arguments.
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    this_arg = get_variables(vmap, largs[0])
    args = get_args(vmap, param_type, largs[1:])
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeRangeInstruction(cls_name, name, ret_type,
                                 param_type, [this_arg] + args,
                                 method.get_triple())
    return AssignExpression(returned, exp)
# invoke-super/range {vCCCC..vNNNN} ( 16b each )
def invokesuperrange(ins, vmap, ret):
    logger.debug('InvokeSuperRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    args = get_args(vmap, param_type, largs[1:])
    base = get_variables(vmap, ins.CCCC)
    if ret_type != 'V':
        returned = ret.new()
    else:
        # Void super call: propagate the receiver as the result (same
        # constructor idiom as invokedirect).
        returned = base
        ret.set_to(base)
    superclass = BaseClass('super')
    exp = InvokeRangeInstruction(cls_name, name, ret_type,
                                 param_type, [superclass] + args,
                                 method.get_triple())
    return AssignExpression(returned, exp)
# invoke-direct/range {vCCCC..vNNNN} ( 16b each )
def invokedirectrange(ins, vmap, ret):
    logger.debug('InvokeDirectRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    # NOTE: largs[0] == ins.CCCC, so this_arg and base below resolve the
    # very same register.
    this_arg = get_variables(vmap, largs[0])
    args = get_args(vmap, param_type, largs[1:])
    base = get_variables(vmap, ins.CCCC)
    if ret_type != 'V':
        returned = ret.new()
    else:
        returned = base
        ret.set_to(base)
    exp = InvokeRangeInstruction(cls_name, name, ret_type,
                                 param_type, [this_arg] + args,
                                 method.get_triple())
    return AssignExpression(returned, exp)
# invoke-static/range {vCCCC..vNNNN} ( 16b each )
def invokestaticrange(ins, vmap, ret):
    logger.debug('InvokeStaticRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    # Static range call: the whole window is arguments (no receiver).
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    args = get_args(vmap, param_type, largs)
    base = BaseClass(cls_name, descriptor=method.get_class_name())
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeStaticInstruction(cls_name, name, base, ret_type,
                                  param_type, args, method.get_triple())
    return AssignExpression(returned, exp)
# invoke-interface/range {vCCCC..vNNNN} ( 16b each )
def invokeinterfacerange(ins, vmap, ret):
    logger.debug('InvokeInterfaceRange : %s', ins.get_output())
    method = ins.cm.get_method_ref(ins.BBBB)
    cls_name = util.get_type(method.get_class_name())
    name = method.get_name()
    param_type, ret_type = method.get_proto()
    param_type = util.get_params_type(param_type)
    largs = list(range(ins.CCCC, ins.NNNN + 1))
    base_arg = get_variables(vmap, largs[0])
    args = get_args(vmap, param_type, largs[1:])
    returned = None if ret_type == 'V' else ret.new()
    exp = InvokeRangeInstruction(cls_name, name, ret_type,
                                 param_type, [base_arg] + args,
                                 method.get_triple())
    return AssignExpression(returned, exp)
# --- Unary ops and primitive conversions --------------------------------
# neg/not wrap register B in a UnaryExpression tagged with a JVM-style
# type descriptor ('I'=int, 'J'=long, 'F'=float, 'D'=double) and assign
# it to register A.  The x-to-y conversion opcodes emit an explicit
# C-style cast through assign_cast_exp.
# neg-int vA, vB ( 4b, 4b )
def negint(ins, vmap):
    logger.debug('NegInt : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NEG, b, 'I')
    return AssignExpression(a, exp)
# not-int vA, vB ( 4b, 4b )
def notint(ins, vmap):
    logger.debug('NotInt : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NOT, b, 'I')
    return AssignExpression(a, exp)
# neg-long vA, vB ( 4b, 4b )
def neglong(ins, vmap):
    logger.debug('NegLong : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NEG, b, 'J')
    return AssignExpression(a, exp)
# not-long vA, vB ( 4b, 4b )
def notlong(ins, vmap):
    logger.debug('NotLong : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NOT, b, 'J')
    return AssignExpression(a, exp)
# neg-float vA, vB ( 4b, 4b )
def negfloat(ins, vmap):
    logger.debug('NegFloat : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NEG, b, 'F')
    return AssignExpression(a, exp)
# neg-double vA, vB ( 4b, 4b )
def negdouble(ins, vmap):
    logger.debug('NegDouble : %s', ins.get_output())
    a, b = get_variables(vmap, ins.A, ins.B)
    exp = UnaryExpression(Op.NEG, b, 'D')
    return AssignExpression(a, exp)
# int-to-long vA, vB ( 4b, 4b )
def inttolong(ins, vmap):
    logger.debug('IntToLong : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)
# int-to-float vA, vB ( 4b, 4b )
def inttofloat(ins, vmap):
    logger.debug('IntToFloat : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)
# int-to-double vA, vB ( 4b, 4b )
def inttodouble(ins, vmap):
    logger.debug('IntToDouble : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)
# long-to-int vA, vB ( 4b, 4b )
def longtoint(ins, vmap):
    logger.debug('LongToInt : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)
# long-to-float vA, vB ( 4b, 4b )
def longtofloat(ins, vmap):
    logger.debug('LongToFloat : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)
# long-to-double vA, vB ( 4b, 4b )
def longtodouble(ins, vmap):
    logger.debug('LongToDouble : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)
# float-to-int vA, vB ( 4b, 4b )
def floattoint(ins, vmap):
    logger.debug('FloatToInt : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)
# float-to-long vA, vB ( 4b, 4b )
def floattolong(ins, vmap):
    logger.debug('FloatToLong : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)
# float-to-double vA, vB ( 4b, 4b )
def floattodouble(ins, vmap):
    logger.debug('FloatToDouble : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(double)', 'D', vmap)
# double-to-int vA, vB ( 4b, 4b )
def doubletoint(ins, vmap):
    logger.debug('DoubleToInt : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(int)', 'I', vmap)
# double-to-long vA, vB ( 4b, 4b )
def doubletolong(ins, vmap):
    logger.debug('DoubleToLong : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(long)', 'J', vmap)
# double-to-float vA, vB ( 4b, 4b )
def doubletofloat(ins, vmap):
    logger.debug('DoubleToFloat : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(float)', 'F', vmap)
# int-to-byte vA, vB ( 4b, 4b )
def inttobyte(ins, vmap):
    logger.debug('IntToByte : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(byte)', 'B', vmap)
# int-to-char vA, vB ( 4b, 4b )
def inttochar(ins, vmap):
    logger.debug('IntToChar : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(char)', 'C', vmap)
# int-to-short vA, vB ( 4b, 4b )
def inttoshort(ins, vmap):
    logger.debug('IntToShort : %s', ins.get_output())
    return assign_cast_exp(ins.A, ins.B, '(short)', 'S', vmap)
# --- Binary ops, three-register form ------------------------------------
# All handlers delegate to assign_binary_exp with the matching Op and a
# type descriptor ('I'=int, 'J'=long, 'F'=float, 'D'=double).  rem-*
# maps to Op.MOD.
# add-int vAA, vBB, vCC ( 8b, 8b, 8b )
def addint(ins, vmap):
    logger.debug('AddInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.ADD, 'I', vmap)
# sub-int vAA, vBB, vCC ( 8b, 8b, 8b )
def subint(ins, vmap):
    logger.debug('SubInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.SUB, 'I', vmap)
# mul-int vAA, vBB, vCC ( 8b, 8b, 8b )
def mulint(ins, vmap):
    logger.debug('MulInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MUL, 'I', vmap)
# div-int vAA, vBB, vCC ( 8b, 8b, 8b )
def divint(ins, vmap):
    logger.debug('DivInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.DIV, 'I', vmap)
# rem-int vAA, vBB, vCC ( 8b, 8b, 8b )
def remint(ins, vmap):
    logger.debug('RemInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MOD, 'I', vmap)
# and-int vAA, vBB, vCC ( 8b, 8b, 8b )
def andint(ins, vmap):
    logger.debug('AndInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.AND, 'I', vmap)
# or-int vAA, vBB, vCC ( 8b, 8b, 8b )
def orint(ins, vmap):
    logger.debug('OrInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.OR, 'I', vmap)
# xor-int vAA, vBB, vCC ( 8b, 8b, 8b )
def xorint(ins, vmap):
    logger.debug('XorInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.XOR, 'I', vmap)
# shl-int vAA, vBB, vCC ( 8b, 8b, 8b )
def shlint(ins, vmap):
    logger.debug('ShlInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.INTSHL, 'I', vmap)
# shr-int vAA, vBB, vCC ( 8b, 8b, 8b )
def shrint(ins, vmap):
    logger.debug('ShrInt : %s', ins.get_output())
    return assign_binary_exp(ins, Op.INTSHR, 'I', vmap)
# ushr-int vAA, vBB, vCC ( 8b, 8b, 8b )
def ushrint(ins, vmap):
    logger.debug('UShrInt : %s', ins.get_output())
    # NOTE(review): ushr-int maps to Op.INTSHR, the same operator as
    # shr-int -- the unsigned (>>>) semantics appear to be lost in the
    # decompiled output; confirm whether Op defines an unsigned shift.
    return assign_binary_exp(ins, Op.INTSHR, 'I', vmap)
# add-long vAA, vBB, vCC ( 8b, 8b, 8b )
def addlong(ins, vmap):
    logger.debug('AddLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.ADD, 'J', vmap)
# sub-long vAA, vBB, vCC ( 8b, 8b, 8b )
def sublong(ins, vmap):
    logger.debug('SubLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.SUB, 'J', vmap)
# mul-long vAA, vBB, vCC ( 8b, 8b, 8b )
def mullong(ins, vmap):
    logger.debug('MulLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MUL, 'J', vmap)
# div-long vAA, vBB, vCC ( 8b, 8b, 8b )
def divlong(ins, vmap):
    logger.debug('DivLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.DIV, 'J', vmap)
# rem-long vAA, vBB, vCC ( 8b, 8b, 8b )
def remlong(ins, vmap):
    logger.debug('RemLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MOD, 'J', vmap)
# and-long vAA, vBB, vCC ( 8b, 8b, 8b )
def andlong(ins, vmap):
    logger.debug('AndLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.AND, 'J', vmap)
# or-long vAA, vBB, vCC ( 8b, 8b, 8b )
def orlong(ins, vmap):
    logger.debug('OrLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.OR, 'J', vmap)
# xor-long vAA, vBB, vCC ( 8b, 8b, 8b )
def xorlong(ins, vmap):
    logger.debug('XorLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.XOR, 'J', vmap)
# shl-long vAA, vBB, vCC ( 8b, 8b, 8b )
def shllong(ins, vmap):
    logger.debug('ShlLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.LONGSHL, 'J', vmap)
# shr-long vAA, vBB, vCC ( 8b, 8b, 8b )
def shrlong(ins, vmap):
    logger.debug('ShrLong : %s', ins.get_output())
    return assign_binary_exp(ins, Op.LONGSHR, 'J', vmap)
# ushr-long vAA, vBB, vCC ( 8b, 8b, 8b )
def ushrlong(ins, vmap):
    logger.debug('UShrLong : %s', ins.get_output())
    # NOTE(review): same signed/unsigned shift conflation as ushrint.
    return assign_binary_exp(ins, Op.LONGSHR, 'J', vmap)
# add-float vAA, vBB, vCC ( 8b, 8b, 8b )
def addfloat(ins, vmap):
    logger.debug('AddFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.ADD, 'F', vmap)
# sub-float vAA, vBB, vCC ( 8b, 8b, 8b )
def subfloat(ins, vmap):
    logger.debug('SubFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.SUB, 'F', vmap)
# mul-float vAA, vBB, vCC ( 8b, 8b, 8b )
def mulfloat(ins, vmap):
    logger.debug('MulFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MUL, 'F', vmap)
# div-float vAA, vBB, vCC ( 8b, 8b, 8b )
def divfloat(ins, vmap):
    logger.debug('DivFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.DIV, 'F', vmap)
# rem-float vAA, vBB, vCC ( 8b, 8b, 8b )
def remfloat(ins, vmap):
    logger.debug('RemFloat : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MOD, 'F', vmap)
# add-double vAA, vBB, vCC ( 8b, 8b, 8b )
def adddouble(ins, vmap):
    logger.debug('AddDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.ADD, 'D', vmap)
# sub-double vAA, vBB, vCC ( 8b, 8b, 8b )
def subdouble(ins, vmap):
    logger.debug('SubDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.SUB, 'D', vmap)
# mul-double vAA, vBB, vCC ( 8b, 8b, 8b )
def muldouble(ins, vmap):
    logger.debug('MulDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MUL, 'D', vmap)
# div-double vAA, vBB, vCC ( 8b, 8b, 8b )
def divdouble(ins, vmap):
    logger.debug('DivDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.DIV, 'D', vmap)
# rem-double vAA, vBB, vCC ( 8b, 8b, 8b )
def remdouble(ins, vmap):
    logger.debug('RemDouble : %s', ins.get_output())
    return assign_binary_exp(ins, Op.MOD, 'D', vmap)
# --- Binary ops, two-address form (x op= y) -----------------------------
# Same Op/type mapping as the three-register form, but delegating to
# assign_binary_2addr_exp since the destination register doubles as the
# first operand.
# add-int/2addr vA, vB ( 4b, 4b )
def addint2addr(ins, vmap):
    logger.debug('AddInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.ADD, 'I', vmap)
# sub-int/2addr vA, vB ( 4b, 4b )
def subint2addr(ins, vmap):
    logger.debug('SubInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.SUB, 'I', vmap)
# mul-int/2addr vA, vB ( 4b, 4b )
def mulint2addr(ins, vmap):
    logger.debug('MulInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MUL, 'I', vmap)
# div-int/2addr vA, vB ( 4b, 4b )
def divint2addr(ins, vmap):
    logger.debug('DivInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.DIV, 'I', vmap)
# rem-int/2addr vA, vB ( 4b, 4b )
def remint2addr(ins, vmap):
    logger.debug('RemInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MOD, 'I', vmap)
# and-int/2addr vA, vB ( 4b, 4b )
def andint2addr(ins, vmap):
    logger.debug('AndInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.AND, 'I', vmap)
# or-int/2addr vA, vB ( 4b, 4b )
def orint2addr(ins, vmap):
    logger.debug('OrInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.OR, 'I', vmap)
# xor-int/2addr vA, vB ( 4b, 4b )
def xorint2addr(ins, vmap):
    logger.debug('XorInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.XOR, 'I', vmap)
# shl-int/2addr vA, vB ( 4b, 4b )
def shlint2addr(ins, vmap):
    logger.debug('ShlInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.INTSHL, 'I', vmap)
# shr-int/2addr vA, vB ( 4b, 4b )
def shrint2addr(ins, vmap):
    logger.debug('ShrInt2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.INTSHR, 'I', vmap)
# ushr-int/2addr vA, vB ( 4b, 4b )
def ushrint2addr(ins, vmap):
    logger.debug('UShrInt2Addr : %s', ins.get_output())
    # NOTE(review): unsigned shift mapped to the signed Op.INTSHR, same
    # conflation as ushrint -- confirm whether Op has an unsigned variant.
    return assign_binary_2addr_exp(ins, Op.INTSHR, 'I', vmap)
# add-long/2addr vA, vB ( 4b, 4b )
def addlong2addr(ins, vmap):
    logger.debug('AddLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.ADD, 'J', vmap)
# sub-long/2addr vA, vB ( 4b, 4b )
def sublong2addr(ins, vmap):
    logger.debug('SubLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.SUB, 'J', vmap)
# mul-long/2addr vA, vB ( 4b, 4b )
def mullong2addr(ins, vmap):
    logger.debug('MulLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MUL, 'J', vmap)
# div-long/2addr vA, vB ( 4b, 4b )
def divlong2addr(ins, vmap):
    logger.debug('DivLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.DIV, 'J', vmap)
# rem-long/2addr vA, vB ( 4b, 4b )
def remlong2addr(ins, vmap):
    logger.debug('RemLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MOD, 'J', vmap)
# and-long/2addr vA, vB ( 4b, 4b )
def andlong2addr(ins, vmap):
    logger.debug('AndLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.AND, 'J', vmap)
# or-long/2addr vA, vB ( 4b, 4b )
def orlong2addr(ins, vmap):
    logger.debug('OrLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.OR, 'J', vmap)
# xor-long/2addr vA, vB ( 4b, 4b )
def xorlong2addr(ins, vmap):
    logger.debug('XorLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.XOR, 'J', vmap)
# shl-long/2addr vA, vB ( 4b, 4b )
def shllong2addr(ins, vmap):
    logger.debug('ShlLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.LONGSHL, 'J', vmap)
# shr-long/2addr vA, vB ( 4b, 4b )
def shrlong2addr(ins, vmap):
    logger.debug('ShrLong2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.LONGSHR, 'J', vmap)
# ushr-long/2addr vA, vB ( 4b, 4b )
def ushrlong2addr(ins, vmap):
    logger.debug('UShrLong2Addr : %s', ins.get_output())
    # NOTE(review): same signed/unsigned shift conflation as ushrint.
    return assign_binary_2addr_exp(ins, Op.LONGSHR, 'J', vmap)
# add-float/2addr vA, vB ( 4b, 4b )
def addfloat2addr(ins, vmap):
    logger.debug('AddFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.ADD, 'F', vmap)
# sub-float/2addr vA, vB ( 4b, 4b )
def subfloat2addr(ins, vmap):
    logger.debug('SubFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.SUB, 'F', vmap)
# mul-float/2addr vA, vB ( 4b, 4b )
def mulfloat2addr(ins, vmap):
    logger.debug('MulFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MUL, 'F', vmap)
# div-float/2addr vA, vB ( 4b, 4b )
def divfloat2addr(ins, vmap):
    logger.debug('DivFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.DIV, 'F', vmap)
# rem-float/2addr vA, vB ( 4b, 4b )
def remfloat2addr(ins, vmap):
    logger.debug('RemFloat2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MOD, 'F', vmap)
# add-double/2addr vA, vB ( 4b, 4b )
def adddouble2addr(ins, vmap):
    logger.debug('AddDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.ADD, 'D', vmap)
# sub-double/2addr vA, vB ( 4b, 4b )
def subdouble2addr(ins, vmap):
    # Delegates to assign_binary_2addr_exp like every other */2addr op.
    # Log label fixed to 'SubDouble2Addr' (was 'subDouble2Addr') to match
    # the CamelCase convention of all sibling handlers, so log filtering
    # by opcode name works consistently.
    logger.debug('SubDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.SUB, 'D', vmap)
# Remaining double two-address arithmetic (mul/div/rem), same pattern as
# the other */2addr handlers above.
# mul-double/2addr vA, vB ( 4b, 4b )
def muldouble2addr(ins, vmap):
    logger.debug('MulDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MUL, 'D', vmap)
# div-double/2addr vA, vB ( 4b, 4b )
def divdouble2addr(ins, vmap):
    logger.debug('DivDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.DIV, 'D', vmap)
# rem-double/2addr vA, vB ( 4b, 4b )
def remdouble2addr(ins, vmap):
    logger.debug('RemDouble2Addr : %s', ins.get_output())
    return assign_binary_2addr_exp(ins, Op.MOD, 'D', vmap)
# --- Binary ops with an immediate (literal) operand ---------------------
# lit16 handlers read a 16-bit literal from ins.CCCC; lit8 handlers read
# an 8-bit literal from ins.CC.  Most delegate to assign_lit; the rsub
# variants build the expression inline because the operand order is
# reversed (literal - register).
# add-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def addintlit16(ins, vmap):
    logger.debug('AddIntLit16 : %s', ins.get_output())
    return assign_lit(Op.ADD, ins.CCCC, ins.A, ins.B, vmap)
# rsub-int vA, vB, #+CCCC ( 4b, 4b, 16b )
def rsubint(ins, vmap):
    logger.debug('RSubInt : %s', ins.get_output())
    var_a, var_b = get_variables(vmap, ins.A, ins.B)
    # Reverse subtract: result is literal - register, not register - literal.
    cst = Constant(ins.CCCC, 'I')
    return AssignExpression(var_a, BinaryExpressionLit(Op.SUB, cst, var_b))
# mul-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def mulintlit16(ins, vmap):
    logger.debug('MulIntLit16 : %s', ins.get_output())
    return assign_lit(Op.MUL, ins.CCCC, ins.A, ins.B, vmap)
# div-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def divintlit16(ins, vmap):
    logger.debug('DivIntLit16 : %s', ins.get_output())
    return assign_lit(Op.DIV, ins.CCCC, ins.A, ins.B, vmap)
# rem-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def remintlit16(ins, vmap):
    logger.debug('RemIntLit16 : %s', ins.get_output())
    return assign_lit(Op.MOD, ins.CCCC, ins.A, ins.B, vmap)
# and-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def andintlit16(ins, vmap):
    logger.debug('AndIntLit16 : %s', ins.get_output())
    return assign_lit(Op.AND, ins.CCCC, ins.A, ins.B, vmap)
# or-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def orintlit16(ins, vmap):
    logger.debug('OrIntLit16 : %s', ins.get_output())
    return assign_lit(Op.OR, ins.CCCC, ins.A, ins.B, vmap)
# xor-int/lit16 vA, vB, #+CCCC ( 4b, 4b, 16b )
def xorintlit16(ins, vmap):
    logger.debug('XorIntLit16 : %s', ins.get_output())
    return assign_lit(Op.XOR, ins.CCCC, ins.A, ins.B, vmap)
# add-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def addintlit8(ins, vmap):
    logger.debug('AddIntLit8 : %s', ins.get_output())
    # A negative literal is rewritten as subtraction of its absolute
    # value, so the output reads 'x - 5' rather than 'x + -5'.
    literal, op = [(ins.CC, Op.ADD), (-ins.CC, Op.SUB)][ins.CC < 0]
    return assign_lit(op, literal, ins.AA, ins.BB, vmap)
# rsub-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def rsubintlit8(ins, vmap):
    logger.debug('RSubIntLit8 : %s', ins.get_output())
    var_a, var_b = get_variables(vmap, ins.AA, ins.BB)
    # Reverse subtract, 8-bit literal form: literal - register.
    cst = Constant(ins.CC, 'I')
    return AssignExpression(var_a, BinaryExpressionLit(Op.SUB, cst, var_b))
# mul-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def mulintlit8(ins, vmap):
    logger.debug('MulIntLit8 : %s', ins.get_output())
    return assign_lit(Op.MUL, ins.CC, ins.AA, ins.BB, vmap)
# div-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def divintlit8(ins, vmap):
    logger.debug('DivIntLit8 : %s', ins.get_output())
    return assign_lit(Op.DIV, ins.CC, ins.AA, ins.BB, vmap)
# rem-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def remintlit8(ins, vmap):
    logger.debug('RemIntLit8 : %s', ins.get_output())
    return assign_lit(Op.MOD, ins.CC, ins.AA, ins.BB, vmap)
# and-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def andintlit8(ins, vmap):
    logger.debug('AndIntLit8 : %s', ins.get_output())
    return assign_lit(Op.AND, ins.CC, ins.AA, ins.BB, vmap)
# or-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def orintlit8(ins, vmap):
    logger.debug('OrIntLit8 : %s', ins.get_output())
    return assign_lit(Op.OR, ins.CC, ins.AA, ins.BB, vmap)
# xor-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def xorintlit8(ins, vmap):
    logger.debug('XorIntLit8 : %s', ins.get_output())
    return assign_lit(Op.XOR, ins.CC, ins.AA, ins.BB, vmap)
# shl-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def shlintlit8(ins, vmap):
    logger.debug('ShlIntLit8 : %s', ins.get_output())
    return assign_lit(Op.INTSHL, ins.CC, ins.AA, ins.BB, vmap)
# shr-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def shrintlit8(ins, vmap):
    logger.debug('ShrIntLit8 : %s', ins.get_output())
    return assign_lit(Op.INTSHR, ins.CC, ins.AA, ins.BB, vmap)
# ushr-int/lit8 vAA, vBB, #+CC ( 8b, 8b, 8b )
def ushrintlit8(ins, vmap):
    logger.debug('UShrIntLit8 : %s', ins.get_output())
    # NOTE(review): same signed/unsigned shift conflation as ushrint.
    return assign_lit(Op.INTSHR, ins.CC, ins.AA, ins.BB, vmap)
# Dispatch table indexed by Dalvik opcode byte: INSTRUCTION_SET[opcode] is the
# decompiler handler for that instruction.  Gaps in the opcode space are
# filled with `nop`.
INSTRUCTION_SET = [
    # 0x00
    nop,  # nop
    move,  # move
    movefrom16,  # move/from16
    move16,  # move/16
    movewide,  # move-wide
    movewidefrom16,  # move-wide/from16
    movewide16,  # move-wide/16
    moveobject,  # move-object
    moveobjectfrom16,  # move-object/from16
    moveobject16,  # move-object/16
    moveresult,  # move-result
    moveresultwide,  # move-result-wide
    moveresultobject,  # move-result-object
    moveexception,  # move-exception
    returnvoid,  # return-void
    return_reg,  # return
    # 0x10
    returnwide,  # return-wide
    returnobject,  # return-object
    const4,  # const/4
    const16,  # const/16
    const,  # const
    consthigh16,  # const/high16
    constwide16,  # const-wide/16
    constwide32,  # const-wide/32
    constwide,  # const-wide
    constwidehigh16,  # const-wide/high16
    conststring,  # const-string
    conststringjumbo,  # const-string/jumbo
    constclass,  # const-class
    monitorenter,  # monitor-enter
    monitorexit,  # monitor-exit
    checkcast,  # check-cast
    # 0x20
    instanceof,  # instance-of
    arraylength,  # array-length
    newinstance,  # new-instance
    newarray,  # new-array
    fillednewarray,  # filled-new-array
    fillednewarrayrange,  # filled-new-array/range
    fillarraydata,  # fill-array-data
    throw,  # throw
    goto,  # goto
    goto16,  # goto/16
    goto32,  # goto/32
    packedswitch,  # packed-switch
    sparseswitch,  # sparse-switch
    cmplfloat,  # cmpl-float
    cmpgfloat,  # cmpg-float
    cmpldouble,  # cmpl-double
    # 0x30
    cmpgdouble,  # cmpg-double
    cmplong,  # cmp-long
    ifeq,  # if-eq
    ifne,  # if-ne
    iflt,  # if-lt
    ifge,  # if-ge
    ifgt,  # if-gt
    ifle,  # if-le
    ifeqz,  # if-eqz
    ifnez,  # if-nez
    ifltz,  # if-ltz
    ifgez,  # if-gez
    ifgtz,  # if-gtz
    iflez,  # if-lez
    nop,  # unused
    nop,  # unused
    # 0x40
    nop,  # unused
    nop,  # unused
    nop,  # unused
    nop,  # unused
    aget,  # aget
    agetwide,  # aget-wide
    agetobject,  # aget-object
    agetboolean,  # aget-boolean
    agetbyte,  # aget-byte
    agetchar,  # aget-char
    agetshort,  # aget-short
    aput,  # aput
    aputwide,  # aput-wide
    aputobject,  # aput-object
    aputboolean,  # aput-boolean
    aputbyte,  # aput-byte
    # 0x50
    aputchar,  # aput-char
    aputshort,  # aput-short
    iget,  # iget
    igetwide,  # iget-wide
    igetobject,  # iget-object
    igetboolean,  # iget-boolean
    igetbyte,  # iget-byte
    igetchar,  # iget-char
    igetshort,  # iget-short
    iput,  # iput
    iputwide,  # iput-wide
    iputobject,  # iput-object
    iputboolean,  # iput-boolean
    iputbyte,  # iput-byte
    iputchar,  # iput-char
    iputshort,  # iput-short
    # 0x60
    sget,  # sget
    sgetwide,  # sget-wide
    sgetobject,  # sget-object
    sgetboolean,  # sget-boolean
    sgetbyte,  # sget-byte
    sgetchar,  # sget-char
    sgetshort,  # sget-short
    sput,  # sput
    sputwide,  # sput-wide
    sputobject,  # sput-object
    sputboolean,  # sput-boolean
    sputbyte,  # sput-byte
    sputchar,  # sput-char
    sputshort,  # sput-short
    invokevirtual,  # invoke-virtual
    invokesuper,  # invoke-super
    # 0x70
    invokedirect,  # invoke-direct
    invokestatic,  # invoke-static
    invokeinterface,  # invoke-interface
    nop,  # unused
    invokevirtualrange,  # invoke-virtual/range
    invokesuperrange,  # invoke-super/range
    invokedirectrange,  # invoke-direct/range
    invokestaticrange,  # invoke-static/range
    invokeinterfacerange,  # invoke-interface/range
    nop,  # unused
    nop,  # unused
    negint,  # neg-int
    notint,  # not-int
    neglong,  # neg-long
    notlong,  # not-long
    negfloat,  # neg-float
    # 0x80
    negdouble,  # neg-double
    inttolong,  # int-to-long
    inttofloat,  # int-to-float
    inttodouble,  # int-to-double
    longtoint,  # long-to-int
    longtofloat,  # long-to-float
    longtodouble,  # long-to-double
    floattoint,  # float-to-int
    floattolong,  # float-to-long
    floattodouble,  # float-to-double
    doubletoint,  # double-to-int
    doubletolong,  # double-to-long
    doubletofloat,  # double-to-float
    inttobyte,  # int-to-byte
    inttochar,  # int-to-char
    inttoshort,  # int-to-short
    # 0x90
    addint,  # add-int
    subint,  # sub-int
    mulint,  # mul-int
    divint,  # div-int
    remint,  # rem-int
    andint,  # and-int
    orint,  # or-int
    xorint,  # xor-int
    shlint,  # shl-int
    shrint,  # shr-int
    ushrint,  # ushr-int
    addlong,  # add-long
    sublong,  # sub-long
    mullong,  # mul-long
    divlong,  # div-long
    remlong,  # rem-long
    # 0xa0
    andlong,  # and-long
    orlong,  # or-long
    xorlong,  # xor-long
    shllong,  # shl-long
    shrlong,  # shr-long
    ushrlong,  # ushr-long
    addfloat,  # add-float
    subfloat,  # sub-float
    mulfloat,  # mul-float
    divfloat,  # div-float
    remfloat,  # rem-float
    adddouble,  # add-double
    subdouble,  # sub-double
    muldouble,  # mul-double
    divdouble,  # div-double
    remdouble,  # rem-double
    # 0xb0
    addint2addr,  # add-int/2addr
    subint2addr,  # sub-int/2addr
    mulint2addr,  # mul-int/2addr
    divint2addr,  # div-int/2addr
    remint2addr,  # rem-int/2addr
    andint2addr,  # and-int/2addr
    orint2addr,  # or-int/2addr
    xorint2addr,  # xor-int/2addr
    shlint2addr,  # shl-int/2addr
    shrint2addr,  # shr-int/2addr
    ushrint2addr,  # ushr-int/2addr
    addlong2addr,  # add-long/2addr
    sublong2addr,  # sub-long/2addr
    mullong2addr,  # mul-long/2addr
    divlong2addr,  # div-long/2addr
    remlong2addr,  # rem-long/2addr
    # 0xc0
    andlong2addr,  # and-long/2addr
    orlong2addr,  # or-long/2addr
    xorlong2addr,  # xor-long/2addr
    shllong2addr,  # shl-long/2addr
    shrlong2addr,  # shr-long/2addr
    ushrlong2addr,  # ushr-long/2addr
    addfloat2addr,  # add-float/2addr
    subfloat2addr,  # sub-float/2addr
    mulfloat2addr,  # mul-float/2addr
    divfloat2addr,  # div-float/2addr
    remfloat2addr,  # rem-float/2addr
    adddouble2addr,  # add-double/2addr
    subdouble2addr,  # sub-double/2addr
    muldouble2addr,  # mul-double/2addr
    divdouble2addr,  # div-double/2addr
    remdouble2addr,  # rem-double/2addr
    # 0xd0
    addintlit16,  # add-int/lit16
    rsubint,  # rsub-int
    mulintlit16,  # mul-int/lit16
    divintlit16,  # div-int/lit16
    remintlit16,  # rem-int/lit16
    andintlit16,  # and-int/lit16
    orintlit16,  # or-int/lit16
    xorintlit16,  # xor-int/lit16
    addintlit8,  # add-int/lit8
    rsubintlit8,  # rsub-int/lit8
    mulintlit8,  # mul-int/lit8
    divintlit8,  # div-int/lit8
    remintlit8,  # rem-int/lit8
    andintlit8,  # and-int/lit8
    orintlit8,  # or-int/lit8
    xorintlit8,  # xor-int/lit8
    # 0xe0
    shlintlit8,  # shl-int/lit8
    shrintlit8,  # shr-int/lit8
    ushrintlit8,  # ushr-int/lit8
]
|
{
"content_hash": "969635270c0ef02954d0e46f21fa1904",
"timestamp": "",
"source": "github",
"line_count": 1961,
"max_line_length": 78,
"avg_line_length": 31.79704232534421,
"alnum_prop": 0.6179715816146518,
"repo_name": "xysec/androguard",
"id": "e37d456daf2e803767c22a64e12f2c6ee05d3801",
"size": "63030",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "androguard/decompiler/dad/opcode_ins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "384130"
},
{
"name": "C++",
"bytes": "57006"
},
{
"name": "Makefile",
"bytes": "6008"
},
{
"name": "Python",
"bytes": "27560597"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import Group
from django.contrib.auth import get_user_model
from rest_framework import status
from coupons.tests.base import BasicTest
class CouponRedeemedSettingsTests(BasicTest):
    """Tests for the COUPON_PERMISSIONS['REDEEMED'] group-based filtering of
    the /coupon/<id>/redeemed endpoint."""

    def setUp(self):
        """Create one coupon as admin and redeem it with two distinct users,
        so the redeemed list contains two entries for the admin."""
        u = get_user_model()
        u.objects.create_superuser('admin', 'john@snow.com', self.PW)
        self.user = u.objects.create_user('user', 'me@snow.com', self.PW)
        self.user2 = u.objects.create_user('user1', 'me1@snow.com', self.PW)
        with self.settings(ROOT_URLCONF='coupons.urls'):
            coupon = {
                'code': 'ASDF',
                'type': 'percent',
            }
            self.login(username='admin')
            response = self.client.post('/coupon', coupon, format='json')
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)
            self.coupon_id = response.data['id']
            self.logout()
            self.login(username='user')
            response = self.client.put('/coupon/%s/redeem' % self.coupon_id, format='json')
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)
            self.logout()
            self.login(username='user1')
            response = self.client.put('/coupon/%s/redeem' % self.coupon_id, format='json')
            self.assertEqual(response.status_code, status.HTTP_201_CREATED)
            self.logout()
            # Sanity check: the superuser sees both redemptions.
            self.login(username='admin')
            response = self.client.get('/coupon/%s/redeemed' % self.coupon_id, format='json')
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            self.assertEqual(2, len(response.data))
            self.logout()

    def test_cant_redeemed_coupon_if_not_in_group(self):
        """
        A user outside the configured 'REDEEMED' group still gets a 200, but
        only sees their own redemption, not other users'.
        """
        with self.settings(ROOT_URLCONF='coupons.urls'):
            with self.settings(COUPON_PERMISSIONS={'REDEEMED': ['group_a']}):
                self.login(username='user')
                response = self.client.get('/coupon/%s/redeemed' % self.coupon_id, format='json')
                self.assertEqual(response.status_code, status.HTTP_200_OK)
                self.assertEqual(1, len(response.data))
                self.assertEqual(self.user.id, response.data[0]['user'])
                self.logout()

    def test_cant_redeemed_coupon_if_group_empty(self):
        """
        With an empty 'REDEEMED' group list, a regular user only sees their
        own redemption.
        """
        with self.settings(ROOT_URLCONF='coupons.urls'):
            with self.settings(COUPON_PERMISSIONS={'REDEEMED': []}):
                self.login(username='user')
                response = self.client.get('/coupon/%s/redeemed' % self.coupon_id, format='json')
                self.assertEqual(response.status_code, status.HTTP_200_OK)
                self.assertEqual(1, len(response.data))
                self.assertEqual(self.user.id, response.data[0]['user'])
                self.logout()

    def test_can_redeemed_coupon_if_in_group(self):
        """
        A user who belongs to the configured 'REDEEMED' group sees every
        user's redemptions (both entries).
        """
        g, _ = Group.objects.get_or_create(name='group_a')
        g.user_set.add(self.user)
        with self.settings(ROOT_URLCONF='coupons.urls'):
            with self.settings(COUPON_PERMISSIONS={'REDEEMED': ['group_a']}):
                self.login(username='user')
                response = self.client.get('/coupon/%s/redeemed' % self.coupon_id, format='json')
                self.assertEqual(response.status_code, status.HTTP_200_OK)
                self.assertEqual(2, len(response.data))
                self.logout()
|
{
"content_hash": "809c0fccebbde28b017de476746bc162",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 97,
"avg_line_length": 40.166666666666664,
"alnum_prop": 0.5883817427385892,
"repo_name": "pstrinkle/drf-coupons",
"id": "dfd46690c3fcb737517add07c09e265ba51e2c4b",
"size": "3673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coupons/tests/test_coupon_redeemed_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "99076"
}
],
"symlink_target": ""
}
|
"""
Graphical User Interface
Copyright (c) 2014, 2015 Andrew Hawkins
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Import Declarations
"""
import copy
import datetime as dt
import numpy as np
import sqlite3
import threading as td
import wx
from forsteri.interface import data as idata
from forsteri.interface import sql as isql
"""
Constant Declarations
"""
"""
Main Functions
"""
def runAllErrors():
    """Recompute forecast errors for all three models (MLR, EMA, Naive).

    Shows a wx progress dialog while the errors are written to the master
    data database.

    Returns:
        bool: True once all errors have been committed.
    """
    # Create the progress dialog box.
    progress_dlg = wx.ProgressDialog("Running Errors",
        "Opening database connection.")

    # Open a connection to the data database.
    connection = sqlite3.connect(idata.MASTER)

    progress_dlg.Update(10, "Connection initialized, running MLR errors.")

    # Find the MLR errors.
    idata.updateError("mlr", connection)

    progress_dlg.Update(40, "MLR errors complete, running EMA errors.")

    # Find the EMA errors.
    idata.updateError("ema", connection)

    progress_dlg.Update(70, "EMA errors complete, running Naive errors.")

    # Find the Naive errors.
    idata.updateError("naive", connection)

    # Fixed typo in user-facing text: "commiting" -> "committing".
    progress_dlg.Update(99, "Naive errors complete, committing changes.")

    # Commit and close the connection.
    connection.commit()
    connection.close()

    progress_dlg.Update(100, "Error process complete.")
    progress_dlg.Destroy()

    return True
def runAll(products=None):
    """Run every forecast model (EMA, MLR, Naive) for the given products.

    Shows a wx progress dialog while the models run against the master
    data database.

    Args:
        products (list, optional): Product names to model; all products
            are used when omitted.

    Returns:
        bool: True once all model results have been committed.
    """
    # Create the progress dialog box.
    progress_dlg = wx.ProgressDialog("Running Models",
        "Opening database connection.")

    # Open a connection to the data database.
    connection = sqlite3.connect(idata.MASTER)

    progress_dlg.Update(5, "Connection initialized, gathering products.")

    # Get all products if none are given.
    if products is None:
        products = isql.getProductNames()

    progress_dlg.Update(10, "Products gathered, running EMA model.")

    # Run the EMA model.
    runEMA(products, connection)

    progress_dlg.Update(40, "EMA model complete, running MLR model.")

    # Run the MLR model.
    runMLR(products, connection)

    # Fixed typo in user-facing text: "Nieve" -> "Naive".
    progress_dlg.Update(70, "MLR model complete, running Naive model.")

    # Run the Naive model.
    runNaive(products, connection)

    # Fixed typo in user-facing text: "commiting" -> "committing".
    progress_dlg.Update(99, "All models complete, committing changes.")

    # Commit and close the connection.
    connection.commit()
    connection.close()

    progress_dlg.Update(100, "Model process complete.")
    progress_dlg.Destroy()

    return True
def runEMA(products=None, connection=None):
    """
    Run the exponential moving average model for the given products.

    Args:
        products (list, optional): Product names; all products when omitted.
        connection (sqlite3.Connection, optional): Open connection to the
            master database; one is created (and committed/closed at the
            end) when omitted.

    Returns:
        bool: True on completion.
    """
    # Open the master database if it is not supplied; `flag` remembers
    # whether this function owns the connection lifecycle.
    flag = False
    if connection is None:
        connection = sqlite3.connect(idata.MASTER)
        flag = True
    # Get all products if none are given.
    if products is None:
        products = isql.getProductNames()
    # Iterate over each product.
    for product in products:
        # Get the data for the product.
        data = idata.getData(product, "finished_goods", connection)
        # If no data is held for a product skip it.
        if len(data) == 0:
            continue
        # Convert the data to an overlapped numpy array (rows = years,
        # columns = months).
        data = overlap(data)
        # Find the averages for each month.
        average = eMA(data, alpha=0.7)
        # Convert nan to NULL so SQLite stores missing months as NULL.
        average = ["NULL" if np.isnan(x) else x for x in average]
        # Add the forecasts to the database.
        idata.updateForecast(product, "ema", average, connection)
    # Close the connection (only if opened here).
    if flag:
        connection.commit()
        connection.close()
    return True
def runMLR(products=None, connection=None):
    """
    Run the multiple linear regression model for the given products with all
    available variables.

    Args:
        products (list, optional): Product names; all products when omitted.
        connection (sqlite3.Connection, optional): Open connection to the
            master database; one is created (and committed/closed at the
            end) when omitted.

    Returns:
        bool: True on completion.
    """
    # Open the master database if it is not supplied; `flag` remembers
    # whether this function owns the connection lifecycle.
    flag = False
    if connection is None:
        connection = sqlite3.connect(idata.MASTER)
        flag = True
    # Get all products if none are given.
    if products is None:
        products = isql.getProductNames()
    # Iterate over each product.
    for product in products:
        # Get the data for the current product.
        (header, data) = idata.getAllData(product)
        # If there is no data for a product, skip to the next product.
        if data is None:
            continue
        # Process the data into the per-month overlap form.
        try:
            dataNew = overlap3(data)
        except IndexError:
            continue
        # Iterate over each month, fitting one regression per month.
        forecast = []
        for i in range(0, 12):
            try:
                # Determine the coefficient values (column 0 is the
                # dependent variable, the rest are regressors).
                (beta, fit) = mLR(dataNew[i][:, 0], dataNew[i][:, 1:])
                # Determine the values to use for each variable (bias term
                # of 1 plus the EMA of each regressor).
                vals = np.concatenate((np.array([1]), eMA(dataNew[i][:, 1:],
                    alpha=0.7)))
                # Find the forecast.
                forecast.append(np.dot(vals, beta))
            except IndexError:
                # No data for this month: add nan to the forecast.
                forecast.append(np.nan)
        # Convert nan to NULL so SQLite stores missing months as NULL.
        forecast = ["NULL" if np.isnan(x) else x for x in forecast]
        # Add the forecast values to the database.
        idata.updateForecast(product, "mlr", forecast, connection)
    # Close the connection (only if opened here).
    if flag:
        connection.commit()
        connection.close()
    return True
def runNaive(products=None, connection=None):
    """Run the naive model: forecast each month as its last observed value.

    Args:
        products (list, optional): Product names; all products when omitted.
        connection (sqlite3.Connection, optional): Open connection to the
            master database; one is created (and committed/closed at the
            end) when omitted.

    Returns:
        bool: True on completion.
    """
    # Open the master database if it is not supplied; `flag` remembers
    # whether this function owns the connection lifecycle.
    flag = False
    if connection is None:
        connection = sqlite3.connect(idata.MASTER)
        flag = True

    # Get all products if none are given.
    if products is None:
        products = isql.getProductNames()

    # (Removed an unused `today = dt.date(1, 1, 1).today()` local.)

    # Get the finished goods data.
    for product in products:
        data = idata.getData(product, "finished_goods_monthly", connection)

        # Extract the last 12 data points.
        forecast = data[-12:]

        # Convert the dates to be just the months.
        forecast = {dt.datetime.strptime(x[0], "%Y-%m-%d").month: x[1] for x\
            in forecast}

        # Pad missing months with NULL (idiomatic setdefault instead of a
        # try/except per month).
        if len(forecast) < 12:
            for i in range(1, 13):
                forecast.setdefault(i, "NULL")

        # Sort by month.
        forecast = sorted(forecast.items(), key=lambda s: s[0])

        # Add the forecast values to the database.
        idata.updateForecast(product, "naive", [x[1] for x in forecast],
            connection)

    # Close the connection (only if opened here).
    if flag:
        connection.commit()
        connection.close()

    return True
"""
Model Functions
"""
def eMA(data, alpha=None):
    """
    Find the exponential moving average of some data.

    Args:
        data (numpy.array): A 1- or 2-dimensional array of data; nan
            entries are skipped during smoothing.
        alpha (float, optional): The weighting factor. Defaults to
            2 / (len(data) + 1) when omitted.

    Returns:
        The final smoothed value; for 2-dimensional input, a list with one
        smoothed value per column.
    """
    # Find the shape of the input data.
    shape = np.shape(data)

    # If the input is 2-D, smooth each column independently (recursively).
    if len(shape) == 2:
        return [eMA([row[col] for row in data], alpha)
            for col in range(0, shape[1])]
    elif len(shape) > 2:
        raise(IndexError("this function can only take up to a 2 dimensional array"))

    # If no alpha is given determine the alpha.
    if alpha is None:
        alpha = 2 / (len(data) + 1.0)

    # Find the complement of alpha.
    comp = 1 - alpha

    # Find the first non-nan index; an all-nan series falls back to
    # returning the last raw value (original behavior preserved).
    try:
        index = np.flatnonzero(~np.isnan(data))[0]
    except IndexError:
        return data[-1]

    # Set the initial average.
    average = data[index]

    # Iterate over the remaining points, skipping nan values.
    for i in range(index + 1, len(data)):
        if not np.isnan(data[i]):
            average = comp * average + alpha * data[i]

    return average
def mLR(dep, ind):
    """Ordinary least squares with a bias term.

    Args:
        dep: Dependent variable, shape (n,).
        ind: Independent variables, shape (n, k).

    Returns:
        tuple: (beta, fit) where beta has shape (k + 1,) with the bias
        coefficient first, and fit is the in-sample prediction.
    """
    # Build the design matrix: a leading column of ones plus the regressors.
    rows, cols = np.shape(ind)
    design = np.ones((rows, cols + 1))
    design[:, 1:] = ind

    # Normal equations solved via the pseudo-inverse:
    # beta = (X'X)^+ X' y.
    gram_pinv = np.linalg.pinv(design.T.dot(design))
    beta = gram_pinv.dot(design.T).dot(dep)

    # Historical (in-sample) fit.
    fit = design.dot(beta)
    return beta, fit
"""
Helper Functions
"""
def overlap(data):
    """Reshape a monthly series into rows of 12 calendar months.

    data is in the form [(date1, value1), (date2, value2), ...]; the head
    and tail are padded with nan so the first row starts in January and
    the last row ends in December.
    """
    # Work on a shallow copy so the caller's list is untouched.
    series = copy.copy(data)

    # First and last year/month from the ISO date strings.
    first_year = int(series[0][0][0:4])
    first_month = int(series[0][0][5:7])
    last_year = int(series[-1][0][0:4])
    last_month = int(series[-1][0][5:7])

    # Pad the front back to January of the first year.
    if first_month != 1:
        head = [(str(dt.date(first_year, m, 1)), np.nan)
            for m in range(1, first_month)]
        head.extend(series)
        series = copy.copy(head)

    # Pad the tail forward to December of the last year.
    if last_month != 12:
        series.extend([(str(dt.date(last_year, m, 1)), np.nan)
            for m in range(last_month + 1, 13)])

    # Drop the dates and fold the values into 12-wide rows.
    values = [point[1] for point in series]
    return np.array([values[i:i + 12] for i in range(0, len(values), 12)])
def overlap2(data):
    """Pad a multi-column monthly series to whole years and group the value
    tuples into 12 month-buckets.

    Rows are ``(date_string, v1, v2, ...)``; the returned list has 12
    sub-lists of value tuples (dates dropped).
    """
    # Make a copy of the data.
    data2 = copy.copy(data)
    # Extract the first and last year and month.
    firstYear = int(data2[0][0][0 : 4])
    firstMonth = int(data2[0][0][5 : 7])
    lastYear = int(data2[-1][0][0 : 4])
    lastMonth = int(data2[-1][0][5 : 7])
    # Template of nan values matching the number of value columns.
    naTemp = [np.nan] * (len(data2[0]) - 1)
    # If the first month is not one, add dates with nan values.
    if firstMonth != 1:
        head = []
        for i in range(1, firstMonth):
            temp = [str(dt.date(firstYear, i, 1))]
            temp.extend(naTemp)
            head.append(tuple(temp))
        head.extend(data2)
        data2 = head.copy()
    # If the last month is not 12, add dates with nan values.
    if lastMonth != 12:
        tail = []
        for i in range(lastMonth + 1, 13):
            temp = [str(dt.date(lastYear, i, 1))]
            temp.extend(naTemp)
            tail.append(tuple(temp))
        data2.extend(tail)
    # NOTE(review): the bucket index starts at 1, so after padding, January
    # rows land in final[1] and December rows in final[0] -- i.e. the
    # buckets are rotated by one relative to overlap3, which derives the
    # index from the actual month.  Confirm whether this rotation is
    # intentional.
    final = [[] for i in range(0, 12)]
    index = 1
    for x in data2:
        final[index % 12].append(x[1:])
        index += 1
    return final
def overlap3(data):
    """Group consecutive monthly rows into 12 per-month numpy arrays.

    Rows are ``(date_string, v1, v2, ...)``; bucket ``i`` collects the
    value tuples for calendar month ``i + 1``, derived from the first
    row's month and assuming consecutive months thereafter.
    """
    points = copy.copy(data)
    buckets = [[] for _ in range(0, 12)]

    # Month of the first row, taken from its ISO date string.
    month = int(points[0][0][5:7])
    for point in points:
        buckets[(month % 12) - 1].append(point[1:])
        month += 1

    return [np.array(bucket) for bucket in buckets]
def curtail(data):
    """Trim every series in `data` to the date range shared by all of them.

    Args:
        data (dict): Maps variable name to a date-sorted list of
            (date_string, ...) points.

    Returns:
        dict: Same keys, with each series restricted to
        [latest first date, earliest last date].
    """
    series = data.copy()

    # The common window: latest start across series, earliest end.
    first = max(points[0][0] for points in series.values())
    last = min(points[-1][0] for points in series.values())

    # Keep only the points inside the common window.
    trimmed = dict()
    for name, points in series.items():
        trimmed[name] = [p for p in points if first <= p[0] <= last]
    return trimmed
def separate(data):
    """Split variable data by month (appears unfinished -- see note below)."""
    # NOTE(review): this function looks unfinished and will raise NameError
    # if called: `months` is never defined in this scope, and the
    # np.column_stack call is missing its second argument (trailing comma).
    # Confirm intent before using or removing.
    for (key, value) in data.items():
        for i in range(0, 12):
            months[i + 1] = np.column_stack(months[i + 1], )
|
{
"content_hash": "831b4286c26428f49d42c9c0e214decd",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 77,
"avg_line_length": 26.420081967213115,
"alnum_prop": 0.6046692003412705,
"repo_name": "achawkins/Forsteri",
"id": "038347cd82830d12ff4e5622c47d2fb2b573c92a",
"size": "12912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forsteri/process/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "33"
},
{
"name": "Python",
"bytes": "245733"
}
],
"symlink_target": ""
}
|
from rest_framework import viewsets, mixins
from .models import FileModel, ImageModel
from .serializers import FileSerializers, ImageSerializers
class FileViewSet(mixins.CreateModelMixin,
                  viewsets.GenericViewSet):
    """Create-only endpoint for FileModel uploads (only CreateModelMixin is
    mixed in, so no list/retrieve/update/delete actions are exposed)."""
    queryset = FileModel.objects.all()
    serializer_class = FileSerializers
class ImageViewSet(mixins.CreateModelMixin,
                   viewsets.GenericViewSet):
    """Create-only endpoint for ImageModel uploads (only CreateModelMixin is
    mixed in, so no list/retrieve/update/delete actions are exposed)."""
    queryset = ImageModel.objects.all()
    serializer_class = ImageSerializers
|
{
"content_hash": "665392bb933855cb8494ff0a9fd90537",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 30.25,
"alnum_prop": 0.7458677685950413,
"repo_name": "DreamColl/WebBaihe",
"id": "2a33efccb3f15a3a3a2ecc5c745ef1add5ccb906",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baihe_api/baihe_api/apps/upload/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "272"
},
{
"name": "JavaScript",
"bytes": "23238"
},
{
"name": "Python",
"bytes": "33628"
},
{
"name": "Vue",
"bytes": "4782"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from ..views import servers as views
# Routes for the server views; the `/?` suffix makes the trailing slash
# optional on the detail-style routes.
urlpatterns = [
    # Server collection.
    url(r'^$',
        views.ServerList.as_view(),
        name='server-list'),
    # Single server by primary key.
    url(r'^/(?P<pk>\d+)/?$',
        views.ServerDetail.as_view(),
        name='server-detail'),
    # Components belonging to one server.
    url(r'^/(?P<pk>\d+)/components/?$',
        views.ServerComponentList.as_view(),
        name='server-component-list'),
    # Actions on one server.
    url(r'^/(?P<pk>\d+)/actions/?$',
        views.ServerActions.as_view(),
        name='server-actions'),
]
|
{
"content_hash": "d03b5129e6c6113361f6d5cd9d50791d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 44,
"avg_line_length": 23.952380952380953,
"alnum_prop": 0.5566600397614314,
"repo_name": "baffolobill/mb_test_1",
"id": "b6989e6f4ffd17054a3d38183b523376df5f03f8",
"size": "503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mbtest1/erp_test/urls/servers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "228768"
},
{
"name": "HTML",
"bytes": "54315"
},
{
"name": "JavaScript",
"bytes": "3483943"
},
{
"name": "Makefile",
"bytes": "135"
},
{
"name": "Python",
"bytes": "305003"
},
{
"name": "Shell",
"bytes": "187"
}
],
"symlink_target": ""
}
|
"""Log the current time, sleep three seconds, then exit with status 3."""
import sys
import time
import logging

logging.basicConfig(level=logging.DEBUG)
logging.info(time.time())

# Stay alive briefly before exiting.
time.sleep(3)

print("exiting from echo !")
# Deliberate non-zero exit status.
sys.exit(3)
|
{
"content_hash": "4b500a8299a9a62bb848a8c645ab63b0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 40,
"avg_line_length": 16.2,
"alnum_prop": 0.7654320987654321,
"repo_name": "ybrs/single-beat",
"id": "fa18e83745162b0f285965d9c2480603670923ef",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/echo-exit-code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24214"
},
{
"name": "Shell",
"bytes": "854"
}
],
"symlink_target": ""
}
|
import os
import six
import st2common.content.utils as content_utils
from st2common import log as logging
from st2common.constants.meta import ALLOWED_EXTS
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.action import ActionAliasAPI
from st2common.persistence.actionalias import ActionAlias
from st2common.exceptions.db import StackStormDBObjectNotFoundError
__all__ = [
'AliasesRegistrar',
'register_aliases'
]
LOG = logging.getLogger(__name__)
class AliasesRegistrar(ResourceRegistrar):
    """Registrar that discovers action-alias metadata files in packs and
    persists them as ActionAlias DB objects."""

    # Alias metadata files may use any of the standard meta extensions.
    ALLOWED_EXTENSIONS = ALLOWED_EXTS

    def register_from_packs(self, base_dirs):
        """
        Discover all the packs in the provided directory and register aliases from all of the
        discovered packs.

        :return: Number of aliases registered.
        :rtype: ``int``
        """
        # Register packs first
        self.register_packs(base_dirs=base_dirs)

        registered_count = 0
        content = self._pack_loader.get_content(base_dirs=base_dirs,
                                                content_type='aliases')

        for pack, aliases_dir in six.iteritems(content):
            if not aliases_dir:
                LOG.debug('Pack %s does not contain aliases.', pack)
                continue
            try:
                LOG.debug('Registering aliases from pack %s:, dir: %s', pack, aliases_dir)
                aliases = self._get_aliases_from_pack(aliases_dir)
                count = self._register_aliases_from_pack(pack=pack, aliases=aliases)
                registered_count += count
            except Exception as e:
                # With fail_on_failure the first failing pack aborts the
                # whole run; otherwise log and continue with the next pack.
                if self._fail_on_failure:
                    raise e

                LOG.exception('Failed registering all aliases from pack: %s', aliases_dir)

        return registered_count

    def register_from_pack(self, pack_dir):
        """
        Register all the aliases from the provided pack.

        :return: Number of aliases registered.
        :rtype: ``int``
        """
        # Normalize away a trailing slash so os.path.split yields the pack name.
        pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
        _, pack = os.path.split(pack_dir)
        aliases_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,
                                                              content_type='aliases')

        # Register pack first
        self.register_pack(pack_name=pack, pack_dir=pack_dir)

        registered_count = 0
        if not aliases_dir:
            return registered_count

        LOG.debug('Registering aliases from pack %s:, dir: %s', pack, aliases_dir)

        try:
            aliases = self._get_aliases_from_pack(aliases_dir=aliases_dir)
            registered_count = self._register_aliases_from_pack(pack=pack, aliases=aliases)
        except Exception as e:
            if self._fail_on_failure:
                raise e

            LOG.exception('Failed registering all aliases from pack: %s', aliases_dir)
            return registered_count

        return registered_count

    def _get_aliases_from_pack(self, aliases_dir):
        # List alias metadata files in the pack's aliases directory.
        return self.get_resources_from_pack(resources_dir=aliases_dir)

    def _get_action_alias_db(self, pack, action_alias):
        """
        Retrieve ActionAliasDB object.
        """
        content = self._meta_loader.load(action_alias)
        pack_field = content.get('pack', None)
        # Default the metadata's pack field to the pack being registered.
        if not pack_field:
            content['pack'] = pack
            pack_field = pack
        if pack_field != pack:
            raise Exception('Model is in pack "%s" but field "pack" is different: %s' %
                            (pack, pack_field))

        # Validate the metadata against the API schema before persisting.
        action_alias_api = ActionAliasAPI(**content)
        action_alias_api.validate()
        action_alias_db = ActionAliasAPI.to_model(action_alias_api)

        return action_alias_db

    def _register_action_alias(self, pack, action_alias):
        action_alias_db = self._get_action_alias_db(pack=pack,
                                                    action_alias=action_alias)

        # Reuse the existing DB id (update) when an alias of the same name
        # already exists; otherwise a new document is created.
        try:
            action_alias_db.id = ActionAlias.get_by_name(action_alias_db.name).id
        except StackStormDBObjectNotFoundError:
            LOG.debug('ActionAlias %s not found. Creating new one.', action_alias)

        try:
            action_alias_db = ActionAlias.add_or_update(action_alias_db)
            extra = {'action_alias_db': action_alias_db}
            # Note: this audit line says "updated" even when the alias was
            # just created (add_or_update covers both cases).
            LOG.audit('Action alias updated. Action alias %s from %s.', action_alias_db,
                      action_alias, extra=extra)
        except Exception:
            LOG.exception('Failed to create action alias %s.', action_alias_db.name)
            raise

    def _register_aliases_from_pack(self, pack, aliases):
        registered_count = 0

        for alias in aliases:
            try:
                LOG.debug('Loading alias from %s.', alias)
                self._register_action_alias(pack, alias)
            except Exception as e:
                if self._fail_on_failure:
                    msg = ('Failed to register alias "%s" from pack "%s": %s' % (alias, pack,
                                                                                 str(e)))
                    raise ValueError(msg)

                LOG.exception('Unable to register alias: %s', alias)
                continue
            else:
                registered_count += 1

        return registered_count
def register_aliases(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
                     fail_on_failure=False):
    """Register aliases from one pack (``pack_dir``) or from all packs found
    on ``packs_base_paths`` (defaults to the configured base paths).

    :return: Number of aliases registered.
    :rtype: ``int``
    """
    if packs_base_paths:
        assert isinstance(packs_base_paths, list)

    base_dirs = packs_base_paths or content_utils.get_packs_base_paths()

    registrar = AliasesRegistrar(use_pack_cache=use_pack_cache,
                                 fail_on_failure=fail_on_failure)

    if pack_dir:
        return registrar.register_from_pack(pack_dir=pack_dir)
    return registrar.register_from_packs(base_dirs=base_dirs)
|
{
"content_hash": "aa6f4a46359e496c291e47d6867202fb",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 93,
"avg_line_length": 35.66867469879518,
"alnum_prop": 0.5884141192366155,
"repo_name": "tonybaloney/st2",
"id": "d0857a9752d894565e441e531ce6074ab20fd98e",
"size": "6701",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2common/st2common/bootstrap/aliasesregistrar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "46066"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4278891"
},
{
"name": "Shell",
"bytes": "47687"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
}
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class DepositSwitchGetRequest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-restricted attributes on this model.
    allowed_values = {
    }
    # No length/range/regex validations on this model.
    validations = {
    }
    # Undeclared properties are rejected (None = no additional properties).
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'deposit_switch_id': (str,),  # noqa: E501
            'client_id': (str,),  # noqa: E501
            'secret': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # This model participates in no polymorphic (discriminated) schema.
        return None
    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        'deposit_switch_id': 'deposit_switch_id',  # noqa: E501
        'client_id': 'client_id',  # noqa: E501
        'secret': 'secret',  # noqa: E501
    }
    _composed_schemas = {}
    # Internal bookkeeping attributes that must never be treated as
    # model properties when (de)serializing.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, deposit_switch_id, *args, **kwargs):  # noqa: E501
        """DepositSwitchGetRequest - a model defined in OpenAPI
        Args:
            deposit_switch_id (str): The ID of the deposit switch
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            client_id (str): Your Plaid API `client_id`. The `client_id` is required and may be provided either in the `PLAID-CLIENT-ID` header or as part of a request body.. [optional]  # noqa: E501
            secret (str): Your Plaid API `secret`. The `secret` is required and may be provided either in the `PLAID-SECRET` header or as part of a request body.. [optional]  # noqa: E501
        """
        # Pop framework-control kwargs first so only model properties remain.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Only the declared positional argument is accepted; anything else
        # positional is a caller error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.deposit_switch_id = deposit_switch_id
        # Remaining kwargs are optional model properties; unknown keys are
        # dropped only when the configuration explicitly asks for it.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
{
"content_hash": "bc871dc360280a724ebea16d02baecce",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 199,
"avg_line_length": 41.23699421965318,
"alnum_prop": 0.5618166526492852,
"repo_name": "plaid/plaid-python",
"id": "91f21f01dcfbfe925c2289f676d9cc5d830a0b14",
"size": "7134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/deposit_switch_get_request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
}
|
class ScrapyBlPipeline(object):
    """No-op Scrapy item pipeline.

    Placeholder stage that hands every scraped item through unchanged so
    downstream pipelines (or the feed exporter) still receive it.
    """

    def process_item(self, item, spider):
        """Return *item* untouched; *spider* is unused."""
        return item
|
{
"content_hash": "3581a6a42869eb9e985f15775850658d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6914893617021277,
"repo_name": "mblaauw/scrapy_bl",
"id": "ef369d4fdffdcb203d70f784b081b8b94a1b1323",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapy_bl/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "860"
}
],
"symlink_target": ""
}
|
"""
Tests for the fitting function classes defined in `plasmapy.analysis.fit_functions`.
"""
import numpy as np
import pytest
from abc import ABC, abstractmethod
from contextlib import nullcontext as does_not_raise
import plasmapy.analysis.fit_functions as ffuncs
class TestAbstractFitFunction:
    """
    Tests for fit function class `plasmapy.analysis.fit_functions.AbstractFitFunction`.
    Notes
    -----
    `AbstractFitFunction` cannot be instantiated directly, so only its
    class-level interface is checked here; the full behavior is exercised
    through the concrete subclasses via `BaseFFTests`.
    """

    # class under test
    ff_class = ffuncs.AbstractFitFunction

    def test_is_abc(self):
        """The fit-function base class must be an abstract base class."""
        assert issubclass(self.ff_class, ABC)

    @pytest.mark.parametrize(
        "name, isproperty",
        [
            ("__call__", False),
            ("curve_fit", False),
            ("curve_fit_results", True),
            ("func", False),
            ("func_err", False),
            ("latex_str", True),
            ("param_errors", True),
            ("param_names", True),
            ("params", True),
            ("rsq", True),
            ("root_solve", False),
        ],
    )
    def test_methods(self, name, isproperty):
        """Every required attribute exists; flagged ones are properties."""
        assert hasattr(self.ff_class, name)
        if not isproperty:
            return
        assert isinstance(getattr(self.ff_class, name), property)

    @pytest.mark.parametrize(
        "name",
        ["__str__", "func", "func_err", "latex_str"],
    )
    def test_abstractmethods(self, name):
        """These members must be declared abstract on the base class."""
        assert name in self.ff_class.__abstractmethods__
class BaseFFTests(ABC):
    """Common test suite inherited by every concrete fit-function test class.

    Subclasses supply the class under test (`ff_class`), a reference
    implementation of the model (`func`) and its error propagation
    (`func_err`), plus the `_test_*` expected values; the methods here then
    exercise the full `AbstractFitFunction` contract against them.
    """

    # The abstract base class every fit function must inherit from.
    abc = ffuncs.AbstractFitFunction

    # Expected values each concrete subclass must fill in; left as
    # NotImplemented here so a forgotten override fails loudly.
    _test_params = NotImplemented  # type: tuple
    _test_param_errors = NotImplemented  # type: tuple
    _test_param_names = NotImplemented  # type: tuple
    _test_latex_str = NotImplemented  # type: str
    _test__str__ = NotImplemented  # type: str

    @property
    @abstractmethod
    def ff_class(self):
        """Fit function class to be tested."""
        ...

    @staticmethod
    @abstractmethod
    def func(x, *args):
        """
        Formula/Function that the fit function class is suppose to be modeling.
        This is used to test the fit function `func` method.
        """
        ...

    @abstractmethod
    def func_err(self, x, params, param_errors, x_err=None):
        """
        Function representing the propagation of error associated with fit function
        model. This is used to test the fit function `func_err` method.
        """
        ...

    def test_inheritance(self):
        """Test inheritance from `AbstractFitFunction`."""
        assert issubclass(self.ff_class, self.abc)

    def test_iscallable(self):
        """Test instantiated fit function is callable."""
        assert callable(self.ff_class())

    def test_repr(self):
        """Test __repr__."""
        ff_obj = self.ff_class()
        assert ff_obj.__repr__() == f"{ff_obj.__str__()} {ff_obj.__class__}"

    @pytest.mark.parametrize(
        "name, isproperty",
        [
            ("__call__", False),
            ("_param_names", False),
            ("curve_fit", False),
            ("curve_fit_results", True),
            ("func", False),
            ("func_err", False),
            ("latex_str", True),
            ("param_errors", True),
            ("param_names", True),
            ("params", True),
            ("rsq", True),
            ("root_solve", False),
        ],
    )
    def test_methods(self, name, isproperty):
        """Test attribute/method/property existence."""
        assert hasattr(self.ff_class, name)
        if isproperty:
            assert isinstance(getattr(self.ff_class, name), property)
        # A concrete subclass that forgot to define '_param_names' would
        # otherwise pass the hasattr check with the NotImplemented default.
        if name == "_param_names" and self.ff_class._param_names == NotImplemented:
            pytest.fail(
                f"{self.ff_class} class attribute '_param_names' needs to "
                f" be defined as a tuple of strings representing the names of "
                f"the fit parameters."
            )

    @pytest.mark.parametrize(
        "name, value_ref_name",
        [
            ("param_names", "_test_param_names"),
            ("latex_str", "_test_latex_str"),
            ("__str__", "_test__str__"),
        ],
    )
    def test_abstractmethod_values(self, name, value_ref_name):
        """Test value of all abstract methods, except `func` and `func_err`."""
        ff_obj = self.ff_class()
        value = getattr(ff_obj, name)
        # __str__ is a method while the others are attributes/properties.
        if callable(value):
            value = value()
        exp_value = getattr(self, value_ref_name)
        if exp_value == NotImplemented:
            pytest.fail(
                f"The expected value for abstract method {name} is not "
                f"implemented/defined in the test class attribute {value_ref_name}."
            )
        assert value == exp_value

    @pytest.mark.parametrize(
        "params, param_errors, with_condition",
        [
            # "default" -> use the subclass's valid test values;
            # "default+" -> valid values plus one extra (wrong length).
            (None, None, does_not_raise()),
            ("default", "default", does_not_raise()),
            (5, None, pytest.raises(ValueError)),
            (None, 5, pytest.raises(ValueError)),
            (["wrong"], None, pytest.raises(ValueError)),
            (None, ["wrong"], pytest.raises(ValueError)),
            ("default+", None, pytest.raises(ValueError)),
            (None, "default+", pytest.raises(ValueError)),
        ],
    )
    def test_instantiation(self, params, param_errors, with_condition):
        """Test behavior of fit function class instantiation."""
        if params == "default":
            params = self._test_params
        elif params == "default+":
            params = self._test_params
            params = list(params)
            params.append(5)
        if param_errors == "default":
            param_errors = self._test_param_errors
        elif param_errors == "default+":
            param_errors = self._test_param_errors
            param_errors = list(param_errors)
            param_errors.append(5)
        with with_condition:
            ff_obj = self.ff_class(params=params, param_errors=param_errors)
            assert ff_obj.curve_fit_results is None
            assert ff_obj.rsq is None
            if params is None:
                assert ff_obj.params is None
            else:
                assert ff_obj.params == ff_obj.FitParamTuple(*params)
            if param_errors is None:
                assert ff_obj.param_errors is None
            else:
                assert ff_obj.param_errors == ff_obj.FitParamTuple(*param_errors)

    def test_param_namedtuple(self):
        """
        Test that the namedtuple used for `params` and `param_errors` is
        constructed correctly.
        """
        ff_obj = self.ff_class()
        assert hasattr(ff_obj, "FitParamTuple")
        assert issubclass(ff_obj.FitParamTuple, tuple)
        for name in ff_obj.param_names:
            assert hasattr(ff_obj.FitParamTuple, name)

    def test_param_names(self):
        """Test attribute `param_names` is defined correctly."""
        ff_obj = self.ff_class()
        assert isinstance(ff_obj.param_names, tuple)
        assert len(ff_obj.param_names) != 0
        assert all(isinstance(val, str) for val in ff_obj.param_names)

    @pytest.mark.parametrize(
        "params, extra, with_condition",
        [
            # A single-element list is broadcast below to the right length.
            ([2], None, does_not_raise()),
            (5, None, pytest.raises(ValueError)),
            (["wrong"], None, pytest.raises(ValueError)),
            ([3], 10, pytest.raises(ValueError)),
        ],
    )
    def test_params_setting(self, params, extra, with_condition):
        """Tests for property setting of attribute `params`."""
        ff_obj = self.ff_class()
        if isinstance(params, list) and len(params) == 1:
            params = params * len(ff_obj.param_names)
        if extra is not None:
            # appending an extra entry makes the length wrong on purpose
            params.append(extra)
        with with_condition:
            ff_obj.params = params
            assert ff_obj.params == ff_obj.FitParamTuple(*params)

    @pytest.mark.parametrize(
        "param_errors, extra, with_condition",
        [
            ([2], None, does_not_raise()),
            (5, None, pytest.raises(ValueError)),
            (["wrong"], None, pytest.raises(ValueError)),
            ([3], 10, pytest.raises(ValueError)),
        ],
    )
    def test_param_errors_setting(self, param_errors, extra, with_condition):
        """Tests for property setting of attribute `param_errors`."""
        ff_obj = self.ff_class()
        if isinstance(param_errors, list) and len(param_errors) == 1:
            param_errors = param_errors * len(ff_obj.param_names)
        if extra is not None:
            param_errors.append(extra)
        with with_condition:
            ff_obj.param_errors = param_errors
            assert ff_obj.param_errors == ff_obj.FitParamTuple(*param_errors)

    @pytest.mark.parametrize(
        "x, replace_a_param, with_condition",
        [
            (0, None, does_not_raise()),
            (1.0, None, does_not_raise()),
            (np.linspace(10, 30, num=20), None, does_not_raise()),
            ([4, 5, 6], None, does_not_raise()),
            ("hello", None, pytest.raises(TypeError)),
            (5, "hello", pytest.raises(TypeError)),
        ],
    )
    def test_func(self, x, replace_a_param, with_condition):
        """Test the `func` method."""
        ff_obj = self.ff_class()
        params = self._test_params
        if replace_a_param is not None:
            # corrupt the first fit parameter to provoke a TypeError
            params = list(params)
            params[0] = replace_a_param
        with with_condition:
            y = ff_obj.func(x, *params)
            if isinstance(x, list):
                x = np.array(x)
            y_expected = self.func(x, *params)
            assert np.allclose(y, y_expected)

    @pytest.mark.parametrize(
        "x, kwargs, with_condition",
        [
            (0, {}, does_not_raise()),
            (1.0, {}, does_not_raise()),
            (np.linspace(10, 30, num=20), {}, does_not_raise()),
            ([4, 5, 6], {"x_err": 0.1, "rety": True}, does_not_raise()),
            ("hello", {}, pytest.raises(TypeError)),
            (5, {"x_err": "goodbye"}, pytest.raises(TypeError)),
            (5, {"x_err": [0.1, 0.1]}, pytest.raises(ValueError)),
        ],
    )
    def test_func_err(self, x, kwargs, with_condition):
        """Test the `func_err` method."""
        params = self._test_params
        param_errors = self._test_param_errors
        ff_obj = self.ff_class(params=params, param_errors=param_errors)
        with with_condition:
            results = ff_obj.func_err(x, **kwargs)
            # with rety=True the method also returns the model values
            if "rety" in kwargs and kwargs["rety"]:
                y_err, y = results
            else:
                y_err = results
                y = None
            x_err = kwargs["x_err"] if "x_err" in kwargs else None
            if isinstance(x, list):
                x = np.array(x)
            y_err_expected = self.func_err(x, params, param_errors, x_err=x_err)
            assert np.allclose(y_err, y_err_expected)
            if y is not None:
                assert np.allclose(y, self.func(x, *params))

    @pytest.mark.parametrize(
        "x, kwargs, with_condition",
        [
            (0, {}, does_not_raise()),
            (0, {"reterr": True}, does_not_raise()),
            (1.0, {}, does_not_raise()),
            (1.0, {"reterr": True}, does_not_raise()),
            (np.linspace(10, 30, num=20), {}, does_not_raise()),
            (np.linspace(10, 30, num=20), {"reterr": True}, does_not_raise()),
            ([4, 5, 6], {}, does_not_raise()),
            ([4, 5, 6], {"x_err": 0.05, "reterr": True}, does_not_raise()),
            ("hello", {}, pytest.raises(TypeError)),
            (5, {"x_err": [1, 2], "reterr": True}, pytest.raises(ValueError)),
        ],
    )
    def test_call(self, x, kwargs, with_condition):
        """Test __call__ behavior."""
        params = self._test_params
        param_errors = self._test_param_errors
        ff_obj = self.ff_class(params=params, param_errors=param_errors)
        reterr = kwargs["reterr"] if "reterr" in kwargs else False
        x_err = kwargs["x_err"] if "x_err" in kwargs else None
        with with_condition:
            results = ff_obj(x, **kwargs)
            # with reterr=True the call returns (y, y_err)
            if reterr:
                y = results[0]
                y_err = results[1]
            else:
                y = results
            if isinstance(x, list):
                x = np.array(x)
            y_expected = self.func(x, *params)
            assert np.allclose(y, y_expected)
            if reterr:
                y_err_expected = self.func_err(x, params, param_errors, x_err=x_err)
                assert np.allclose(y_err, y_err_expected)

    @abstractmethod
    def test_root_solve(self):
        # Root finding is model specific; each subclass provides its own test.
        ...

    def test_curve_fit(self):
        """Test the `curve_fit` method."""
        ff_obj = self.ff_class()
        # Fit against noiseless synthetic data generated from the reference
        # model, so the fit should recover the parameters exactly.
        xdata = np.linspace(-10, 10)
        ydata = self.func(xdata, *self._test_params)
        assert ff_obj.params is None
        assert ff_obj.param_errors is None
        assert ff_obj.rsq is None
        assert ff_obj.curve_fit_results is None
        ff_obj.curve_fit(xdata, ydata)
        assert ff_obj.curve_fit_results is not None
        assert np.isclose(ff_obj.rsq, 1.0)
        assert np.allclose(
            ff_obj.param_errors,
            tuple([0] * len(ff_obj.param_names)),
            atol=1.5e-8,
        )
        assert np.allclose(ff_obj.params, self._test_params)
class TestFFExponential(BaseFFTests):
    """
    Tests for fit function class `plasmapy.analysis.fit_functions.Exponential`.
    """

    ff_class = ffuncs.Exponential
    _test_params = (5.0, 1.0)
    _test_param_errors = (0.1, 0.1)
    _test_param_names = ("a", "alpha")
    _test_latex_str = r"a \, \exp(\alpha x)"
    _test__str__ = "f(x) = a exp(alpha x)"

    @staticmethod
    def func(x, a, alpha):
        # reference model: y = a * exp(alpha * x)
        return a * np.exp(alpha * x)

    def func_err(self, x, params, param_errors, x_err=None):
        """Reference error propagation for the exponential model."""
        a, alpha = params
        a_err, alpha_err = param_errors
        y = self.func(x, *params)
        # relative contributions of each fit parameter, in quadrature
        err = (a_err / a) ** 2 + (x * alpha_err) ** 2
        if x_err is not None:
            err = err + (alpha * x_err) ** 2
        return np.abs(y) * np.sqrt(err)

    def test_root_solve(self):
        """A pure exponential never crosses zero: root and error are NaN."""
        ff = self.ff_class(params=(1, 1), param_errors=(0, 0))
        root, err = ff.root_solve()
        assert np.isnan(root)
        assert np.isnan(err)
class TestFFExponentialPlusLinear(BaseFFTests):
    """
    Tests for fit function class
    `plasmapy.analysis.fit_functions.ExponentialPlusLinear`.
    """

    ff_class = ffuncs.ExponentialPlusLinear
    _test_params = (2.0, 1.0, 5.0, -10.0)
    _test_param_errors = (0.1, 0.1, 0.1, 0.1)
    _test_param_names = ("a", "alpha", "m", "b")
    _test_latex_str = r"a \, \exp(\alpha x) + m x + b"
    _test__str__ = "f(x) = a exp(alpha x) + m x + b"

    @staticmethod
    def func(x, a, alpha, m, b):
        # reference model: y = a * exp(alpha * x) + m * x + b
        return a * np.exp(alpha * x) + m * x + b

    def func_err(self, x, params, param_errors, x_err=None):
        """Reference error propagation, including the exp/linear cross term."""
        a, alpha, m, b = params
        a_err, alpha_err, m_err, b_err = param_errors
        exp_y = a * np.exp(alpha * x)
        # parameter contributions, summed in quadrature
        err = (
            (exp_y * a_err / a) ** 2
            + (exp_y * x * alpha_err) ** 2
            + (m_err * x) ** 2
            + b_err**2
        )
        if x_err is not None:
            # x-uncertainty couples the exponential and linear pieces
            x_term = (
                (exp_y * alpha * x_err) ** 2
                + (m * x_err) ** 2
                + 2 * a * alpha * m * np.exp(alpha * x) * (x_err**2)
            )
            err = err + x_term
        return np.sqrt(err)

    def test_root_solve(self):
        """Root must be found numerically; compare against a known value."""
        ff = self.ff_class(params=(5.0, 0.5, 1.0, 5.0), param_errors=(0, 0, 0, 0))
        root, err = ff.root_solve(-5)
        assert np.isclose(root, -5.345338)
        assert np.isnan(err)
class TestFFExponentialPlusOffset(BaseFFTests):
    """
    Tests for fit function class
    `plasmapy.analysis.fit_functions.ExponentialPlusOffset`.
    """

    ff_class = ffuncs.ExponentialPlusOffset
    _test_params = (2.0, 1.0, -10.0)
    _test_param_errors = (0.1, 0.1, 0.1)
    _test_param_names = ("a", "alpha", "b")
    _test_latex_str = r"a \, \exp(\alpha x) + b"
    _test__str__ = "f(x) = a exp(alpha x) + b"

    @staticmethod
    def func(x, a, alpha, b):
        # reference model: y = a * exp(alpha * x) + b
        return a * np.exp(alpha * x) + b

    def func_err(self, x, params, param_errors, x_err=None):
        """Reference error propagation for the exponential-plus-offset model."""
        a, alpha, b = params
        a_err, alpha_err, b_err = param_errors
        exp_y = a * np.exp(alpha * x)
        # parameter contributions, summed in quadrature
        err = (exp_y * a_err / a) ** 2 + (exp_y * x * alpha_err) ** 2 + b_err**2
        if x_err is not None:
            err = err + (exp_y * alpha * x_err) ** 2
        return np.sqrt(err)

    def test_root_solve(self):
        """Analytic root: x = ln(-b / a) / alpha, NaN when -b / a <= 0."""
        ff = self.ff_class(params=(3.0, 0.5, -5.0), param_errors=(0, 0, 0))
        root, err = ff.root_solve()
        assert root == np.log(5.0 / 3.0) / 0.5
        assert err == 0

        # a positive offset leaves no real root -> NaN plus a runtime warning
        ff.params = (3.0, 0.5, 5.0)
        with pytest.warns(RuntimeWarning, match="invalid value encountered in log"):
            root, err = ff.root_solve()
        assert np.isnan(root)
        assert np.isnan(err)
class TestFFLinear(BaseFFTests):
    """
    Tests for fit function class `plasmapy.analysis.fit_functions.Linear`.
    """

    ff_class = ffuncs.Linear
    _test_params = (5.0, 4.0)
    _test_param_errors = (0.1, 0.1)
    _test_param_names = ("m", "b")
    _test_latex_str = r"m x + b"
    _test__str__ = "f(x) = m x + b"

    @staticmethod
    def func(x, m, b):
        # reference model: y = m * x + b
        return m * x + b

    def func_err(self, x, params, param_errors, x_err=None):
        """Reference error propagation for the linear model."""
        m, b = params
        m_err, b_err = param_errors
        # parameter contributions, summed in quadrature
        err = (m_err * x) ** 2 + b_err**2
        if x_err is not None:
            err = err + (m * x_err) ** 2
        return np.sqrt(err)

    @pytest.mark.parametrize(
        "params, param_errors, root, root_err, conditional",
        [
            ((1, 1), (0, 0), -1, 0, does_not_raise()),
            (
                (5.0, 1.3),
                (0.1, 0.1),
                -1.3 / 5.0,
                np.abs(-1.3 / 5.0) * np.sqrt((0.1 / 5.0) ** 2 + (0.1 / 1.3) ** 2),
                does_not_raise(),
            ),
            ((0.3, 0.0), (0.1, 0.1), 0.0, np.abs(0.1 / 0.3), does_not_raise()),
            ((0.0, 1.0), (0.1, 0.1), np.nan, np.nan, pytest.warns(RuntimeWarning)),
        ],
    )
    def test_root_solve(self, params, param_errors, root, root_err, conditional):
        """Compare `root_solve` against the analytic root x = -b / m."""
        with conditional:
            ff = self.ff_class(params=params, param_errors=param_errors)
            got = ff.root_solve()

            root_is_nan = np.isnan(root)
            err_is_nan = np.isnan(root_err)
            if root_is_nan and err_is_nan:
                assert np.all(np.isnan(got))
            elif root_is_nan:
                assert np.isnan(got[0])
                assert np.isclose(got[1], root_err)
            elif err_is_nan:
                assert np.isclose(got[0], root)
                assert np.isnan(got[1])
            else:
                assert np.allclose(got, [root, root_err])
|
{
"content_hash": "f26e0533af74ea1abd77b02cd1bbcddd",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 87,
"avg_line_length": 32.72139303482587,
"alnum_prop": 0.5320054736201916,
"repo_name": "StanczakDominik/PlasmaPy",
"id": "a5da598f39dc6aab09d07673059e13917c40435a",
"size": "19731",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "plasmapy/analysis/tests/test_fit_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1285"
},
{
"name": "Python",
"bytes": "2148684"
}
],
"symlink_target": ""
}
|
"""
fritzmonitor.py
Implement a tkinter-based grafic interface to view basic status- and
traffic-informations.
"""
__version__ = '0.1.0'
import argparse
try:
# python 2
import Tkinter as tk
import tkFont as tkfont
except ImportError:
# python 3
import tkinter as tk
import tkinter.font as tkfont
import fritzconnection
from . import fritzstatus
from . import fritztools
class MeterRectangle(object):
    """
    A Tkinter meter rectangle: a bar with a background color that can be
    filled up from left to right (or bottom edge down, when not
    horizontal) with a second color to indicate a fraction.
    This object must have a canvas-widget as parent.
    """

    def __init__(self, canvas, xpos, ypos, meter_width, meter_height,
                 fill="#0080FF", background="#E6E6E6", horizontal=True,
                 **kwargs):
        self.canvas = canvas
        self.xpos = xpos
        self.ypos = ypos
        self.width = meter_width
        self.height = meter_height
        self.horizontal = horizontal
        # static backdrop covering the whole meter area
        self.container = canvas.create_rectangle(
            xpos, ypos,
            xpos + meter_width, ypos + meter_height,
            fill=background, outline=background, **kwargs)
        # indicator rectangle, created with zero extent (empty meter)
        self.meter = canvas.create_rectangle(
            xpos, ypos,
            xpos, ypos + meter_height,
            fill=fill, outline=fill, **kwargs)

    def set_fraction(self, value):
        """Set the meter indicator. Value should be between 0 and 1."""
        # negative input is treated by magnitude; anything above 1 is clamped
        value = min(abs(value), 1)
        if self.horizontal:
            width, height = int(self.width * value), self.height
        else:
            width, height = self.width, int(self.height * value)
        self.canvas.coords(self.meter, self.xpos, self.ypos,
                           self.xpos + width, self.ypos + height)
class FritzMonitor(tk.Frame):
    """Tkinter frame showing FritzBox connection state and traffic meters.

    Polls a `fritzstatus.FritzStatus` instance once per second and updates
    the labels and meter rectangles accordingly.
    """

    def __init__(self, master=None,
                 address=fritzconnection.FRITZ_IP_ADDRESS,
                 port=fritzconnection.FRITZ_TCP_PORT):
        tk.Frame.__init__(self, master)
        self.status = fritzstatus.FritzStatus(address=address, port=port)
        # NOTE(review): assumes max_byte_rate yields (upstream, downstream)
        # in that order — confirm against fritzstatus.
        self.max_upstream, self.max_downstream = self.status.max_byte_rate
        # StringVars bound to the labels created in create_widgets()
        self.max_stream_rate = tk.StringVar()
        self.connection_state = tk.StringVar()
        self.ip = tk.StringVar()
        self.uptime = tk.StringVar()
        self.traffic_info = tk.StringVar()
        # baseline counters: traffic is reported relative to app start
        self.bytes_received = self.status.bytes_received
        self.bytes_sent = self.status.bytes_sent
        self.grid()
        self.create_widgets()
        # kicks off the periodic self.after() update loop
        self.update_status()

    def get_stream_rate_str(self):
        """Return a human readable 'Down: …, Up: …' maximum-rate string."""
        up, down = self.status.str_max_bit_rate
        return "Down: %s, Up: %s" % (down, up)

    def update_connection_status(self):
        """Color the connection label green/'up' or red/'down'."""
        if self.status.is_connected:
            color, text = 'green', 'up'
        else:
            color, text = 'red', 'down'
        self.connection_label.config(fg=color)
        self.connection_state.set(text)

    def update_traffic_info(self):
        """Show bytes received/sent since the application was started."""
        received = fritztools.format_num(
            self.status.bytes_received - self.bytes_received)
        sent = fritztools.format_num(
            self.status.bytes_sent - self.bytes_sent)
        text = "received: %s, send: %s" % (received, sent)
        self.traffic_info.set(text)

    def update_status(self):
        """Update status informations in tkinter window."""
        try:
            # all this may fail if the connection to the fritzbox is down
            self.update_connection_status()
            self.max_stream_rate.set(self.get_stream_rate_str())
            self.ip.set(self.status.external_ip)
            self.uptime.set(self.status.str_uptime)
            upstream, downstream = self.status.transmission_rate
        except IOError:
            # here we inform the user about being unable to
            # update the status informations
            pass
        else:
            # max_downstream and max_upstream may be zero if the
            # fritzbox is configured as ip-client.
            if self.max_downstream > 0:
                self.in_meter.set_fraction(
                    1.0 * downstream / self.max_downstream)
            if self.max_upstream > 0:
                self.out_meter.set_fraction(1.0 * upstream / self.max_upstream)
            self.update_traffic_info()
        # reschedule; the poll loop keeps running even after an IOError
        self.after(1000, self.update_status)

    def create_widgets(self):
        """Build all widget groups, top to bottom."""
        self.place_header()
        self.place_meter()
        self.place_traffic_info()
        self.place_connection_info()
        self.place_buttons()

    def place_header(self):
        """Model name, connection state and maximum stream rate."""
        tk.Label(self, text="%s:" % self.status.modelname).grid(
            row=0, column=0, sticky=tk.NW, padx=5)
        self.connection_label = tk.Label(self,
            textvariable=self.connection_state, fg='red')
        self.connection_label.grid(row=0, column=1)
        tk.Label(self, textvariable=self.max_stream_rate).grid(
            row=1, column=0, sticky=tk.W, padx=5)

    def place_traffic_info(self):
        """Monospaced label with cumulative traffic since start."""
        tk.Label(self, textvariable=self.traffic_info,
                 font=tkfont.Font(family='courier', size=12),
                 fg='grey',
                 ).grid(row=2, column=0, columnspan=2,
                        padx=5, pady=5, sticky=tk.W)

    def place_meter(self):
        """Canvas with the two traffic meter bars (in: blue, out: red)."""
        pane = tk.Canvas(self, height=50)
        pane.grid(row=3, column=0, columnspan=2, sticky=tk.NW, pady=10)
        self.in_meter = MeterRectangle(pane, 10, 10, 240, 12)
        self.out_meter = MeterRectangle(
            pane, 10, 30, 240, 12, fill="#FF6666")

    def place_connection_info(self):
        """External IP and uptime labels."""
        tk.Label(self, textvariable=self.ip).grid(
            row=4, column=0, sticky=tk.W, padx=5)
        tk.Label(self, textvariable=self.uptime).grid(row=4, column=1)

    def place_buttons(self):
        """Reconnect and Quit buttons."""
        tk.Button(self, text='Reconnect', command=self.status.reconnect).grid(
            row=5, column=0, sticky=tk.W, padx=10)
        tk.Button(self, text='Quit', command=self.quit).grid(row=5, column=1)
# ---------------------------------------------------------
# cli-section:
# ---------------------------------------------------------
def _get_cli_arguments():
    """Parse command line options (FritzBox address and port)."""
    parser = argparse.ArgumentParser(description='FritzBox Monitor')
    parser.add_argument(
        '-i', '--ip-address',
        nargs='?', default=fritzconnection.FRITZ_IP_ADDRESS,
        dest='address',
        help='ip-address of the FritzBox to connect to. '
             'Default: %s' % fritzconnection.FRITZ_IP_ADDRESS)
    parser.add_argument(
        '-p', '--port',
        nargs='?', default=fritzconnection.FRITZ_TCP_PORT,
        dest='port',
        help='port of the FritzBox to connect to. '
             'Default: %s' % fritzconnection.FRITZ_TCP_PORT)
    return parser.parse_args()
if __name__ == '__main__':
    # Stand-alone entry point: read CLI arguments, build the monitor frame
    # and hand control to the tkinter main loop.
    arguments = _get_cli_arguments()
    app = FritzMonitor(address=arguments.address, port=arguments.port)
    app.master.title('FritzMonitor')
    app.mainloop()
|
{
"content_hash": "20a3363eb016c497df0f357d48cc3a90",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 79,
"avg_line_length": 38.23857868020305,
"alnum_prop": 0.5455993628036638,
"repo_name": "lukasklein/fritzconnection",
"id": "c2519bdee9525df3f475e1085223f42c824e611f",
"size": "7580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fritzconnection/fritzmonitor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45501"
}
],
"symlink_target": ""
}
|
import os.path
import re
from collections import namedtuple
from uuid import UUID
from Common import *
from CommandExecutor import *
from BekUtil import *
from DiskUtil import *
from EncryptionConfig import *
class OSEncryptionState(object):
    """Base class for one step of the OS-disk encryption state machine.

    Each state persists its completion as a marker file under the
    encryption environment's markers path; `should_enter`/`should_exit`
    consult that marker. The constructor locates the root and boot block
    devices for the machine (LVM, plain, or already-encrypted layouts).

    NOTE(review): this module is Python-2-only code — it uses the `file()`
    builtin and the 'string_escape' codec.
    """

    def __init__(self, state_name, context):
        super(OSEncryptionState, self).__init__()
        self.state_name = state_name
        self.context = context
        self.state_executed = False
        # marker file whose existence records that this state has completed
        self.state_marker = os.path.join(self.context.encryption_environment.os_encryption_markers_path, self.state_name)
        self.command_executor = CommandExecutor(self.context.logger)
        self.disk_util = DiskUtil(hutil=self.context.hutil,
                                  patching=self.context.distro_patcher,
                                  logger=self.context.logger,
                                  encryption_environment=self.context.encryption_environment)
        self.bek_util = BekUtil(disk_util=self.disk_util,
                                logger=self.context.logger)
        self.encryption_config = EncryptionConfig(encryption_environment=self.context.encryption_environment,
                                                  logger=self.context.logger)
        # When running from an in-memory root, the real root is at /oldroot.
        rootfs_mountpoint = '/'
        if self._is_in_memfs_root():
            rootfs_mountpoint = '/oldroot'
        rootfs_sdx_path = self._get_fs_partition(rootfs_mountpoint)[0]
        # /proc/mounts may report "none" as the device; fall back to fstab.
        if rootfs_sdx_path == "none":
            self.context.logger.log("rootfs_sdx_path is none, parsing UUID from fstab")
            rootfs_sdx_path = self._parse_uuid_from_fstab('/')
            self.context.logger.log("rootfs_uuid: {0}".format(rootfs_sdx_path))
        # Resolve UUID references to a concrete /dev/sdX style path.
        if rootfs_sdx_path and (rootfs_sdx_path.startswith("/dev/disk/by-uuid/") or self._is_uuid(rootfs_sdx_path)):
            rootfs_sdx_path = self.disk_util.query_dev_sdx_path_by_uuid(rootfs_sdx_path)
        self.context.logger.log("rootfs_sdx_path: {0}".format(rootfs_sdx_path))
        self.rootfs_disk = None
        self.rootfs_block_device = None
        self.bootfs_block_device = None
        if self.disk_util.is_os_disk_lvm():
            # LVM layout: find the physical volume backing "rootvg".
            proc_comm = ProcessCommunicator()
            self.command_executor.Execute('pvs', True, communicator=proc_comm)
            for line in proc_comm.stdout.split("\n"):
                if "rootvg" in line:
                    self.rootfs_block_device = line.strip().split()[0]
                    self.rootfs_disk = self.rootfs_block_device[:-1]
                    # NOTE(review): assumes boot is partition 2 on that disk.
                    self.bootfs_block_device = self.rootfs_disk + '2'
        elif not rootfs_sdx_path:
            # Nothing discoverable — fall back to the conventional layout.
            self.rootfs_disk = '/dev/sda'
            self.rootfs_block_device = '/dev/sda2'
            self.bootfs_block_device = '/dev/sda1'
        elif rootfs_sdx_path == '/dev/mapper/osencrypt' or rootfs_sdx_path.startswith('/dev/dm-'):
            # Root is already behind the dm-crypt mapping.
            self.rootfs_block_device = '/dev/mapper/osencrypt'
            bootfs_uuid = self._parse_uuid_from_fstab('/boot')
            self.context.logger.log("bootfs_uuid: {0}".format(bootfs_uuid))
            self.bootfs_block_device = self.disk_util.query_dev_sdx_path_by_uuid(bootfs_uuid)
        else:
            # Plain layout: translate to a stable /dev/disk/by-id path and
            # derive disk + boot partition from the "-part" suffix.
            self.rootfs_block_device = self.disk_util.query_dev_id_path_by_sdx_path(rootfs_sdx_path)
            if not self.rootfs_block_device.startswith('/dev/disk/by-id/'):
                self.context.logger.log("rootfs_block_device: {0}".format(self.rootfs_block_device))
                raise Exception("Could not find rootfs block device")
            self.rootfs_disk = self.rootfs_block_device[:self.rootfs_block_device.index("-part")]
            self.bootfs_block_device = self.rootfs_disk + "-part2"
            # Heuristic: the boot partition is the smaller of the two;
            # swap if the initial guess got them backwards.
            if self._get_block_device_size(self.bootfs_block_device) > self._get_block_device_size(self.rootfs_block_device):
                self.context.logger.log("Swapping partition identifiers for rootfs and bootfs")
                self.rootfs_block_device, self.bootfs_block_device = self.bootfs_block_device, self.rootfs_block_device
        self.context.logger.log("rootfs_disk: {0}".format(self.rootfs_disk))
        self.context.logger.log("rootfs_block_device: {0}".format(self.rootfs_block_device))
        self.context.logger.log("bootfs_block_device: {0}".format(self.bootfs_block_device))

    def should_enter(self):
        """Return True when this state has not yet run (no marker file)."""
        self.context.logger.log("OSEncryptionState.should_enter() called for {0}".format(self.state_name))
        if self.state_executed:
            self.context.logger.log("State {0} has already executed, not entering".format(self.state_name))
            return False
        if not os.path.exists(self.state_marker):
            self.context.logger.log("State marker {0} does not exist, state {1} can be entered".format(self.state_marker,
                                                                                                       self.state_name))
            return True
        else:
            self.context.logger.log("State marker {0} exists, state {1} has already executed".format(self.state_marker,
                                                                                                     self.state_name))
            return False

    def should_exit(self):
        """Create the marker file (if absent) and flag the state executed."""
        self.context.logger.log("OSEncryptionState.should_exit() called for {0}".format(self.state_name))
        if not os.path.exists(self.state_marker):
            self.disk_util.make_sure_path_exists(self.context.encryption_environment.os_encryption_markers_path)
            self.context.logger.log("Creating state marker {0}".format(self.state_marker))
            self.disk_util.touch_file(self.state_marker)
        self.state_executed = True
        self.context.logger.log("state_executed for {0}: {1}".format(self.state_name, self.state_executed))
        return self.state_executed

    def _get_fs_partition(self, fs):
        """Return the (device, mountpoint, fstype) tuple for mountpoint *fs*.

        Scans /proc/mounts and keeps the LAST matching entry (later mounts
        shadow earlier ones). Python 2 only: `file()` / 'string_escape'.
        """
        result = None
        dev = os.lstat(fs).st_dev
        for line in file('/proc/mounts'):
            line = [s.decode('string_escape') for s in line.split()[:3]]
            if dev == os.lstat(line[1]).st_dev:
                result = tuple(line)
        return result

    def _is_in_memfs_root(self):
        """Return True when the current root filesystem is a tmpfs."""
        mounts = file('/proc/mounts', 'r').read()
        return bool(re.search(r'/\s+tmpfs', mounts))

    def _parse_uuid_from_fstab(self, mountpoint):
        """Return the UUID that /etc/fstab maps to *mountpoint*, or None."""
        contents = file('/etc/fstab', 'r').read()
        matches = re.findall(r'UUID=(.*?)\s+{0}\s+'.format(mountpoint), contents)
        if matches:
            return matches[0]

    def _get_block_device_size(self, dev):
        """Return the size of block device *dev* in bytes (0 if missing)."""
        if not os.path.exists(dev):
            return 0
        proc_comm = ProcessCommunicator()
        self.command_executor.Execute('blockdev --getsize64 {0}'.format(dev),
                                      raise_exception_on_failure=True,
                                      communicator=proc_comm)
        return int(proc_comm.stdout.strip())

    def _is_uuid(self, s):
        """Return True when *s* parses as a UUID."""
        try:
            UUID(s)
        except:
            return False
        else:
            return True
# Immutable bundle of the shared services every OS-encryption state needs:
# the handler utility, the distro patcher, the logger, and the encryption
# environment (paths, markers, etc.).
OSEncryptionStateContext = namedtuple('OSEncryptionStateContext',
                                      ['hutil',
                                       'distro_patcher',
                                       'logger',
                                       'encryption_environment'])
|
{
"content_hash": "b03cf945837954290d87926828a57551",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 125,
"avg_line_length": 44.412121212121214,
"alnum_prop": 0.5832423580786026,
"repo_name": "varunkumta/azure-linux-extensions",
"id": "c0651cc0d5ebb1294fe5030b129498a453bb793f",
"size": "7989",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "VMEncryption/main/oscrypto/OSEncryptionState.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39379"
},
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Makefile",
"bytes": "4033"
},
{
"name": "PowerShell",
"bytes": "24124"
},
{
"name": "Python",
"bytes": "3893505"
},
{
"name": "Shell",
"bytes": "21864"
}
],
"symlink_target": ""
}
|
from unittest import mock
from nova import objects
from nova.scheduler import weights
from nova.scheduler.weights import affinity
from nova import test
from nova.tests.unit.scheduler import fakes
class SoftWeigherTestBase(test.NoDBTestCase):
    """Shared fixture for the soft-(anti-)affinity weigher tests.

    Subclasses populate ``self.weighers`` with the weigher under test.
    """
    def setUp(self):
        super(SoftWeigherTestBase, self).setUp()
        self.weight_handler = weights.HostWeightHandler()
        self.weighers = []
    def _get_weighed_host(self, hosts, policy, group='default'):
        """Weigh *hosts* for a server group with *policy*; return the winner."""
        if group == 'default':
            group_members = ['member1', 'member2', 'member3', 'member4',
                             'member5', 'member6', 'member7']
        else:
            group_members = ['othermember1', 'othermember2']
        spec = objects.RequestSpec(
            instance_group=objects.InstanceGroup(policy=policy,
                                                 members=group_members))
        weighed = self.weight_handler.get_weighed_objects(self.weighers,
                                                          hosts, spec)
        return weighed[0]
    def _get_all_hosts(self):
        """Build the four fake hosts shared by every test."""
        def _host(name, node, *instance_names):
            instances = dict((inst, mock.sentinel)
                             for inst in instance_names)
            return fakes.FakeHostState(name, node, {'instances': instances})
        return [
            _host('host1', 'node1', 'member1', 'instance13'),
            _host('host2', 'node2', 'member2', 'member3', 'member4',
                  'member5', 'othermember1', 'othermember2', 'instance14'),
            _host('host3', 'node3', 'instance15'),
            _host('host4', 'node4', 'member6', 'member7', 'instance16'),
        ]
    def _do_test(self, policy, expected_weight, expected_host,
                 group='default'):
        """Weigh all fake hosts and assert the winner's weight (and host)."""
        winner = self._get_weighed_host(self._get_all_hosts(), policy, group)
        self.assertEqual(expected_weight, winner.weight)
        if expected_host:
            self.assertEqual(expected_host, winner.obj.host)
class SoftAffinityWeigherTestCase(SoftWeigherTestBase):
    """Tests for ServerGroupSoftAffinityWeigher.

    host2 holds the most 'default' group members, so it should win whenever
    the multiplier is positive.
    """
    def setUp(self):
        super(SoftAffinityWeigherTestCase, self).setUp()
        self.weighers = [affinity.ServerGroupSoftAffinityWeigher()]
        self.softaffin_weigher = affinity.ServerGroupSoftAffinityWeigher()
    def test_soft_affinity_weight_multiplier_by_default(self):
        """Default multiplier (1.0): host2 wins with weight 1.0."""
        self._do_test(policy='soft-affinity',
                      expected_weight=1.0,
                      expected_host='host2')
    def test_soft_affinity_weight_multiplier_zero_value(self):
        """Multiplier 0.0 neutralizes the weigher: all hosts weigh 0."""
        # We do not know the host, all have same weight.
        self.flags(soft_affinity_weight_multiplier=0.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-affinity',
                      expected_weight=0.0,
                      expected_host=None)
    def test_soft_affinity_weight_multiplier_positive_value(self):
        """Multiplier 2.0 scales the winning weight to 2.0."""
        self.flags(soft_affinity_weight_multiplier=2.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-affinity',
                      expected_weight=2.0,
                      expected_host='host2')
    def test_soft_affinity_weight_multiplier(self):
        """Aggregate metadata overrides the config multiplier; with several
        aggregates the minimum metadata value wins."""
        self.flags(soft_affinity_weight_multiplier=0.0,
                   group='filter_scheduler')
        host_attr = {'instances': {'instance1': mock.sentinel}}
        host1 = fakes.FakeHostState('fake-host', 'node', host_attr)
        # By default, return the weight_multiplier configuration directly
        self.assertEqual(0.0, self.softaffin_weigher.weight_multiplier(host1))
        host1.aggregates = [
            objects.Aggregate(
                id=1,
                name='foo',
                hosts=['fake-host'],
                metadata={'soft_affinity_weight_multiplier': '2'},
            )]
        # read the weight multiplier from metadata to override the config
        self.assertEqual(2.0, self.softaffin_weigher.weight_multiplier(host1))
        host1.aggregates = [
            objects.Aggregate(
                id=1,
                name='foo',
                hosts=['fake-host'],
                metadata={'soft_affinity_weight_multiplier': '2'},
            ),
            objects.Aggregate(
                id=2,
                name='foo',
                hosts=['fake-host'],
                metadata={'soft_affinity_weight_multiplier': '1.5'},
            )]
        # If the host is in multiple aggs and there are conflict weight values
        # in the metadata, we will use the min value among them
        self.assertEqual(1.5, self.softaffin_weigher.weight_multiplier(host1))
    def test_host_with_agg(self):
        """An aggregate multiplier applies to the full weighing run too."""
        self.flags(soft_affinity_weight_multiplier=0.0,
                   group='filter_scheduler')
        hostinfo_list = self._get_all_hosts()
        aggs = [
            objects.Aggregate(
                id=1,
                name='foo',
                hosts=['fake-host'],
                metadata={'soft_affinity_weight_multiplier': '1.5'},
            )]
        for h in hostinfo_list:
            h.aggregates = aggs
        weighed_host = self._get_weighed_host(hostinfo_list,
                                              'soft-affinity')
        self.assertEqual(1.5, weighed_host.weight)
        self.assertEqual('host2', weighed_host.obj.host)
    def test_running_twice(self):
        """Run the weighing twice for different groups each run.

        The first run has a group with more members on the same host than the
        second run. In both cases, most members of their groups are on the
        same host => weight should be maximum (1 with default multiplier).
        """
        self._do_test(policy='soft-affinity',
                      expected_weight=1.0,
                      expected_host='host2')
        self._do_test(policy='soft-affinity',
                      expected_weight=1.0,
                      expected_host='host2',
                      group='other')
class SoftAntiAffinityWeigherTestCase(SoftWeigherTestBase):
    """Tests for ServerGroupSoftAntiAffinityWeigher.

    host3 holds no 'default' group members, so it should win whenever the
    multiplier is positive.
    """
    def setUp(self):
        super(SoftAntiAffinityWeigherTestCase, self).setUp()
        self.weighers = [affinity.ServerGroupSoftAntiAffinityWeigher()]
        self.antiaffin_weigher = affinity.ServerGroupSoftAntiAffinityWeigher()
    def test_soft_anti_affinity_weight_multiplier_by_default(self):
        """Default multiplier (1.0): host3 wins with weight 1.0."""
        self._do_test(policy='soft-anti-affinity',
                      expected_weight=1.0,
                      expected_host='host3')
    def test_soft_anti_affinity_weight_multiplier_zero_value(self):
        """Multiplier 0.0 neutralizes the weigher: all hosts weigh 0."""
        # We do not know the host, all have same weight.
        self.flags(soft_anti_affinity_weight_multiplier=0.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-anti-affinity',
                      expected_weight=0.0,
                      expected_host=None)
    def test_soft_anti_affinity_weight_multiplier_positive_value(self):
        """Multiplier 2.0 scales the winning weight to 2.0."""
        self.flags(soft_anti_affinity_weight_multiplier=2.0,
                   group='filter_scheduler')
        self._do_test(policy='soft-anti-affinity',
                      expected_weight=2.0,
                      expected_host='host3')
    def test_soft_anti_affinity_weight_multiplier(self):
        """Aggregate metadata overrides the config multiplier; with several
        aggregates the minimum metadata value wins."""
        self.flags(soft_anti_affinity_weight_multiplier=0.0,
                   group='filter_scheduler')
        host_attr = {'instances': {'instance1': mock.sentinel}}
        host1 = fakes.FakeHostState('fake-host', 'node', host_attr)
        # By default, return the weight_multiplier configuration directly
        self.assertEqual(0.0, self.antiaffin_weigher.weight_multiplier(host1))
        host1.aggregates = [
            objects.Aggregate(
                id=1,
                name='foo',
                hosts=['fake-host'],
                metadata={'soft_anti_affinity_weight_multiplier': '2'},
            )]
        # read the weight multiplier from metadata to override the config
        self.assertEqual(2.0, self.antiaffin_weigher.weight_multiplier(host1))
        host1.aggregates = [
            objects.Aggregate(
                id=1,
                name='foo',
                hosts=['fake-host'],
                metadata={'soft_anti_affinity_weight_multiplier': '2'},
            ),
            objects.Aggregate(
                id=2,
                name='foo',
                hosts=['fake-host'],
                metadata={'soft_anti_affinity_weight_multiplier': '1.5'},
            )]
        # If the host is in multiple aggs and there are conflict weight values
        # in the metadata, we will use the min value among them
        self.assertEqual(1.5, self.antiaffin_weigher.weight_multiplier(host1))
    def test_host_with_agg(self):
        """An aggregate multiplier applies to the full weighing run too."""
        self.flags(soft_anti_affinity_weight_multiplier=0.0,
                   group='filter_scheduler')
        hostinfo_list = self._get_all_hosts()
        aggs = [
            objects.Aggregate(
                id=1,
                name='foo',
                hosts=['host1', 'host2', 'host3', 'host4'],
                metadata={'soft_anti_affinity_weight_multiplier': '1.5'},
            )]
        for h in hostinfo_list:
            h.aggregates = aggs
        weighed_host = self._get_weighed_host(hostinfo_list,
                                              'soft-anti-affinity')
        self.assertEqual(1.5, weighed_host.weight)
        self.assertEqual('host3', weighed_host.obj.host)
|
{
"content_hash": "ca2ca36691ea214a4b7bc01fa56e29ff",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 78,
"avg_line_length": 40.477366255144034,
"alnum_prop": 0.5554087027246848,
"repo_name": "openstack/nova",
"id": "3048e9f06c3498751cee0c5822e361adf1c2fbc2",
"size": "10467",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/scheduler/weights/test_weights_affinity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
import base64
from functools import wraps
from django.contrib.auth.models import AnonymousUser as DjangoAnonymousUser, User as DjangoUser
from django.http import HttpResponse
from apps.canvas_auth.backends import authenticate
from apps.canvas_auth.http import HttpUnauthorizedException
from apps.canvas_auth.models import AnonymousUser, User
from django.conf import settings
class AnonymousUserMiddleware(object):
    """ Replaces request.user with our own AnonymousUser instead of Django's (if request.user is anonymous). """
    def process_request(self, request):
        # Leave authenticated users alone; only swap Django's anonymous user.
        if not isinstance(request.user, DjangoAnonymousUser):
            return
        request.user = AnonymousUser()
class SessionMigrationMiddleware(object):
    """
    Migrates the "_auth_backend_model" field in user sessions to the first backend listed in AUTHENTICATION_BACKENDS.
    Does nothing if AUTHENTICATION_BACKENDS is empty.
    Must come after "django.middleware.SessionMiddleware", and before
    "django.contrib.auth.middleware.AuthenticationMiddleware".
    """
    BACKEND_KEY = '_auth_user_backend'
    def process_request(self, request):
        backends = settings.AUTHENTICATION_BACKENDS
        if not backends:
            return
        target_backend = backends[0]
        # Defaulting to the target backend makes a missing key a no-op.
        current_backend = request.session.get(self.BACKEND_KEY, target_backend)
        if current_backend == target_backend:
            return
        # Keep the old value around for debugging/auditing, then migrate.
        request.session['_old_auth_user_backend'] = current_backend
        request.session[self.BACKEND_KEY] = target_backend
# http://djangosnippets.org/snippets/1720/
class HttpBasicAuthMiddleware(object):
    """ Should be after your regular authentication middleware.

    If the request carries an HTTP Basic Authorization header, attempt to
    authenticate with it; raise 401 (via HttpUnauthorizedException) on any
    malformed or failed attempt. Requests without the header pass through.
    """
    def _unauthorized(self, request):
        raise HttpUnauthorizedException("Basic Realm='%s'" % settings.HTTP_AUTH_REALM)
    def process_request(self, request):
        # At this point, the user is either not logged in, or must log in using
        # http auth. If they have a header that indicates a login attempt, then
        # use this to try to login.
        if 'HTTP_AUTHORIZATION' not in request.META:
            return
        try:
            (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()
            if auth_type.lower() != 'basic':
                return self._unauthorized(request)
            user_pass = base64.b64decode(data)
        except (ValueError, TypeError):
            # ValueError: header does not split into exactly two tokens.
            # TypeError: invalid base64 padding (raised on Python 2;
            # binascii.Error subclasses ValueError on Python 3).
            return self._unauthorized(request)
        # BUG FIX: split on the first ':' only -- RFC 7617 allows colons in
        # the password; the original split(':') rejected such credentials.
        bits = user_pass.split(':', 1)
        if len(bits) != 2:
            # BUG FIX: 'return' was missing here (harmless only because
            # _unauthorized() raises, but now consistent with the other calls).
            return self._unauthorized(request)
        user = authenticate(bits[0], bits[1])
        if user is None:
            return self._unauthorized(request)
        request.user = user
|
{
"content_hash": "ac28f44348669e610e3038782b60e797",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 117,
"avg_line_length": 36.88732394366197,
"alnum_prop": 0.6842306223749522,
"repo_name": "canvasnetworks/canvas",
"id": "f6fc758affb6f4a02f0756aed318459637eb7173",
"size": "2619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/apps/canvas_auth/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "537625"
},
{
"name": "HTML",
"bytes": "689709"
},
{
"name": "JavaScript",
"bytes": "1313262"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6659685"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "5326"
}
],
"symlink_target": ""
}
|
import uuid
from django.db import models
from core.models.base import StandardModel
from core.models.base import CaseInsensitiveNamedModel
from core.models import Material
from core.models import Source
from core.models import SampleType
from core.models import Storage
from core.models import Project
from core.models import Note
from django.contrib.auth.models import User
from django.contrib.contenttypes import fields
from django.core.urlresolvers import reverse
from django.db import connection
from core import constants
from core import utils
from polymorphic.models import PolymorphicModel
import logging
logger = logging.getLogger(__name__)
class Sample(PolymorphicModel, CaseInsensitiveNamedModel):
    """A tracked lab sample.

    Polymorphic base model: subclasses add sample-kind-specific fields.
    Samples link to each other (parent/child etc.) through the
    SampleToSample join model, typed by SampleLink.
    """
    # (status_value, status_value) choice pairs for the status field.
    STATUSES = utils.self_zip(constants.STANDARD_STATUSES)
    sample_type = models.ForeignKey(SampleType)
    material = models.ForeignKey(Material)
    status = models.CharField(max_length=255,choices=STATUSES,default=constants.STATUS_ACTIVE)
    owner = models.ForeignKey(User,null=True,blank=True)
    source = models.ForeignKey(Source,null=True,blank=True)
    lot = models.CharField(max_length=255, null=True, blank=True)
    volume = models.CharField(max_length=255, null=True, blank=True)
    concentration = models.CharField(max_length=255, null=True, blank=True)
    concentration_units = models.CharField(max_length=255, null=True, blank=True)
    project = models.ManyToManyField(Project,blank=True)
    storage = models.ForeignKey(Storage,null=True, blank=True)
    unit_count = models.CharField(max_length=255, null=True, blank=True)
    notes = fields.GenericRelation(Note)
    # Directed sample-to-sample graph; the reverse accessor is `linked_to`.
    sample_links = models.ManyToManyField(
        'self',
        through='SampleToSample',
        symmetrical=False,
        related_name="linked_to",
        blank=True
    )
    def _has_alert_note(self):
        """Return True if any attached note is of the ALERT type."""
        logger.debug('looking for alert note')
        return self.notes.filter(note_type=constants.TYPE_ALERT).exists()
    has_alert_note = property(_has_alert_note)
    class Meta:
        app_label = "core"
        db_table = 'sample'
        verbose_name_plural = 'samples'
        unique_together = ("name",)
        ordering = ['-date_created']
    def save(self, *args, **kwargs):
        """Auto-assign a generated name when none was provided."""
        if not self.name:
            self.name = Sample.name_generator()
        super(Sample, self).save(*args, **kwargs)
    def get_absolute_url(self):
        return reverse('samples-detail', kwargs={'pk': self.pk})
    def __str__(self):
        return self.name
    def add_sample_link(self, sample, link_type):
        """Create (or fetch) a typed link from this sample to *sample*."""
        link, created = SampleToSample.objects.get_or_create(
            source_sample=self,
            target_sample=sample,
            type=link_type
        )
        return link
    def remove_sample_link(self, sample, link_type):
        """Delete the typed link from this sample to *sample*, if any."""
        SampleToSample.objects.filter(
            source_sample=self,
            target_sample=sample,
            type=link_type
        ).delete()
        return
    def get_sample_links(self, link_type):
        """Samples this sample links TO with the given link type."""
        return self.sample_links.filter(
            target_samples__type=link_type,
            target_samples__source_sample=self
        )
    def get_related_to(self, link_type):
        """Samples that link TO this sample with the given link type."""
        return self.linked_to.filter(
            source_samples__type=link_type,
            source_samples__target_sample=self
        )
    def get_children(self):
        logger.debug("in generic get children")
        link_type = SampleLink.objects.get(name=constants.LINK_TYPE_CHILD)
        return self.get_sample_links(link_type)
    def get_parents(self):
        logger.debug("in generic get parents")
        link_type = SampleLink.objects.get(name=constants.LINK_TYPE_PARENT)
        return self.get_related_to(link_type)
    @classmethod
    def name_generator(cls):
        """Return a unique default sample name ("S-<uuid4>")."""
        return "S-{0}".format(uuid.uuid4())
    # get the next value in the sequence based on the record name
    # record_1 would generate 2
    # record_10 would generate 11
    @staticmethod
    def get_operational_index(value):
        """Return the next numeric suffix after prefix *value*.

        Runs raw PostgreSQL (to_number, ~ regex operator); falls back to 1
        on any error or when no matching names exist.
        """
        sql_string = """
            select max(
                to_number(
                    substring(name from char_length(%(value)s) + position(%(value)s in name)),
                    '999'
                ) + 1
            ) from sample
            where name ~ (%(value)s || '[0-9]+$');
        """
        index = 1
        try:
            cursor = connection.cursor()
            cursor.execute(sql_string, {'value': value})
            row = cursor.fetchone()
            logger.debug(row)
            index = row[0]
            if index is None:
                index = 1
        except Exception as e:
            # Best-effort: any DB error falls back to index 1.
            logger.debug(e)
            logger.debug("exception while looking up values")
            index = 1
        logger.debug("returning the following index {0}".format(index))
        return index
# Relax the inherited 'name' field so it may be omitted at creation time;
# Sample.save() then fills it in via Sample.name_generator().
Sample._meta.get_field('name').null = True
Sample._meta.get_field('name').blank = True
class SampleLink(StandardModel):
    """Named type for a sample-to-sample relationship (e.g. parent, child)."""
    class Meta:
        app_label = "core"
        db_table = 'sample_link'
        verbose_name_plural = 'sample links'
    def __str__(self):
        return self.name
class SampleToSample(models.Model):
    """Through-model for the directed, typed Sample.sample_links relation."""
    # Directed edge: source -> target, classified by `type`.
    source_sample = models.ForeignKey(Sample, related_name='source_samples')
    target_sample = models.ForeignKey(Sample, related_name='target_samples')
    type = models.ForeignKey(SampleLink)
    class Meta:
        app_label = "core"
        db_table = 'sample_to_sample'
        verbose_name_plural = 'sample to samples'
|
{
"content_hash": "2a5bae6064db8f8328e91267fd920b93",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 92,
"avg_line_length": 28.52840909090909,
"alnum_prop": 0.6954789882493527,
"repo_name": "slohr/paperlims",
"id": "1a34e386a228d6e08ad91f2065f14420bfcdf2e1",
"size": "5021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paperlims/core/models/sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "30233"
},
{
"name": "Python",
"bytes": "109471"
},
{
"name": "TSQL",
"bytes": "362"
}
],
"symlink_target": ""
}
|
"""Check design."""
from __future__ import print_function
import os
from os.path import basename, splitext
from plumbum.cmd import stringtie, featureCounts
# from piret.runs import Map
import pandas as pd
from luigi.util import inherits, requires
from luigi.contrib.external_program import ExternalProgramTask
from luigi import LocalTarget, Parameter, IntParameter
import luigi
import logging
class FeatureCounts(luigi.Task):
    """Summarize mapped reads classification using featureCounts.

    For every annotated feature type found in the GFF file(s), runs one
    featureCounts invocation over all sorted BAMs and writes
    ``<workdir>/processes/featureCounts/<kingdom>/<feat>_count.tsv``.
    """
    fastq_dic = luigi.DictParameter()
    kingdom = luigi.Parameter()
    gff_file = luigi.Parameter()
    workdir = luigi.Parameter()
    indexfile = luigi.Parameter()
    num_cpus = luigi.IntParameter()
    ref_file = luigi.Parameter()
    fid = luigi.Parameter()
    stranded = luigi.IntParameter()

    @staticmethod
    def _gff_features(gff_path):
        """Return the unique feature types (GFF column 3) in *gff_path*."""
        return list(set(pd.read_csv(gff_path, sep="\t", header=None,
                                    comment='#')[2].tolist()))

    def _counts_dir(self):
        """Directory that receives the per-feature count tables."""
        return os.path.join(self.workdir, "processes", "featureCounts",
                            self.kingdom)

    def _run_fcount(self, gff, feat, attribute, counts_dir, bam_list):
        """Build and execute one featureCounts call for feature type *feat*,
        grouping by GFF attribute *attribute*."""
        fcount_cmd_opt = ["-a", gff,
                          "-s", self.stranded,
                          "-B",
                          "-p", "-P", "-C",
                          "-g", attribute,
                          "-t", feat,
                          "-T", self.num_cpus,
                          "-o", counts_dir + "/" + feat +
                          "_count.tsv"] + bam_list
        fcount_cmd = featureCounts[fcount_cmd_opt]
        fcount_cmd()

    def output(self):
        """Expected output of featureCounts (the last surviving feature's
        count table, matching the original behavior)."""
        counts_dir = self._counts_dir()
        features = self._gff_features(os.path.abspath(self.gff_file))
        features = [feat for feat in features if feat in ['CDS', 'rRNA',
                                                          'tRNA', 'exon',
                                                          'gene',
                                                          'transcript']]
        return LocalTarget(os.path.join(counts_dir,
                                        features[-1] + "_count.tsv"))

    def run(self):
        """Run featureCounts for every relevant feature type in the GFF(s)."""
        map_dir = os.path.join(self.workdir, "processes", "mapping")
        samp_list = list(self.fastq_dic.keys())
        in_srtbam_list = [os.path.join(map_dir, samp, samp + "_srt.bam")
                          for samp in samp_list]
        counts_dir = self._counts_dir()
        if not os.path.exists(counts_dir):
            os.makedirs(counts_dir)
        if ',' in self.gff_file:
            for gffs in [os.path.abspath(g) for g in self.gff_file.split(",")]:
                for feat in self._gff_features(gffs):
                    # BUG FIX: pass the individual GFF (gffs) to -a; the
                    # original passed the comma-joined self.gff_file, which
                    # featureCounts cannot open.
                    if feat in ['CDS', 'rRNA', 'tRNA', 'exon',
                                'NovelRegion', 'transcript', 'mRNA']:
                        self._run_fcount(gffs, feat, self.fid,
                                         counts_dir, in_srtbam_list)
                    elif feat == 'gene':
                        # genes are always grouped by their ID attribute
                        self._run_fcount(gffs, feat, "ID",
                                         counts_dir, in_srtbam_list)
                    # BUG FIX: the original executed the command outside the
                    # if/elif, so unmatched feature types re-ran a stale
                    # option list (or raised UnboundLocalError).
        else:
            for feat in self._gff_features(self.gff_file):
                if feat in ['CDS', 'rRNA', 'tRNA', 'exon', 'transcript',
                            'NovelRegion']:
                    self._run_fcount(self.gff_file, feat, self.fid,
                                     counts_dir, in_srtbam_list)
                if feat == 'gene':
                    self._run_fcount(self.gff_file, feat, "ID",
                                     counts_dir, in_srtbam_list)
|
{
"content_hash": "2280e85544780415fc3eb81aee328cec",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 80,
"avg_line_length": 48.77477477477478,
"alnum_prop": 0.38585149612116737,
"repo_name": "mshakya/PyPiReT",
"id": "94eb14653e8d0cd6dd8f614434706e14248207d4",
"size": "5439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "piret/counts/featurecounts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "152071"
},
{
"name": "R",
"bytes": "27008"
},
{
"name": "Shell",
"bytes": "6930"
}
],
"symlink_target": ""
}
|
import json
import requests
from pylons import app_globals as g
# Adzerk geo-targeting endpoint.
COUNTRIES_URL = 'https://api.adzerk.net/v1/countries'
# Auth + content-type headers sent with every adzerk API request.
HEADERS = {
    'X-Adzerk-ApiKey': g.secrets['az_ads_key'],
    'Content-Type': 'application/x-www-form-urlencoded',
}
def get_locations(exclude_regions_without_metros=True):
    """
    Get countries/regions/metros from adzerk.

    Optionally exclude regions without metros because we can't pull
    inventory reports by region, so we can't target them.
    Raises ValueError on a non-2xx API response.
    """
    api_response = requests.get(COUNTRIES_URL, headers=HEADERS)
    if not (200 <= api_response.status_code <= 299):
        raise ValueError('response %s' % api_response.status_code)
    countries = json.loads(api_response.text)
    locations = {}
    for country in countries:
        country_code = country['Code']
        locations[country_code] = {'name': country['Name']}
        for region in country['Regions'].itervalues():
            region_metros = region['Metros']
            if not region_metros and exclude_regions_without_metros:
                continue
            # Only create the 'regions' key once a region survives the filter.
            locations[country_code].setdefault('regions', {})
            region_entry = {
                'name': region['Name'],
                'metros': {},
            }
            locations[country_code]['regions'][region['Code']] = region_entry
            for metro in region_metros.itervalues():
                region_entry['metros'][metro['Code']] = {
                    'name': metro['Name'],
                }
    return locations
def write_locations(filename):
    """Dump the adzerk location tree to *filename* as pretty-printed JSON."""
    tree = get_locations()
    with open(filename, 'w') as f:
        f.write(json.dumps(tree, indent=2, sort_keys=True))
|
{
"content_hash": "790da71a09f652fa24000c38ca0e0ac4",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 29.90625,
"alnum_prop": 0.5679205851619644,
"repo_name": "madbook/reddit-plugin-adzerk",
"id": "e1f83a84de227306ab59cff6f3978e6002283630",
"size": "1914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit_adzerk/location.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2762"
},
{
"name": "JavaScript",
"bytes": "13796"
},
{
"name": "Python",
"bytes": "115394"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import re
from .core import common_subexpression
from .expressions import Expr
from .reductions import Reduction, Summary, summary
from ..dispatch import dispatch
from .expressions import dshape_method_list
from datashape import dshape, Record, Map, Unit, var
__all__ = ['by', 'By', 'count_values']
def _names_and_types(expr):
    """Return parallel lists of field names and types for *expr*'s measure.

    Handles Record (multi-field), Unit (scalar) and Map measures; raises
    ValueError for anything else.
    """
    measure = expr.dshape.measure
    # Unwrap a type constructor wrapper when present.
    schema = getattr(measure, 'ty', measure)
    if isinstance(schema, Record):
        return schema.names, schema.types
    if isinstance(schema, Unit):
        return [expr._name], [measure]
    if isinstance(schema, Map):
        return [expr._name], [measure.key]
    raise ValueError("Unable to determine name and type of %s" % expr)
class By(Expr):
    """ Split-Apply-Combine Operator

    Examples
    --------
    >>> from blaze import symbol
    >>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
    >>> e = by(t['name'], total=t['amount'].sum())
    >>> data = [['Alice', 100, 1],
    ...         ['Bob', 200, 2],
    ...         ['Alice', 50, 3]]
    >>> from blaze.compute.python import compute
    >>> sorted(compute(e, data))
    [('Alice', 150), ('Bob', 200)]
    """
    # Expr machinery: the two sub-expressions this node holds.
    _arguments = 'grouper', 'apply'
    @property
    def _child(self):
        # The common table both grouper and apply derive from.
        return common_subexpression(self.grouper, self.apply)
    def _schema(self):
        # Result rows are grouper fields followed by the applied reductions.
        grouper_names, grouper_types = _names_and_types(self.grouper)
        apply_names, apply_types = _names_and_types(self.apply)
        names = grouper_names + apply_names
        types = grouper_types + apply_types
        return dshape(Record(list(zip(names, types))))
    def _dshape(self):
        # TODO: think if this should be generalized
        return var * self.schema
    def __str__(self):
        # Render as by(grouper, reductions...), stripping the summary() wrapper.
        return '%s(%s, %s)' % (type(self).__name__.lower(),
                               self.grouper,
                               re.sub(r'^summary\((.*)\)$', r'\1',
                                      str(self.apply)))
# by() is overloaded via multiple dispatch on the apply argument's type.
@dispatch(Expr, Reduction)
def by(grouper, s):
    # A bare reduction (e.g. t.amount.sum()) is rejected: the result column
    # would be unnamed. Point users at the keyword form instead.
    raise ValueError("This syntax has been removed.\n"
                     "Please name reductions with keyword arguments.\n"
                     "Before: by(t.name, t.amount.sum())\n"
                     "After: by(t.name, total=t.amount.sum())")
@dispatch(Expr, Summary)
def by(grouper, s):
    # A pre-built summary() of named reductions groups directly.
    return By(grouper, s)
@dispatch(Expr)
def by(grouper, **kwargs):
    # Keyword reductions, e.g. by(t.name, total=t.amount.sum()).
    return By(grouper, summary(**kwargs))
def count_values(expr, sort=True):
    """
    Count occurrences of elements in this column

    Sorted by count (descending) by default; pass ``sort=False`` to skip
    the sort.
    """
    counted = by(expr, count=expr.count())
    return counted.sort('count', ascending=False) if sort else counted
# Expose count_values as a method on every 1-dimensional expression.
dshape_method_list.extend([
    (lambda ds: len(ds.shape) == 1, set([count_values])),
])
|
{
"content_hash": "2742a3627944b4d31915b81c44e65869",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 71,
"avg_line_length": 27.68867924528302,
"alnum_prop": 0.5884156729131176,
"repo_name": "ContinuumIO/blaze",
"id": "4a787dbb6bf501fec0a457df962fec4cc5bce6a1",
"size": "2935",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "blaze/expr/split_apply_combine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "862729"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
}
|
import unittest
import os.path
import re, gc, sys
from lxml import etree
def make_version_tuple(version_string):
    """Split *version_string* into a comparable tuple.

    Runs of digits become ints; any other non-dot run is kept as a string,
    e.g. '2.0beta1' -> (2, 0, 'beta', 1).
    """
    def _coerce(token):
        try:
            return int(token)
        except ValueError:
            return token
    tokens = re.findall('([0-9]+|[^0-9.]+)', version_string)
    return tuple(_coerce(token) for token in tokens)
# Interpreter / ElementTree availability detection used throughout the tests.
# NOTE(review): sys.implementation is a namespace object, so the string
# comparison in the first clause looks always-False; the pypy_version_info
# check is what actually detects PyPy -- confirm before changing.
IS_PYPY = (getattr(sys, 'implementation', None) == 'pypy' or
           getattr(sys, 'pypy_version_info', None) is not None)
IS_PYTHON3 = sys.version_info[0] >= 3
# Prefer the stdlib ElementTree; fall back to the standalone package, else None.
try:
    from xml.etree import ElementTree # Python 2.5+
except ImportError:
    try:
        from elementtree import ElementTree # standard ET
    except ImportError:
        ElementTree = None
# (0,0,0) sentinel means "not available / unknown version".
if hasattr(ElementTree, 'VERSION'):
    ET_VERSION = make_version_tuple(ElementTree.VERSION)
else:
    ET_VERSION = (0,0,0)
# Same dance for the C-accelerated cElementTree.
try:
    from xml.etree import cElementTree # Python 2.5+
except ImportError:
    try:
        import cElementTree # standard ET
    except ImportError:
        cElementTree = None
if hasattr(cElementTree, 'VERSION'):
    CET_VERSION = make_version_tuple(cElementTree.VERSION)
else:
    CET_VERSION = (0,0,0)
def filter_by_version(test_class, version_dict, current_version):
    """Remove test methods that do not work with the current lib version.

    Any attribute of *test_class* whose required version (looked up in
    *version_dict*, defaulting to (0,0,0)) exceeds *current_version* is
    replaced by a no-op method so the test silently passes.
    """
    def _noop(self):
        pass
    lookup_required = version_dict.get
    for attr_name in dir(test_class):
        if lookup_required(attr_name, (0, 0, 0)) > current_version:
            setattr(test_class, attr_name, _noop)
# Use the system doctest only if it has every feature the tests rely on;
# otherwise substitute the bundled copy under the same name.
try:
    import doctest
    # check if the system version has everything we need
    doctest.DocFileSuite
    doctest.DocTestParser
    doctest.NORMALIZE_WHITESPACE
    doctest.ELLIPSIS
except (ImportError, AttributeError):
    # we need our own version to make it work (Python 2.3?)
    import local_doctest as doctest
# Provide sorted() on ancient Pythons that lack the builtin; on modern
# Pythons re-bind the builtin locally so later code can reference it.
try:
    sorted
except NameError:
    def sorted(seq, **kwargs):
        # Copy first: list.sort() mutates in place and returns None.
        seq = list(seq)
        seq.sort(**kwargs)
        return seq
else:
    locals()['sorted'] = sorted
# pytest's skipif marker, with a do-nothing stand-in when pytest is absent.
try:
    import pytest
except ImportError:
    class skipif(object):
        "Using a class because a function would bind into a method when used in classes"
        def __init__(self, *args): pass
        def __call__(self, func, *args): return func
else:
    skipif = pytest.mark.skipif
def _get_caller_relative_path(filename, frame_depth=2):
    """Resolve *filename* relative to the calling module's directory.

    frame_depth=2 walks past this function and its direct caller
    (make_doctest), landing in the test module that requested the file.
    """
    module = sys.modules[sys._getframe(frame_depth).f_globals['__name__']]
    # Modules without __file__ (e.g. built-ins) resolve against ''.
    return os.path.normpath(os.path.join(
        os.path.dirname(getattr(module, '__file__', '')), filename))
# Version-specific module API: _str/_bytes converters, BytesIO/StringIO,
# and make_doctest (which rewrites 2.x-isms vs 3.x-isms in doctest files
# so the same .txt files run on both interpreters).
if sys.version_info[0] >= 3:
    # Python 3
    from builtins import str as unicode
    def _str(s, encoding="UTF-8"):
        # Text is already str on Python 3.
        return s
    def _bytes(s, encoding="UTF-8"):
        return s.encode(encoding)
    from io import StringIO, BytesIO as _BytesIO
    def BytesIO(*args):
        # Accept str input for convenience by encoding it first.
        if args and isinstance(args[0], str):
            args = (args[0].encode("UTF-8"),)
        return _BytesIO(*args)
    doctest_parser = doctest.DocTestParser()
    # Strip u'' prefixes and modernize "except E, e" to "except E as e".
    _fix_unicode = re.compile(r'(\s+)u(["\'])').sub
    _fix_exceptions = re.compile(r'(.*except [^(]*),\s*(.*:)').sub
    def make_doctest(filename):
        """Build a DocTestCase from a doctest file next to the caller."""
        filename = _get_caller_relative_path(filename)
        doctests = read_file(filename)
        doctests = _fix_unicode(r'\1\2', doctests)
        doctests = _fix_exceptions(r'\1 as \2', doctests)
        return doctest.DocTestCase(
            doctest_parser.get_doctest(
                doctests, {}, os.path.basename(filename), filename, 0))
else:
    # Python 2
    from __builtin__ import unicode
    def _str(s, encoding="UTF-8"):
        return unicode(s, encoding=encoding)
    def _bytes(s, encoding="UTF-8"):
        # Byte strings are the native str on Python 2.
        return s
    from StringIO import StringIO
    BytesIO = StringIO
    doctest_parser = doctest.DocTestParser()
    # Drop module prefixes from expected tracebacks, downgrade
    # "except E as e" to "except E, e", and strip b'' prefixes.
    _fix_traceback = re.compile(r'^(\s*)(?:\w+\.)+(\w*(?:Error|Exception|Invalid):)', re.M).sub
    _fix_exceptions = re.compile(r'(.*except [^(]*)\s+as\s+(.*:)').sub
    _fix_bytes = re.compile(r'(\s+)b(["\'])').sub
    def make_doctest(filename):
        """Build a DocTestCase from a doctest file next to the caller."""
        filename = _get_caller_relative_path(filename)
        doctests = read_file(filename)
        doctests = _fix_traceback(r'\1\2', doctests)
        doctests = _fix_exceptions(r'\1, \2', doctests)
        doctests = _fix_bytes(r'\1\2', doctests)
        return doctest.DocTestCase(
            doctest_parser.get_doctest(
                doctests, {}, os.path.basename(filename), filename, 0))
# unittest.skipIf, with a backport for unittest versions that lack it.
try:
    skipIf = unittest.skipIf
except AttributeError:
    def skipIf(condition, why,
               _skip=lambda test_method: None,
               _keep=lambda test_method: test_method):
        # Decorator factory: drop the test method entirely when skipping.
        if condition:
            return _skip
        return _keep
class HelperTestCase(unittest.TestCase):
    """Base TestCase with lxml parsing helpers and GC-on-teardown."""
    def tearDown(self):
        # Force collection so ref-count/GC-related lxml bugs surface per-test.
        gc.collect()
    def parse(self, text, parser=None):
        """Parse a byte string into an lxml ElementTree."""
        f = BytesIO(text)
        return etree.parse(f, parser=parser)
    def _rootstring(self, tree):
        # Serialized root with all spaces and newlines stripped, for
        # whitespace-insensitive comparisons.
        return etree.tostring(tree.getroot()).replace(
            _bytes(' '), _bytes('')).replace(_bytes('\n'), _bytes(''))
    # assertFalse doesn't exist in Python 2.3
    try:
        unittest.TestCase.assertFalse
    except AttributeError:
        assertFalse = unittest.TestCase.failIf
class SillyFileLike:
    """Minimal read-only file-like object serving a fixed byte string."""
    def __init__(self, xml_data=_bytes('<foo><bar/></foo>')):
        self.xml_data = xml_data
    def read(self, amount=None):
        """Return up to *amount* bytes (everything when amount is falsy);
        returns an empty byte string once exhausted."""
        if not self.xml_data:
            return _bytes('')
        if amount:
            data, self.xml_data = self.xml_data[:amount], self.xml_data[amount:]
        else:
            data, self.xml_data = self.xml_data, _bytes('')
        return data
class LargeFileLike:
    """File-like object that lazily generates a large nested XML document.

    The XML is produced on demand by the self.more generator; self.data
    buffers any bytes generated beyond what a read() consumed.
    """
    def __init__(self, charlen=100, depth=4, children=5):
        self.data = BytesIO()
        self.chars = _bytes('a') * charlen
        self.children = range(children)
        self.more = self.iterelements(depth)
    def iterelements(self, depth):
        """Yield XML fragments for a tree *depth* levels deep."""
        yield _bytes('<root>')
        depth -= 1
        if depth > 0:
            for child in self.children:
                for element in self.iterelements(depth):
                    yield element
                yield self.chars
        else:
            yield self.chars
        yield _bytes('</root>')
    def read(self, amount=None):
        """Return *amount* bytes (or everything left when amount is falsy),
        pulling from the generator and buffering any overshoot."""
        data = self.data
        append = data.write
        if amount:
            for element in self.more:
                append(element)
                if data.tell() >= amount:
                    break
        else:
            for element in self.more:
                append(element)
        result = data.getvalue()
        data.seek(0)
        data.truncate()
        if amount:
            # Keep bytes beyond *amount* buffered for the next read().
            append(result[amount:])
            result = result[:amount]
        return result
class LargeFileLikeUnicode(LargeFileLike):
    """Text-mode variant of LargeFileLike: yields unicode fragments and
    buffers in a StringIO instead of bytes/BytesIO."""
    def __init__(self, charlen=100, depth=4, children=5):
        LargeFileLike.__init__(self, charlen, depth, children)
        # Re-seed the buffer and generator with text-mode equivalents.
        self.data = StringIO()
        self.chars = _str('a') * charlen
        self.more = self.iterelements(depth)
    def iterelements(self, depth):
        """Yield unicode XML fragments for a tree *depth* levels deep."""
        yield _str('<root>')
        depth -= 1
        if depth > 0:
            for child in self.children:
                for element in self.iterelements(depth):
                    yield element
                yield self.chars
        else:
            yield self.chars
        yield _str('</root>')
def fileInTestDir(name):
    """Return the path of *name* located next to this module."""
    return os.path.join(os.path.dirname(__file__), name)
def read_file(name, mode='r'):
    """Read and return the whole content of file *name*.

    Uses explicit try/finally (not ``with``) to stay compatible with the
    very old Python versions this test suite supports.
    """
    stream = open(name, mode)
    try:
        return stream.read()
    finally:
        stream.close()
def write_to_file(name, data, mode='w'):
    """Write *data* to file *name*, always closing the file.

    Fix: the original rebound the ``data`` parameter to ``f.write()``'s
    return value (``data = f.write(data)``), which served no purpose.
    try/finally (not ``with``) is kept to match read_file() and the old
    Python versions this suite supports.
    """
    f = open(name, mode)
    try:
        f.write(data)
    finally:
        f.close()
def readFileInTestDir(name, mode='r'):
    """Read a file located in this module's directory (convenience wrapper)."""
    return read_file(fileInTestDir(name), mode)
def canonicalize(xml):
    """Return the C14N (canonical XML) serialisation of the *xml* bytes."""
    tree = etree.parse(BytesIO(xml))
    f = BytesIO()
    # write_c14n produces exclusive canonical form, useful for comparing
    # documents independently of attribute order / formatting.
    tree.write_c14n(f)
    return f.getvalue()
def unentitify(xml):
    """Replace decimal character references (&#NNN;) by their characters.

    NOTE(review): uses ``unichr``, so this path is Python 2 only (on
    Python 3 the module presumably aliases it) — confirm against the
    compat shims at the top of this file.
    """
    # findall with two groups yields (whole_reference, digits) tuples.
    for entity_name, value in re.findall("(&#([0-9]+);)", xml):
        xml = xml.replace(entity_name, unichr(int(value)))
    return xml
|
{
"content_hash": "9d5aefd662f0f7bcc53244287c0ca3d0",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 95,
"avg_line_length": 30.095238095238095,
"alnum_prop": 0.5884858812074002,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "c528a3cabd0b7dbd16dc6cde98933fb92b5e480d",
"size": "8216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/lxml/src/lxml/tests/common_imports.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
}
|
from typing import Dict, Generator
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._internal.exceptions import NetworkConnectionError
# The following comments and HTTP headers were originally added by
# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03.
#
# We use Accept-Encoding: identity here because requests defaults to
# accepting compressed responses. This breaks in a variety of ways
# depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible file
# and will leave the file alone and with an empty Content-Encoding
# - Some servers will notice that the file is already compressed and
# will leave the file alone, adding a Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take a file
# that's already been compressed and compress it again, and set
# the Content-Encoding: gzip header
# By setting this to request only the identity encoding we're hoping
# to eliminate the third case. Hopefully there does not exist a server
# which when given a file will notice it is already compressed and that
# you're not asking for a compressed file and will then decompress it
# before sending because if that's the case I don't think it'll ever be
# possible to make this work.
HEADERS: Dict[str, str] = {"Accept-Encoding": "identity"}
def raise_for_status(resp: Response) -> None:
    """Raise NetworkConnectionError for 4xx/5xx responses; return otherwise."""
    if isinstance(resp.reason, bytes):
        # Try utf-8 first because some servers localize their reason
        # strings; fall back to iso-8859-1 for everything else.
        try:
            reason = resp.reason.decode("utf-8")
        except UnicodeDecodeError:
            reason = resp.reason.decode("iso-8859-1")
    else:
        reason = resp.reason

    status = resp.status_code
    if 400 <= status < 500:
        kind = "Client"
    elif 500 <= status < 600:
        kind = "Server"
    else:
        # Informational / success / redirect: nothing to raise.
        return

    raise NetworkConnectionError(
        f"{status} {kind} Error: {reason} for url: {resp.url}", response=resp
    )
def response_chunks(
    response: Response, chunk_size: int = CONTENT_CHUNK_SIZE
) -> Generator[bytes, None, None]:
    """Given a requests Response, provide the data chunks.

    Yields raw (undecoded) byte chunks of at most *chunk_size* bytes.
    Prefers urllib3's ``stream()`` API; falls back to plain ``read()``
    for other file-like ``response.raw`` objects.
    """
    try:
        # Special case for urllib3.
        for chunk in response.raw.stream(
            chunk_size,
            # We use decode_content=False here because we don't
            # want urllib3 to mess with the raw bytes we get
            # from the server. If we decompress inside of
            # urllib3 then we cannot verify the checksum
            # because the checksum will be of the compressed
            # file. This breakage will only occur if the
            # server adds a Content-Encoding header, which
            # depends on how the server was configured:
            # - Some servers will notice that the file isn't a
            #   compressible file and will leave the file alone
            #   and with an empty Content-Encoding
            # - Some servers will notice that the file is
            #   already compressed and will leave the file
            #   alone and will add a Content-Encoding: gzip
            #   header
            # - Some servers won't notice anything at all and
            #   will take a file that's already been compressed
            #   and compress it again and set the
            #   Content-Encoding: gzip header
            #
            # By setting this not to decode automatically we
            # hope to eliminate problems with the second case.
            decode_content=False,
        ):
            yield chunk
    except AttributeError:
        # Standard file-like object: raw has no .stream(), read in a loop
        # until EOF (empty chunk).
        while True:
            chunk = response.raw.read(chunk_size)
            if not chunk:
                break
            yield chunk
|
{
"content_hash": "753632450165d0eff8c4751a18d5cce5",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 76,
"avg_line_length": 42.427083333333336,
"alnum_prop": 0.6511171126933464,
"repo_name": "pypa/pip",
"id": "134848ae526e54e2b18738f83088c4a17efcce96",
"size": "4073",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "src/pip/_internal/network/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3137"
},
{
"name": "PowerShell",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "7137503"
}
],
"symlink_target": ""
}
|
# Voxie filter: removes "big pores" from a binary volume, keeping only
# structures below MaxSize. Runs as an external filter operation driven
# by the voxie D-Bus API.
from skimage import morphology
from scipy import ndimage as ndi
import voxie
import numpy as np
# NOTE(review): `ndi` and `instance` below appear unused in this script.
args = voxie.parser.parse_args()
context = voxie.VoxieContext(args)
instance = context.createInstance()
if args.voxie_action != 'RunFilter':
    raise Exception('Invalid operation: ' + args.voxie_action)
# Claim the filter operation and release it (with error reporting) on exit.
with context.makeObject(context.bus, context.busName, args.voxie_operation, ['de.uni_stuttgart.Voxie.ExternalOperationRunFilter']).ClaimOperationAndCatch() as op:
    inputData = op.GetInputData('de.uni_stuttgart.Voxie.Input').CastTo(
        'de.uni_stuttgart.Voxie.VolumeData')
    outputPath = op.Properties['de.uni_stuttgart.Voxie.Output'].getValue('o')
    # Size threshold above which pores are removed.
    maxSize = op.Properties['de.uni_stuttgart.Voxie.Filter.BigPoreFilter.MaxSize'].getValue(
        'x')
    inputDataVoxel = inputData.CastTo('de.uni_stuttgart.Voxie.VolumeDataVoxel')
    method = op.Properties['de.uni_stuttgart.Voxie.Filter.BigPoreFilter.Mode'].getValue(
        's')
    with inputDataVoxel.GetBufferReadonly() as inputArray:
        with op.GetOutputVolumeDataVoxelLike(outputPath, inputDataVoxel, type=('bool', 8, 'native')) as data:
            with data.CreateUpdate() as update, data.GetBufferWritable(update) as outputBuffer:
                imageInt = inputArray.array
                # Binarise: any non-zero voxel counts as foreground.
                image = imageInt > 0
                # PoreSpace = pores are white    SolidSpace = pores are black
                if method == 'de.uni_stuttgart.Voxie.Filter.BigPoreFilter.Mode.PoreSpace':
                    negative = np.copy(image)
                    # NOTE(review): remove_small_objects keeps objects >= min_size;
                    # newer scikit-image also deprecates boolean `-` below — this
                    # presumably targets an older numpy/skimage; verify versions.
                    negative = morphology.remove_small_objects(
                        image, min_size=maxSize, connectivity=1, in_place=False)
                    outputBuffer.array[:] = image - \
                        np.logical_and(negative, image)
                elif method == 'de.uni_stuttgart.Voxie.Filter.BigPoreFilter.Mode.SolidSpace':
                    negative = np.copy(image)
                    # NOTE(review): remove_small_holes' `min_size` was renamed
                    # `area_threshold` in scikit-image >= 0.16 — confirm pin.
                    negative = morphology.remove_small_holes(
                        image, min_size=maxSize, connectivity=1, in_place=False)
                    outputBuffer.array[:] = image - \
                        np.invert(np.logical_or(negative, image))
                version = update.Finish()
    # Report the produced data/version back to voxie.
    result = {}
    result[outputPath] = {
        'Data': voxie.Variant('o', data._objectPath),
        'DataVersion': voxie.Variant('o', version._objectPath),
    }
    op.Finish(result)
version._referenceCountingObject.destroy()
context.client.destroy()
|
{
"content_hash": "3ae16dc69daa8aa185b8a52cd877bc6f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 162,
"avg_line_length": 45.214285714285715,
"alnum_prop": 0.627172195892575,
"repo_name": "voxie-viewer/voxie",
"id": "d27217ff7bd9d37f2f32f34d107454eeeeec3854",
"size": "3657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filters/bigPoreRemoval.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "151"
},
{
"name": "C",
"bytes": "582785"
},
{
"name": "C++",
"bytes": "1860251"
},
{
"name": "CMake",
"bytes": "1934"
},
{
"name": "JavaScript",
"bytes": "3194"
},
{
"name": "Makefile",
"bytes": "2053"
},
{
"name": "Python",
"bytes": "36220"
},
{
"name": "QMake",
"bytes": "20169"
},
{
"name": "Shell",
"bytes": "2367"
}
],
"symlink_target": ""
}
|
from os.path import exists
from setuptools import setup, find_packages
# Package metadata for python-hue-client; version is read from the VERSION
# file so releases only need to bump one place.
setup(
    name='python-hue-client',
    version=open('VERSION').read().strip(),
    # Your name & email here
    author='Adam Charnock',
    author_email='adam@adamcharnock.com',
    # If you had hueclient.tests, you would also include that in this list
    packages=find_packages(),
    # Any executable scripts, typically in 'bin'. E.g 'bin/do-something.py'
    scripts=[],
    # REQUIRED: Your project's URL
    url='http://github.com/adamcharnock/python-hue-client',
    # Put your license here. See LICENSE.txt for more information
    license='MIT',
    # Put a nice one-liner description here
    description='',
    # README is optional so source checkouts without it still install.
    long_description=open('README.rst').read() if exists("README.rst") else "",
    # Any requirements here, e.g. "Django >= 1.1.1"
    # NOTE(review): both dependencies are hard-pinned; loosening to ranges
    # would ease co-installation with other packages.
    install_requires=[
        'booby==0.7.0',
        'requests==2.20.0',
    ],
    entry_points={
        'console_scripts': [
            'hue_authenticate = hueclient.utilities:authenticate_interactive',
        ]
    }
)
|
{
"content_hash": "af513f74a8353dd454418f4cf024a229",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 31.939393939393938,
"alnum_prop": 0.6404174573055028,
"repo_name": "adamcharnock/python-hue-client",
"id": "d20cecf2730f6a799607797301f5de037fb19095",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37311"
}
],
"symlink_target": ""
}
|
__author__ = 'yinjun'
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
this.val = val
this.left, this.right = None, None
"""
class Solution:
    # Serialization strategy: a binary tree is stored as the pair
    # [preorder, inorder]; deserialization rebuilds the tree from those two
    # traversals. NOTE(review): this only round-trips correctly when all
    # node values are distinct, because buildTree uses inorder.index().
    '''
    @param root: An object of TreeNode, denote the root of the binary tree.
    This method will be invoked first, you should design your own algorithm
    to serialize a binary tree which denote by a root node to a string which
    can be easily deserialized by your own "deserialize" method later.
    '''
    def serialize(self, root):
        # write your code here
        # Returns [preorder value list, inorder value list].
        result = []
        result.append(self.preorderTraversal(root))
        result.append(self.inorderTraversal(root))
        return result
    def inorderTraversal(self, root):
        # write your code here
        # Iterative inorder traversal. `dict` (shadows the builtin) marks
        # fully-processed nodes; `dictStack` tracks membership of `stack`
        # so nodes are not pushed twice. Nodes are used as dict keys, which
        # relies on TreeNode's default identity hashing.
        stack = []
        dict = {}
        dictStack = {}
        result = []
        if root == None:
            return result
        if root.right!=None:
            stack.append(root.right)
            dictStack[root.right] = 1
        stack.append(root)
        dictStack[root] = 1
        if root.left!=None:
            stack.append(root.left)
            dictStack[root.left] = 1
        l = len(stack)
        while l>0:
            #print result
            p = stack.pop()
            dictStack.pop(p)
            l -= 1
            # A node may be emitted once its left subtree is done (or absent).
            if p.left ==None or p.left !=None and p.left in dict:
                dict[p] = 1
                result.append(p.val)
                if p.right!=None and p.right not in dictStack:
                    stack.append(p.right)
                    dictStack[p.right] = 1
                    l += 1
            else:
                # Left subtree pending: re-push right, self, then left so the
                # left child is processed first.
                if p.right!=None:
                    stack.append(p.right)
                    dictStack[p.right] = 1
                stack.append(p)
                dictStack[p] = 1
                if p.left!=None:
                    stack.append(p.left)
                    dictStack[p.left] = 1
                l = len(stack)
        return result
    def preorderTraversal(self, root):
        # write your code here
        # Iterative preorder: push right before left so left pops first.
        stack = []
        result = []
        if root == None:
            return result
        stack.append(root)
        l = 1
        while l>0:
            p = stack.pop()
            l -= 1
            result.append(p.val)
            if p.right != None:
                stack.append(p.right)
                l += 1
            if p.left != None:
                stack.append(p.left)
                l += 1
        return result
    '''
    @param data: A string serialized by your serialize method.
    This method will be invoked second, the argument data is what exactly
    you serialized at method "serialize", that means the data is not given by
    system, it's given by your own serialize method. So the format of data is
    designed by yourself, and deserialize it here as you serialize it in
    "serialize" method.
    '''
    def deserialize(self, data):
        # write your code here
        # Expects the [preorder, inorder] pair produced by serialize().
        if data==None or data == [] or len(data)!=2:
            return None
        return self.buildTree(data[0], data[1])
    def buildTree(self, preorder, inorder):
        # write your code here
        # Classic reconstruction: preorder[0] is the root; its position in
        # the inorder list splits left and right subtrees.
        if preorder ==[] and inorder == []:
            return None
        root = TreeNode(preorder[0])
        inpos = inorder.index(preorder[0])
        if inpos>0:
            left_pre = preorder[1:inpos+1]
            left_in = inorder[0:inpos]
            root.left = self.buildTree(left_pre, left_in)
        length = len(inorder)
        if inpos + 1 < length:
            right_pre = preorder[inpos+1:]
            right_in = inorder[inpos+1:]
            root.right = self.buildTree(right_pre, right_in)
        return root
class TreeNode:
    """Plain binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x
        # Children start unset; callers wire them up explicitly.
        self.right = None
        self.left = None
# Ad-hoc smoke test: build a left-leaning chain 1 -> 2 -> 3 -> 4 and print
# its serialization. Uses Python 2 print statements, so this module
# requires Python 2 to run.
s = Solution()
n1 = TreeNode(1)
n2 = TreeNode(2)
n3 = TreeNode(3)
n4 = TreeNode(4)
n1.left = n2
n2.left = n3
n3.left = n4
print s.serialize(n1)
#print s.serialize(n1)
#print s.serialize(s.deserialize([1, '#', 2]))
#print s.serialize(s.deserialize([1,2,3,'#','#',4,5]))
#print s.serialize(s.deserialize([1, 2, '#', 3, '#',4]))
|
{
"content_hash": "ffd1a896be31921973a60c4055cd0f4a",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 77,
"avg_line_length": 25.57668711656442,
"alnum_prop": 0.5233869033341328,
"repo_name": "shootsoft/practice",
"id": "be308fe8773ee3cfed66a663af3b6845c0ad7c0a",
"size": "4169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lintcode/NineChapters/03/binary-tree-serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "722333"
}
],
"symlink_target": ""
}
|
import math
import sys
import re
# Usage text, filled in with the program name via `help % sys.argv[0]`.
# NOTE(review): the name `help` shadows the builtin; kept for compatibility.
help = \
"""Usage : %s [ --verbose ] file1 file2 ... fileN
Display indentation used in the list of files. Possible answers are (with X
being the number of spaces used for indentation):
space X
tab 8
mixed tab X space Y
mixed means that indentation style is tab at the beginning of the line (tab
being 8 positions) and then spaces to do the indentation, unless you reach 8
spaces which are replaced by a tab. This is the vim source file indentation
for example. In my opinion, this is the worst possible style.
"""
# Verbosity levels, in increasing order of chattiness; each --verbose flag
# bumps IndentFinder.VERBOSITY by one.
VERBOSE_QUIET = 0
VERBOSE_INFO = 1
VERBOSE_DEBUG = 2
VERBOSE_DEEP_DEBUG = 3
DEFAULT_VERBOSITY = VERBOSE_QUIET
class LineType:
    """String constants classifying a line's indentation (see IndentFinder)."""
    NoIndent = 'NoIndent'
    SpaceOnly = 'SpaceOnly'
    TabOnly = 'TabOnly'
    Mixed = 'Mixed'
    BeginSpace = 'BeginSpace'
# Logging helpers: each prints only if IndentFinder.VERBOSITY is at least
# the given level. `log` uses a Python 2 print statement, so this module
# requires Python 2.
def info(s):
    log(VERBOSE_INFO, s)
def dbg(s):
    log(VERBOSE_DEBUG, s)
def deepdbg(s):
    log(VERBOSE_DEEP_DEBUG, s)
def log(level, s):
    if level <= IndentFinder.VERBOSITY:
        print s
class IndentFinder:
    """
    IndentFinder reports the indentation used in a source file. Its approach
    is not tied to any particular language. It was tested successfully with
    python, C, C++ and Java code.
    How does it work ?
    It scans each line of the entry file for a space character (white space or
    tab) repeated until a non space character is found. Such a line
    is considered to be a properly indented line of code. Blank lines and
    comments line (starting with # or /* or *) are ignored. Lines coming
    after a line ending in '\\' have higher chance of being not properly
    indented, and are thus ignored too.
    Only the increment in indentation are fed in. Dedentation or maintaining
    the same indentation is not taken into account when analysing a file. Increment
    in indentation from zero indentation to some indentation is also ignored because
    it's wrong in many cases (header file with many structures for example, do not always
    obey the indentation of the rest of the code).
    Each line is analysed as:
    - SpaceOnly: indentation of more than 8 space
    - TabOnly: indentation of tab only
    - Mixed: indentation of tab, then less than 8 spaces
    - BeginSpace: indentation of less than 8 space, that could be either a mixed indentation
    or a pure space indentation.
    - non-significant
    Then two consecutive significant lines are then considered. The only valid combinations are:
    - (NoIndent, BeginSpace) => space or mixed
    - (NoIndent, Tab) => tab
    - (BeginSpace, BeginSpace) => space or mixed
    - (BeginSpace, SpaceOnly) => space
    - (SpaceOnly, SpaceOnly) => space
    - (TabOnly, TabOnly) => tab
    - (TabOnly, Mixed) => mixed
    - (Mixed, TabOnly) => mixed
    The increment in number of spaces is then recorded.
    At the end, the number of lines with space indentation, mixed space and tab indentation
    are compared and a decision is made.
    If no decision can be made, DEFAULT_RESULT is returned.
    If IndentFinder ever reports wrong indentation, send me immediately a
    mail, if possible with the offending file.
    """
    # Pre-compiled: leading whitespace run + first non-whitespace chunk.
    INDENT_RE = re.compile("^([ \t]+)([^ \t]+)")
    # Strictly "tabs then spaces" — the only mixed form accepted.
    MIXED_RE = re.compile("^(\t+)( +)$")
    def __init__(self):
        self.clear()
    # Class-level so each --verbose flag raises verbosity globally.
    VERBOSITY = DEFAULT_VERBOSITY
    def parse_file_list(self, file_list):
        # Feed every file into the same accumulated statistics.
        for fname in file_list:
            self.parse_file(fname)
    def parse_file(self, fname):
        with open(fname) as f:
            for line in f:
                self.analyse_line(line)
    def clear(self):
        # Reset all counters: self.lines maps 'space2'..'space8',
        # 'mixed2'..'mixed8' and 'tab' to observation counts.
        self.lines = {}
        for i in range(2, 9):
            self.lines['space%d' % i] = 0
        for i in range(2, 9):
            self.lines['mixed%d' % i] = 0
        self.lines['tab'] = 0
        self.nb_processed_lines = 0
        self.nb_indent_hint = 0
        self.skip_next_line = False
        self.previous_line_info = None
    def analyse_line(self, line):
        # Strip a single trailing newline, if present.
        if line[-1:] == '\n':
            line = line[:-1]
        deepdbg('analyse_line: "%s"' % line.replace(' ', '.').replace('\t', '\\t'))
        self.nb_processed_lines += 1
        skip_current_line = self.skip_next_line
        self.skip_next_line = False
        if line[-1:] == '\\':
            deepdbg('analyse_line: Ignoring next line!')
            # skip lines after lines ending in \
            self.skip_next_line = True
        if skip_current_line:
            deepdbg('analyse_line: Ignoring current line!')
            return
        ret = self.analyse_line_indentation(line)
        if ret:
            self.nb_indent_hint += 1
        deepdbg('analyse_line: Result of line analysis: %s' % str(ret))
        return ret
    def analyse_line_type(self, line):
        '''Analyse the type of line and return (LineType, <indentation part of
        the line>).
        The function will reject improperly formatted lines (mixture of tab
        and space for example) and comment lines.
        '''
        mixed_mode = False
        tab_part = ''
        space_part = ''
        if len(line) > 0 and line[0] != ' ' and line[0] != '\t':
            return (LineType.NoIndent, '')
        mo = self.INDENT_RE.match(line)
        if not mo:
            deepdbg('analyse_line_type: line is not indented')
            return None
        indent_part = mo.group(1)
        text_part = mo.group(2)
        deepdbg('analyse_line_type: indent_part="%s" text_part="%s"' %
            (indent_part.replace(' ', '.').replace('\t', '\\t').replace('\n', '\\n'),
            text_part))
        if text_part[0] == '*':
            # continuation of a C/C++ comment, unlikely to be indented correctly
            return None
        if text_part[0:2] == '/*' or text_part[0] == '#':
            # python, C/C++ comment, might not be indented correctly
            return None
        if '\t' in indent_part and ' ' in indent_part:
            # mixed mode
            mo = self.MIXED_RE.match(indent_part)
            if not mo:
                # line is not composed of '\t\t\t ', ignore it
                return None
            mixed_mode = True
            tab_part = mo.group(1)
            space_part = mo.group(2)
        if mixed_mode:
            if len(space_part) >= 8:
                # this is not mixed mode, this is garbage !
                return None
            # Mixed lines carry a 3-tuple: (type, tabs, spaces).
            return (LineType.Mixed, tab_part, space_part)
        if '\t' in indent_part:
            return (LineType.TabOnly, indent_part)
        if ' ' in indent_part:
            if len(indent_part) < 8:
                # this could be mixed mode too
                return (LineType.BeginSpace, indent_part)
            else:
                # this is really a line indented with spaces
                return (LineType.SpaceOnly, indent_part)
        assert False, 'We should never get there !'
    def analyse_line_indentation(self, line):
        # Compare the current line's indentation class to the previous
        # significant line's; only recognised transitions (see the class
        # docstring) contribute to the statistics. Returns the counter key
        # credited ('tab', 'spaceN', 'mixedN') or None.
        previous_line_info = self.previous_line_info
        current_line_info = self.analyse_line_type(line)
        self.previous_line_info = current_line_info
        if current_line_info is None or previous_line_info is None:
            deepdbg('analyse_line_indentation: Not enough line info to analyse line: %s, %s' % (str(previous_line_info), str(current_line_info)))
            return
        t = (previous_line_info[0], current_line_info[0])
        deepdbg('analyse_line_indentation: Indent analysis: %s %s' % t)
        if (t == (LineType.TabOnly, LineType.TabOnly)
            or t == (LineType.NoIndent, LineType.TabOnly)):
            if len(current_line_info[1]) - len(previous_line_info[1]) == 1:
                self.lines['tab'] += 1
                return 'tab'
        elif (t == (LineType.SpaceOnly, LineType.SpaceOnly)
            or t == (LineType.BeginSpace, LineType.SpaceOnly)
            or t == (LineType.NoIndent, LineType.SpaceOnly)):
            nb_space = len(current_line_info[1]) - len(previous_line_info[1])
            if 1 < nb_space <= 8:
                key = 'space%d' % nb_space
                self.lines[key] += 1
                return key
        elif (t == (LineType.BeginSpace, LineType.BeginSpace)
            or t == (LineType.NoIndent, LineType.BeginSpace)):
            # Ambiguous: credit both space and mixed counters.
            nb_space = len(current_line_info[1]) - len(previous_line_info[1])
            if 1 < nb_space <= 8:
                key1 = 'space%d' % nb_space
                key2 = 'mixed%d' % nb_space
                self.lines[key1] += 1
                self.lines[key2] += 1
                return key1
        elif t == (LineType.BeginSpace, LineType.TabOnly):
            # we assume that mixed indentation used 8 characters tabs
            if len(current_line_info[1]) == 1:
                # more than one tab on the line --> not mixed mode !
                nb_space = len(current_line_info[1]) * 8 - len(previous_line_info[1])
                if 1 < nb_space <= 8:
                    key = 'mixed%d' % nb_space
                    self.lines[key] += 1
                    return key
        elif t == (LineType.TabOnly, LineType.Mixed):
            tab_part, space_part = tuple(current_line_info[1:3])
            if len(previous_line_info[1]) == len(tab_part):
                nb_space = len(space_part)
                if 1 < nb_space <= 8:
                    key = 'mixed%d' % nb_space
                    self.lines[key] += 1
                    return key
        elif t == (LineType.Mixed, LineType.TabOnly):
            tab_part, space_part = previous_line_info[1:3]
            if len(tab_part) + 1 == len(current_line_info[1]):
                nb_space = 8 - len(space_part)
                if 1 < nb_space <= 8:
                    key = 'mixed%d' % nb_space
                    self.lines[key] += 1
                    return key
        else:
            pass
        return None
    def results(self):
        # Decide the dominant style from the collected counters.
        # Returns ('space', N), ('tab', 0), ('mixed', (8, N)) or (None, 0)
        # when nothing dominates.
        dbg("Nb of scanned lines : %d" % self.nb_processed_lines)
        dbg("Nb of indent hint : %d" % self.nb_indent_hint)
        dbg("Collected data:")
        for key in self.lines:
            if self.lines[key] > 0:
                dbg('%s: %d' % (key, self.lines[key]))
        max_line_space = max([self.lines['space%d' % i] for i in range(2, 9)])
        max_line_mixed = max([self.lines['mixed%d' % i] for i in range(2, 9)])
        max_line_tab = self.lines['tab']
        dbg('max_line_space: %d' % max_line_space)
        dbg('max_line_mixed: %d' % max_line_mixed)
        dbg('max_line_tab: %d' % max_line_tab)
        ### Result analysis
        #
        # 1. Space indented file
        # - lines indented with less than 8 space will fill mixed and space array
        # - lines indented with 8 space or more will fill only the space array
        # - almost no lines indented with tab
        #
        # => more lines with space than lines with mixed
        # => more a lot more lines with space than tab
        #
        # 2. Tab indented file
        # - most lines will be tab only
        # - very few lines as mixed
        # - very few lines as space only
        #
        # => a lot more lines with tab than lines with mixed
        # => a lot more lines with tab than lines with space
        #
        # 3. Mixed tab/space indented file
        # - some lines are tab-only (lines with exactly 8 step indentation)
        # - some lines are space only (less than 8 space)
        # - all other lines are mixed
        #
        # If mixed is tab + 2 space indentation:
        # - a lot more lines with mixed than with tab
        # If mixed is tab + 4 space indentation
        # - as many lines with mixed than with tab
        #
        # If no lines exceed 8 space, there will be only lines with space
        # and tab but no lines with mixed. Impossible to detect mixed indentation
        # in this case, the file looks like it's actually indented as space only
        # and will be detected so.
        #
        # => same or more lines with mixed than lines with tab only
        # => same or more lines with mixed than lines with space only
        #
        result = (None, 0)
        # Detect space indented file
        if max_line_space >= max_line_mixed and max_line_space > max_line_tab:
            nb = 0
            indent_value = None
            for i in range(8, 1, -1):
                if self.lines['space%d' % i] > int(nb * 1.1): # give a 10% threshold
                    indent_value = i
                    nb = self.lines['space%d' % indent_value]
                    dbg("%d confidence: %d" % (indent_value, math.log(nb)))
            if indent_value is not None: # no lines
                result = ('space', indent_value)
        # Detect tab files
        elif max_line_tab > max_line_mixed and max_line_tab > max_line_space:
            result = ('tab', 0)
        # Detect mixed files
        elif max_line_mixed >= max_line_tab and max_line_mixed > max_line_space:
            nb = 0
            indent_value = None
            for i in range(8, 1, -1):
                if self.lines['mixed%d' % i] > int(nb * 1.1): # give a 10% threshold
                    indent_value = i
                    nb = self.lines['mixed%d' % indent_value]
            if indent_value is not None: # no lines
                result = ('mixed', (8, indent_value))
        info("Result: %s" % str(result))
        return result
    def __str__(self):
        # NOTE(review): when results() yields (None, 0) this prints "None 0".
        itype, ival = self.results()
        if itype != 'mixed':
            return '%s %d' % (itype, ival)
        else:
            itab, ispace = ival
            return '%s tab %d space %d' % (itype, itab, ispace)
def main():
    """Parse command-line flags, then report the indentation of each file.

    Uses Python 2 print statements, so this script requires Python 2.
    """
    file_list = []
    for opt in sys.argv[1:]:
        if opt == "--verbose" or opt == '-v':
            # Each flag raises verbosity one level.
            IndentFinder.VERBOSITY += 1
        elif opt[0] == "-":
            # Any other dash-option: show usage and stop.
            print help % sys.argv[0]
            return
        else:
            file_list.append(opt)
    fi = IndentFinder()
    if len(file_list) > 1:
        # multiple files
        for fname in file_list:
            fi.clear()
            fi.parse_file(fname)
            print "%s : %s" % (fname, str(fi))
        return
    else:
        # only one file, don't print filename
        fi.parse_file_list(file_list)
        print str(fi)
if __name__ == "__main__":
    main()
|
{
"content_hash": "fd2aa9fb3f8a6e864cd97cd61fc1e75c",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 145,
"avg_line_length": 34.738498789346245,
"alnum_prop": 0.5548895239422876,
"repo_name": "jirutka/editorconfig-tools",
"id": "39bb710383693d6b20bf19aa485c3275d54a95c5",
"size": "14598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indent_finder.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "863"
},
{
"name": "Python",
"bytes": "28692"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from person import views
# URL routes for the person API: list endpoint and numeric-pk detail endpoint.
urlpatterns = [
    url(r'^person/$', views.JSONResponse.person_list),
    url(r'^person/(?P<pk>[0-9]+)/$', views.JSONResponse.person_detail)
]
|
{
"content_hash": "8ac7dc4dbba512f4320347b86b2e3d4f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 70,
"avg_line_length": 29,
"alnum_prop": 0.6896551724137931,
"repo_name": "gnzandrs/opencare",
"id": "b4b5de637d756d9bd54ea83018278f1c5cc55055",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/api/Opencare/person/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "919"
},
{
"name": "Python",
"bytes": "15303"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse
from django.shortcuts import render_to_response
import time, random, math, os, json
import Stemmer
from utils import *
# Module-level corpus data, loaded once at import time. All views below
# read these globals; they are never mutated after load.
global_counts = load_json("data/global_counts.json")
sorted_global_counts = sort_by_value(global_counts)
stem_to_terms = load_json("data/stem_to_terms.json")
persons_terms = load_json("data/persons_terms.json")
relevancy_scores = load_json("data/relevancy_scores.json")
persons_memberships = load_json("data/persons_memberships.json")
people_pages = load_json("data/people_pages.json")
# De-duplicated whitespace-separated topic words.
topic_words = list(set( slurp("data/topics32.txt").split() ))
buddies = load_json("data/buddies.json")
# Shared stemmer instance used by all request handlers.
stemmer = Stemmer.Stemmer("english")
def compute_weights(words):
    """
    Take a list of words, grab their counts in the corpus, normalize the
    counts to 1-10 scale for display in the cloud.
    """
    weights = {}
    counts = {}
    for w in words:
        stemmed_w = stemmer.stemWord(w)
        # Words whose stem is missing from global_counts are dropped silently.
        if stemmed_w in global_counts:
            counts[w] = global_counts[stemmed_w]
    counts2 = sort_by_value(counts) # this is a list of tuples now -- (word, count)
    # NOTE(review): raises IndexError when no word was found in global_counts.
    b = counts2[0][1]
    a = counts2[-1][1]
    # Guard against division by zero when all counts are equal; every word
    # then gets weight 1 since x - a == 0 below.
    if b - a == 0: b = 2
    for w in counts:
        x = counts[w]
        # Linear rescale of [a, b] onto [1, 10].
        # NOTE(review): under Python 2 this is integer division — weights
        # come out as ints, presumably intentional for display; confirm.
        y = 1 + (x-a)*(10-1)/(b-a)
        weights[w] = y
    return weights
def unstem(weighted_words):
    """Map stems in a {stem: weight} dict back to surface forms.

    A stem with known surface forms is replaced by a randomly chosen one
    (so output is non-deterministic); unknown stems pass through unchanged.
    """
    # FIXME:
    result = {}
    for word in weighted_words:
        # Pick a random unstemming
        # FIXME: better heuristic
        if word in stem_to_terms:
            unstemmed_word = random.choice(stem_to_terms[word])
            result[unstemmed_word] = weighted_words[word]
        else:
            result[word] = weighted_words[word]
    return result
def simple_unstem(words):
    """Replace each stem by a randomly chosen known surface form;
    stems without known forms pass through unchanged."""
    return [
        random.choice(stem_to_terms[w]) if w in stem_to_terms else w
        for w in words
    ]
def common_words_for(people):
    """
    Get common words for list of people.
    """
    # NOTE(review): raises IndexError when `people` is empty; people with an
    # empty term list are skipped rather than emptying the intersection —
    # presumably intentional, confirm with callers.
    common_terms = set(persons_terms[people[0]])
    for person in people:
        if persons_terms[person] != []:
            common_terms = common_terms.intersection(set(persons_terms[person]))
    return list(common_terms)
def number_of_people_sharing_term(term, people):
    """
    Return number of people sharing the term.
    """
    return sum(1 for person in people if term in persons_terms[person])
def terms_with_count_of(count, term_cloud_counts):
    """Return the terms in *term_cloud_counts* whose count equals *count*."""
    return [term for term, c in term_cloud_counts.items() if c == count]
def top_n_most_relevant_terms(terms_per_person, person):
    """
    Return N most relevant terms for a person.
    """
    # Take the 2*N best-scored terms, then sample N of them at random so
    # repeated calls vary the cloud (output is non-deterministic).
    s = sort_by_value(relevancy_scores[person])
    terms = [pair[0] for pair in s][0:terms_per_person*2]
    random.shuffle(terms)
    return terms[0:terms_per_person]
### VIEWS HERE:
def home(req):
    """Render the landing page (no context data)."""
    return render_to_response("home.html", {})
def init_json(req):
    """Return the initial tag-cloud weights as JSON.

    Randomly picks one of three seed word sets (one sampled from the topic
    model, two hard-coded), so the starting cloud varies between visits.
    """
    n = random.randint(0, 2)
    if n == 0:
        random.shuffle(topic_words)
        start_words = topic_words[:20]
    elif n == 1:
        start_words = "goal agent information language function model structure algorithm data system tree xml database computer process research translation thread design".split()
    else:
        start_words = "agent ontology knowledge semantics logic calculus performance compiler language learning translation database xml rdf proof tree requirements process user learning text corpus probability design".split()
    weights = compute_weights(start_words)
    thejson = json.dumps(weights)
    return HttpResponse(thejson)
def app_query(req):
    """Handle a tag-cloud query POST.

    Stems the comma-separated query words, selects the people whose term
    lists contain all of them, and returns JSON with the matching people
    plus a fresh 24-term cloud describing them. Output is non-deterministic
    (random shuffles and random unstemming).
    """
    stemmed_terms = stemmer.stemWords(req.POST["queryWords"].strip().split(", "))
    old_count = int(req.POST["peopleCount"])
    if old_count == 0: old_count = len(persons_terms.keys())
    # Keep only people whose term list contains every query term.
    new_group = []
    for person in persons_terms.keys():
        all_found = True
        for term in stemmed_terms:
            if not term in persons_terms[person]:
                all_found = False
                break
        if all_found:
            new_group.append(person)
        all_found = True
    # Force the result set to shrink by at least 15% per refinement step.
    if len(new_group) > 5 and int((old_count*0.85)) < len(new_group):
        # FIXME:
        i = int((old_count*0.85))
        new_group = new_group[0:i]
    w = 24
    n_total = len(persons_terms.keys())
    n_current = len(new_group)
    # NOTE(review): n_current == 0 raises ZeroDivisionError in the else
    # branch below — presumably queries always match someone; confirm.
    if w < n_current: # each term must describe ~N_current/W people
        # Many matches: partition people into ~w groups and describe each
        # group by its members' common, most relevant terms.
        random.shuffle(new_group)
        group_size = int(math.ceil((1.0*n_current)/w))
        another_term_cloud = []
        pivots = range(0, n_current, group_size)
        for pivot in pivots:
            common_terms = persons_terms[new_group[pivot]]
            for idx in range(pivot+1, pivot+group_size):
                if idx >= len(new_group): break
                common_terms = list(set(persons_terms[new_group[idx]]).intersection(set(common_terms)))
            common_terms_relevancy_scores = {}
            for term in common_terms:
                common_terms_relevancy_scores[term] = relevancy_scores[new_group[pivot]][term]
            top_cw = [pair[0] for pair in sort_by_value(common_terms_relevancy_scores)][0:group_size*2]
            another_term_cloud += top_cw
        another_term_cloud = list(set(another_term_cloud))
        random.shuffle(another_term_cloud)
        another_term_cloud = another_term_cloud[0:w]
        return HttpResponse(json.dumps({"people": new_group, "weights": unstem(compute_weights(another_term_cloud))}))
    else:
        # Few matches: give each person an equal share of the cloud.
        terms_per_person = int(math.ceil(1.0*w/n_current))
        term_cloud = []
        for person in new_group:
            moar_terms = top_n_most_relevant_terms(terms_per_person, person)
            term_cloud += moar_terms
        return HttpResponse(json.dumps({"people": new_group, "weights": unstem(compute_weights(term_cloud))}))
def person_details(req, person_id):
    """Return a JSON profile for *person_id*: memberships, homepage,
    photo URL and 20 representative keywords."""
    info = {}
    if person_id in persons_memberships:
        info.update(persons_memberships[person_id])
    else:
        # Unknown person: keep the response shape stable with empty fields.
        info["name"] = ""
        info["url"] = ""
    if person_id in people_pages:
        info["homepage_url"] = people_pages[person_id]
    # NOTE(review): photo URLs are hard-coded to localhost:8000 —
    # presumably development-only; confirm before deploying.
    info["photo_url"] = "http://localhost:8000/static/photos/no_photo.png"
    # A photo named "<person_id>.<ext>" overrides the placeholder.
    photos = os.listdir("data/photos")
    for p in photos:
        if person_id == p.split(".")[0]:
            info["photo_url"] = "http://localhost:8000/static/photos/"+p
    keywords = simple_unstem(top_n_most_relevant_terms(20, person_id))
    info["keywords"] = keywords
    return HttpResponse(json.dumps(info))
def buddies_index(req):
    """Render the buddies visualisation page (no context data)."""
    return render_to_response("buddies.html", {})
def person_buddies(req, person_id):
    """Return a JSON tree for a person: a root node plus one level of buddy children.

    The person itself is excluded from its own children list.
    """
    children = [
        {"id": buddy, "name": buddy, "data": {}, "children": []}
        for buddy in buddies[person_id]
        if buddy != person_id
    ]
    tree = {"id": person_id, "name": person_id, "data": {}, "children": children}
    return HttpResponse(json.dumps(tree))
def static_graph(req):
    """Render the static graph visualisation page (no template context needed)."""
    context = {}
    return render_to_response("static_graph.html", context)
|
{
"content_hash": "f7c073cf66d82d1e6738d667763f1ef7",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 226,
"avg_line_length": 34.25821596244131,
"alnum_prop": 0.6120323420583802,
"repo_name": "hassy/informatics-explorer",
"id": "75e52f253106fad625781f573b7886dc9a47460b",
"size": "7297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "376877"
},
{
"name": "Python",
"bytes": "12878"
}
],
"symlink_target": ""
}
|
from .Base import *
from .helper import containsChnChar


class FEDTransChnFontFamilyNameIntoEng(RuleChecker):
    # NOTE(review): the docstring below is machine-readable JSON metadata
    # consumed by the checker framework (summary/desc are shown to users in
    # Chinese) — kept verbatim, do not translate or reformat it.
    '''{
        "summary":"字体设置时使用英文",
        "desc":"有的字体设置可以通过中文和英文两者方式来声明,比如<br>
        <code>微软雅黑</code> 和 <code>Microsoft Yahei</code> ,我们推荐用英文的方式来实现"
    }'''

    def __init__(self):
        # Rule id, severity, and message template (${selector} is filled in
        # by the framework when reporting).
        self.id = 'no-chn-font-family'
        self.errorLevel = ERROR_LEVEL.ERROR
        self.errorMsg = 'should not use chinese font family name in "${selector}"'

    def check(self, rule, config):
        """Return False when a font/font-family value contains Chinese characters."""
        if rule.name not in ('font', 'font-family'):
            # Rule only applies to font declarations; everything else passes.
            return True
        return not containsChnChar(rule.value)
|
{
"content_hash": "6b43161466d17966aaf4db08d96e7e56",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 82,
"avg_line_length": 28.708333333333332,
"alnum_prop": 0.6052249637155298,
"repo_name": "wangjeaf/CSSCheckStyle",
"id": "c662fe57cc07146bdc6151fe21b6b370656825c3",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckstyle/plugins/FEDTransChnFontFamilyNameIntoEng.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "373226"
},
{
"name": "Shell",
"bytes": "928"
},
{
"name": "VimL",
"bytes": "1871"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.