text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from .utils import TYPE_PICKUP, TYPE_TRANSFORM, KEY, MOVE_ACTS
class Mining(object):
    """Static object/operation/subtask configuration for the 'mining' env."""

    def __init__(self):
        # map layout: [min, max] counts for block and water cells
        self.env_name = 'mining'
        nb_block = [1, 3]
        nb_water = [1, 3]
        # objects: two crafting stations first, then the pickable
        # resources, then the remaining crafting stations.
        obj_list = [
            dict(name='workspace', pickable=False, transformable=True,
                 oid=0, outcome=0, unique=True),
            dict(name='furnace', pickable=False, transformable=True,
                 oid=1, outcome=1, unique=True),
        ]
        pickables = [
            ('tree', 2, 3), ('stone', 3, 3), ('grass', 4, 2),
            ('pig', 5, 1), ('coal', 6, 1), ('iron', 7, 1),
            ('silver', 8, 1), ('gold', 9, 1), ('diamond', 10, 3),
        ]
        for obj_name, obj_oid, obj_max in pickables:
            obj_list.append(dict(
                name=obj_name, pickable=True, transformable=False,
                oid=obj_oid, max=obj_max))
        obj_list.append(dict(
            name='jeweler', pickable=False, transformable=True, oid=11,
            outcome=11, unique=True))
        obj_list.append(dict(
            name='lumbershop', pickable=False, transformable=True, oid=12,
            outcome=12, unique=True))
        for obj in obj_list:
            obj['imgname'] = obj['name'] + '.png'
        # operation: pickup (type=0) or transform (type=1)
        operation_list = {
            KEY.PICKUP: dict(name='pickup', oper_type=TYPE_PICKUP, key='p')}
        for use_index, use_key in enumerate(
                (KEY.USE_1, KEY.USE_2, KEY.USE_3, KEY.USE_4, KEY.USE_5), 1):
            operation_list[use_key] = dict(
                name='use_%d' % use_index, oper_type=TYPE_TRANSFORM,
                key=str(use_index))
        # item = agent + block + water + objects
        item_name_to_iid = {'agent': 0, 'block': 1, 'water': 2}
        for obj in obj_list:
            item_name_to_iid[obj['name']] = obj['oid'] + 3
        # subtasks in id order; param is (operation key, target oid)
        subtask_specs = [
            ('Cut wood', (KEY.PICKUP, 2)),            # 0
            ("Get stone", (KEY.PICKUP, 3)),
            ("Get string", (KEY.PICKUP, 4)),          # 2
            ("Make firewood", (KEY.USE_1, 12)),       # 3
            ("Make stick", (KEY.USE_2, 12)),
            ("Make arrow", (KEY.USE_3, 12)),
            ("Make bow", (KEY.USE_4, 12)),
            ("Make stone pickaxe", (KEY.USE_1, 0)),   # 7
            ("Hit pig", (KEY.PICKUP, 5)),
            ("Get coal", (KEY.PICKUP, 6)),            # 9
            ("Get iron ore", (KEY.PICKUP, 7)),
            ("Get silver ore", (KEY.PICKUP, 8)),
            ("Light furnace", (KEY.USE_1, 1)),        # 12
            ("Smelt iron", (KEY.USE_2, 1)),           # 13
            ("Smelt silver", (KEY.USE_3, 1)),
            ("Bake pork", (KEY.USE_5, 1)),
            ("Make iron pickaxe", (KEY.USE_2, 0)),    # 16
            ("Make silverware", (KEY.USE_3, 0)),
            ("Get gold ore", (KEY.PICKUP, 9)),        # 18
            ("Get diamond ore", (KEY.PICKUP, 10)),
            ("Smelt gold", (KEY.USE_4, 1)),           # 20
            ("Craft earrings", (KEY.USE_1, 11)),
            ("Craft rings", (KEY.USE_2, 11)),
            ("Make goldware", (KEY.USE_4, 0)),        # 23
            ("Make bracelet", (KEY.USE_5, 0)),
            ("Craft necklace", (KEY.USE_3, 11)),
        ]
        subtask_list = [dict(name=sname, param=sparam)
                        for sname, sparam in subtask_specs]
        subtask_param_list = [spec['param'] for spec in subtask_list]
        subtask_param_to_id = {
            par: sid for sid, par in enumerate(subtask_param_list)}
        # expose the assembled configuration on the instance
        self.operation_list = operation_list
        self.legal_actions = MOVE_ACTS | {
            KEY.PICKUP, KEY.USE_1, KEY.USE_2, KEY.USE_3, KEY.USE_4, KEY.USE_5}
        self.nb_operation_type = len(operation_list)
        self.object_param_list = obj_list
        self.nb_obj_type = len(obj_list)
        self.item_name_to_iid = item_name_to_iid
        self.nb_block = nb_block
        self.nb_water = nb_water
        self.subtask_list = subtask_list
        self.subtask_param_list = subtask_param_list
        self.subtask_param_to_id = subtask_param_to_id
        self.nb_subtask_type = len(subtask_list)
        self.width = 10
        self.height = 10
        self.feat_dim = 3 * len(subtask_list) + 1
        self.ranksep = "0.1"
|
{
"content_hash": "02edb09ddd28f6513cf4d74d9932caa4",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 79,
"avg_line_length": 40.6125,
"alnum_prop": 0.5341643582640813,
"repo_name": "srsohn/subtask-graph-execution-light",
"id": "dfc48b84ebcd0d1342e884a8382d106db8cc2c3a",
"size": "6498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sge/mining.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35392"
}
],
"symlink_target": ""
}
|
import json
import os.path
class AzulejoConfiguration(object):
    """Handles configuration of program.

    Loads JSON configuration from ``~/.azulejorc.js``, creating that file
    from the bundled ``initial_config.json`` on first run.
    ``always_use_initial=True`` forces the bundled defaults (useful for
    tests).
    """

    def __init__(self, always_use_initial=False):
        self._initial_config_path = os.path.join(os.path.dirname(
            os.path.abspath(__file__)), 'initial_config.json')
        if always_use_initial:
            conf_filename = self._initial_config_path
        else:
            conf_filename = os.path.expanduser('~/.azulejorc.js')
            if not os.path.isfile(conf_filename):
                print("Creating file: '%s'" % (conf_filename))
                self.create_initial_config_file(conf_filename)
        print("Reading config file: '%s'" % (conf_filename))
        json_string = self.read_file(conf_filename)
        self.conf_data = json.loads(json_string)

    @staticmethod
    def read_file(path):
        """Returns file content as string.

        Uses a context manager so the handle is closed even when read()
        raises (the previous implementation leaked it on errors).
        """
        with open(path, 'r') as file_handler:
            return file_handler.read()

    def create_initial_config_file(self, conf_filename):
        """Create a file with config values copied from the bundled defaults."""
        with open(conf_filename, 'w') as cfgfile:
            cfgfile.write(self.read_file(self._initial_config_path))

    def get_config_data(self):
        """Gets the parsed config data (a dict/list from json.loads)."""
        return self.conf_data
|
{
"content_hash": "1fc884442150d52fb6f974d0f3666541",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 65,
"avg_line_length": 30.555555555555557,
"alnum_prop": 0.5970909090909091,
"repo_name": "johnteslade/azulejo",
"id": "1ede49e3d975237d1f936caf259b4497f0dcd41b",
"size": "1493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azulejo/configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34918"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.core import mail
from django.core.urlresolvers import reverse
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.account import app_settings as account_settings
from allauth.account.models import EmailConfirmation, EmailAddress
from allauth.socialaccount.models import SocialAccount, SocialToken
from allauth.socialaccount.providers import registry
from allauth.tests import MockedResponse, TestCase, patch
from allauth.account.signals import user_signed_up
from allauth.account.adapter import get_adapter
from requests.exceptions import HTTPError
from .provider import GoogleProvider
@override_settings(
    SOCIALACCOUNT_AUTO_SIGNUP=True,
    ACCOUNT_SIGNUP_FORM_CLASS=None,
    ACCOUNT_EMAIL_VERIFICATION=account_settings
    .EmailVerificationMethod.MANDATORY)
class GoogleTests(OAuth2TestsMixin, TestCase):
    """Exercises the Google OAuth2 provider login flow against mocked responses."""
    provider_id = GoogleProvider.id
    def get_mocked_response(self,
                            family_name='Penners',
                            given_name='Raymond',
                            name='Raymond Penners',
                            email='raymond.penners@gmail.com',
                            verified_email=True):
        """Build a fake 200 response mimicking Google's userinfo JSON payload."""
        return MockedResponse(200, """
              {"family_name": "%s", "name": "%s",
               "picture": "https://lh5.googleusercontent.com/photo.jpg",
               "locale": "nl", "gender": "male",
               "email": "%s",
               "link": "https://plus.google.com/108204268033311374519",
               "given_name": "%s", "id": "108204268033311374519",
               "verified_email": %s }
        """ % (family_name,
               name,
               email,
               given_name,
               (repr(verified_email).lower())))
    def test_google_compelete_login_401(self):
        """complete_login propagates HTTPError when Google answers 401."""
        from allauth.socialaccount.providers.google.views import \
            GoogleOAuth2Adapter
        class LessMockedResponse(MockedResponse):
            # Unlike MockedResponse, actually raise on non-200 status.
            def raise_for_status(self):
                if self.status_code != 200:
                    raise HTTPError(None)
        request = RequestFactory().get(
            reverse(self.provider.id + '_login'),
            dict(process='login'))
        adapter = GoogleOAuth2Adapter()
        app = adapter.get_provider().get_app(request)
        token = SocialToken(token='some_token')
        response_with_401 = LessMockedResponse(
            401, """
            {"error": {
              "errors": [{
                "domain": "global",
                "reason": "authError",
                "message": "Invalid Credentials",
                "locationType": "header",
                "location": "Authorization" } ],
              "code": 401,
              "message": "Invalid Credentials" }
            }""")
        with patch(
                'allauth.socialaccount.providers.google.views'
                '.requests') as patched_requests:
            patched_requests.get.return_value = response_with_401
            with self.assertRaises(HTTPError):
                adapter.complete_login(request, app, token)
    def test_username_based_on_email(self):
        """With non-ASCII names, the username falls back to the email local part."""
        first_name = '明'
        last_name = '小'
        email = 'raymond.penners@gmail.com'
        self.login(self.get_mocked_response(name=first_name + ' ' + last_name,
                                            email=email,
                                            given_name=first_name,
                                            family_name=last_name,
                                            verified_email=True))
        user = User.objects.get(email=email)
        self.assertEqual(user.username, 'raymond.penners')
    def test_email_verified(self):
        """A Google-verified email is stored verified and no confirmation is sent."""
        test_email = 'raymond.penners@gmail.com'
        self.login(self.get_mocked_response(verified_email=True))
        email_address = EmailAddress.objects \
            .get(email=test_email,
                 verified=True)
        self.assertFalse(EmailConfirmation.objects
                         .filter(email_address__email=test_email)
                         .exists())
        account = email_address.user.socialaccount_set.all()[0]
        self.assertEqual(account.extra_data['given_name'], 'Raymond')
    def test_user_signed_up_signal(self):
        """user_signed_up fires with the sociallogin carrying provider and user."""
        sent_signals = []
        def on_signed_up(sender, request, user, **kwargs):
            sociallogin = kwargs['sociallogin']
            self.assertEqual(sociallogin.account.provider,
                             GoogleProvider.id)
            self.assertEqual(sociallogin.account.user,
                             user)
            sent_signals.append(sender)
        user_signed_up.connect(on_signed_up)
        self.login(self.get_mocked_response(verified_email=True))
        self.assertTrue(len(sent_signals) > 0)
    def test_email_unverified(self):
        """An unverified Google email triggers the signup confirmation mail."""
        test_email = 'raymond.penners@gmail.com'
        resp = self.login(self.get_mocked_response(verified_email=False))
        email_address = EmailAddress.objects \
            .get(email=test_email)
        self.assertFalse(email_address.verified)
        self.assertTrue(EmailConfirmation.objects
                        .filter(email_address__email=test_email)
                        .exists())
        self.assertTemplateUsed(
            resp,
            'account/email/email_confirmation_signup_subject.txt')
    def test_email_verified_stashed(self):
        """An email stashed as verified in the session overrides Google's flag."""
        # http://slacy.com/blog/2012/01/how-to-set-session-variables-in-django-unit-tests/
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
        request = RequestFactory().get('/')
        request.session = self.client.session
        adapter = get_adapter()
        test_email = 'raymond.penners@gmail.com'
        adapter.stash_verified_email(request, test_email)
        request.session.save()
        self.login(self.get_mocked_response(verified_email=False))
        email_address = EmailAddress.objects \
            .get(email=test_email)
        self.assertTrue(email_address.verified)
        self.assertFalse(
            EmailConfirmation.objects.filter(
                email_address__email=test_email).exists())
    def test_account_connect(self):
        """Connecting Google to an existing account adds no new email addresses."""
        email = 'some@mail.com'
        user = User.objects.create(username='user',
                                   is_active=True,
                                   email=email)
        user.set_password('test')
        user.save()
        EmailAddress.objects.create(user=user,
                                    email=email,
                                    primary=True,
                                    verified=True)
        self.client.login(username=user.username,
                          password='test')
        self.login(self.get_mocked_response(verified_email=True),
                   process='connect')
        # Check if we connected...
        self.assertTrue(SocialAccount.objects.filter(
            user=user,
            provider=GoogleProvider.id).exists())
        # For now, we do not pick up any new e-mail addresses on connect
        self.assertEqual(EmailAddress.objects.filter(user=user).count(), 1)
        self.assertEqual(EmailAddress.objects.filter(
            user=user,
            email=email).count(), 1)
    @override_settings(
        ACCOUNT_EMAIL_VERIFICATION=account_settings
        .EmailVerificationMethod.MANDATORY,
        SOCIALACCOUNT_EMAIL_VERIFICATION=account_settings
        .EmailVerificationMethod.NONE
    )
    def test_social_email_verification_skipped(self):
        """SOCIALACCOUNT verification NONE suppresses the confirmation mail."""
        test_email = 'raymond.penners@gmail.com'
        self.login(self.get_mocked_response(verified_email=False))
        email_address = EmailAddress.objects.get(email=test_email)
        self.assertFalse(email_address.verified)
        self.assertFalse(EmailConfirmation.objects.filter(
            email_address__email=test_email).exists())
    @override_settings(
        ACCOUNT_EMAIL_VERIFICATION=account_settings
        .EmailVerificationMethod.OPTIONAL,
        SOCIALACCOUNT_EMAIL_VERIFICATION=account_settings
        .EmailVerificationMethod.OPTIONAL
    )
    def test_social_email_verification_optional(self):
        """OPTIONAL verification sends one mail on first login, none on repeat."""
        self.login(self.get_mocked_response(verified_email=False))
        self.assertEqual(len(mail.outbox), 1)
        self.login(self.get_mocked_response(verified_email=False))
        self.assertEqual(len(mail.outbox), 1)
|
{
"content_hash": "d32d1afb9b44ec5bdad7d8db2502fdd0",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 90,
"avg_line_length": 41.350467289719624,
"alnum_prop": 0.6007458469883603,
"repo_name": "bjorand/django-allauth",
"id": "b67b4e331f87056888ae4b47c440c437b5231436",
"size": "8877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/google/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42100"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Python",
"bytes": "658809"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
from enum import Enum, unique
from numbers import Number
from gen.apache.aurora.api.ttypes import Resource
# (resource_type, value) pair describing one concrete resource amount.
ResourceDetails = namedtuple('ResourceDetails', ['resource_type', 'value'])
@unique
class ResourceType(Enum):
    """Describes Aurora resource types and their traits."""
    CPUS = ('numCpus', 'CPU', ' core(s)', float, 1)
    RAM_MB = ('ramMb', 'RAM', ' MB', int, 2)
    DISK_MB = ('diskMb', 'Disk', ' MB', int, 3)
    PORTS = ('namedPort', 'Port', '', str, 4)
    GPUS = ('numGpus', 'GPU', ' GPU(s)', int, 5)

    def __init__(self, field, display_name, display_unit, value_type, display_position):
        # Each member's tuple value is unpacked here into a single traits
        # mapping; the read-only properties below expose the entries.
        self._traits = dict(
            field=field,
            display_name=display_name,
            display_unit=display_unit,
            value_type=value_type,
            display_position=display_position,
        )

    @property
    def field(self):
        """Name of the thrift Resource attribute backing this type."""
        return self._traits['field']

    @property
    def display_name(self):
        """Human-readable label (e.g. 'CPU')."""
        return self._traits['display_name']

    @property
    def display_unit(self):
        """Unit suffix used when displaying values (may be empty)."""
        return self._traits['display_unit']

    @property
    def value_type(self):
        """Python type the resource value is expected to carry."""
        return self._traits['value_type']

    @property
    def display_position(self):
        """Sort key controlling display order."""
        return self._traits['display_position']

    def resource_value(self, resource):
        """Return this type's value from *resource* (None when unset)."""
        return resource.__dict__.get(self._traits['field'])

    @classmethod
    def from_resource(cls, resource):
        """Map a resource union to its ResourceType; raise ValueError if no field is set."""
        for member in cls:
            if resource.__dict__.get(member.field) is not None:
                return member
        raise ValueError("Unknown resource: %s" % resource)
class ResourceManager(object):
    """Provides helper methods for working with Aurora resources."""

    @classmethod
    def resource_details(cls, resources):
        """Convert raw resources into ResourceDetails sorted by display order."""
        if not resources:
            return []
        details = []
        for res in list(resources):
            rtype = ResourceType.from_resource(res)
            details.append(ResourceDetails(rtype, rtype.resource_value(res)))
        return sorted(details, key=lambda d: d.resource_type.display_position)

    @classmethod
    def resource_details_from_quota(cls, quota):
        """Resource details for a quota object."""
        return cls.resource_details(quota.resources)

    @classmethod
    def resource_details_from_task(cls, task):
        """Resource details for a task, backfilling legacy scalar fields."""
        return cls.resource_details(cls._backfill_resources(task))

    @classmethod
    def quantity_of(cls, resource_details, resource_type):
        """Sum values of the given type; non-numeric values count as 1 each."""
        total = 0.0
        for detail in resource_details:
            if detail.resource_type is resource_type:
                total += detail.value if isinstance(detail.value, Number) else 1
        return total

    @classmethod
    def _backfill_resources(cls, r_object):
        # Older task configs carry scalar numCpus/ramMb/diskMb fields instead
        # of a resources collection; synthesize Resource entries from them.
        resources = list(r_object.resources) if r_object.resources else None
        if resources is None:
            resources = [
                Resource(numCpus=r_object.numCpus),
                Resource(ramMb=r_object.ramMb),
                Resource(diskMb=r_object.diskMb),
            ]
            if hasattr(r_object, 'requestedPorts'):
                resources.extend(
                    Resource(namedPort=p)
                    for p in r_object.requestedPorts or [])
        return resources
|
{
"content_hash": "3ebf6cae524bea4cf82f9d102d88c051",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 86,
"avg_line_length": 29.161616161616163,
"alnum_prop": 0.6754416349151369,
"repo_name": "apache/aurora",
"id": "21d1fc7645a6cc1968eb7548ba92b937263c508e",
"size": "3435",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/main/python/apache/aurora/config/resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24231"
},
{
"name": "Groovy",
"bytes": "7847"
},
{
"name": "HTML",
"bytes": "13576"
},
{
"name": "Java",
"bytes": "3566317"
},
{
"name": "JavaScript",
"bytes": "215916"
},
{
"name": "Python",
"bytes": "1578747"
},
{
"name": "Ruby",
"bytes": "4315"
},
{
"name": "Shell",
"bytes": "91180"
},
{
"name": "Smalltalk",
"bytes": "79"
},
{
"name": "Smarty",
"bytes": "25233"
},
{
"name": "Starlark",
"bytes": "43680"
},
{
"name": "Thrift",
"bytes": "58591"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
import uuid
from unittest import skip
from mock import patch
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.core.files import File
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from api.pagination import LinkHeaderPagination
from jobs.models import ExportConfig, ExportFormat, ExportProfile, Job
from tasks.models import ExportRun, ExportTask
# Module-level logger named after this test module.
logger = logging.getLogger(__name__)
class TestJobViewSet(APITestCase):
def setUp(self, ):
self.path = os.path.dirname(os.path.realpath(__file__))
self.group = Group.objects.create(name='TestDefaultExportExtentGroup')
profile = ExportProfile.objects.create(
name='DefaultExportProfile',
max_extent=2500000,
group=self.group
)
self.user = User.objects.create_user(
username='demo', email='demo@demo.com', password='demo'
)
extents = (-3.9, 16.1, 7.0, 27.6)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob', event='Test Activation',
description='Test description', user=self.user,
the_geom=the_geom)
format = ExportFormat.objects.get(slug='obf')
self.job.formats.add(format)
token = Token.objects.create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
HTTP_ACCEPT='application/json; version=1.0',
HTTP_ACCEPT_LANGUAGE='en',
HTTP_HOST='testserver')
# create a test config
f = File(open(self.path + '/files/hdm_presets.xml'))
filename = f.name.split('/')[-1]
name = 'Test Configuration File'
self.config = ExportConfig.objects.create(name='Test Preset Config', filename=filename, upload=f, config_type='PRESET', user=self.user)
f.close()
self.assertIsNotNone(self.config)
self.job.configs.add(self.config)
self.tags = [
{
"name": "Telecommunication office",
"key": "office", "value": "telecommunication",
"data_model": "HDM",
"geom_types": ["point", "polygon"],
"groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
},
{
"name": "Radio or TV Studio",
"key": "amenity", "value": "studio",
"data_model": "OSM",
"geom_types": ["point", "polygon"],
"groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
},
{
"name": "Telecommunication antenna",
"key": "man_made", "value": "tower",
"data_model": "OSM",
"geom_types": ["point", "polygon"],
"groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
},
{
"name": "Telecommunication company retail office",
"key": "office", "value": "telecommunication",
"data_model": "OSM",
"geom_types": ["point", "polygon"],
"groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
}
]
    def tearDown(self,):
        """Delete the export config created in setUp."""
        self.config.delete()  # clean up
def test_list(self, ):
expected = '/api/jobs'
url = reverse('api:jobs-list')
self.assertEquals(expected, url)
def test_get_job_detail(self, ):
expected = '/api/jobs/{0}'.format(self.job.uid)
url = reverse('api:jobs-detail', args=[self.job.uid])
self.assertEquals(expected, url)
data = {"uid": str(self.job.uid),
"name": "Test",
"url": 'http://testserver{0}'.format(url),
"description": "Test Description",
"exports": [{"uid": "8611792d-3d99-4c8f-a213-787bc7f3066",
"url": "http://testserver/api/formats/obf",
"name": "OBF Format",
"description": "OSMAnd OBF Export Format."}],
"created_at": "2015-05-21T19:46:37.163749Z",
"updated_at": "2015-05-21T19:46:47.207111Z",
"status": "SUCCESS"}
response = self.client.get(url)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant content
self.assertEquals(response.data['uid'], data['uid'])
self.assertEquals(response.data['url'], data['url'])
self.assertEqual(response.data['exports'][0]['url'], data['exports'][0]['url'])
def test_delete_job(self, ):
url = reverse('api:jobs-detail', args=[self.job.uid])
response = self.client.delete(url)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEquals(response['Content-Length'], '0')
self.assertEquals(response['Content-Language'], 'en')
@patch('api.views.ExportTaskRunner')
def test_create_job_success(self, mock):
task_runner = mock.return_value
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
config_uid = self.config.uid
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats,
'preset': config_uid,
'published': True,
'tags': self.tags
}
response = self.client.post(url, request_data, format='json')
job_uid = response.data['uid']
# test the ExportTaskRunner.run_task(job_id) method gets called.
task_runner.run_task.assert_called_once_with(job_uid=job_uid)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant response content
self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
self.assertEqual(response.data['name'], request_data['name'])
self.assertEqual(response.data['description'], request_data['description'])
self.assertTrue(response.data['published'])
# check we have the correct tags
job = Job.objects.get(uid=job_uid)
tags = job.tags.all()
self.assertIsNotNone(tags)
self.assertEquals(233, len(tags))
@patch('api.views.ExportTaskRunner')
def test_create_job_with_config_success(self, mock):
task_runner = mock.return_value
config_uid = self.config.uid
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats,
'preset': config_uid,
'transform': '',
'translation': ''
}
response = self.client.post(url, request_data, format='json')
job_uid = response.data['uid']
# test the ExportTaskRunner.run_task(job_id) method gets called.
task_runner.run_task.assert_called_once_with(job_uid=job_uid)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant response content
self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
self.assertEqual(response.data['name'], request_data['name'])
self.assertEqual(response.data['description'], request_data['description'])
self.assertFalse(response.data['published'])
configs = self.job.configs.all()
self.assertIsNotNone(configs[0])
@patch('api.views.ExportTaskRunner')
def test_create_job_with_tags(self, mock):
# delete the existing tags and test adding them with json
self.job.tags.all().delete()
task_runner = mock.return_value
config_uid = self.config.uid
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats,
# 'preset': config_uid,
'transform': '',
'translate': '',
'tags': self.tags
}
response = self.client.post(url, request_data, format='json')
job_uid = response.data['uid']
# test the ExportTaskRunner.run_task(job_id) method gets called.
task_runner.run_task.assert_called_once_with(job_uid=job_uid)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant response content
self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
self.assertEqual(response.data['name'], request_data['name'])
self.assertEqual(response.data['description'], request_data['description'])
configs = self.job.configs.all()
# self.assertIsNotNone(configs[0])
def test_missing_bbox_param(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
# 'xmin': -3.9, missing
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['xmin is required.'], response.data['xmin'])
def test_invalid_bbox_param(self, ):
url = reverse('api:jobs-list')
formats = [str(format.uid) for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': '', # empty
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data, format='json')
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid xmin value.'], response.data['xmin'])
def test_invalid_bbox(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 7.0, # invalid
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid_bounds'], response.data['id'])
def test_lat_lon_bbox(self, ):
url = reverse('api:jobs-list')
formats = [str(format.uid) for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -227.14, # invalid
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(["Ensure this value is greater than or equal to -180."], response.data['xmin'])
def test_coord_nan(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 'xyz', # invalid
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid xmin value.'], response.data['xmin'])
def test_inverted_coords(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 7.0, # inverted
'ymin': 16.1,
'xmax': -3.9, # inverted
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['inverted_coordinates'], response.data['id'])
def test_empty_string_param(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': '', # empty
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['This field may not be blank.'], response.data['description'])
def test_missing_format_param(self, ):
url = reverse('api:jobs-list')
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
# 'formats': '', # missing
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['Select an export format.'], response.data['formats'])
def test_invalid_format_param(self, ):
url = reverse('api:jobs-list')
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': '', # invalid
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertIsNotNone(response.data['formats'])
def test_no_matching_format_slug(self, ):
url = reverse('api:jobs-list')
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': ['broken-format-one', 'broken-format-two']
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(response.data['formats'], ['invalid export format.'])
@patch('api.views.ExportTaskRunner')
def test_get_correct_region(self, mock):
task_runner = mock.return_value
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
# job extent spans africa / asia but greater intersection with asia
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 36.90,
'ymin': 13.54,
'xmax': 48.52,
'ymax': 20.24,
'formats': formats
}
response = self.client.post(url, request_data, format='json')
job_uid = response.data['uid']
# test the ExportTaskRunner.run_task(job_id) method gets called.
task_runner.run_task.assert_called_once_with(job_uid=job_uid)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant response content
self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
self.assertEqual(response.data['name'], request_data['name'])
self.assertEqual(response.data['description'], request_data['description'])
# test the region
region = response.data['region']
self.assertIsNotNone(region)
self.assertEquals(region['name'], 'Central Asia/Middle East')
def test_invalid_region(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
# job outside any region
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 2.74,
'ymin': 47.66,
'xmax': 11.61,
'ymax': 54.24,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid_region'], response.data['id'])
def test_extents_too_large(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
# job outside any region
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -40,
'ymin': -10,
'xmax': 40,
'ymax': 20,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid_extents'], response.data['id'])
class TestBBoxSearch(APITestCase):
    """
    Test cases for testing bounding box searches.
    """
    @patch('api.views.ExportTaskRunner')
    def setUp(self, mock):
        """Create an authenticated user and seed eight jobs with known extents."""
        task_runner = mock.return_value
        url = reverse('api:jobs-list')
        # create dummy user
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create_user(
            username='demo', email='demo@demo.com', password='demo'
        )
        # setup token authentication
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        # pull out the format slugs (avoid shadowing the builtin `format`)
        export_formats = [export_format.slug for export_format in ExportFormat.objects.all()]
        # create test jobs, one per extent
        extents = [(-3.9, 16.1, 7.0, 27.6), (36.90, 13.54, 48.52, 20.24),
                   (-71.79, -49.57, -67.14, -46.16), (-61.27, -6.49, -56.20, -2.25),
                   (-11.61, 32.07, -6.42, 36.31), (-10.66, 5.81, -2.45, 11.83),
                   (47.26, 34.58, 52.92, 39.15), (90.00, 11.28, 95.74, 17.02)]
        for extent in extents:
            request_data = {
                'name': 'TestJob',
                'description': 'Test description',
                'event': 'Test Activation',
                'xmin': extent[0],
                'ymin': extent[1],
                'xmax': extent[2],
                'ymax': extent[3],
                'formats': export_formats
            }
            response = self.client.post(url, request_data, format='json')
            # assertEquals is a deprecated alias (removed in Python 3.12)
            self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
        self.assertEqual(8, len(Job.objects.all()))
        LinkHeaderPagination.page_size = 2

    def test_bbox_search_success(self):
        """A bbox covering several jobs returns a paginated 206 response."""
        url = reverse('api:jobs-list')
        extent = (-79.5, -16.16, 7.40, 52.44)
        param = 'bbox={0},{1},{2},{3}'.format(extent[0], extent[1], extent[2], extent[3])
        response = self.client.get('{0}?{1}'.format(url, param))
        self.assertEqual(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
        self.assertEqual(2, len(response.data))  # 8 jobs in total but response is paginated

    def test_list_jobs_no_bbox(self):
        """Listing without a bbox still paginates and links to the next page."""
        url = reverse('api:jobs-list')
        response = self.client.get(url)
        self.assertEqual(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual(response['Link'], '<http://testserver/api/jobs?page=2>; rel="next"')
        self.assertEqual(2, len(response.data))  # 8 jobs in total but response is paginated

    def test_bbox_search_missing_params(self):
        """An empty bbox parameter yields a missing_bbox_parameter error."""
        url = reverse('api:jobs-list')
        param = 'bbox='  # missing params
        response = self.client.get('{0}?{1}'.format(url, param))
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual('missing_bbox_parameter', response.data['id'])

    def test_bbox_missing_coord(self):
        """A bbox with only three coordinates yields a missing_bbox_parameter error."""
        url = reverse('api:jobs-list')
        extent = (-79.5, -16.16, 7.40)  # one coordinate missing
        param = 'bbox={0},{1},{2}'.format(extent[0], extent[1], extent[2])
        response = self.client.get('{0}?{1}'.format(url, param))
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response['Content-Type'], 'application/json; version=1.0')
        self.assertEqual(response['Content-Language'], 'en')
        self.assertEqual('missing_bbox_parameter', response.data['id'])
class TestPagination(APITestCase):
    """Placeholder for pagination test cases (none implemented yet)."""
class TestExportRunViewSet(APITestCase):
    """
    Test cases for ExportRunViewSet
    """
    def setUp(self):
        """Create an authenticated user, a job, and a single export run."""
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        extents = (-3.9, 16.1, 7.0, 27.6)
        bbox = Polygon.from_bbox(extents)
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob',
                                      description='Test description', user=self.user,
                                      the_geom=the_geom)
        self.job_uid = str(self.job.uid)
        self.run = ExportRun.objects.create(job=self.job, user=self.user)
        self.run_uid = str(self.run.uid)

    def test_retrieve_run(self):
        """GET on the run detail endpoint returns the run's uid."""
        expected = '/api/runs/{0}'.format(self.run_uid)
        url = reverse('api:runs-detail', args=[self.run_uid])
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
        self.assertEqual(expected, url)
        response = self.client.get(url)
        self.assertIsNotNone(response)
        result = response.data
        # make sure we get the correct uid back out
        self.assertEqual(self.run_uid, result[0].get('uid'))

    def test_list_runs(self):
        """Filtering the run list by job_uid returns exactly this job's run."""
        expected = '/api/runs'
        url = reverse('api:runs-list')
        self.assertEqual(expected, url)
        query = '{0}?job_uid={1}'.format(url, self.job.uid)
        response = self.client.get(query)
        self.assertIsNotNone(response)
        result = response.data
        # make sure we get the correct uid back out
        self.assertEqual(1, len(result))
        self.assertEqual(self.run_uid, result[0].get('uid'))
class TestExportConfigViewSet(APITestCase):
    """
    Test cases for ExportConfigViewSet
    """
    def setUp(self):
        """Create an authenticated user and a job to attach configurations to."""
        self.path = os.path.dirname(os.path.realpath(__file__))
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob',
                                      description='Test description', user=self.user,
                                      the_geom=the_geom)
        self.uid = self.job.uid
        # setup token authentication
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')

    def test_create_config(self):
        """A valid multipart upload creates and persists an ExportConfig."""
        url = reverse('api:configs-list')
        path = os.path.dirname(os.path.realpath(__file__))
        # use a context manager so the fixture file handle is not leaked
        with open(path + '/files/Example Transform.sql', 'r') as fh:
            f = File(fh)
            name = 'Test Export Config'
            response = self.client.post(url, {'name': name, 'upload': f, 'config_type': 'TRANSFORM', 'published': True}, format='multipart')
        data = response.data
        uid = data['uid']
        saved_config = ExportConfig.objects.get(uid=uid)
        self.assertIsNotNone(saved_config)
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
        self.assertEqual(name, saved_config.name)
        self.assertTrue(saved_config.published)
        self.assertEqual('example_transform.sql', saved_config.filename)
        self.assertEqual('text/plain', saved_config.content_type)
        saved_config.delete()

    def test_invalid_config_type(self):
        """An unknown config_type is rejected with 400."""
        url = reverse('api:configs-list')
        path = os.path.dirname(os.path.realpath(__file__))
        # close the file handle deterministically (was previously leaked)
        with open(path + '/files/Example Transform.sql', 'r') as f:
            self.assertIsNotNone(f)
            response = self.client.post(url, {'upload': f, 'config_type': 'TRANSFORM-WRONG'}, format='multipart')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_invalid_name(self):
        """Omitting the required name field produces a field error."""
        url = reverse('api:configs-list')
        path = os.path.dirname(os.path.realpath(__file__))
        # close the file handle deterministically (was previously leaked)
        with open(path + '/files/Example Transform.sql', 'r') as f:
            self.assertIsNotNone(f)
            response = self.client.post(url, {'upload': f, 'config_type': 'TRANSFORM'}, format='multipart')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(response.data['name'], ['This field is required.'])

    def test_invalid_upload(self):
        """An empty upload value is rejected with 400."""
        url = reverse('api:configs-list')
        response = self.client.post(url, {'upload': '', 'config_type': 'TRANSFORM-WRONG'}, format='multipart')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    @skip('Transform not implemented.')
    def test_update_config(self):
        """PUT on an existing config replaces its file and metadata in place."""
        url = reverse('api:configs-list')
        # create an initial config we can then update..
        path = os.path.dirname(os.path.realpath(__file__))
        with open(path + '/files/Example Transform.sql', 'r') as fh:
            f = File(fh)
            name = 'Test Export Config'
            response = self.client.post(url, {'name': name, 'upload': f, 'config_type': 'TRANSFORM'}, format='multipart')
        data = response.data
        saved_uid = data['uid']
        saved_config = ExportConfig.objects.get(uid=saved_uid)
        # update the config
        url = reverse('api:configs-detail', args=[saved_uid])
        with open(path + '/files/hdm_presets.xml', 'r') as fh:
            f = File(fh)
            updated_name = 'Test Export Config Updated'
            response = self.client.put(url, {'name': updated_name, 'upload': f, 'config_type': 'PRESET'}, format='multipart')
        data = response.data
        updated_uid = data['uid']
        self.assertEqual(saved_uid, updated_uid)  # check its the same uid
        updated_config = ExportConfig.objects.get(uid=updated_uid)
        self.assertIsNotNone(updated_config)
        self.assertEqual('hdm_presets.xml', updated_config.filename)
        self.assertEqual('application/xml', updated_config.content_type)
        self.assertEqual('Test Export Config Updated', updated_config.name)
        updated_config.delete()
        try:
            with open(path + '/files/Example Transform.sql', 'r') as fh:
                File(fh)
        except IOError:
            pass  # expected.. old file has been deleted during update.
class TestExportTaskViewSet(APITestCase):
    """
    Test cases for ExportTaskViewSet
    """
    def setUp(self):
        """Create an authenticated user, a job, a run, and one finished task."""
        self.path = os.path.dirname(os.path.realpath(__file__))
        Group.objects.create(name='TestDefaultExportExtentGroup')
        self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')
        bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
        the_geom = GEOSGeometry(bbox, srid=4326)
        self.job = Job.objects.create(name='TestJob',
                                      description='Test description', user=self.user,
                                      the_geom=the_geom)
        # setup token authentication
        token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
                                HTTP_ACCEPT='application/json; version=1.0',
                                HTTP_ACCEPT_LANGUAGE='en',
                                HTTP_HOST='testserver')
        self.run = ExportRun.objects.create(job=self.job)
        self.celery_uid = str(uuid.uuid4())
        self.task = ExportTask.objects.create(run=self.run, name='Shapefile Export',
                                              celery_uid=self.celery_uid, status='SUCCESS')
        self.task_uid = str(self.task.uid)

    def test_retrieve(self):
        """GET on the task detail endpoint returns the task's uid."""
        expected = '/api/tasks/{0}'.format(self.task_uid)
        url = reverse('api:tasks-detail', args=[self.task_uid])
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
        self.assertEqual(expected, url)
        response = self.client.get(url)
        self.assertIsNotNone(response)
        self.assertEqual(200, response.status_code)
        # round-trip through JSON to normalize the response payload
        result = json.dumps(response.data)
        data = json.loads(result)
        # make sure we get the correct uid back out
        self.assertEqual(self.task_uid, data[0].get('uid'))

    def test_list(self):
        """GET on the task list endpoint returns exactly the one created task."""
        # fixed: the original called .format() on a string with no placeholders
        expected = '/api/tasks'
        url = reverse('api:tasks-list')
        self.assertEqual(expected, url)
        response = self.client.get(url)
        self.assertIsNotNone(response)
        self.assertEqual(200, response.status_code)
        result = json.dumps(response.data)
        data = json.loads(result)
        # should only be one task in the list
        self.assertEqual(1, len(data))
        # make sure we get the correct uid back out
        self.assertEqual(self.task_uid, data[0].get('uid'))
|
{
"content_hash": "27c53f770dee18a9954d8e56be781509",
"timestamp": "",
"source": "github",
"line_count": 789,
"max_line_length": 143,
"avg_line_length": 44.453738910012675,
"alnum_prop": 0.5811997491018989,
"repo_name": "dodobas/osm-export-tool2",
"id": "383e2ce1c959cfdc4f31a446dc4a6074a2d92eac",
"size": "35098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "119726"
},
{
"name": "HTML",
"bytes": "152701"
},
{
"name": "JavaScript",
"bytes": "4936760"
},
{
"name": "Python",
"bytes": "366592"
},
{
"name": "Shell",
"bytes": "755"
}
],
"symlink_target": ""
}
|
import os
import sys
import subprocess
import binwalk.core.common
import binwalk.core.compat
import binwalk.core.plugin
class JFFS2Exception(Exception):
    """Raised for any error encountered while extracting a JFFS2 image."""
class JFFS2Entry(object):
    """Plain record type: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        for key, value in binwalk.core.compat.iterator(kwargs):
            setattr(self, key, value)
class UnJFFS2(object):
    '''
    User space JFFS2 extractor; just a simple Python wrapper around the jffs2dump
    and jffs2reader utilities from mtd-utils. Not terribly efficient, but works.
    '''
    def __init__(self, image, directory, verbose=False):
        # image: path to the JFFS2 image file on disk.
        # directory: output directory; created here, must not already exist.
        # verbose: when True, each extracted path is echoed to stderr.
        self.image = image
        self.verbose = verbose
        self.directory = directory
        if not os.path.exists(self.image):
            raise JFFS2Exception("Invalid/non-existant image: '%s'" % self.image)
        try:
            os.mkdir(self.directory)
        except OSError as e:
            raise JFFS2Exception("Failed to create output directory '%s': %s" % (self.directory, str(e)))
    def parse_permission(self, perm_txt):
        # Convert one rwx triplet (e.g. 'r-x') into its octal digit (0-7).
        # The triplet is scanned reversed, so 'x' maps to bit 0 and 'r' to
        # bit 2; any character other than '-' sets the bit.
        perm = 0
        for i in range(0, 3):
            if perm_txt[::-1][i] != '-':
                perm |= (1 << i)
        return perm
    def parse_permissions(self, permissions):
        # Split an ls-style mode string (e.g. 'drwxr-xr-x') into the file
        # type character and a numeric mode usable with os.chmod.
        ftype = permissions[0]
        owner = self.parse_permission(permissions[1:4])
        group = self.parse_permission(permissions[4:7])
        other = self.parse_permission(permissions[7:10])
        # owner/group/other octal digits packed into one mode value
        perms = (owner * 64) + (group * 8) + other
        return (ftype, perms)
    def ls(self):
        # Recursively list the image contents by parsing jffs2reader output;
        # returns a list of JFFS2Entry records.
        entries = []
        # jffs2reader self.image -d / -r
        (stdout, stderr) = subprocess.Popen(['jffs2reader', self.image, '-d', '/', '-r'], stdout=subprocess.PIPE).communicate()
        # Handle big endian images
        if not stdout and not stderr:
            # No output at all: assume big endian, convert the image to
            # little endian with jffs2dump and retry on the converted copy.
            subprocess.call(['jffs2dump', '-b', '-e', self.image + '.le', self.image])
            self.image += '.le'
            # jffs2reader self.image -d / -r
            (stdout, stderr) = subprocess.Popen(['jffs2reader', self.image, '-d', '/', '-r'], stdout=subprocess.PIPE).communicate()
        for line in binwalk.core.compat.bytes2str(stdout).splitlines():
            # Lines resemble `ls -l` output; the first five fields are fixed
            # and everything after is the path (which may contain spaces).
            parts = [x for x in line.split(' ') if x]
            parts = parts[:5] + [' '.join(parts[5:])]
            uid = int(parts[2])
            guid = int(parts[3])
            fpath = parts[-1]
            symlink = ""
            (ftype, permissions) = self.parse_permissions(parts[0])
            if ftype == 'l' and '->' in fpath:
                # Symlinks are listed as 'path -> target'.
                (fpath, symlink) = fpath.split('->', 1)
                fpath = fpath.strip()
                symlink = symlink.strip()
            # Strip leading separators so paths join cleanly under self.directory.
            if fpath.startswith(os.path.sep):
                fpath = fpath[1:]
            if symlink and symlink.startswith(os.path.sep):
                symlink = symlink[1:]
            entries.append(JFFS2Entry(type=ftype, path=fpath, symlink=symlink, uid=uid, guid=guid, permissions=permissions))
        return entries
    def extract_entry(self, entry):
        # Materialize one entry under self.directory: directories via mkdir,
        # symlinks via os.symlink, regular files by asking jffs2reader for
        # their contents. Any other type (device nodes, etc.) is skipped.
        outfile = os.path.join(self.directory, entry.path)
        if self.verbose:
            sys.stderr.write(entry.path + "\n")
        if entry.type == 'd':
            try:
                os.mkdir(outfile)
            except OSError as e:
                pass
                #sys.stderr.write("Failed to create directory '%s': %s\n" % (outfile, str(e)))
        elif entry.type == 'l':
            try:
                os.symlink(entry.symlink, outfile)
            except OSError as e:
                pass
                #sys.stderr.write("Failed to create symlink '%s -> %s': %s\n" % (outfile, entry.symlink, str(e)))
        elif entry.type == '-':
            # jffs2reader self.image -f entry.path
            (stdout, stderr) = subprocess.Popen(['jffs2reader', self.image, '-f', entry.path],
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.PIPE).communicate()
            if stderr:
                # Best-effort: errors from jffs2reader are silently ignored.
                pass
                #sys.stderr.write("jffs2reader error while reading file '%s': %s\n" % (entry.path, binwalk.core.compat.bytes2str(stderr)))
            else:
                fp = binwalk.core.common.BlockFile(outfile, "wb")
                fp.write(stdout)
                fp.close()
        # TODO: Add support for special device files
        #elif entry.type == 'c':
        #    pass
        else:
            # Unknown type: nothing written, so skip the chown/chmod below.
            #sys.stderr.write("Don't know how to handle file type '%c' for '%s'\n" % (entry.type, entry.path))
            return
        # Set file user/group owner
        try:
            os.chown(outfile, entry.uid, entry.guid)
        except OSError as e:
            pass
        # Set file permissions
        try:
            os.chmod(outfile, entry.permissions)
        except OSError as e:
            pass
    def extract(self):
        # Extract every entry reported by ls().
        for entry in self.ls():
            self.extract_entry(entry)
class UnJFFS2Plugin(binwalk.core.plugin.Plugin):
    '''
    Extractor plugin for JFFS2 file systems.
    '''
    MODULES = ['Signature']

    def init(self):
        # Register the extraction rule only when extraction is enabled.
        if self.module.extractor.enabled:
            self.module.extractor.add_rule(txtrule=None,
                                           regex='^jffs2 filesystem',
                                           extension='jffs2',
                                           cmd=self.extractor)

    def extractor(self, fname):
        # Extract into a unique 'jffs2-root' directory beside the image;
        # report success/failure back to the extractor framework.
        image_path = os.path.realpath(fname)
        output_dir = binwalk.core.common.unique_file_name(
            os.path.join(os.path.dirname(image_path), 'jffs2-root'))
        try:
            UnJFFS2(image_path, output_dir).extract()
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            return False
        return True
|
{
"content_hash": "45fbb8c76674813f29ef5ec8baf60831",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 138,
"avg_line_length": 33.40462427745665,
"alnum_prop": 0.5367710676587645,
"repo_name": "0x7678/binwalk",
"id": "0f43cdc2d831ca987b24ae804a09276f83f034fb",
"size": "5779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/binwalk/plugins/unjffs2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255051"
}
],
"symlink_target": ""
}
|
from . import core, utils
import cgt
import ctypes, os.path as osp, hashlib, numpy as np, sys, subprocess, string, os, time, traceback, cPickle
from collections import defaultdict, namedtuple
from StringIO import StringIO
import logging
def function(inputs, outputs, dbg=None, updates=None, givens=None):
    """Compile `outputs` into a callable of `inputs`.

    `outputs` may be a single Node (the callable returns one value) or a
    list of Nodes (the callable returns a list of values).
    """
    assert isinstance(inputs, list), "Inputs must be a list"
    assert all(el.is_argument() for el in inputs), "Invalid input: should be a list of Argument nodes"
    if isinstance(outputs, core.Node):
        # Wrap the list-output callable to unpack its single result.
        listfun = _function_listout(inputs, [outputs], dbg, updates, givens)
        return lambda *args: listfun(*args)[0]
    elif isinstance(outputs, list):
        assert all(isinstance(el, core.Node) for el in outputs), "Invalid output: should all be symbolic variables"
        return _function_listout(inputs, outputs, dbg, updates, givens)
    else:
        raise ValueError("Expected `outputs` to be a Node or a list of Nodes. Got an object of type %s"%type(outputs))
def _function_listout(inputs, outputs, dbg = None, updates=None, givens=None):
    # Internal helper for `function`: always returns a list-output callable.
    # Normalize `updates` into a list of (before, after) Node pairs.
    if isinstance(updates,dict): updates=updates.items()
    if updates is None: updates = []
    else: assert (isinstance(updates, list) and
                all(isinstance(a,tuple) and len(a)==2
                    and isinstance(a[0], core.Node) and isinstance(a[1], core.Node)
                    for a in updates)), "updates should be a list of pairs (before, after)"
    if givens is None: givens = []
    # NOTE(review): this else-branch validates `updates`, not `givens`, and
    # only runs when givens are supplied -- looks misplaced; confirm intent.
    else: assert all(before.is_data() for (before,_) in updates), "lhs of updates must be Data instances"
    if dbg: raise core.Todo("debug functionality is broken")
    # Allow tuple outputs by packing each into a cgt tuple node.
    outputs = [cgt.make_tuple(*x) if isinstance(x, tuple) else x for x in outputs]
    interp = run_compilation_pipeline(inputs, outputs, updates, givens)
    return interp
# ================================================================
# Execution
# ================================================================
def python_only():
    """True when the compiled `cycgt` extension is unavailable (pure-Python mode)."""
    return not hasattr(cgt, "cycgt")
def determine_devices(nodes_sorted, updatetarg2src):
    """Assign a core.Device to every node in `nodes_sorted`.

    Update targets inherit the device of their source node; data nodes keep
    their own device; arguments live on the home CPU device; other nodes go
    to the GPU only when a native GPU impl exists and is preferred.
    Returns a dict mapping node -> core.Device.
    """
    # Op definitions (available impls, inplace-ness, etc) define constraints
    # on possible devices for a node
    if python_only():
        # BUG FIX: this previously used the unqualified name `Device`, which
        # is not in scope (NameError at runtime); use the default CPU device.
        return {node:core.Device(devtype="cpu", idx=0) for node in nodes_sorted}
    # (1) Get available devices for nodes, determined by which impls are available and node types
    compile_info = get_compile_info()
    cuda_enabled = compile_info["CGT_ENABLE_CUDA"]
    node2dev = {}
    home_device = core.Device(devtype="cpu", idx=0)
    for node in nodes_sorted:
        default_device = node.props.get("default_device", home_device)
        if node in updatetarg2src:
            # RHS of an update shares the device of its LHS.
            device = node2dev[updatetarg2src[node]]
        elif node.is_data():
            device = node.op.device
        elif node.is_argument():
            device = home_device
        else:
            # Prefer the GPU when a native GPU impl exists and either the
            # default device is a GPU or there is no native CPU impl.
            if "native_gpu" in node.op.available_impls and (default_device.devtype == "gpu" or "native_cpu" not in node.op.available_impls):
                assert cuda_enabled, "trying to put op on gpu but cuda is disabled"
                device = core.Device("gpu", default_device.idx)
            else:
                device = core.Device(devtype="cpu", idx=default_device.idx)
        node2dev[node] = device
    return node2dev
def is_tensor(x):
    """Return whether node `x` carries a TensorType."""
    return isinstance(x.typ, core.TensorType)
def is_tuple(x):
    """Return whether node `x` carries a TupleType."""
    return isinstance(x.typ, core.TupleType)
def create_interpreter(inputs, outputs, eg, node2memloc):
    """Build the interpreter (python or native backend) that runs `eg`."""
    assert isinstance(eg, ExecutionGraph)
    input_types = [input.typ for input in inputs]  #pylint: disable=W0622
    output_locs = [node2memloc[node] for node in outputs]
    config = cgt.get_config()
    backend = config["backend"]
    parallel = config["parallel"]
    if backend == "python":
        # The python backend has no parallel interpreter.
        if parallel:
            raise NotImplementedError("For parallel=True, set backend=native")
        return SequentialInterpreter(eg, output_locs, input_types)
    if backend == "native":
        # Thread count 0 selects the sequential native interpreter.
        num_threads = config["num_threads"] if parallel else 0
        return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, num_threads)
    raise NotImplementedError("invalid backend %s"%backend)
def topsorted_shapes_first(outputs, node2shape):
    # Almost identical to topsorted(...) function
    # But we also need to visit the shape elements of an in-place node
    # before visiting that node
    # Iterative DFS; marks[node]: absent/0 = unvisited, 1 = on stack, 2 = done.
    marks = {}
    out = []
    stack = []
    for x in outputs:
        stack.append((x,0))
    while stack:
        # jidx is the index of the next dependency of i to descend into.
        (i,jidx) = stack.pop()
        if jidx == 0:
            m = marks.get(i,0)
            if m == 0:
                marks[i] = 1
            elif m == 1:
                # Revisiting a node still on the stack means a cycle.
                raise ValueError("not a dag")
            else:
                continue
        ps = i.parents
        ###### Changed part ######
        # By-reference tensor results also depend on the nodes computing
        # their shape, so those must be emitted first (for allocation).
        if i.ndim > 0 and not i.is_input() and i.op.return_type=="byref":
            if i in node2shape:
                shpels = node2shape[i]
            else:
                raise core.Unreachable
            # shpels = i.op.shp_apply(i.parents)
            ps = ps + shpels
        elif is_tuple(i):
            # Tuple nodes carry one shape list per component.
            for arrshp in node2shape[i]:
                ps = ps + arrshp
        ##########################
        if jidx == len(ps):
            # All dependencies visited: emit node in topological order.
            marks[i] = 2
            out.append(i)
        else:
            stack.append((i,jidx+1))
            j = ps[jidx]
            stack.append((j,0))
    return out
def determine_memowner(nodes_sorted, updates, node2dev):
    # Decide, for each node, which node's memory it writes into (its "owner").
    # A node owns its own buffer unless it writes in place into an input,
    # is the RHS of an update, or can safely reuse a parent's buffer.
    # First determine how many "child" nodes each node has
    node2child = defaultdict(list)
    for node in nodes_sorted:
        for parent in node.parents:
            node2child[parent].append(node)
    # Now traverse graph again and see where we can use the same memory
    node2memowner = {} # mapping node x -> the node that owns its memory
    # For updates, memlocation(RHS) = memlocation(LHS)
    after2before = {after:before for (before,after) in updates}
    enable_inplace_opt = core.get_config()["enable_inplace_opt"]
    for node in nodes_sorted:
        base = node # by default,
        if node.is_argument():
            pass
        elif node.op.writes_to_input >= 0:
            # Op declares it writes into one of its inputs; share that owner.
            base = node2memowner[node.parents[node.op.writes_to_input]]
        elif node in after2before:
            base = after2before[node]
        elif enable_inplace_opt and node.op.return_type == "byref": # TODO think about if we need any other conditions
            nodeshape = node.op.shp_apply(node.parents)
            # Reuse a parent's buffer only when this node is its sole
            # consumer and shape/dtype match and the parent is mutable.
            for parent in node.parents:
                if (len(node2child[parent])==1
                    and nodeshape==cgt.shape(parent) # XXX not a very robust way to check
                    and node.dtype == parent.dtype
                    and _is_data_mutable(parent)):
                    base = parent
                    break
        # TODO: add optimization for in-place incrementing
        node2memowner[node] = base
    return node2memowner
class MemCounter(object):
    """
    returns `MemLocation`s with indices 0,1,...
    `count` member indicates how many have been returned thus far
    """

    def __init__(self):
        # Next index to hand out (and total handed out so far).
        self.count = 0

    def new_memloc(self, devtype):
        """Allocate the next MemLocation on device type `devtype`."""
        loc = MemLocation(self.count, devtype)
        self.count += 1
        return loc
def create_execution_graph(inputs, nodes_sorted, node2shape, node2memowner, node2dev):
    # Lower the topologically-sorted expression graph into a flat list of
    # instructions. Returns (ExecutionGraph, node -> MemLocation mapping).
    # node2impltype = copy.copy(node2impltype) # we'll insert transport ops
    instrs = []
    counter = MemCounter()
    node2memloc = {}
    for node in nodes_sorted:
        # Fallback device: CPU, co-indexed with the first parent when possible.
        if node not in node2dev: node2dev[node] = core.Device(devtype="cpu",idx=node2dev[node.parents[0]].idx if len(node.parents)>0 else 0)
        if node.is_argument():
            # Arguments are fetched from the interpreter's input list by index.
            write_loc = counter.new_memloc(node2dev[node].devtype)
            node2memloc[node] = write_loc
            i = inputs.index(node)
            instrs.append(LoadArgument(i, write_loc))
        else:
            read_locs = [node2memloc[parent] for parent in node.parents]
            if node.op.return_type == "byref":
                if node2memowner[node] is node:
                    # Node owns its memory: emit Alloc instruction(s) first.
                    if is_tensor(node): # just make one memory location for output
                        nodeshape = node2shape[node] if node.ndim > 0 else []
                        shape_locs = [node2memloc[shpel] for shpel in nodeshape]
                        write_loc = counter.new_memloc(node2dev[node].devtype)
                        instrs.append(Alloc(node.dtype, shape_locs, write_loc))
                    else: # if it's a tuple, we need to allocate all of the components, then build tuple
                        nodeshape = node2shape[node]
                        assert isinstance(nodeshape, tuple)
                        arr_locs = []
                        for (arrshp, arrtyp) in utils.safezip(nodeshape, node.typ):
                            arr_loc = counter.new_memloc(node2dev[node].devtype)
                            shape_locs = [node2memloc[shpel] for shpel in arrshp]
                            instrs.append(Alloc(arrtyp.dtype, shape_locs, arr_loc))
                            arr_locs.append(arr_loc)
                        write_loc = counter.new_memloc(node2dev[node].devtype)
                        instrs.append(BuildTup(node.typ, arr_locs, write_loc))
                else:
                    # If this node writes to another node's memory, the devices must be the same
                    # this should have been enforced in determine_devices()
                    assert node2dev[node] == node2dev[node2memowner[node]]
                    write_loc = node2memloc[node2memowner[node]]
                instrs.append(ReturnByRef(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
            else:
                assert node.op.return_type == "byval"
                # By-value ops produce a fresh location; no Alloc needed.
                write_loc = counter.new_memloc(node2dev[node].devtype)
                instrs.append(ReturnByVal(node.op, [par.typ for par in node.parents], read_locs, write_loc, node_props=node.props))
            node2memloc[node] = write_loc
    return ExecutionGraph(instrs, len(inputs), counter.count), node2memloc
def get_callable(op, input_types, devtype, prefer_python=False):
    # Select an implementation of `op` (python vs native, cpu vs gpu) based
    # on config and the op's available_impls, and return a callable for it.
    assert op.available_impls, "need to set op.available_impls"
    config = core.get_config()
    if (prefer_python or config["force_python_impl"]) and "python" in op.available_impls:
        return op.get_py_callable(input_types)
    elif config["backend"] == "python":
        if "python" in op.available_impls:
            return op.get_py_callable(input_types)
        else:
            # Python backend can still call a native CPU impl as a fallback.
            assert devtype=="cpu", "can't use devtype=gpu with python backend"
            if "native_cpu" in op.available_impls:
                return get_native_callable(op, input_types, "cpu")
            else:
                raise RuntimeError("Can't find an implementation of %s suitable for python backend. Just have available_impls=%s"%(op,op.available_impls))
    else: # backend = native
        if devtype == "cpu":
            if "native_cpu" in op.available_impls:
                return get_native_callable(op, input_types, "cpu")
            else:
                # No native CPU impl: warn and fall back to python.
                # (Python 2 print statement; this module targets Python 2.)
                print "using python impl for",op
                return op.get_py_callable(input_types)
        else:
            if "native_gpu" in op.available_impls:
                return get_native_callable(op, input_types, "gpu")
            else:
                raise RuntimeError("Tried to put Op %s on the GPU but I only have a python impl :("%op)
def get_native_callable(op, input_types, devtype):
    """Compile `op` natively for `devtype` and wrap the result in a callable."""
    compile_info = op.get_native_compile_info(input_types, devtype)
    # Attach metadata the native-callable wrapper needs.
    compile_info.op_str = str(op)
    compile_info.return_type = op.return_type
    compile_info.n_in = len(input_types)
    return nci2callable(compile_info)
def add_transports(nodelist, node2dev, node2shape):
    # Insert Transport (device-to-device copy) nodes wherever a child lives
    # on a different device than its parent, rewiring the child's parents.
    # Mutates node2dev and node2shape to include the new copy nodes.
    node2child = defaultdict(list)
    for node in nodelist:
        for par in node.parents:
            node2child[par].append(node)
    # XXX look at native compilation info, gpu deref mask
    for node in nodelist:
        dev = node2dev[node]
        dev2copy = {}
        for child in node2child[node]:
            childdev = node2dev[child]
            if not childdev == dev:
                # Create at most one copy of `node` per destination device.
                if childdev not in dev2copy:
                    nodecopy = core.Result(core.Transport(childdev), [node])
                    node2dev[nodecopy] = childdev
                    dev2copy[childdev] = nodecopy
                    node2shape[nodecopy] = node2shape[node]
                replace_parents(child, node, dev2copy[childdev])
def replace_parents(node, before, after):
    """Replace (by identity) every occurrence of `before` in node.parents with `after`."""
    # Slice assignment mutates the existing parents list in place.
    node.parents[:] = [after if p is before else p for p in node.parents]
def run_compilation_pipeline(inputs, outputs, updates, givens):
    """
    Compiles the expression graph into an execution graph.
    Returns an interpreter ready to be called with numeric inputs.
    """
    config = core.get_config()
    # Phase 1: simplification and analysis of expression graph
    # ------------------------------------------------------
    # Add add update targets to outputs
    outputs_updatetargs = outputs + [after for (_before, after) in updates]
    if givens: outputs_updatetargs = core.clone(outputs_updatetargs, dict(givens))
    # Do simplification + analysis pass on expression graph
    outputs_updatetargs_simple, analysis, _ = \
        core.simplify_and_analyze(outputs_updatetargs) if config["enable_simplification"] \
        else (outputs_updatetargs, core.analyze(outputs_updatetargs), {})
    # Phase 2: device targeting
    # ------------------------------------------------------
    outputs_updatetargs_simple = cgt.core.clone(outputs_updatetargs_simple)
    analysis = core.analyze(outputs_updatetargs_simple)
    # XXX inefficient to just copy the graph and redo analysis
    nodelist = core.topsorted(outputs_updatetargs_simple)
    updatesrcs = [before for (before, _) in updates]
    # The last len(updates) entries of the simplified outputs are update targets.
    updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
    node2dev = determine_devices(nodelist, {targ:src for (src,targ) in zip(updatesrcs, updatetargs_simple)})
    add_transports(nodelist, node2dev, analysis["node2shape"])
    # Phase 3: build execution graph
    # ------------------------------------------------------
    # Sort nodes so that shape elements appear before a given node
    nodes_sorted = topsorted_shapes_first(outputs_updatetargs_simple, analysis["node2shape"]) # XXX don't need shapes for byval ops
    # For each node, figure out if its output should be written to a previous node's memory
    # (memowner : "memory owner")
    updatetargs_simple = outputs_updatetargs_simple[len(outputs):]
    node2memowner = determine_memowner(nodes_sorted, zip(updatesrcs, updatetargs_simple), node2dev)
    # Find the outputs we want to return
    outputs_simple = outputs_updatetargs_simple[:len(outputs)] # get rid
    # Generate execution graph
    eg, node2memloc = create_execution_graph(
        inputs, nodes_sorted, analysis["node2shape"], node2memowner, node2dev)
    # print execution graph (Python 2 print statements; module targets Python 2)
    if config["verbose"]:
        print 'begin'
        print '\n'.join(str(i)+'.) \t'+repr(instr) for (i,instr) in enumerate(eg.instrs))
        print 'end'
    # Phase 3: create C or Python interpreter for graph
    # ------------------------------------------------------
    interp = create_interpreter(inputs, outputs_simple, eg, node2memloc)
    # Done!
    return interp
# ================================================================
# Simple numeric eval via traversal
# ================================================================
def numeric_eval(output, arg2val):
    """
    Numerically evaluate a symbolic variable (or list of variables) without
    compilation, by binding each argument to a value via `arg2val` and
    walking the computation graph.

    Inputs
    ------
    output: symbolic variable or list of variables to evaluate
    arg2val: dict mapping each argument that `output` depends on to a value

    Returns
    -------
    Numeric value, or list of numeric values, matching `output`
    """
    if isinstance(output, list):
        assert all(isinstance(x, core.Node) for x in output), "expected a list of Nodes"
        return _numeric_eval_listout(output, arg2val)
    if isinstance(output, core.Node):
        # Single-node case: evaluate as a one-element list and unwrap.
        return _numeric_eval_listout([output], arg2val)[0]
    raise ValueError("expected `output` to be a Node or a list of Nodes. Got an object of type %s"%type(output))
def _numeric_eval_listout(outputs, arg2val):
    """
    Evaluate `outputs` numerically. `arg2val` maps argument nodes to
    numerical values. Nodes are visited in topological order so every
    parent is computed before its children.
    """
    assert isinstance(outputs, list)
    assert isinstance(arg2val, dict)
    computed = {}
    for node in list(core.topsorted(outputs)):
        if node.is_argument():
            # Leaf supplied by the caller: coerce to a valid array.
            computed[node] = core.as_valid_array(arg2val[node])
        elif node.is_data():
            # Leaf holding stored data (e.g. shared values).
            computed[node] = node.op.get_value()
        else:
            # Interior node: apply its op to the already-computed parents.
            parent_values = [computed[parent] for parent in node.parents]
            computed[node] = core.py_numeric_apply(node, parent_values)
    return [computed[node] for node in outputs]
################################################################
### Execution graph
################################################################
# (loc, access) pair: the MemLocation a node's result lives in, plus how the
# location is accessed (presumably one of the MEM_* constants below -- the
# producing code is outside this chunk; confirm against determine_memowner).
MemInfo = namedtuple("MemInfo",["loc","access"])
MEM_OVERWRITE = "overwrite"
MEM_INCREMENT = "increment"
class ExecutionGraph(object):
    """A compiled program: a flat instruction list plus the number of
    arguments it expects and the number of storage locations it uses."""
    def __init__(self, instrs, n_args, n_locs):
        # Stored verbatim; interpreters iterate `instrs` and allocate
        # `n_locs` storage slots (see SequentialInterpreter).
        self.n_args = n_args
        self.n_locs = n_locs
        self.instrs = instrs
class MemLocation(object):
    """A slot in interpreter storage, tagged with the device it lives on."""
    # TODO: dtype
    def __init__(self, idx, devtype):
        assert isinstance(idx, int) and devtype in ("cpu", "gpu")
        self.index = idx
        self.devtype = devtype
    def __repr__(self):
        # e.g. "%3/cpu" -- the % prefix marks a storage slot.
        return "%{}/{}".format(self.index, self.devtype)
# ================================================================
# Instructions
# ================================================================
class Instr(object):
    """Abstract base for execution-graph instructions."""
    def fire(self, interp):
        """Execute this instruction against the given interpreter."""
        raise NotImplementedError
class LoadArgument(Instr):
def __init__(self, ind, write_loc):
self.ind = ind
self.write_loc = write_loc
def fire(self, interp):
interp.set(self.write_loc, interp.getarg(self.ind))
def __repr__(self):
return "%s = LoadArg ind:%i" % (self.write_loc, self.ind)
class Alloc(Instr):
def __init__(self, dtype, read_locs, write_loc):
self.dtype = dtype
self.read_locs = read_locs
self.write_loc = write_loc
def fire(self, interp):
shp = tuple(interp.get(mem) for mem in self.read_locs)
prevarr = interp.get(self.write_loc)
if prevarr is None or prevarr.shape != shp:
interp.set(self.write_loc, np.ones(shp, self.dtype))
def __repr__(self):
return "%s = Alloc shp:%s dtype:%s" % (self.write_loc, str(self.read_locs), self.dtype)
class BuildTup(Instr):
def __init__(self, typ, read_locs, write_loc):
self.typ = typ
self.read_locs = read_locs
self.write_loc = write_loc
def fire(self, interp):
interp.set(self.write_loc, tuple(interp.get(loc) for loc in self.read_locs))
def __repr__(self):
return "%s = BuildTup args:%s" % (self.write_loc, str(self.read_locs))
class ReturnByRef(Instr):
def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
self.op = op
self.input_types = input_types
self.read_locs = read_locs
self.write_loc = write_loc
self._callable = None
self.node_props=node_props
def fire(self, interp):
if self._callable is None: self._callable = self.get_callable()
self._callable.call(
[interp.get(mem) for mem in self.read_locs],
interp.get(self.write_loc))
def __repr__(self):
return "%s = ReturnByRef op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
def get_callable(self):
return get_callable(self.op, self.input_types, self.write_loc.devtype)
class ReturnByVal(Instr):
def __init__(self, op, input_types, read_locs, write_loc, node_props=None):
self.op = op
self.input_types = input_types
self.read_locs = read_locs
self.write_loc = write_loc
self._callable = None
self.node_props=node_props
def fire(self, interp):
if self._callable is None: self._callable = self.get_callable()
interp.set(self.write_loc, self._callable.call([interp.get(mem) for mem in self.read_locs]))
def get_callable(self):
return get_callable(self.op, self.input_types, self.write_loc.devtype)
def __repr__(self):
return "%s = ReturnByVal op:%s args:%s" % (self.write_loc, str(self.op), str(self.read_locs))
# ================================================================
# Compiling native code
# ================================================================
def nci2callable(nci):
    # Turn a native-compile-info object into a core.NativeCallable: render the
    # templated C/CUDA source, compile it into a cached .so, and wrap the
    # exported function pointers (call/setup/teardown) with the closure data.
    template_code = gen_templated_code(nci.includes, nci.closure_triples, nci.func_code)
    compile_info = get_compile_info()
    # Cache key: hash of the generated source plus compiler flags plus any
    # extra sources, so any change to either forces a rebuild.
    prefix = utils.hash_seq1(template_code, compile_info["CPP_FLAGS"], *(src.code for src in nci.extra_srcs))
    # Symbol names embedded into the source via string.Template ($function etc.).
    d = dict(function=_funcname(prefix), closure=_closurename(prefix),setup=_setupname(prefix),teardown=_teardownname(prefix))
    fn_srcfile = core.SrcFile("c++",string.Template(template_code).substitute(d))
    srcfiles = [fn_srcfile]
    srcfiles.extend(core.SrcFile(sf.lang, string.Template(sf.code).substitute(d)) for sf in nci.extra_srcs)
    CACHE_ROOT = compile_info["CACHE_ROOT"]
    libpath = osp.join(CACHE_ROOT, prefix+".so")
    # Compile only on cache miss; the hash-based name makes hits safe.
    if not osp.exists(libpath):
        tu = TranslationUnit(srcfiles, nci.link_flags)
        tu.compile(prefix, libpath)
    lib = get_or_load_lib(libpath)
    fptr = getattr(lib, _funcname(prefix))
    # setup/teardown symbols are optional; only resolved when declared.
    setup_fptr = getattr(lib, _setupname(prefix)) if nci.setup else None
    teardown_fptr = getattr(lib, _teardownname(prefix)) if nci.teardown else None
    cldata = _build_closure(nci.closure_triples)
    return core.NativeCallable(nci.n_in, nci.return_type, nci.op_str, fptr, cldata=cldata, setup_fptr=setup_fptr, teardown_fptr=teardown_fptr,
        store_objects=nci.store_objects)
def _funcname(prefix):
    """Exported symbol name for the call function of a compiled op."""
    return "call_%s" % prefix
def _setupname(prefix):
    """Exported symbol name for the optional setup function."""
    return "setup_%s" % prefix
def _teardownname(prefix):
    """Exported symbol name for the optional teardown function."""
    return "teardown_%s" % prefix
def _closurename(prefix):
    """Name of the generated closure struct type."""
    return "closure_%s" % prefix
def gen_templated_code(includes, closure_info, func_code):
    """Assemble one source string: #include lines (cgt_common.h always
    first), then the $closure struct, then the op's function code."""
    out = StringIO()
    for header in ["cgt_common.h"] + includes:
        out.write('#include "%s"\n' % header)
    gen_struct_code(closure_info, out)
    out.write(func_code)
    return out.getvalue()
def gen_struct_code(triples, outstream):
    """Emit a C "typedef struct $closure {...}" with one field per
    (fieldname, ctype, value) triple; no-op when triples is None."""
    if triples is None:
        return
    outstream.write("typedef struct $closure {\n")
    for (fieldname, fieldtype, _val) in triples:
        # Field value is ignored here; only name and type shape the struct.
        outstream.write(_ctypes2str[fieldtype])
        outstream.write(" ")
        outstream.write(fieldname)
        outstream.write(";\n")
    outstream.write("} $closure;\n")
_LIBRARIES = {}  # libname -> loaded CDLL, so each shared library is loaded once
def get_or_load_lib(libname):
    """Load a shared library via ctypes, caching it by path."""
    try:
        return _LIBRARIES[libname]
    except KeyError:
        lib = ctypes.cdll.LoadLibrary(libname)
        _LIBRARIES[libname] = lib
        return lib
class TranslationUnit(object):
    """All the input that goes into building a native binary for one or more ops"""
    def __init__(self, srcfiles, link_flags):
        # srcfiles: sequence of (lang, code) pairs (core.SrcFile unpacks this
        # way in compile() below); link_flags: extra flags appended at link time.
        self.srcfiles = srcfiles
        self.link_flags = link_flags
    def compile(self, prefix, libpath):
        """
        Compiles all of the files, places them in the cache directory
        Then links them creating prefix.so
        """
        CACHE_ROOT = get_compile_info()["CACHE_ROOT"]
        # Build one shell command chain: cd cache && compile each source && link.
        cmds = ["cd %s"%CACHE_ROOT]
        objs = []
        for (i,(lang,code)) in enumerate(self.srcfiles):
            # Source files are named <prefix>_<i>.<ext>; each compiles to
            # <srcpath>.o (see _make_*_compile_cmd).
            if lang=="c++":
                srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cpp"%i)
                cmds.append(_make_cpp_compile_cmd(srcpath))
            elif lang=="cuda":
                srcpath = osp.join(CACHE_ROOT, prefix+"_%i.cu"%i)
                cmds.append(_make_cuda_compile_cmd(srcpath))
            else:
                raise NotImplementedError
            # Write the source to disk before the commands run.
            with open(srcpath,"w") as fh: fh.write(code)
            objs.append(srcpath+".o")
        cmds.append(_make_link_cmd(objs, self.link_flags, libpath))
        # Chain with && so a failed compile aborts before the link step.
        bigcmd = " && ".join(cmds)
        call_and_print(bigcmd)
_COMPILE_CONFIG = None
def get_compile_info():
global _COMPILE_CONFIG
if _COMPILE_CONFIG is None:
config = core.get_config()
CGT_BUILD_ROOT = cgt.cycgt.cgt_build_root() #pylint: disable=E1101
cmake_info = {}
with open(osp.join(CGT_BUILD_ROOT,"build_info.txt")) as fh:
lines = fh.readlines()
for line in lines:
if ":=" not in line: print "skipping",line
lhs,rhs = line.split(":=")
lhs = lhs.strip()
rhs = rhs.strip()
cmake_info[lhs] = rhs
CUDA_ROOT = cmake_info["CUDA_ROOT"]
CGT_ENABLE_CUDA = cmake_info["CGT_ENABLE_CUDA"] in ["1","ON"]
CGT_ENABLE_CUDNN = cmake_info["CGT_ENABLE_CUDNN"] in ["1","ON"]
DEFINITIONS = "-DENABLE_CUDA" if CGT_ENABLE_CUDA else ""
CUDNN_ROOT = cmake_info["CUDNN_ROOT"]
_COMPILE_CONFIG = dict(
OPENBLAS_INCLUDE_DIR = osp.join(CGT_BUILD_ROOT,"OpenBLAS"),
CGT_INCLUDE_DIR = cmake_info["CGT_INCLUDE_DIR"],
CGT_LIBRARY_DIR = osp.join(CGT_BUILD_ROOT,"lib"),
CUDA_LIBRARY_DIR = osp.join(CUDA_ROOT,"lib"),
CUDA_INCLUDE_DIR = osp.join(CUDA_ROOT,"include"),
CUDA_LIBRARIES = cmake_info["CUDA_LIBRARIES"],
DEFINITIONS = DEFINITIONS,
CUDA_ROOT = CUDA_ROOT,
CUDNN_ROOT = CUDNN_ROOT,
CACHE_ROOT = osp.expanduser(config["cache_dir"]),
CGT_ENABLE_CUDA = CGT_ENABLE_CUDA,
CGT_ENABLE_CUDNN = CGT_ENABLE_CUDNN,
# CGT_LIBRARY = cmake_info["CGT_LIBRARY"],
)
includes = "-I"+_COMPILE_CONFIG["CGT_INCLUDE_DIR"]
includes += " -I"+_COMPILE_CONFIG["OPENBLAS_INCLUDE_DIR"]
link_flags = ""
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: includes += " -I"+_COMPILE_CONFIG["CUDA_INCLUDE_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]: includes += " -I"+_COMPILE_CONFIG["CUDNN_ROOT"]
_COMPILE_CONFIG["INCLUDES"] = includes
link_flags = "-lcgt -L"+_COMPILE_CONFIG["CGT_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDA"]: link_flags += " -L"+_COMPILE_CONFIG["CUDA_LIBRARY_DIR"]
if _COMPILE_CONFIG["CGT_ENABLE_CUDNN"]:
link_flags += " -L"+_COMPILE_CONFIG["CUDNN_ROOT"]
link_flags += " -Wl,-rpath,"+_COMPILE_CONFIG["CUDNN_ROOT"]
if sys.platform == "darwin":
link_flags += " -dynamiclib -Wl,-headerpad_max_install_names"
else:
link_flags += " -shared -rdynamic"
_COMPILE_CONFIG["LINK_FLAGS"] = link_flags
cpp_flags = "-fvisibility=hidden -std=c++11 -fPIC" + (" -O0 -g" if config["debug_cpp"] else " -O3 -DNDEBUG")
if sys.platform == "darwin": cpp_flags += " -stdlib=libc++"
_COMPILE_CONFIG["CPP_FLAGS"] = cpp_flags
CACHE_ROOT = _COMPILE_CONFIG["CACHE_ROOT"]
if not osp.exists(CACHE_ROOT):
os.makedirs(CACHE_ROOT)
return _COMPILE_CONFIG
def _make_cpp_compile_cmd(srcpath):
    """Shell command compiling one C++ source file to <srcpath>.o."""
    info = get_compile_info()
    template = "c++ %(cpp_flags)s %(srcpath)s -c -o %(srcpath)s.o %(includes)s %(definitions)s"
    return template % dict(
        srcpath=srcpath, includes=info["INCLUDES"], definitions=info["DEFINITIONS"],
        cpp_flags=info["CPP_FLAGS"], cacheroot=info["CACHE_ROOT"])
def _make_cuda_compile_cmd(srcpath):
    """Shell command compiling one CUDA source file to <srcpath>.o via nvcc."""
    info = get_compile_info()
    template = ("nvcc %(srcpath)s -c -o %(srcpath)s.o -ccbin cc -m64 -Xcompiler -fPIC "
                "-Xcompiler -O3 -Xcompiler -arch -Xcompiler x86_64 %(includes)s %(definitions)s")
    return template % dict(
        srcpath=srcpath, includes=info["INCLUDES"], definitions=info["DEFINITIONS"])
def _make_link_cmd(objs, extra_link_flags, libpath):
    """Shell command linking the object files into the shared library libpath."""
    info = get_compile_info()
    # macOS needs an explicit install_name so the library resolves by basename.
    if sys.platform == "darwin":
        iname = "-install_name %s" % osp.basename(libpath)
    else:
        iname = ""
    template = r"c++ %(cpp_flags)s %(objnames)s %(link_flags)s %(iname)s -o %(libpath)s"
    return template % dict(
        objnames=" ".join(objs), includes=info["INCLUDES"], cpp_flags=info["CPP_FLAGS"], libpath=libpath,
        link_flags=info["LINK_FLAGS"]+" "+extra_link_flags, cacheroot=info["CACHE_ROOT"], iname=iname)
def call_and_print(cmd):
    # Echo the command (in green, via ANSI escapes) then run it through the
    # shell; subprocess.check_call raises CalledProcessError on nonzero exit.
    print "\x1b[32m%s\x1b[0m"%cmd
    subprocess.check_call(cmd,shell=True)
# Maps ctypes scalar types to the C type names used when emitting closure
# structs (see gen_struct_code).
# NOTE(review): ctypes.c_byte is SIGNED, so mapping it to "uint8_t" looks like
# a signedness mismatch (c_ubyte would be uint8_t). Same size either way, so
# struct layout is unaffected -- confirm against the C side before changing.
_ctypes2str = {
    ctypes.c_byte : "uint8_t",
    ctypes.c_bool : "bool",
    ctypes.c_char : "char",
    ctypes.c_int : "int",
    ctypes.c_long : "long",
    ctypes.c_void_p : "void*",
    ctypes.c_double : "double",
    ctypes.c_float : "float"
}
_struct_cache = {} # because creating ctypes.Structure class is slow for some reason
def _build_closure(triples):
    # Build a ctypes Structure instance holding the closure data described by
    # (fieldname, ctype, value) triples; returns a NULL void* when there is
    # no closure at all.
    if triples is None:
        return ctypes.c_void_p(0)
    vals = []
    fields = []
    for (fieldname,fieldtype,val) in triples:
        vals.append(val)
        fields.append((fieldname,fieldtype))
    try:
        # Cache the generated Structure subclass keyed by the pickled field
        # layout, so identical layouts reuse one class (see note above).
        key = cPickle.dumps(fields)
        S = _struct_cache[key]
    except KeyError:
        class S(ctypes.Structure):
            _fields_ = fields
        _struct_cache[key] = S
    closure = S(*vals)
    return closure
################################################################
### Interpreters
################################################################
class Interpreter(object):
    """Abstract storage/argument interface that Instr.fire() programs against."""
    def __call__(self, args):
        """Run the program on the given arguments."""
        raise NotImplementedError
    def get(self, mem):
        """Return the value stored at memory location `mem`."""
        raise NotImplementedError
    def set(self, mem, val):
        """Store `val` at memory location `mem`."""
        raise NotImplementedError
    def getarg(self, i):
        """Return the i-th argument of the current call."""
        raise NotImplementedError
class SequentialInterpreter(Interpreter):
    """
    Runs an execution graph
    """
    def __init__(self, eg, output_locs, input_types, copy_outputs=True):
        # eg: ExecutionGraph to run; output_locs: MemLocations holding the
        # final outputs; input_types: expected types of the call arguments.
        self.eg = eg
        self.input_types = input_types
        self.output_locs = output_locs
        # One storage slot per memory location; filled lazily by instructions.
        self.storage = [None for _ in xrange(self.eg.n_locs)]
        self.args = None
        self.copy_outputs = copy_outputs
    def __call__(self, *args):
        assert len(args) == len(self.input_types), "Wrong number of inputs provided"
        # Coerce every argument to a valid array of its declared type.
        self.args = tuple(core.as_valid_array(arg, intype) for (arg, intype) in zip(args, self.input_types))
        for instr in self.eg.instrs:
            if profiler.on: tstart = time.time()
            try:
                instr.fire(self)
            except Exception as e:
                # Print diagnostics, then re-raise: in debug mode the creation
                # stack of the offending node is replayed for op instructions.
                traceback.print_exc()
                if isinstance(instr, (ReturnByRef,ReturnByVal)):
                    if core.get_config()["debug"]:
                        assert "stack" in instr.node_props
                        utils.colorprint(utils.Color.MAGENTA, "HERE'S THE STACK WHEN THE OFFENDING NODE WAS CREATED\n",o=sys.stderr)
                        print>>sys.stderr, ">>>>>>>>>>>>>>>>>>>>>>>>>>"
                        traceback.print_list(instr.node_props["stack"])
                        print>>sys.stderr, "<<<<<<<<<<<<<<<<<<<<<<<<<<"
                        raise e
                    else:
                        utils.error("Didn't save the stack so I can't give you a nice traceback :(. Try running with CGT_FLAGS=debug=True")
                        raise e
                else:
                    utils.error("Oy vey, an exception occurred in a %s Instruction. I don't know how to help you debug this one right now :(."%type(instr))
                    raise e
            if profiler.on: profiler.update(instr, time.time()-tstart)
        outputs = [self.get(loc) for loc in self.output_locs]
        if self.copy_outputs: outputs = map(_copy, outputs)
        return outputs
        # need to copy because otherwise we might mess up the data when we call func again
        # todo: add option that prevents this behavior
    def get(self, mem):
        # Read the value currently stored at a MemLocation.
        return self.storage[mem.index]
    def set(self, mem, val):
        self.storage[mem.index] = val
    def getarg(self, i):
        return self.args[i]
# ================================================================
# Profiler
# ================================================================
class _Profiler(object):
    """
    Profiler for Python backend, i.e. Interpreter
    """
    def __init__(self):
        # instr2stats: instruction -> (call count, cumulative seconds)
        self.instr2stats = {}
        self.on = False
        self.t_total = 0.0
    def start(self): self.on = True
    def stop(self): self.on = False
    def update(self, instr, elapsed):
        # Accumulate one timing sample for `instr`.
        (prevcount, prevtime) = self.instr2stats.get(instr, (0,0.0))
        self.instr2stats[instr] = (prevcount+1, prevtime+elapsed)
        self.t_total += elapsed
    def print_stats(self):
        op2stats = {}
        # Collapse by Op, rather than instruction
        for (instr,(count,t)) in self.instr2stats.iteritems():
            if isinstance(instr, (ReturnByRef, ReturnByVal)):
                opkey = str(instr.op)
            elif isinstance(instr, Alloc):
                # One Alloc of a given dtype/rank per bucket.
                opkey = "Alloc{dtype=%s,ndim=%i}"%(instr.dtype, len(instr.read_locs))
            else:
                opkey = instr.__class__.__name__
            (prevcount, prevtime) = op2stats.get(opkey, (0, 0.0))
            op2stats[opkey] = (prevcount+count, prevtime+t)
        print "Total time elapsed: %.3g seconds"%self.t_total
        # _print_heading("By instruction")
        # _print_stats(self.instr2stats, self.t_total)
        _print_heading("By Op")
        _print_stats(op2stats, self.t_total)
    def clear_stats(self):
        self.instr2stats = {}
        self.t_total = 0.0
profiler = _Profiler()  # module-level singleton consulted by SequentialInterpreter
def _print_heading(heading):
    # Print a 60-char-wide banner with `heading` centered in a row of '*'.
    heading = " " + heading + " "
    width = 60
    assert len(heading) < width-10
    print
    print "*"*width
    # Center the heading; padright absorbs the rounding remainder.
    padleft = (width-len(heading))//2
    padright = width-len(heading)-padleft
    print "*"*padleft + heading + "*"*padright
    print "*"*width
def _print_stats(key2stats, t_total):
    # Tabulate per-key (count, time) stats sorted by time descending, with a
    # fraction-of-total column and its running cumulative sum.
    rows = []
    for (key, (count,t)) in key2stats.iteritems():
        rows.append([str(key), count, t, t/t_total])
    rows = sorted(rows, key=lambda row: row[2], reverse=True)
    cumsum = 0
    for row in rows:
        cumsum += row[3]
        row.append(cumsum)
    # Imported lazily so the vendored dependency is only needed when printing.
    from thirdparty.tabulate import tabulate
    print tabulate(rows, headers=["Instruction","Count","Time","Frac","Frac cumsum"])
def _copy(x):
    """Duplicate an interpreter output so the caller's copy survives reuse
    of the interpreter's internal buffers on the next call."""
    if isinstance(x, np.ndarray):
        return x.copy()
    if isinstance(x, tuple):
        return tuple(el.copy() for el in x)
    if np.isscalar(x):
        return x # xxx is this case ok?
    raise NotImplementedError
def typecheck_args(numargs, types):
    """Assert that the numeric arguments match their declared types:
    same count, and same dtype/ndim for every TensorType entry."""
    assert len(numargs)==len(types), "wrong number of arguments. got %i, expected %i"%(len(numargs),len(types))
    for numarg, typ in zip(numargs, types):
        if not isinstance(typ, core.TensorType):
            continue  # non-tensor types are not checked here
        assert numarg.dtype==typ.dtype and numarg.ndim==typ.ndim
# ================================================================
# Utils
# ================================================================
def _list_to_json(xs):
    """Serialize each element via its to_json() method."""
    return [item.to_json() for item in xs]
def _is_data_mutable(node):
    """A node's data may be written in place unless it is an input node or
    its op is a Constant."""
    if node.is_input():
        return False
    return not isinstance(node.op, core.Constant)
|
{
"content_hash": "b8c60f03373571c8cbdfa906e2fea2ab",
"timestamp": "",
"source": "github",
"line_count": 894,
"max_line_length": 167,
"avg_line_length": 40.22371364653244,
"alnum_prop": 0.5846218020022247,
"repo_name": "FighterLYL/cgt",
"id": "aef0647deb74cc8ccc6bfd6606bb0d3e14c3a901",
"size": "35960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgt/compilation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2313"
},
{
"name": "C++",
"bytes": "49432"
},
{
"name": "CMake",
"bytes": "21519"
},
{
"name": "Cuda",
"bytes": "5004"
},
{
"name": "Python",
"bytes": "295631"
}
],
"symlink_target": ""
}
|
from scipy.stats import scoreatpercentile
def percentile(tosort, percentile=95):
    """Return the `percentile`-th percentile of `tosort` (default 95),
    delegating to scipy.stats.scoreatpercentile."""
    score = scoreatpercentile(tosort, percentile)
    return score
|
{
"content_hash": "bda8cf33d950a1db787c2d3446dc8000",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 55,
"avg_line_length": 34.5,
"alnum_prop": 0.7753623188405797,
"repo_name": "xchewtoyx/pct95-bench",
"id": "7c5f831db50ea7a5792a6b4915dec9b3748c48f7",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy95.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3671"
},
{
"name": "Python",
"bytes": "3764"
}
],
"symlink_target": ""
}
|
'''
pyssdb
~~~~~~~
A SSDB Client Library for Python.
:copyright: (c) 2013-2017 by Yue Du.
:license: BSD 2-clause License, see LICENSE for more details.
'''
from __future__ import print_function
import os
import sys
import socket
import functools
import itertools
__version__ = '0.4.2'
__author__ = 'Yue Du <ifduyue@gmail.com>'
__url__ = 'https://github.com/ifduyue/pyssdb'
__license__ = 'BSD 2-Clause License'
PY3 = sys.version_info >= (3,)
# Py2/3 shim: give `unicode` and `zip_longest` one spelling for both runtimes.
if PY3:
    unicode = str
    from itertools import zip_longest
else:
    from itertools import izip_longest as zip_longest
def utf8(s):
    """Coerce ints and text to utf-8 bytes; byte strings pass through."""
    if isinstance(s, int):
        s = str(s)
    if isinstance(s, unicode):
        return s.encode('utf8')
    return s
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks (itertools recipe).

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # n references to ONE iterator, so zip_longest pulls n items per tuple.
    chunks = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *chunks)
class error(Exception):
    """SSDB protocol error: `reason` is the status token from the server,
    and the remaining args are joined into `message`."""
    def __init__(self, reason, *args):
        super(error, self).__init__(reason, *args)
        self.message = ' '.join(args)
        self.reason = reason
class Connection(object):
    """A single TCP connection to an SSDB server; the socket is opened lazily."""
    def __init__(self, host='127.0.0.1', port=8888, socket_timeout=None):
        self.pid = os.getpid()  # recorded for ConnectionPool's fork check
        self.host = host
        self.port = port
        self.socket_timeout = socket_timeout
        self._sock = None
        self._fp = None  # buffered binary reader over the socket
    def connect(self):
        # Open the socket if not already connected; no-op otherwise.
        if self._sock:
            return
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(self.socket_timeout)
            sock.connect((self.host, self.port))
            self._sock = sock
            self._fp = sock.makefile('rb')
        except socket.error:
            raise
    def disconnect(self):
        # Close the socket, swallowing socket errors; safe when not connected.
        if self._sock is None:
            return
        try:
            self._sock.close()
        except socket.error:
            pass
        self._sock = self._fp = None
    close = disconnect
    def reconnect(self):
        self.disconnect()
        self.connect()
    def send(self, cmd, *args):
        # Serialize one request in the SSDB wire format: a "<len>\n<data>\n"
        # block per token (command first), terminated by a blank line.
        if cmd == 'delete':
            cmd = 'del'  # the wire command is 'del'; 'delete' is accepted as an alias
        self.last_cmd = cmd  # recv() uses this to shape the reply
        if self._sock is None:
            self.connect()
        args = [utf8(cmd)] + [utf8(i) for i in args]
        buf = utf8('').join(utf8('%d\n%s\n') % (len(i), i) for i in args) + utf8('\n')
        self._sock.sendall(buf)
    def recv(self):
        # Read one reply -- length-prefixed chunks until a blank line -- and
        # convert it based on the command sent by the preceding send().
        cmd = self.last_cmd
        ret = []
        while True:
            line = self._fp.readline().rstrip(utf8('\n'))
            if not line:
                break  # blank line terminates the reply
            data = self._fp.read(int(line))
            self._fp.read(1)  # discard '\n'
            ret.append(data)
        # First chunk is the status; the rest is the payload.
        status, ret = ret[0], ret[1:]
        st = status.decode('utf8')
        if st == 'not_found':
            return None
        elif st == 'ok':
            # List-returning commands pass the payload through as-is.
            if cmd.endswith('keys') or cmd.endswith('hgetall') or cmd.endswith('list') or \
                    cmd.endswith('scan') or cmd.endswith('range') or \
                    (cmd.startswith('multi_') and cmd.endswith('get')) or \
                    cmd.endswith('getall'):
                return ret
            elif cmd == 'info':
                return ret[1:]
            elif len(ret) == 1:
                # Counter-style commands return an int; everything else the raw value.
                if cmd.endswith('set') or cmd.endswith('del') or \
                        cmd.endswith('incr') or cmd.endswith('decr') or \
                        cmd.endswith('size') or cmd.endswith('rank') or \
                        cmd in ('setx', 'zget', 'qtrim_front', 'qtrim_back'):
                    return int(ret[0])
                else:
                    return ret[0]
            elif not ret:
                return True  # ok with empty payload == success flag
            else:
                return ret
        # Any other status token is an error reported by the server.
        raise error(status, *ret)
class ConnectionPool(object):
    """Pool of Connection objects, fork-aware via checkpid()."""
    def __init__(self, connection_class=Connection, max_connections=1048576,
                 **connection_kwargs):
        self.pid = os.getpid()  # the pid this pool belongs to
        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections
        self.idle_connections = []
        self.active_connections = set()
    def checkpid(self):
        # After a fork, sockets are shared with the parent: drop everything
        # and reinitialize the pool for the child process.
        if self.pid != os.getpid():
            self.disconnect()
            self.__init__(self.connection_class, self.max_connections,
                          **self.connection_kwargs)
    def get_connection(self):
        # Reuse an idle connection if available, else create a new one.
        self.checkpid()
        try:
            connection = self.idle_connections.pop()
        except IndexError:
            connection = self.new_connection()
        self.active_connections.add(connection)
        return connection
    def new_connection(self):
        # NOTE(review): `>` lets count reach max_connections + 1 before
        # raising; `>=` would enforce the limit exactly -- confirm intent.
        count = len(self.active_connections) + len(self.idle_connections)
        if count > self.max_connections:
            raise error("Too many connections")
        return self.connection_class(**self.connection_kwargs)
    def release(self, connection, error=False):
        # Return a connection to the pool; on error it is closed instead of
        # being recycled. Connections created in another pid are dropped
        # silently (checkpid has already rebuilt the pool).
        self.checkpid()
        if connection.pid == self.pid:
            if not error:
                self.active_connections.remove(connection)
                self.idle_connections.append(connection)
            else:
                connection.close()
                self.active_connections.remove(connection)
    def disconnect(self):
        # Close every connection, idle and active, and empty the pool.
        acs, self.active_connections = self.active_connections, set()
        ics, self.idle_connections = self.idle_connections, []
        for connection in itertools.chain(acs, ics):
            connection.disconnect()
    close = disconnect
def command_post_processing(func):
    """Decorator: turn the flat key/value list returned by the `info`
    command into a dict; results of all other commands pass through."""
    @functools.wraps(func)
    def wrapper(self, cmd, *args):
        result = func(self, cmd, *args)
        if cmd != 'info':
            return result
        return dict(grouper(result, 2, None))
    return wrapper
class Client(object):
    """High-level SSDB client. Any unknown attribute becomes a server
    command via __getattr__, e.g. c.get('k') sends "get k"."""
    def __init__(self, host='127.0.0.1', port=8888, connection_pool=None,
                 socket_timeout=None, max_connections=1048576):
        if not connection_pool:
            connection_pool = ConnectionPool(host=host, port=port,
                                            socket_timeout=socket_timeout,
                                            max_connections=max_connections)
        self.connection_pool = connection_pool
        # Eagerly open one connection so construction fails fast when the
        # server is unreachable; it is then parked in the idle list.
        connection = self.connection_pool.new_connection()
        connection.connect()
        self.connection_pool.idle_connections.append(connection)
    @command_post_processing
    def execute_command(self, cmd, *args):
        # Check out a connection, do one request/response round trip, and
        # return it to the pool (flagged as errored if anything raised).
        connection = self.connection_pool.get_connection()
        try:
            connection.send(cmd, *args)
            data = connection.recv()
        except Exception as e:
            self.connection_pool.release(connection, error=True)
            raise e
        else:
            self.connection_pool.release(connection)
            return data
    def disconnect(self):
        self.connection_pool.disconnect()
    close = disconnect
    def __getattr__(self, cmd):
        # Cache a partial in __dict__ so subsequent lookups of the same
        # command bypass __getattr__ entirely.
        if cmd not in self.__dict__:
            self.__dict__[cmd] = functools.partial(self.execute_command, cmd)
        return self.__dict__[cmd]
if __name__ == '__main__':
    # Manual smoke test / usage demo: requires an SSDB server listening on
    # the default 127.0.0.1:8888.
    c = Client()
    print(c.set('key', 'value'))
    print(c.get('key'))
    import string
    # Exercise incr and range scans over single-letter keys.
    for i in string.ascii_letters:
        c.incr(i)
    print(c.keys('a', 'z', 1))
    print(c.keys('a', 'z', 10))
    print(c.get('z'))
    print(c.get('a'))
    # Round-trip non-ASCII keys/values through the utf8 encoding path.
    print(c.set('中文', '你好'))
    print(c.get('中文'))
    print(c.info())
    c.disconnect()
|
{
"content_hash": "a46a375aae89f9dc079ea5dec56895d8",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 91,
"avg_line_length": 29.546875,
"alnum_prop": 0.5519566367001586,
"repo_name": "ifduyue/pyssdb",
"id": "e69cd0f8f7369dbf1d7836a3e5231abbcff7e8cd",
"size": "7622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyssdb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9712"
}
],
"symlink_target": ""
}
|
from mitmproxy.addons import check_alpn
from mitmproxy.test import taddons
from ...conftest import requires_alpn
class TestCheckALPN:
    """Checks that the CheckALPN addon logs a warning exactly when ALPN
    support is unavailable."""
    @requires_alpn
    def test_check_alpn(self):
        # With ALPN available, configuring the addon must not log the warning.
        expected = 'ALPN support missing'
        with taddons.context() as tctx:
            addon = check_alpn.CheckALPN()
            tctx.configure(addon)
            assert not tctx.master.has_log(expected)
    def test_check_no_alpn(self, disable_alpn):
        # With ALPN disabled (fixture), the warning must be logged.
        expected = 'ALPN support missing'
        with taddons.context() as tctx:
            addon = check_alpn.CheckALPN()
            tctx.configure(addon)
            assert tctx.master.has_log(expected)
|
{
"content_hash": "a5c7851209ea7d8948c7b06450fff399",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 47,
"avg_line_length": 27.043478260869566,
"alnum_prop": 0.6205787781350482,
"repo_name": "xaxa89/mitmproxy",
"id": "2b1d60587528cd09215a67a7d85af4ed39d187ea",
"size": "622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mitmproxy/addons/test_check_alpn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17714"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "150625"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1535155"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
}
|
from eventlet import patcher
from eventlet.green import zmq
from eventlet.hubs import _threadlocal
from eventlet.hubs.hub import BaseHub, READ, WRITE, noop
from eventlet.support import clear_sys_exc_info
import sys
# Use the unpatched stdlib modules: the hub itself must not run on top of
# eventlet's green (monkey-patched) time/select.
time = patcher.original('time')
select = patcher.original('select')
sleep = time.sleep
# Map zmq poll flags onto the hub's event categories.
EXC_MASK = zmq.POLLERR
READ_MASK = zmq.POLLIN
WRITE_MASK = zmq.POLLOUT
class Hub(BaseHub):
    """Eventlet hub backed by zmq.Poller, so zmq sockets and ordinary file
    descriptors can be waited on together."""
    def __init__(self, clock=time.time):
        BaseHub.__init__(self, clock)
        self.poll = zmq.Poller()
    def get_context(self, io_threads=1):
        """zmq's Context must be unique within a hub
        The zeromq API documentation states:
        All zmq sockets passed to the zmq_poll() function must share the same
        zmq context and must belong to the thread calling zmq_poll()
        As zmq_poll is what's eventually being called then we need to insure
        that all sockets that are going to be passed to zmq_poll (via
        hub.do_poll) are in the same context
        """
        try:
            return _threadlocal.context
        except AttributeError:
            # First use on this thread: create and memoize the context.
            _threadlocal.context = zmq._Context(io_threads)
            return _threadlocal.context
    def add(self, evtype, fileno, cb):
        # Register poller interest whenever a listener is added.
        listener = super(Hub, self).add(evtype, fileno, cb)
        self.register(fileno, new=True)
        return listener
    def remove(self, listener):
        super(Hub, self).remove(listener)
        # Recompute the poll mask now that this listener is gone.
        self.register(listener.fileno)
    def register(self, fileno, new=False):
        # Derive the poll mask from the listeners currently registered for
        # this fd; unregister entirely when nobody is interested.
        # NOTE(review): the `new` flag is never read in this body.
        mask = 0
        if self.listeners[READ].get(fileno):
            mask |= READ_MASK
        if self.listeners[WRITE].get(fileno):
            mask |= WRITE_MASK
        if mask:
            self.poll.register(fileno, mask)
        else:
            self.poll.unregister(fileno)
    def remove_descriptor(self, fileno):
        super(Hub, self).remove_descriptor(fileno)
        try:
            self.poll.unregister(fileno)
        except (KeyError, ValueError, IOError, OSError):
            # raised if we try to remove a fileno that was
            # already removed/invalid
            pass
    def do_poll(self, seconds):
        # zmq.Poller.poll expects milliseconds
        return self.poll.poll(seconds * 1000.0)
    def wait(self, seconds=None):
        # One iteration of the event loop: poll, then dispatch callbacks.
        readers = self.listeners[READ]
        writers = self.listeners[WRITE]
        if not readers and not writers:
            # Nothing to wait on: just honor the timeout.
            if seconds:
                sleep(seconds)
            return
        try:
            presult = self.do_poll(seconds)
        except zmq.ZMQError, e:
            # In the poll hub this part exists to special case some exceptions
            # from socket. There may be some error numbers that wider use of
            # this hub will throw up as needing special treatment so leaving
            # this block and this comment as a remineder
            raise
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
        if self.debug_blocking:
            self.block_detect_pre()
        for fileno, event in presult:
            try:
                if event & READ_MASK:
                    readers.get(fileno, noop).cb(fileno)
                if event & WRITE_MASK:
                    writers.get(fileno, noop).cb(fileno)
                if event & EXC_MASK:
                    # zmq.POLLERR is returned for any error condition in the
                    # underlying fd (as passed through to poll/epoll)
                    readers.get(fileno, noop).cb(fileno)
                    writers.get(fileno, noop).cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                # Callback errors are reported but must not kill the loop.
                self.squelch_exception(fileno, sys.exc_info())
                clear_sys_exc_info()
        if self.debug_blocking:
            self.block_detect_post()
|
{
"content_hash": "266eaecfd630b21bd3f483a8485df91c",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 34.36363636363637,
"alnum_prop": 0.5944444444444444,
"repo_name": "2013Commons/HUE-SHARK",
"id": "686c97421fc65ff75505a66a3b38949c74781975",
"size": "3780",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build/env/lib/python2.7/site-packages/eventlet-0.9.14-py2.7.egg/eventlet/hubs/zeromq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "9992379"
},
{
"name": "C++",
"bytes": "199612"
},
{
"name": "CSS",
"bytes": "419753"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3683071"
},
{
"name": "JavaScript",
"bytes": "1076553"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "Python",
"bytes": "40522057"
},
{
"name": "SQL",
"bytes": "522"
},
{
"name": "Shell",
"bytes": "27739"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "XSLT",
"bytes": "190688"
}
],
"symlink_target": ""
}
|
import json
from django.views.decorators.csrf import csrf_exempt
from keychain.models import Account
from keychain.models import App
from keychain.models import User
from keychain.models import Service
from keychain.views.client.userview import validate_request, status_response, encrypt_response
import re
@csrf_exempt
def query(request):
    """Look up a pending service request by its 32-char service_id, mark it
    seen ('S'), and return the service plus the user's accounts for the
    service's app, encrypted for the requesting client.

    Status codes follow the sibling views: -3 unknown user, -6 wrong HTTP
    method, -8 no such service, -9 expired, -11 bad/missing service_id.
    """
    if request.method == 'POST':
        valid, status_code, userdb, user, certjson, datajson = validate_request(request)
        if not valid:
            return status_response(status_code)
        if userdb is None:
            return status_response(-3)
        if 'service_id' not in certjson:
            return status_response(-11)
        service_id = certjson['service_id']
        # BUGFIX: the old check compiled r'\w*' and used .match(), which
        # matches the empty prefix of ANY string, so it could never fail.
        # Anchor the pattern so service_id must be exactly 32 word chars.
        if len(service_id) != 32 or not re.match(r'\w{32}$', service_id):
            return status_response(-11)
        try:
            s = Service.objects.get(service_id=service_id)
        except Service.DoesNotExist:
            return status_response(-8)
        if s.has_expired():
            return status_response(-9)
        # Mark the service as seen.
        s.service_status = 'S'
        s.save()
        data = {}
        accounts = Account.objects.filter(account_app=s.service_app, account_user=userdb)
        if accounts is not None and accounts.count() > 0:
            data['service_accounts'] = json.dumps(
                [account.to_json(False, False) for account in accounts])
        else:
            data['service_accounts'] = None
        data['service'] = s.to_json()
        # BUGFIX: removed the unreachable trailing
        # `return encrypt_response(data, certjson)` that referenced an
        # undefined local `data` on the DoesNotExist path.
        return encrypt_response(data, certjson)
    return status_response(-6)
@csrf_exempt
def confirm(request):
    """Confirm a pending service request: attach the chosen account and set
    the service status to 'C'. Status codes as in query()."""
    if request.method == 'POST':
        valid, status_code, userdb, user, certjson, datajson = validate_request(request)
        if not valid:
            return status_response(status_code)
        if userdb is None:
            return status_response(-3)
        # ROBUSTNESS: query() validates certjson keys before use; do the same
        # here instead of letting a KeyError surface as a server error.
        if 'service_id' not in certjson or 'account_id' not in certjson:
            return status_response(-11)
        service_id = certjson['service_id']
        account_id = certjson['account_id']
        try:
            s = Service.objects.get(service_id=service_id)
            a = Account.objects.get(account_id=account_id)
        except (Service.DoesNotExist, Account.DoesNotExist):
            # BUGFIX: Account.DoesNotExist was previously uncaught and
            # produced a 500; treat a missing account like a missing service.
            return status_response(-8)
        if s.has_expired():
            return status_response(-9)
        s.service_status = 'C'
        s.service_account = a
        s.save()
        # BUGFIX: removed the unreachable trailing
        # `return encrypt_response(data, certjson)` that referenced an
        # undefined local `data`.
        return status_response(1)
    return status_response(-6)
@csrf_exempt
def get_accounts(request):
    # Return every account of the authenticated user, decrypted with the
    # password supplied in the request payload, wrapped via encrypt_response.
    # Status codes: -3 unknown user, -6 wrong HTTP method.
    if request.method == 'POST':
        valid, status_code, userdb, user, certjson, datajson = validate_request(request)
        if not valid:
            return status_response(status_code)
        if userdb is None:
            return status_response(-3)
        data = {}
        userjson = json.loads(datajson['user'])
        accounts = Account.objects.filter(account_user=userdb)
        if accounts is not None and accounts.count() > 0:
            l = []
            for account in accounts:
                # NOTE(review): to_json is invoked on the class with
                # decrypt()'s return value, unlike account.to_json(...) used
                # in query() -- presumably decrypt returns the account
                # instance; verify against the Account model.
                l.append(Account.to_json(account.decrypt(userjson['user_password'])))
            accountsJson = json.dumps(l)
            data['accounts'] = accountsJson
        else:
            data['accounts'] = None
        return encrypt_response(data, certjson)
    return status_response(-6)
@csrf_exempt
def add_account(request):
    """Create and store a new (encrypted) account for the authenticated user.

    POST only.  Expects ``datajson['user']`` (for the encryption password)
    and ``datajson['account']`` describing the new account.  Returns the
    decrypted account JSON on success.
    Error codes: -1 unknown referenced user/app, -3 unknown user, -6 bad method.
    """
    if request.method != 'POST':
        return status_response(-6)
    valid, status_code, userdb, user, certjson, datajson = validate_request(request)
    if not valid:
        return status_response(status_code)
    if userdb is None:
        return status_response(-3)
    userjson = json.loads(datajson['user'])
    user_password = userjson['user_password']
    accountjson = json.loads(datajson['account'])
    try:
        account_user = User.objects.get(
            user_id=accountjson['account_user']['user_id'])
    except User.DoesNotExist:
        # Referencing an unknown user is a client error, not a server crash
        # (the original let DoesNotExist propagate as HTTP 500).
        return status_response(-1)
    account = Account(
        account_username=accountjson['account_username'],
        account_password=accountjson['account_password'],
        account_user=account_user)
    account.account_type = accountjson['account_type']
    if 'account_app' in accountjson:
        try:
            account.account_app = App.objects.get(
                app_id=accountjson['account_app']['app_id'])
        except App.DoesNotExist:
            return status_response(-1)
    if 'account_email' in accountjson:
        account.account_email = accountjson['account_email']
    if 'account_cellphone' in accountjson:
        account.account_cellphone = accountjson['account_cellphone']
    # Persist encrypted, then decrypt in memory so the response JSON
    # contains cleartext fields for the client.
    account.encrypt_save(user_password)
    account.decrypt(user_password)
    return encrypt_response({'account': account.to_json()}, certjson)
@csrf_exempt
def delete_account(request):
    """Delete one of the authenticated user's accounts by account_id.

    POST only.  Returns 1 on success, -1 when the account does not belong
    to the user, -3 for an unknown user, -6 for a non-POST request.
    """
    if request.method != 'POST':
        return status_response(-6)
    valid, status_code, userdb, user, certjson, datajson = validate_request(request)
    if not valid:
        return status_response(status_code)
    if userdb is None:
        return status_response(-3)
    payload = json.loads(datajson['account'])
    try:
        target = Account.objects.get(
            account_user=userdb, account_id=payload['account_id'])
    except Account.DoesNotExist:
        return status_response(-1)
    target.delete()
    return status_response(1)
@csrf_exempt
def update_account(request):
    """Update an existing account of the authenticated user.

    POST only.  Re-reads all account fields from ``datajson['account']``,
    re-encrypts with the user's password and returns the decrypted account.
    Error codes: -1 unknown account/user, -3 unknown user, -6 bad method.
    """
    if request.method != 'POST':
        return status_response(-6)
    valid, status_code, userdb, user, certjson, datajson = validate_request(request)
    if not valid:
        return status_response(status_code)
    if userdb is None:
        return status_response(-3)
    accountjson = json.loads(datajson['account'])
    try:
        account = Account.objects.get(
            account_user=userdb, account_id=accountjson['account_id'])
    except Account.DoesNotExist:
        # The original fell through with `account` unbound, raising a
        # NameError at the `if account is not None` check.
        return status_response(-1)
    userjson = json.loads(datajson['user'])
    user_password = userjson['user_password']
    try:
        # Validation only: the original fetched this user and never
        # assigned it to the account; keep that behavior.
        User.objects.get(user_id=accountjson['account_user']['user_id'])
    except User.DoesNotExist:
        return status_response(-1)
    account.account_username = accountjson['account_username']
    account.account_password = accountjson['account_password']
    account.account_type = accountjson['account_type']
    if 'account_app' in accountjson:
        account.account_app = App.objects.get(
            app_id=accountjson['account_app']['app_id'])
    # Absent optional fields clear the stored value, as before.
    account.account_email = accountjson.get('account_email')
    account.account_cellphone = accountjson.get('account_cellphone')
    account.encrypt_save(user_password)
    account.decrypt(user_password)
    return encrypt_response({'account': account.to_json()}, certjson)
|
{
"content_hash": "4201a5da07060a83ab7ec8afd6a84980",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 99,
"avg_line_length": 40.407766990291265,
"alnum_prop": 0.5737626141278231,
"repo_name": "osgee/keychainserver",
"id": "2b6d5145b12904a336d022391c5d20b68a2dcb50",
"size": "8324",
"binary": false,
"copies": "1",
"ref": "refs/heads/c9version",
"path": "keychain/views/client/user/serviceview.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45763"
},
{
"name": "HTML",
"bytes": "7051"
},
{
"name": "JavaScript",
"bytes": "88987"
},
{
"name": "Python",
"bytes": "80649"
},
{
"name": "Shell",
"bytes": "4402"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import lambdainst.models
from django.conf import settings
import datetime
class Migration(migrations.Migration):
    """Initial schema for the lambdainst app.

    Auto-generated by Django; creates GiftCode, GiftCodeUser (the M2M
    through-table) and VPNUser, then wires GiftCode.users through it.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='GiftCode',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('code', models.CharField(default=lambdainst.models.random_gift_code, max_length=32)),
                # timedelta(30) == 30 days of subscription time granted.
                ('time', models.DurationField(default=datetime.timedelta(30))),
                ('created', models.DateTimeField(null=True, auto_now_add=True)),
                ('single_use', models.BooleanField(default=True)),
                ('free_only', models.BooleanField(default=True)),
                ('available', models.BooleanField(default=True)),
                ('comment', models.TextField(blank=True)),
                ('created_by', models.ForeignKey(related_name='created_giftcode_set', null=True, blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Gift Codes',
                'verbose_name': 'Gift Code',
            },
        ),
        migrations.CreateModel(
            name='GiftCodeUser',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('date', models.DateTimeField(null=True, auto_now_add=True)),
                ('code', models.ForeignKey(to='lambdainst.GiftCode')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Gift Code Users',
                'verbose_name': 'Gift Code User',
            },
        ),
        migrations.CreateModel(
            name='VPNUser',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('notes', models.TextField(blank=True)),
                ('expiration', models.DateTimeField(null=True, blank=True)),
                ('last_expiry_notice', models.DateTimeField(null=True, blank=True)),
                ('notify_expiration', models.BooleanField(default=True)),
                ('trial_periods_given', models.IntegerField(default=0)),
                ('last_vpn_auth', models.DateTimeField(null=True, blank=True)),
                ('referrer_used', models.BooleanField(default=False)),
                ('referrer', models.ForeignKey(related_name='referrals', null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL)),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'VPN Users',
                'verbose_name': 'VPN User',
            },
        ),
        migrations.AddField(
            model_name='giftcode',
            name='users',
            field=models.ManyToManyField(through='lambdainst.GiftCodeUser', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{
"content_hash": "6bb1bf117be882d994a64bd6b21de31d",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 172,
"avg_line_length": 45.763888888888886,
"alnum_prop": 0.5723823975720789,
"repo_name": "CCrypto/ccvpn3",
"id": "ed7ecb5b160e1203651d2442215f54674350d4d2",
"size": "3319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambdainst/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16467"
},
{
"name": "HTML",
"bytes": "54117"
},
{
"name": "JavaScript",
"bytes": "3594"
},
{
"name": "Python",
"bytes": "171906"
},
{
"name": "Shell",
"bytes": "320"
}
],
"symlink_target": ""
}
|
from Penguin.ClubPenguin import ClubPenguin
from Penguin.ClubPenguin import PenguinFactory
from Penguin.Penguin import Penguin
class MyPenguin(Penguin):
    """Example Penguin subclass: greets on room join, warps on 'partycookie'."""
    def __init__(self, player):
        super(MyPenguin, self).__init__(player)
        # "jr" is the join-room packet; greet every time it arrives.
        self.addListener("jr", self.handleJoinRoom)
        # Joins room 800 (the dock) at coordinates 223 and 333
        self.addListener("partycookie", lambda x: self.joinRoom(800, 223, 333))
    def handleJoinRoom(self, data):
        # `data` is the raw join-room packet payload (unused here).
        self.logger.info("Joined room!")
        self.sendPhraseMessage("hi master")
class MyPenguinFactory(PenguinFactory):
    """Factory producing MyPenguin protocols for queued players."""
    def __init__(self):
        super(MyPenguinFactory, self).__init__()
        self.logger.debug("MyPenguinFactory constructed")
    def buildProtocol(self, addr):
        # Pop the next queued player and wrap it in our custom penguin.
        player = self.queue.pop()
        penguin = MyPenguin(player)
        return penguin
# Wire the custom factory into the client and start the event loop.
cp = ClubPenguin()
myPenguinFactory = MyPenguinFactory()
# NOTE: replace the placeholder credentials/server before running.
cp.connect(username="Username", password="Password", server="Frostbite", \
           factory=myPenguinFactory)
cp.start()
|
{
"content_hash": "5add5a3e5efd3bf79a4dde121ca53239",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 23.317073170731707,
"alnum_prop": 0.7437238493723849,
"repo_name": "TunnelBlanket/Penguin",
"id": "d25d897ee8bf1a5b4f78e9d4324ff2cea1643364",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Example_CustomFactory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13736"
}
],
"symlink_target": ""
}
|
from .base import TestBase
from flask import session
from flask.sessions import SecureCookieSession
import carafe
from carafe.ext.session import SessionInterface
class TestSession(TestBase):
    """Exercise carafe's session support with permanent, salted sessions."""
    class __config__(object):
        # Config consumed by TestBase when building the app under test.
        CARAFE_SESSION_PERMANENT = True
        CARAFE_SESSION_SALT = 'carafe-salt'
        SECRET_KEY = 'carafe-secret'
    def test_session(self):
        # Session contents must accumulate across requests to different
        # endpoints, i.e. survive the cookie round-trip.
        @self.app.route('/foo')
        @carafe.utils.jsonify
        def foo():
            session['foo'] = True
            return session
        @self.app.route('/bar')
        @carafe.utils.jsonify
        def bar():
            session['bar'] = True
            return session
        self.assertEqual(
            self.client.get('/foo').json,
            {'foo': True, '_permanent': True})
        self.assertEqual(
            self.client.get('/bar').json,
            {'bar': True, 'foo': True, '_permanent': True})
        self.assertEqual(
            self.client.get('/foo').json,
            {'bar': True, 'foo': True, '_permanent': True})
    def test_session_interface_enabled(self):
        # Sessions are enabled by default, installing carafe's interface.
        self.assertIsInstance(self.app.session_interface, SessionInterface)
class TestSessionDisabled(TestBase):
    """Verify CARAFE_SESSION_ENABLED=False leaves Flask's default interface."""
    class __config__(object):
        CARAFE_SESSION_ENABLED = False
    def test_session_interface_disabled(self):
        self.assertNotIsInstance(self.app.session_interface, SessionInterface)
|
{
"content_hash": "410dbcb82bb6b10e3fe6bfe3bfb2b296",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 28.958333333333332,
"alnum_prop": 0.6100719424460431,
"repo_name": "dgilland/carafe",
"id": "90a50eb973092f24a900b208771e8e21ed6c9914",
"size": "1391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1433"
},
{
"name": "Python",
"bytes": "70613"
}
],
"symlink_target": ""
}
|
"""
.. _tut-sensors-time-freq:
============================================
Frequency and time-frequency sensor analysis
============================================
The objective is to show you how to explore the spectral content
of your data (frequency and time-frequency). Here we'll work on Epochs.
We will use this dataset: :ref:`somato-dataset`. It contains so-called event
related synchronizations (ERS) / desynchronizations (ERD) in the beta band.
""" # noqa: E501
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Richard Höchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet, psd_multitaper, psd_welch
from mne.datasets import somato
###############################################################################
# Set parameters
data_path = somato.data_path()
subject = '01'
task = 'somato'
# BIDS-style path into the somato dataset.
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
                    'sub-{}_task-{}_meg.fif'.format(subject, task))
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Construct Epochs (1 s pre- to 3 s post-stimulus; drop epochs with
# excessive gradiometer or EOG peak-to-peak amplitude)
event_id, tmin, tmax = 1, -1., 3.
baseline = (None, 0)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
                    preload=True)
epochs.resample(200., npad='auto')  # resample to reduce computation time
###############################################################################
# Frequency analysis
# ------------------
#
# We start by exploring the frequency content of our epochs.
###############################################################################
# Let's first check out all channel types by averaging across epochs.
epochs.plot_psd(fmin=2., fmax=40., average=True, spatial_colors=False)
###############################################################################
# Now let's take a look at the spatial distributions of the PSD.
epochs.plot_psd_topomap(ch_type='grad', normalize=True)
###############################################################################
# Alternatively, you can also create PSDs from Epochs objects with functions
# that start with ``psd_`` such as
# :func:`mne.time_frequency.psd_multitaper` and
# :func:`mne.time_frequency.psd_welch`.
f, ax = plt.subplots()
psds, freqs = psd_multitaper(epochs, fmin=2, fmax=40, n_jobs=1)
psds = 10. * np.log10(psds)  # convert power to dB
# Average over epochs then channels for the grand-mean trace.
psds_mean = psds.mean(0).mean(0)
psds_std = psds.mean(0).std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
                color='k', alpha=.5)
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency (Hz)',
       ylabel='Power Spectral Density (dB)')
plt.show()
###############################################################################
# Notably, :func:`mne.time_frequency.psd_welch` supports the keyword argument
# ``average``, which specifies how to estimate the PSD based on the individual
# windowed segments. The default is ``average='mean'``, which simply calculates
# the arithmetic mean across segments. Specifying ``average='median'``, in
# contrast, returns the PSD based on the median of the segments (corrected for
# bias relative to the mean), which is a more robust measure.
# Estimate PSDs based on "mean" and "median" averaging for comparison.
kwargs = dict(fmin=2, fmax=40, n_jobs=1)
psds_welch_mean, freqs_mean = psd_welch(epochs, average='mean', **kwargs)
psds_welch_median, freqs_median = psd_welch(epochs, average='median', **kwargs)
# Convert power to dB scale.
psds_welch_mean = 10 * np.log10(psds_welch_mean)
psds_welch_median = 10 * np.log10(psds_welch_median)
# We will only plot the PSD for a single sensor in the first epoch.
ch_name = 'MEG 0122'
ch_idx = epochs.info['ch_names'].index(ch_name)
epo_idx = 0
_, ax = plt.subplots()
ax.plot(freqs_mean, psds_welch_mean[epo_idx, ch_idx, :], color='k',
        ls='-', label='mean of segments')
ax.plot(freqs_median, psds_welch_median[epo_idx, ch_idx, :], color='k',
        ls='--', label='median of segments')
ax.set(title='Welch PSD ({}, Epoch {})'.format(ch_name, epo_idx),
       xlabel='Frequency (Hz)', ylabel='Power Spectral Density (dB)')
ax.legend(loc='upper right')
plt.show()
###############################################################################
# Lastly, we can also retrieve the unaggregated segments by passing
# ``average=None`` to :func:`mne.time_frequency.psd_welch`. The dimensions of
# the returned array are ``(n_epochs, n_sensors, n_freqs, n_segments)``.
psds_welch_unagg, freqs_unagg = psd_welch(epochs, average=None, **kwargs)
print(psds_welch_unagg.shape)
###############################################################################
# .. _inter-trial-coherence:
#
# Time-frequency analysis: power and inter-trial coherence
# --------------------------------------------------------
#
# We now compute time-frequency representations (TFRs) from our Epochs.
# We'll look at power and inter-trial coherence (ITC).
#
# To do this we'll use the function :func:`mne.time_frequency.tfr_morlet`
# but you can also use :func:`mne.time_frequency.tfr_multitaper`
# or :func:`mne.time_frequency.tfr_stockwell`.
#
# .. note::
#       The ``decim`` parameter reduces the sampling rate of the time-frequency
#       decomposition by the defined factor. This is usually done to reduce
#       memory usage. For more information refer to the documentation of
#       :func:`mne.time_frequency.tfr_morlet`.
#
# define frequencies of interest (log-spaced)
freqs = np.logspace(*np.log10([6, 35]), num=8)
n_cycles = freqs / 2.  # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
                        return_itc=True, decim=3, n_jobs=1)
###############################################################################
# Inspect power
# -------------
#
# .. note::
#     The generated figures are interactive. In the topo you can click
#     on an image to visualize the data for one sensor.
#     You can also select a portion in the time-frequency plane to
#     obtain a topomap for a certain time-frequency region.
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio', title=power.ch_names[82])
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
                   baseline=(-0.5, 0), mode='logratio', axes=axis[0],
                   title='Alpha', show=False)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
                   baseline=(-0.5, 0), mode='logratio', axes=axis[1],
                   title='Beta', show=False)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Joint Plot
# ----------
# You can also create a joint plot showing both the aggregated TFR
# across channels and topomaps at specific times and frequencies to obtain
# a quick overview regarding oscillatory effects across time and space.
power.plot_joint(baseline=(-0.5, 0), mode='mean', tmin=-.5, tmax=2,
                 timefreqs=[(.5, 10), (1.3, 8)])
###############################################################################
# Inspect ITC
# -----------
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
###############################################################################
# .. note::
#     Baseline correction can be applied to power or done in plots.
#     To illustrate the baseline correction in plots, the next line is
#     commented power.apply_baseline(baseline=(-0.5, 0), mode='logratio')
#
# Exercise
# --------
#
# - Visualize the inter-trial coherence values as topomaps as done with
#   power.
|
{
"content_hash": "05bbf5a732028fcbcd517cf01458d6bc",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 79,
"avg_line_length": 40.98994974874372,
"alnum_prop": 0.5910261125413755,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "66a46924d1a78e00ec287865c1e90a10654b83bd",
"size": "8158",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "0.23/_downloads/6c0a4b0f82866a352bb6cdc80e98c145/20_sensors_time_frequency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
}
|
"""
Compare tagging speed for LSTM, using dummy data.
Results on CPU laptop:
PyTorchLSTM.v1:
Predicted 39017 4.033804399892688 Ys[0] 0.05000001 5.551115e-17
LSTM (NumpyOps):
Predicted 39018 13.174870599992573 Ys[0] 0.05000001 5.551115e-17
So PyTorch is 3x faster currently.
"""
from typing import List
import typer
import tqdm
import numpy.random
from timeit import default_timer as timer
from thinc.api import Model, Config, registry, chain, list2padded, with_array
from thinc.api import to_categorical, set_current_ops
from thinc.api import NumpyOps, CupyOps, fix_random_seed, require_gpu
from thinc.types import Array2d, Padded
# Thinc config for the benchmark: dummy-data sizes plus an
# Embed -> (bi)LSTM -> Linear tagger.  ${section:key} values
# cross-reference other sections; registry.resolve() builds the objects.
CONFIG = """
[data]
n_samples = 1000
n_tags = 20
n_vocab = 10000
length_mean = 50
length_variance = 5
[common]
width = 300
[model]
@layers = "LSTMTagger.v1"
[model.embed]
@layers = "Embed.v1"
nO = ${common:width}
nV = ${data:n_vocab}
column = 0
[model.encode]
@layers = "LSTM.v1"
nO = ${common:width}
nI = ${common:width}
depth = 2
bi = true
[model.predict]
@layers = "Linear.v1"
nO = ${data:n_tags}
"""
@registry.layers("LSTMTagger.v1")
def build_tagger(
    embed: Model[Array2d, Array2d],
    encode: Model[Padded, Padded],
    predict: Model[Array2d, Array2d],
) -> Model[List[Array2d], Padded]:
    """Compose embed -> encode -> predict into a padded-sequence tagger.

    Registered as "LSTMTagger.v1" so the config system can construct it.
    """
    model = chain(
        list2padded(),
        with_array(embed),
        encode,
        with_array(predict),
    )
    # Named refs let callers fetch sub-layers without relying on indices.
    model.set_ref("embed", embed)
    model.set_ref("encode", encode)
    model.set_ref("predict", model.layers[-1])
    return model
def get_dummy_data(n_samples, n_tags, n_vocab, length_mean, length_variance):
    """Generate random (token-id, one-hot tag) pairs for benchmarking.

    Sequence lengths are drawn from N(length_mean, length_variance) and
    clipped to at least 1.  Returns (Xs, Ys) lists of per-sample arrays.
    """
    Xs = []
    Ys = []
    for _ in range(n_samples):
        # Draw a scalar instead of a size-1 array: calling int() on a
        # 1-element ndarray is deprecated in recent NumPy releases.
        length = numpy.random.normal(loc=length_mean, scale=length_variance)
        shape = (max(1, int(length)),)
        X = numpy.random.uniform(0, n_vocab - 1, shape)
        Y = numpy.random.uniform(0, n_tags - 1, shape)
        assert X.size, length
        assert Y.size, length
        # Token ids as an (n, 1) int column, tags as one-hot rows.
        Xs.append(X.reshape((-1, 1)).astype("i"))
        Ys.append(to_categorical(Y.astype("i")))
    return Xs, Ys
def run_forward(model, Xs, n_times=1):
    """Sum `model.predict` outputs over every batch, repeated n_times."""
    accum = 0.0
    for _ in range(n_times):
        for batch in tqdm.tqdm(Xs):
            accum += model.predict(batch).data.sum()
    return float(accum)
def run_forward_backward(model, batches, n_times=1):
    """Run forward and backward passes over all (X, Y) batches, n_times."""
    accum = 0.0
    for _ in range(n_times):
        for X, Y in tqdm.tqdm(batches):
            Yh, backprop = model.begin_update(X)
            # Backward pass timed for completeness; its output is unused.
            backprop(Yh)
            accum += Yh.data.sum()
    return float(accum)
def set_backend(name, gpu_id):
    """Select the compute backend by mutating thinc's current ops.

    ``name`` is one of "generic", "numpy", "pytorch"; ``gpu_id`` of -1
    means CPU.  The "pytorch" backend also rewrites CONFIG to use the
    PyTorch LSTM layer.
    """
    global CONFIG
    if name == "generic":
        # `Ops` was referenced without ever being imported, so this branch
        # raised NameError; import it locally from the already-used package.
        from thinc.api import Ops
        set_current_ops(Ops())
    else:
        if gpu_id == -1:
            set_current_ops(NumpyOps(use_blis=True))
        else:
            set_current_ops(CupyOps())
        if name == "pytorch":
            import torch
            # Single-threaded torch for a fair per-core comparison.
            torch.set_num_threads(1)
            CONFIG = CONFIG.replace("LSTM.v1", "PyTorchLSTM.v1")
def main(numpy: bool=False, pytorch: bool = False,
        generic: bool=False, gpu_id: int = -1):
    """Benchmark each enabled backend on the dummy tagging data.

    Each boolean flag enables one backend; gpu_id >= 0 runs on that GPU.
    Prints word count, output checksum and wall time for a forward-only
    pass and a forward+backward pass per backend.
    """
    global CONFIG
    fix_random_seed(0)
    if gpu_id >= 0:
        require_gpu(gpu_id)
        print("Set GPU", gpu_id)
    backends = {"pytorch": pytorch, "numpy": numpy, "generic": generic}
    for name, use_backend in backends.items():
        if not use_backend:
            print(f"Skipping {name}")
            continue
        set_backend(name, gpu_id)
        print("Getting data")
        C = registry.resolve(Config().from_str(CONFIG))
        model = C["model"]
        X, Y = get_dummy_data(**C["data"])
        print("Copy to device")
        X = [model.ops.asarray(x) for x in X]
        Y = [model.ops.asarray(y) for y in Y]
        print("Begin init", len(X))
        model.initialize(X=X[:5])
        print("Pre-batch")
        n_words = sum(len(x) for x in X)
        batches = model.ops.multibatch(16, X, Y)
        # Run the list2padded layer once up front and drop it, so the
        # timed loops measure only embed/LSTM/linear work.
        batches = [(model.layers[0].predict(x), y) for x, y in batches]
        model.layers.pop(0)
        print("Start")
        start_time = timer()
        total = run_forward(model, [x for x, y in batches])
        end_time = timer()
        print(name, n_words, total, end_time - start_time)
        start_time = timer()
        total = run_forward_backward(model, batches)
        end_time = timer()
        print(name, n_words, total, end_time - start_time)


if __name__ == "__main__":
    typer.run(main)
|
{
"content_hash": "3ef54183ec46a4633d2744877e7508f3",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 81,
"avg_line_length": 26.895705521472394,
"alnum_prop": 0.5983120437956204,
"repo_name": "spacy-io/thinc",
"id": "b4e986b44f0a6be1b0040b00dbdb0e5f42c8ebcf",
"size": "4384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/benchmarks/lstm_tagger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259926"
},
{
"name": "C++",
"bytes": "5131"
},
{
"name": "Python",
"bytes": "135654"
}
],
"symlink_target": ""
}
|
import uuid
from django import forms
from django.template.defaultfilters import slugify
from .compat import six
from .models import Answer, Campaign, EnumeratedQuestions, Sample
from .utils import get_question_model
def _other_field():
    """Free-text companion field shown next to a choice widget ('Other')."""
    return forms.CharField(required=False,
        label="Please could you specify?",
        widget=forms.TextInput(attrs={'class': 'other-input'}))


def _create_field(ui_hint, text,
                  has_other=False, required=False, choices=None):
    """Build the (main_field, other_field) pair for a question.

    Returns a 2-tuple of Django form fields; the second element is None
    unless *has_other* is true and the ui_hint is a choice widget.
    Unknown ui_hints yield (None, None), as before.
    """
    question_model = get_question_model()
    if ui_hint == question_model.TEXT:
        return (forms.CharField(label=text, required=required,
            widget=forms.Textarea), None)
    if ui_hint == question_model.NUMBER:
        return (forms.IntegerField(label=text, required=required), None)
    if ui_hint == question_model.RADIO:
        main = forms.ChoiceField(label=text, required=required,
            widget=forms.RadioSelect(), choices=choices)
    elif ui_hint == question_model.DROPDOWN:
        main = forms.ChoiceField(label=text, required=required,
            widget=forms.Select(), choices=choices)
    elif ui_hint == question_model.SELECT_MULTIPLE:
        main = forms.MultipleChoiceField(label=text, required=required,
            widget=forms.CheckboxSelectMultiple, choices=choices)
    else:
        return (None, None)
    # The 'other' free-text input was previously constructed three times
    # with identical arguments; build it in one place instead.
    return (main, _other_field() if has_other else None)
class AnswerForm(forms.ModelForm):
    """
    Form used to submit an Answer to a Question as part of Sample to a Campaign.
    """
    class Meta:
        model = Answer
        fields = []
    def __init__(self, *args, **kwargs):
        super(AnswerForm, self).__init__(*args, **kwargs)
        # The question comes either from the bound instance or from
        # `initial`.  NOTE(review): if neither is present, `question` is
        # unbound and the _create_field call below raises NameError.
        if self.instance.question:
            question = self.instance.question
        elif 'question' in kwargs.get('initial', {}):
            question = kwargs['initial']['question']
        required = True
        if self.instance.sample and self.instance.sample.campaign:
            # Per-campaign override of whether this question is mandatory.
            campaign_attrs = EnumeratedQuestions.objects.filter(
                campaign=self.instance.sample.campaign,
                question=question).first()
            if campaign_attrs:
                required = campaign_attrs.required
        fields = _create_field(question.ui_hint, question.text,
            required=required, choices=question.choices)
        self.fields['text'] = fields[0]
    def save(self, commit=True):
        # We save in the view instead; this form never persists directly.
        pass
class QuestionForm(forms.ModelForm):
    """Edit a Question's path, default unit and extra payload."""
    title = forms.CharField(label="Title", required=False)
    text = forms.CharField(label="Text", required=False)
    class Meta:
        model = get_question_model()
        fields = ('path', 'default_unit', 'extra')
    def clean_choices(self):
        # NOTE(review): 'choices' is not in Meta.fields above, so this
        # hook only fires if a subclass adds such a field — confirm.
        self.cleaned_data['choices'] = self.cleaned_data['choices'].strip()
        return self.cleaned_data['choices']
class SampleCreateForm(forms.ModelForm):
    """Auto-generated form asking every question passed in ``initial``.

    Builds one field per question named question-1, question-2, ...; choice
    questions with an 'Other' option get a free-text companion named
    other-<same index>.
    """
    class Meta:
        model = Sample
        fields = []
    def __init__(self, *args, **kwargs):
        super(SampleCreateForm, self).__init__(*args, **kwargs)
        for idx, question in enumerate(self.initial.get('questions', [])):
            key = 'question-%d' % (idx + 1)
            required = True
            # Per-campaign override of whether this question is mandatory.
            campaign_attrs = EnumeratedQuestions.objects.filter(
                campaign=self.instance.campaign,
                question=question).first()
            if campaign_attrs:
                required = campaign_attrs.required
            fields = _create_field(question.ui_hint, question.text,
                required=required, choices=question.choices)
            self.fields[key] = fields[0]
            if fields[1]:
                self.fields[key.replace('question-', 'other-')] = fields[1]
    def clean(self):
        super(SampleCreateForm, self).clean()
        # Fold each filled "other-N" free-text answer back into its
        # "question-N" slot.  Iterate over a snapshot: deleting keys while
        # iterating the live items view raises RuntimeError on Python 3.
        for key, value in list(six.iteritems(self.cleaned_data)):
            if key.startswith('other-'):
                if value:
                    self.cleaned_data[key.replace(
                        'other-', 'question-')] = value
                del self.cleaned_data[key]
        return self.cleaned_data
    def save(self, commit=True):
        # account/campaign are injected by the view through `initial`.
        if 'account' in self.initial:
            self.instance.account = self.initial['account']
        if 'campaign' in self.initial:
            self.instance.campaign = self.initial['campaign']
        # Random slug: uuid4 hex is already slug-safe.
        self.instance.slug = slugify(uuid.uuid4().hex)
        return super(SampleCreateForm, self).save(commit)
class SampleUpdateForm(forms.ModelForm):
    """
    Auto-generated ``Form`` from a list of ``Question`` in a ``Campaign``.
    """
    class Meta:
        model = Sample
        fields = []
    def __init__(self, *args, **kwargs):
        super(SampleUpdateForm, self).__init__(*args, **kwargs)
        # Rebuild one field per existing answer, keyed by the question's
        # rank in the campaign (falling back to enumeration order).
        for idx, answer in enumerate(self.instance.get_answers_by_rank()):
            question = answer.question
            required = True
            rank = idx
            campaign_attrs = EnumeratedQuestions.objects.filter(
                campaign=self.instance.campaign,
                question=question).first()
            if campaign_attrs:
                required = campaign_attrs.required
                rank = campaign_attrs.rank
            fields = _create_field(question.ui_hint, question.text,
                required=required, choices=question.choices)
            # XXX set value: fields are not pre-populated with the answer.
            self.fields['question-%d' % rank] = fields[0]
            if fields[1]:
                self.fields['other-%d' % rank] = fields[1]
class CampaignForm(forms.ModelForm):
    """Create a Campaign; the slug is derived from the title."""
    class Meta:
        model = Campaign
        fields = ['title', 'description', 'quizz_mode']
    def clean_title(self):
        """
        Creates a slug from the campaign title and
        checks it does not yet exist.
        """
        # NOTE(review): this uniqueness check races with concurrent saves
        # and also rejects re-saving an existing campaign under its own
        # title — confirm intended for create-only use.
        slug = slugify(self.cleaned_data.get('title'))
        if Campaign.objects.filter(slug__exact=slug).exists():
            raise forms.ValidationError(
                "Title conflicts with an existing campaign.")
        return self.cleaned_data['title']
    def save(self, commit=True):
        # The owning account comes from the view (initial), not user input.
        if 'account' in self.initial:
            self.instance.account = self.initial['account']
        self.instance.slug = slugify(self.cleaned_data.get('title'))
        return super(CampaignForm, self).save(commit)
class SendCampaignForm(forms.Form):
    """Compose an e-mail inviting a list of recipients to take a campaign."""
    # Typo fix in user-facing help text: "addresse" -> "address".
    from_address = forms.EmailField(
        help_text="add your email address to be contacted")
    to_addresses = forms.CharField(
        widget=forms.Textarea,
        help_text="add email addresses separated by new line")
    message = forms.CharField(widget=forms.Textarea,
        help_text="You can explain the aim of this campaign")
|
{
"content_hash": "6bf6431deffbce576973e091c8be1c4e",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 80,
"avg_line_length": 36.888324873096444,
"alnum_prop": 0.5999724783266822,
"repo_name": "djaodjin/djaodjin-survey",
"id": "998ef9ffcaaeac7b31be46f4611266325fe63186",
"size": "8639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "survey/forms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "296"
},
{
"name": "HTML",
"bytes": "29371"
},
{
"name": "JavaScript",
"bytes": "111241"
},
{
"name": "Makefile",
"bytes": "4583"
},
{
"name": "Python",
"bytes": "344027"
}
],
"symlink_target": ""
}
|
"""Controller that returns information on the heat API versions.
Now it's an subclass of module versions, because of identity with OpenStack
module versions.
"""
from heat.api import versions

# Alias: the CFN API reuses the generic OpenStack versions controller
# unchanged, so no subclassing is needed.
Controller = versions.Controller
|
{
"content_hash": "12449ac59fe31355c4d760ccd3ca568f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 28.375,
"alnum_prop": 0.7973568281938326,
"repo_name": "gonzolino/heat",
"id": "66183145b73e304dc51d51c2a7fbb7259c18bdde",
"size": "802",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heat/api/cfn/versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7214144"
},
{
"name": "Shell",
"bytes": "32170"
}
],
"symlink_target": ""
}
|
"""ASkiBot, cloned from IRC to TG because newfags can't even."""
import tgbot
import logging
import socket
import threading
import time
import random
import errno
import pickle
import collections
# Bot configuration: Telegram token file, keulii quote log, directory of
# per-channel quote files, and the localhost TCP port Mopoposter binds.
TOKEN_TXT = 'token.txt'
KEULII_TXT = 'keulii.txt'
QUOTES_DIR = 'quotes'
MOPOPOSTERPORT = 6688
class Mopoposter:
    """Simple message receiver on a tcp socket.

    Keulii messages go here too in realtime.
    They get logged to a file elsewhere.
    Listen for messages on new connections.
    One message per connection, closed automatically.
    Messages sent to a callback.
    """
    ENCODING = 'latin-1'

    def __init__(self, port, sendfunc):
        self.port = port          # TCP port bound on 127.0.0.1
        self.sendfunc = sendfunc  # callback receiving each decoded message
        self.serversocket = None
        self.thread = None

    def start(self):
        """Bind the listening socket and launch the accept-loop thread."""
        self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.serversocket.bind(('127.0.0.1', self.port))
        self.serversocket.listen(20)
        self.thread = threading.Thread(target=self.acceptLoop)
        self.thread.start()

    def acceptLoop(self):
        """Accept connections until the server socket is shut down/closed."""
        while True:
            try:
                (clientsocket, address) = self.serversocket.accept()
            except OSError as err:
                # EINVAL/EBADF mean stop() shut down or closed the server
                # socket; exit the loop instead of crashing the thread.
                if err.errno in (errno.EINVAL, errno.EBADF):
                    break
                raise
            self.handleConnection(clientsocket)

    def handleConnection(self, sock):
        """Read at most one message from *sock*, forward it, clean up."""
        sock.settimeout(5.0)
        try:
            msg = sock.recv(1024)
        except socket.timeout:
            # Slow or silent client: just clean up.
            pass
        else:
            if len(msg) > 0:
                self.sendfunc(msg.decode(self.ENCODING))
        finally:
            # The peer may already have disconnected; a failing shutdown
            # must not kill the accept-loop thread.
            try:
                sock.shutdown(socket.SHUT_RDWR)
            except OSError:
                pass
            sock.close()

    def stop(self):
        """Close the listening socket and join the accept thread."""
        if self.serversocket:
            # shutdown() on a listening socket raises ENOTCONN on Linux;
            # swallow it so close() (which unblocks accept) always runs
            # and the thread can actually be joined.
            try:
                self.serversocket.shutdown(socket.SHUT_RDWR)
            except OSError:
                pass
            self.serversocket.close()
        if self.thread:
            self.thread.join()
class QuotesBase:
    """Get a random quote for a chat channel.

    Subclasses supply the quote source via _listQuotes(); this base class
    adds the per-channel rate limit and substring search.
    """
    TIME_LIMIT = 15*60  # seconds one user must wait between quotes per chat
    ERR_MSG = 'Elä quottaile liikaa'
    def __init__(self):
        # chan_id -> (user_id, timestamp) of the last successful request
        self.last_requests = {}
    def get(self, chan_id, user_id, search_term):
        """Public api to get one message; search term is for whole lines.
        The number of gets is restricted to one within the time limit for a
        single user, unless another user asks for one; then the limit starts
        for that user.

        Returns (target_id, message): the rate-limit nag goes to the user
        privately, a found quote goes to the channel, and (chan_id, None)
        means nothing matched.
        """
        now = time.time()
        last_user, last_time = self.last_requests.get(chan_id, (None, 0))
        if user_id == last_user and now - last_time < self.TIME_LIMIT:
            return (user_id, self.ERR_MSG)
        msg = self._search(chan_id, search_term)
        # the user can try again if nothing was found
        if msg is not None:
            self.last_requests[chan_id] = (user_id, now)
            return (chan_id, msg)
        return (chan_id, None)
    def _search(self, chan_id, term):
        """Find that message on a chat channel."""
        term = term.lower().strip()
        lines = [x.strip() for x in self._listQuotes(chan_id)
                 if term in x.lower()]
        return random.choice(lines) if lines else None
    def _listQuotes(self, chan_id):
        """Return all quotes for chan_id; subclasses should do this.

        BUGFIX: the abstract method now takes chan_id, matching both the
        subclasses' overrides and the self._listQuotes(chan_id) call above.
        """
        raise NotImplementedError
class Keulii(QuotesBase):
    """One global quotefile for all chats.

    The rate limit is still per chat. Quotes cannot be added through this
    class (that happens elsewhere); the file is only read here.
    """
    def __init__(self, filename):
        super().__init__()
        self.filename = filename
    def _listQuotes(self, chan_id):
        """Return every line of the shared quote file, or [] on I/O error."""
        quotes = []
        try:
            # FIXME utf8
            with open(self.filename, encoding='latin-1') as quote_file:
                quotes = [line for line in quote_file]
        except IOError:
            return []
        return quotes
class Quotes(QuotesBase):
    """Unique quote file for each chat."""
    def __init__(self, quotefile_dir):
        super().__init__()
        self.quotefile_dir = quotefile_dir
    def _quotePath(self, chan_id):
        """Path of the pickle file holding this channel's quotes."""
        return '%s/%s' % (self.quotefile_dir, chan_id)
    def _listQuotes(self, chan_id):
        """Unpickle the channel's quote list; [] if the file doesn't exist."""
        try:
            with open(self._quotePath(chan_id), 'rb') as stored:
                return pickle.load(stored)
        except IOError:
            return []
    def addQuote(self, chan_id, quote):
        """Append one quote and rewrite the channel's quote file."""
        quotes = self._listQuotes(chan_id) + [quote]
        with open(self._quotePath(chan_id), 'wb') as stored:
            pickle.dump(quotes, stored)
class TgQuote(collections.namedtuple('TgQuoteBase', 'origin msgid text adder')):
    """A saved Telegram message that quacks like a text line for QuotesBase.

    Fields: origin = original sender (user dict), msgid = message id,
    text = message body, adder = the user who saved the quote.
    """
    def strip(self):
        """No-op so QuotesBase can treat quotes like stripped file lines."""
        return self
    def lower(self):
        """No-op so QuotesBase can treat quotes like lowercased file lines."""
        return self
    def __contains__(self, item):
        # origin is always a user; try all of those three for easier searching
        sender = self.origin
        haystack = '%s %s %s %s' % (sender.get('username', ''),
                                    sender.get('first_name', ''),
                                    sender.get('last_name', ''),
                                    self.text)
        return item in haystack.lower()
def quotemerge(a, b, result):
    """Merge two pickled quote lists: result file = list from a + list from b.

    a, b, result are file paths; result is overwritten.
    """
    # BUGFIX: use context managers so all three handles are closed even on
    # error (the original left them dangling open).
    with open(a, 'rb') as fh_a:
        quotes_a = pickle.load(fh_a)
    with open(b, 'rb') as fh_b:
        quotes_b = pickle.load(fh_b)
    with open(result, 'wb') as fh_out:
        pickle.dump(quotes_a + quotes_b, fh_out)
def getUserDesc(user):
    """Human-readable description of a user dict.

    Prefer "username", fall back to "first last" (one of those should
    exist). For display only — never for identifying users; use IDs.
    """
    name_fallback = '%s %s' % (user.get('first_name', ''),
                               user.get('last_name', ''))
    return user.get('username', name_fallback)
def getChatDesc(chat):
    """Human-readable description of a chat dict.

    The chat title, or for a titleless 1-on-1 private chat the user's
    description. For display only — never for identifying chats; use IDs.
    """
    if 'title' in chat:
        return chat['title']
    return getUserDesc(chat)
class AskibotTg:
    """The bot itself: polls Telegram for updates and dispatches commands.

    Wires together the Telegram connection, the Mopoposter broadcast
    listener, the shared keulii quote file and the per-chat quote files.
    """
    # Pickled dict of chat_id -> registering user_id, persisted across runs.
    MOPOPOSTER_SAVE_FILENAME = 'mopoposter.pickle'
    def __init__(self, connection, keuliifilename, mopoposterport, quotesdir):
        self.conn = connection
        self.update_offset = 0  # next update id to request from the server
        try:
            with open(self.MOPOPOSTER_SAVE_FILENAME, 'rb') as fh:
                self.mopoposter_broadcast = pickle.load(fh)
        except IOError:
            # first run: no registrations saved yet
            self.mopoposter_broadcast = {}
        self.mopoposter = Mopoposter(mopoposterport, self.sendMopoposter)
        self.keulii = Keulii(keuliifilename)
        self.quotes = Quotes(quotesdir)
        # record the last /addq place to save the quote to the right place when
        # forwarded to the bot.
        self.last_addq_chat = {}
        self.running = False
        # Own username is needed to answer /cmd@nick forms (handleMessage).
        me = self.conn.getMe()
        self.username = me['username']
    def saveMopoposterBroadcast(self):
        """Persist the broadcast registrations; log (not raise) on failure."""
        try:
            with open(self.MOPOPOSTER_SAVE_FILENAME, 'wb') as fh:
                pickle.dump(self.mopoposter_broadcast, fh)
        except IOError:
            logging.error('Cannot open mopoposter save %s' % self.MOPOPOSTER_SAVE_FILENAME)
    def helpMsg(self):
        """The /help text (Finnish)."""
        return '''Olen ASkiBot, killan irkistä tuttu robotti. Living tissue over metal endoskeleton.
/keulii HAKUTEKSTI - Hae mopopostereista tekstinpätkää, hakutekstillä tai ilman.
/keuliiregister - Rekisteröi tämä kanava reaaliaikaiseksi mopoposterikuuntelijaksi.
/keuliiunregister - Kumoa rekisteröinti, viestejä ei enää tule. Sallittu vain rekisteröijälle ja ylläpitäjälle.
/q HAKUTEKSTI - kuin mopoposter, mutta kanavakohtaisille quoteille.
/addq - merkitse lisättävä quote tälle kanavalle. Lisää se sitten forwardaamalla yksityisesti botille.
Bottia ylläpitää sooda. https://github.com/sooda/askibot-tg
'''
    def run(self):
        """Start the main loop that goes on until user ^C's this."""
        self.running = True
        try:
            self.mopoposter.start()
            self.loopUpdates()
        except KeyboardInterrupt:
            pass
        self.mopoposter.stop()
    def stop(self):
        # just for the tests
        self.running = False
    def sendMopoposter(self, msg):
        """Got a message, broadcast it to the listeners."""
        for chatid in self.mopoposter_broadcast.keys():
            self.conn.sendMessage(chatid, 'KEULII! ' + msg)
    def loopUpdates(self):
        """Long-poll getUpdates and handle each update until stopped."""
        while self.running:
            # btw, looks like the server timeouts with status ok and an empty
            # result set after just 20 seconds
            for update in self.conn.getUpdates(
                    offset=self.update_offset, timeout=60):
                self.handleUpdate(update)
    def handleUpdate(self, update):
        """Got one line from the server."""
        upid = update['update_id']
        try:
            msg = update['message']
        except KeyError:
            logging.warning("what?? no message in update: <%s>" % update)
        else:
            self.handleMessage(msg)
        # Acknowledge this update so the server won't resend it.
        self.update_offset = upid + 1
    def handleMessage(self, msg):
        """Manage the message itself; just pass it around to a handler."""
        if 'text' in msg:
            text = msg['text']
            # Dispatch table: command name -> handler(args, chat, from_user).
            commands = {
                '/help': self.cmdHelp,
                '/start': self.cmdStart,
                '/keulii': self.cmdKeulii,
                '/keuliiregister': self.cmdKeuliiRegister,
                '/keuliiunregister': self.cmdKeuliiUnRegister,
                '/mopoposterpost': self.cmdMopoposterPost,
                '/q': self.cmdQuote,
                '/addq': self.cmdAddQuote,
            }
            if 'forward_from' in msg:
                # this is a private message; from and chat are the same (the
                # bot can't see public ones). forward_from is the original
                # user, but the original chat is lost
                self.cmdForwardedMessage(msg, msg['from'],
                        msg['forward_from'])
            try:
                cmdname, args = text.split(' ', 1)
            except ValueError:
                # no args
                cmdname = text
                args = ''
            # tg specifies that /cmd@nick should work just for us
            if '@' in cmdname:
                cmdname, target = cmdname.split('@', 1)
                if target.lower() != self.username.lower():
                    return
            cmdname = cmdname.lower()
            # just silently ignore other commands: they may be directed to
            # other bots
            if cmdname in commands:
                commands[cmdname](args, msg['chat'], msg['from'])
    def cmdHelp(self, text, chat, user):
        """Respond in the chat with the command list."""
        self.conn.sendMessage(chat['id'], self.helpMsg())
    def cmdStart(self, text, chat, user):
        """Was this suggested by the protocol or something?"""
        self.conn.sendMessage(chat['id'], 'please stop')
    def cmdKeulii(self, text, chat, user):
        """Query for a keulii msg."""
        target, response = self.keulii.get(chat['id'], user['id'], text)
        if response is not None:
            self.conn.sendMessage(target, response)
    def cmdKeuliiRegister(self, text, chat, user):
        """Register this chat to the keulii broadcast list."""
        # public and private registrations are accepted, chat is one of them
        title = getChatDesc(chat)
        if self.mopoposter_broadcast.get(chat['id'], None):
            self.conn.sendMessage(user['id'],
                    'Pöh, keuliiviestit jo rekisteröity (' + title + ')')
        else:
            # Remember who registered: only that user (see unregister) owns it.
            self.mopoposter_broadcast[chat['id']] = user['id']
            self.saveMopoposterBroadcast()
            self.conn.sendMessage(user['id'],
                    'OK, keuliiviestit rekisteröity: ' + title)
    def cmdKeuliiUnRegister(self, text, chat, user):
        """Unregister this chat from the keulii broadcast list.
        Others can re-register immediately and the ownership changes then.
        """
        title = getChatDesc(chat)
        owner = self.mopoposter_broadcast.get(chat['id'], None)
        if owner == user['id']:
            del self.mopoposter_broadcast[chat['id']]
            self.saveMopoposterBroadcast()
            self.conn.sendMessage(user['id'],
                    'OK, keuliiviestejä ei enää lähetetä: ' + title)
        elif owner is None:
            self.conn.sendMessage(user['id'],
                    'Pöh, keuliiviestejä ei rekisteröity (' + title + ')')
        else:
            self.conn.sendMessage(user['id'],
                    'Pöh, keuliiviestit on rekisteröinyt joku muu (' + title + ')')
    def cmdMopoposterPost(self, text, chat, user):
        """Not implemented yet; just tell the user so (in Finnish)."""
        self.conn.sendMessage(user['id'],
                'Ei toimi vielä')
    def cmdQuote(self, text, chat, user):
        """Query for a quote."""
        target, response = self.quotes.get(chat['id'], user['id'], text)
        if isinstance(response, TgQuote):
            # the from-id is somehow paired to the msgid, but doesn't seem to
            # show in the chat ui (or the forward_from field). can't send the
            # msg if from-id is wrong.
            self.conn.forwardMessage(target, response.adder['id'], response.msgid)
        elif response is not None:
            # nag the user
            self.conn.sendMessage(target, response)
    def cmdForwardedMessage(self, msg, user, fwd_from):
        """Received a private forward, interpreted as a quote to be added"""
        chat = self.last_addq_chat.get(user['id'])
        if chat is None:
            self.conn.sendMessage(user['id'],
                    'Virhe: Mistä tämä tuli? Merkitse keskustelukanava ensin komentamalla siellä /addq')
            return
        msgid = msg['message_id']
        text = msg['text']
        quote = TgQuote(fwd_from, msgid, text, user)
        self.quotes.addQuote(chat['id'], quote)
        self.conn.sendMessage(chat['id'],
                'addq ({} lisäsi) {}: {}'.format(getUserDesc(user), getUserDesc(fwd_from), text))
        # One forward per /addq: the marker is consumed here.
        del self.last_addq_chat[user['id']]
    def cmdAddQuote(self, text, chat, user):
        """addq marks the chat to record the next forward on"""
        self.last_addq_chat[user['id']] = chat
        title = getChatDesc(chat)
        self.conn.sendMessage(user['id'],
                'addq: Forwardaa viesti niin tallennan (' + title + ')')
def main():
    """Set up file logging, read the bot token and run the bot.

    Blocks until interrupted (see AskibotTg.run)."""
    logging.basicConfig(filename='debug.log', level=logging.DEBUG,
            format='%(asctime)s [%(levelname)-8s] %(message)s')
    token = open(TOKEN_TXT).read().strip()
    bot = AskibotTg(tgbot.TgbotConnection(token), KEULII_TXT,
            MOPOPOSTERPORT, QUOTES_DIR)
    print(bot.conn.getMe())
    bot.run()
if __name__ == '__main__':
main()
|
{
"content_hash": "bc644099b9a0d56dacbd36ad60385c32",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 111,
"avg_line_length": 35.56310679611651,
"alnum_prop": 0.5832650832650833,
"repo_name": "sooda/askibot-tg",
"id": "03be92ef03cbda9b76097d0a9120ea6f144639bc",
"size": "14747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "askibot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30742"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os.path
import shutil
from datetime import datetime
from subprocess import call
from util import render
from util import tempdir
from s3 import S3
class LambdaBuilder(object):
    """
    Create and upload ZIP archive to S3 for AWS lambda function from CFN.
    """
    def __init__(self, *zip_args, **zip_kwargs):
        # NOTE(review): zip_args/zip_kwargs are stored but never read in this
        # class -- presumably consumed elsewhere or reserved; confirm.
        self._zip_args = zip_args
        self._zip_kwargs = zip_kwargs
        self._created = {} # "filename": "file body"
        self._copied = {} # "filename": "source filename"
    def create_file(self, file_body, path, render_mapping=None):
        """Register an in-memory file to be placed at `path` in the archive.

        If render_mapping is given, file_body is first passed through
        util.render with it. Returns self for chaining."""
        print(" * created `%s` from LambdaBuilder" % path)
        self._created[path] = file_body if render_mapping is None else render(file_body, render_mapping)
        return self
    def append(self, file_name, path=None, render_mapping=None):
        """Register an existing file or directory for inclusion.

        `path` defaults to the basename of `file_name`. With render_mapping
        the source must be a regular file and its rendered contents go to
        _created; otherwise it is recorded for a plain copy at build time.
        Returns self for chaining."""
        assert os.path.exists(file_name)
        assert os.path.isfile(file_name) or not render_mapping
        path = path or os.path.basename(file_name)
        if render_mapping:
            with open(file_name) as fp:
                self.create_file(fp.read(), path, render_mapping)
        else:
            print(" * copied `%s` from LambdaBuilder" % path)
            self._copied[path] = file_name
        return self
    def upload_to_s3(self, bucket_name, prefix=None, key_name=None):
        """
        Upload composed archive (.zip) to S3
        :param str prefix: without leading and trailing slashes
        :param str key_name: archive key; defaults to a UTC timestamp + .zip
        :return: the S3 key ("prefix/key_name") the archive was uploaded to
        """
        s3 = S3()
        prefix = prefix or "lambda-src"
        key_name = key_name or datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ.zip")
        uploaded_path = "%s/%s" % (prefix, key_name)
        with tempdir() as td:
            archive_name = self._compile(td.path, key_name)
            s3.upload(archive_name, bucket_name, uploaded_path)
        return uploaded_path
    def _compile(self, temp_path, archive_name):
        """
        Compile to a zip archive and return its full path.
        Uses temp_path as working directory.
        :param temp_path: scratch directory to build in
        :param archive_name: file name for the resulting archive
        :return: Fullpath to archive
        """
        # Force predictable file modes regardless of the caller's umask.
        old_umask = os.umask(0o022)
        build_path = os.path.join(temp_path, "build")
        os.mkdir(build_path, 0o755)
        archive_name = os.path.join(temp_path, archive_name)
        try:
            for filename, body in self._created.items():
                filename = os.path.join(build_path, filename)
                with open(filename, "wb") as fp:
                    fp.write(body)
            for filename, source in self._copied.items():
                filename = os.path.join(build_path, filename)
                if os.path.isdir(source):
                    shutil.copytree(source, filename)
                else:
                    shutil.copy(source, filename)
            # Archive contents must be world-readable for Lambda to run them.
            call(["chmod", "-R", "go+rX", temp_path])
            # -q quiet, -r recursive; exclude dotfiles and __pycache__ dirs.
            call(["zip", "-qr", archive_name, ".", "-x", ".*", "*/.*", "__pycache__", "*/__pycache__"], cwd=build_path)
        finally:
            # Restore the caller's umask even if packaging fails.
            os.umask(old_umask)
        return archive_name
|
{
"content_hash": "95a0a2bc0c6ffcf4f5b3422c25ab49a1",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 119,
"avg_line_length": 31.373737373737374,
"alnum_prop": 0.5724404378622022,
"repo_name": "clifflu/awscatalyst",
"id": "d32e140d4b6de51f62f03832f5c465a83f178f35",
"size": "3106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awscatalyst/awslambda.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35114"
}
],
"symlink_target": ""
}
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import multiprocessing
def create_and_start_process_with_queue(target_module, args_dict, jobs_list, output_q, p_name=''):
    """Creates python multiprocesses for the provided target module with the
    provided arguments and starts them
    Arguments:
    1. target_module = module for which multiple processes has to be started
    2. args_dict = dict of arguments to be passed to the target module
    3. jobs_list = list of process created
    4. output_q = multiprocessing.Queue object to handle returns from the target module
    5. p_name = optional name for the new process
    Returns (process, jobs_list, output_q).
    """
    # On the first call output_q is None: create one queue here and reuse
    # the same queue for every process started afterwards.
    if output_q is None:
        # output_q = multiprocessing.JoinableQueue()
        output_q = multiprocessing.Manager().Queue()
    args_dict["output_q"] = output_q
    # Convert args_dict into a tuple of its values for Process(args=...).
    # NOTE(review): dict iteration order decides the positional argument
    # order passed to target_module (this is Python 2 code, see iteritems)
    # -- fragile; confirm callers build args_dict in the expected order.
    args_list = []
    for _, value in args_dict.iteritems():
        args_list.append(value)
    args_tuple = tuple(args_list)
    process = multiprocessing.Process(name=p_name, target=target_module, args=args_tuple)
    jobs_list.append(process)
    process.start()
    return process, jobs_list, output_q
def get_results_from_queue(queue):
    """Drain the provided multiprocessing queue object and return a list of
    everything it currently holds (in queue order)."""
    return [queue.get() for _ in range(queue.qsize())]
def update_ts_junit_resultfile(ts_junit_obj, tc_junit_list, ts_timestamp):
    """loop through tc_junit object and attach testcase result to testsuite
    Arguments:
    1. ts_junit_obj = target testsuite
    2. tc_junit_list = list of testcase junit objects
    3. ts_timestamp = timestamp identifying the target testsuite
    Returns the modified ts_junit_obj.
    """
    for master_ts in ts_junit_obj.root.iter('testsuite'):
        # make sure we are modifying the correct testsuite
        if master_ts.get('timestamp') == ts_timestamp:
            for tc_junit_obj in tc_junit_list:
                for ts_part in tc_junit_obj.root.iter('testsuite'):
                    # make sure we are obtaining only the wanted testcases
                    if ts_part.get('timestamp') == ts_timestamp:
                        # add testcase element to testsuite, update count
                        for tc in ts_part.iter('testcase'):
                            master_ts.append(tc)
                        # merge the numeric count attributes into the master
                        master_ts.attrib = update_attribute(master_ts.attrib, ts_part.attrib)
    return ts_junit_obj
def update_pj_junit_resultfile(pj_junit_obj, ts_junit_list):
    """loop through ts_junit object and attach suite result to project(testsuites)
    :Arguments:
        1. pj_junit_obj = target project
        2. ts_junit_list = list of suite junit objects
    Returns the modified pj_junit_obj.
    """
    for ts_junit_obj in ts_junit_list:
        for ts in ts_junit_obj.root.iter('testsuite'):
            # append suite result to testsuites
            pj_junit_obj.root.append(ts)
        # update the count in testsuites attribute
        # NOTE(review): update_attribute mutates pj_junit_obj.root.attrib in
        # place; the return value is bound to pj_junit_obj.attrib, unlike the
        # other update_*_resultfile helpers which rebind <elem>.attrib --
        # confirm this asymmetry is intended.
        pj_junit_obj.attrib = update_attribute(pj_junit_obj.root.attrib,
                                               ts_junit_obj.root.attrib)
    return pj_junit_obj
def update_attribute(dict1, dict2):
    """Merge the count attributes of dict2 into dict1 (in place).

    For every known counter key present in both dicts, dict1's value is
    replaced by the string sum of the two integer values. dict1 is also
    returned for convenience.
    """
    for name in ("errors", "failures", "skipped", "passes", "exceptions",
                 "keywords", "tests", "suites"):
        try:
            combined = int(dict1[name]) + int(dict2[name])
        except KeyError:
            # counter missing from one side: leave dict1 untouched
            continue
        dict1[name] = str(combined)
    return dict1
def update_tc_junit_resultfile(tc_junit_obj, kw_junit_list, tc_timestamp):
    """loop through kw_junit object and attach keyword result to testcase
    Arguments:
    1. tc_junit_obj = target testcase
    2. kw_junit_list = list of keyword junit objects
    3. tc_timestamp = target testcase timestamp
    Returns the modified tc_junit_obj.
    """
    for master_tc in tc_junit_obj.root.iter('testcase'):
        # make sure we are modifying the correct testcase
        if master_tc.get('timestamp') == tc_timestamp:
            for kw_junit_obj in kw_junit_list:
                for tc_part in kw_junit_obj.root.iter('testcase'):
                    # make sure we are obtaining only the wanted keywords
                    if tc_part.get('timestamp') == tc_timestamp:
                        # add keyword element to testcase, add property result
                        # to properties, update count
                        for result in tc_part.find('properties').iter('property'):
                            if result.get('type') == "keyword":
                                master_tc.find('properties').append(result)
                        # merge the numeric count attributes into the master
                        master_tc.attrib = update_attribute(master_tc.attrib, tc_part.attrib)
    return tc_junit_obj
|
{
"content_hash": "8e3bb5e5e34e066aa52cb48b932d0dfb",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 98,
"avg_line_length": 40.82222222222222,
"alnum_prop": 0.6410814734168028,
"repo_name": "warriorframework/warriorframework",
"id": "014145465df120fff59d3d5bd10aa02c7fbf9531",
"size": "5511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "warrior/WarriorCore/multiprocessing_utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "226699"
},
{
"name": "HTML",
"bytes": "1971325"
},
{
"name": "JavaScript",
"bytes": "1488764"
},
{
"name": "Python",
"bytes": "4217003"
},
{
"name": "Shell",
"bytes": "914"
},
{
"name": "XSLT",
"bytes": "2391"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the implicit auto 'id' column on Ups and make 'ip' the primary key."""
    dependencies = [
        ('devices', '0006_ups'),
    ]
    operations = [
        # Remove the auto-generated integer primary key first...
        migrations.RemoveField(
            model_name='ups',
            name='id',
        ),
        # ...then promote the IP address field to primary key.
        migrations.AlterField(
            model_name='ups',
            name='ip',
            field=models.GenericIPAddressField(serialize=False, verbose_name=b'IP', primary_key=True),
        ),
    ]
|
{
"content_hash": "741d530a1ac68c4bca0dc614bae4e83b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 102,
"avg_line_length": 22.90909090909091,
"alnum_prop": 0.5654761904761905,
"repo_name": "lindseypack/NIM",
"id": "e0d7fc0a6a8cf0d6b2e5d5991f5670cf6b010b35",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devices/migrations/0007_auto_20140919_1741.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "55802"
},
{
"name": "HTML",
"bytes": "6204"
},
{
"name": "JavaScript",
"bytes": "114263"
},
{
"name": "Python",
"bytes": "63070"
}
],
"symlink_target": ""
}
|
"""
Module that enables the use of ensembles in NowTrade.
"""
import cPickle
from itertools import chain
import numpy as np
from sklearn.ensemble import RandomForestRegressor #, RandomForestClassifier
from nowtrade import logger
RANDOM_FOREST_REGRESSOR = 'Random Forest Regressor'
RANDOM_FOREST_CLASSIFIER = 'Random Forest Classifier'
EXTRA_TREES_REGRESSOR = 'Extra Trees Regressor'
EXTRA_TREES_CLASSIFIER = 'Extra Trees Classifier'
ADA_BOOST_REGRESSOR = 'Ada Boost Regressor'
ADA_BOOST_CLASSIFIER = 'Ada Boost Classifier'
GRADIENT_BOOSTING_REGRESSOR = 'Gradient Boosting Regressor'
GRADIENT_BOOSTING_CLASSIFIER = 'Gradient Boosting Classifier'
class UnknownEnsembleType(Exception):
    """
    Exception used when an invalid ensemble type was specified.
    Raised by Ensemble.fit() for any type other than the random forest
    regressor, the only type currently implemented there.
    """
    pass
def load(ensemble):
    """
    Load a previously pickled ensemble.
    Takes the pickle string produced by Ensemble.save() and returns the
    restored Ensemble. WARNING: never unpickle untrusted data -- pickle
    can execute arbitrary code.
    """
    return cPickle.loads(ensemble)
def load_from_file(filename):
    """
    Load an ensemble from a previous one saved to file.
    Reads the file written by Ensemble.save_to_file() and returns the
    restored Ensemble. WARNING: never unpickle untrusted files -- pickle
    can execute arbitrary code.
    """
    # Use a context manager so the handle is closed even if unpickling fails
    # (the original leaked the handle on error).
    with open(filename, 'rb') as file_handler:
        return cPickle.load(file_handler)
class Ensemble(object):
    """
    The ensemble class does all the heavy lifting to incorporate sklearn
    ensembles into the NowTrade ecosystem.
    """
    def __init__(self, train_data, prediction_data, ensemble_type=RANDOM_FOREST_REGRESSOR):
        # train_data: column labels used as model inputs; prediction_data:
        # labels whose first entry is the value to predict.
        # NOTE(review): both presumably index dataset.data_frame columns
        # (see build_ensemble) -- confirm against callers.
        self.train_data = train_data
        self.prediction_data = prediction_data
        self.ensemble_type = ensemble_type
        self.ensemble = None  # the fitted sklearn model, set by fit()
        # The remaining attributes are defaults; build_ensemble() overwrites
        # most of them from its keyword arguments.
        self.prediction_window = None
        self.look_back_window = None
        self.training_set = []
        self.target_set = []
        self.normalize = True
        # NOTE(review): 150 here vs build_ensemble's default of 100 --
        # whichever runs last wins; confirm which default is intended.
        self.number_of_estimators = 150
        self.max_depth = None
        self.random_state = 0
        self.min_samples_split = 2
        self.number_of_jobs = 2
        self.learning_rate = 1.0 # For Gradient Boosting
        self.rating = None
        self.feature_importances = None
        self.logger = logger.Logger(self.__class__.__name__)
        self.logger.info('train_data: %s prediction_data: %s ensemble_type: %s'
                         %(train_data, prediction_data, ensemble_type))
    def save(self):
        """
        Returns the pickled fitted ensemble as a string.
        WARNING: Can be very big in size.
        """
        return cPickle.dumps(self)
    def save_to_file(self, filename):
        """
        Saves an ensemble to file for later use.
        WARNING: Can be very big in size.
        """
        file_handler = open(filename, 'wb')
        cPickle.dump(self, file_handler)
        file_handler.close()
    def build_ensemble(self, dataset, **kwargs):
        """
        Builds an ensemble using the dataset provided.
        Expected keyword args:
            - 'normalize'
            - 'prediction_window'
            - 'look_back_window'
            - 'number_of_estimators'
        Optional keyword args:
            - 'max_depth'
            - 'random_state'
            - 'min_samples_split'
            - 'number_of_jobs'
            - 'learning_rate'
        @see: http://scikit-learn.org/0.15/modules/generated/sklearn.ensemble.\
RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor
        """
        self.training_set = []
        self.target_set = []
        self.normalize = kwargs.get('normalize', True)
        self.prediction_window = kwargs.get('prediction_window', 1)
        self.look_back_window = kwargs.get('look_back_window', 10)
        self.number_of_estimators = kwargs.get('number_of_estimators', 100)
        self.max_depth = kwargs.get('max_depth', None)
        self.random_state = kwargs.get('random_state', 0)
        self.min_samples_split = kwargs.get('min_samples_split', 2)
        self.number_of_jobs = kwargs.get('number_of_jobs', 1)
        self.learning_rate = kwargs.get('learning_rate', 1.0)
        if self.normalize:
            # Train/predict in log space; activate_all() undoes this via exp.
            training_values = np.log(dataset.data_frame[self.train_data])
            #training_values.fillna(method='backfill', inplace=True)
            results = \
                np.log(dataset.data_frame[self.prediction_data[0]].shift(-self.prediction_window))
            #results.fillna(method='backfill', inplace=True)
            # Replace all 0's that have been log'ed to -inf with -999
            # -999 is sufficient as np.exp(-999) brings it back to 0
            training_values.replace(-np.inf, -999, inplace=True)
            results.replace(-np.inf, -999, inplace=True)
        else:
            training_values = dataset.data_frame[self.train_data]
            results = dataset.data_frame[self.prediction_data[0]].shift(-self.prediction_window)
        # Build sliding-window samples: each sample is the flattened rows
        # i-look_back_window..i; the target is the shifted future value at i.
        for i in range(self.look_back_window, len(training_values)):
            values = training_values[i-self.look_back_window:i+1]
            values = list(chain.from_iterable(values.values))
            result = results.iloc[i] # Prediction window already calculated with shift
            # Skip samples containing NaN or infinite values entirely.
            if np.isnan(np.sum(values)) or np.isnan(np.sum(result)):
                #assert False, 'NaN values found in data while preparing data to fit'
                continue
            if np.inf in values or -np.inf in values or result == np.inf or result == -np.inf:
                #assert False, 'Infinite values found in data while preparing data to fit'
                continue
            self.training_set.append(values)
            self.target_set.append(result)
        # Need to get rid of the last few values that represent things we couldn't predict yet
        # Need to shuffle Training/Target Sets
        self.training_set = self.training_set[:-self.prediction_window]
        self.target_set = self.target_set[:-self.prediction_window]
    def fit(self, compute_importances=True):
        """
        Fits the model as configured.
        Requires build_ensemble() to have produced non-empty training and
        target sets; raises UnknownEnsembleType for any ensemble_type other
        than RANDOM_FOREST_REGRESSOR (the only one implemented).
        """
        assert len(self.training_set) > 0
        assert len(self.target_set) > 0
        if self.ensemble_type == RANDOM_FOREST_REGRESSOR:
            self.ensemble = \
                RandomForestRegressor(n_estimators=self.number_of_estimators,
                                      max_depth=self.max_depth,
                                      random_state=self.random_state,
                                      min_samples_split=self.min_samples_split,
                                      n_jobs=self.number_of_jobs)
        else: raise UnknownEnsembleType()
        self.ensemble.fit(self.training_set, self.target_set)
        if compute_importances:
            self.feature_importances = self.ensemble.feature_importances_
    def _activate(self, data):
        """
        Activates the ensemble using the data specified.
        Returns the ensemble's prediction.
        Returns np.nan instead of predicting when the input contains NaN
        or infinite values.
        """
        #data.replace(-np.inf, -999, inplace=True)
        if np.isnan(np.sum(data)):
            #assert False, 'NaN values found in data while activating the ensemble'
            return np.nan
        if np.inf in data or -np.inf in data:
            #assert False, 'Infinite values found in data while activating the ensemble'
            return np.nan
        return self.ensemble.predict(data)[0]
    def activate_all(self, data_frame):
        """
        Activates the network for all values in the dataframe specified.
        Applies the same log normalization and sliding-window layout as
        build_ensemble(), and exponentiates the predictions back when
        normalization is on.
        """
        assert self.ensemble != None, 'Please ensure you have fit your ensemble'
        if self.normalize:
            dataframe = np.log(data_frame[self.train_data])
        else:
            dataframe = data_frame[self.train_data]
        res = []
        for i in range(self.look_back_window, len(dataframe)):
            values = dataframe[i-self.look_back_window:i+1]
            values = list(chain.from_iterable(values.values))
            res.append(self._activate(values))
        if self.normalize:
            return np.exp(res)
        else:
            return res
|
{
"content_hash": "0b210c536686d3f2eac9235d2afaea64",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 98,
"avg_line_length": 41.046875,
"alnum_prop": 0.6108361883009771,
"repo_name": "edouardpoitras/NowTrade",
"id": "ec9a76dc9da064a897f1e0167bb4ceb748401e4c",
"size": "7881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nowtrade/ensemble.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223131"
},
{
"name": "Shell",
"bytes": "386"
}
],
"symlink_target": ""
}
|
"""
Role
====
The ``MultiprocessPluginProxy`` is instantiated by the MultiprocessPluginManager to replace the real implementation
that is run in a different process.
You cannot access your plugin directly from the parent process. You should use the child_pipe to communicate
with your plugin. The ``MultiprocessPluginProxy``'s role is to keep a reference to the communication pipe to the
child process as well as the process information.
API
===
"""
from yapsy.IPlugin import IPlugin
class MultiprocessPluginProxy(IPlugin):
    """
    This class contains two members that are initialized by the :doc:`MultiprocessPluginManager`.
    self.proc is a reference that holds the multiprocessing.Process instance of the child process.
    self.child_pipe is a reference that holds the multiprocessing.Pipe instance to communicate with the child.
    """
    def __init__(self):
        """Start with empty references; the plugin manager fills them in."""
        IPlugin.__init__(self)
        self.proc = None # This attribute holds the multiprocessing.Process instance
        self.child_pipe = None # This attribute holds the multiprocessing.Pipe instance
|
{
"content_hash": "41c64928db84a16e0f94aeb4b0b52de7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 115,
"avg_line_length": 34.86666666666667,
"alnum_prop": 0.7829827915869981,
"repo_name": "WhySoGeeky/DroidPot",
"id": "9bd2da374ec7efe129fd01a60383ea4fd59a44b3",
"size": "1125",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/yapsy/MultiprocessPluginProxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "874"
},
{
"name": "C",
"bytes": "31005"
},
{
"name": "CSS",
"bytes": "791857"
},
{
"name": "HTML",
"bytes": "1896759"
},
{
"name": "JavaScript",
"bytes": "2509094"
},
{
"name": "Makefile",
"bytes": "2057"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "13513559"
},
{
"name": "Shell",
"bytes": "3886"
},
{
"name": "TeX",
"bytes": "57070"
}
],
"symlink_target": ""
}
|
from pythonforandroid.toolchain import Bootstrap, shprint, current_directory, info, warning, ArchAndroid, logger, info_main, which
from os.path import join, exists
from os import walk
import glob
import sh
class PygameBootstrap(Bootstrap):
    """Bootstrap that assembles an Android project around the legacy
    pygame/SDL-1.2 skeleton shipped in this bootstrap's ``build`` dir."""

    name = 'pygame'
    recipe_depends = ['hostpython2', 'python2', 'pyjnius', 'sdl', 'pygame',
                      'android', 'kivy']

    def run_distribute(self):
        """Copy the bootstrap skeleton, the compiled python-install tree and
        the built libraries into ``self.dist_dir``, prune files not needed on
        Android, strip the shared libraries, then defer to the base class.
        """
        info_main('# Creating Android project from build and {} bootstrap'.format(
            self.name))

        src_path = join(self.bootstrap_dir, 'build')

        with current_directory(self.dist_dir):
            info('Creating initial layout')
            for dirname in ('assets', 'bin', 'private', 'res', 'templates'):
                if not exists(dirname):
                    shprint(sh.mkdir, dirname)

            info('Copying default files')
            # All skeleton entries are copied the same way, so loop instead of
            # nine near-identical statements.
            for entry in ('default.properties', 'local.properties', 'build.py',
                          'buildlib', 'src', 'templates', 'res',
                          'blacklist.txt', 'whitelist.txt'):
                shprint(sh.cp, '-a', join(src_path, entry), '.')

            info('Copying python distribution')
            hostpython = sh.Command(self.ctx.hostpython)
            # AND: This *doesn't* need to be in arm env?
            shprint(hostpython, '-OO', '-m', 'compileall',
                    join(self.ctx.build_dir, 'python-install'))
            if not exists('python-install'):
                shprint(sh.cp, '-a',
                        join(self.ctx.build_dir, 'python-install'), '.')

            info('Copying libs')
            # AND: Hardcoding armeabi - naughty!
            shprint(sh.mkdir, '-p', join('libs', 'armeabi'))
            for lib in glob.glob(join(self.ctx.libs_dir, '*')):
                shprint(sh.cp, '-a', lib, join('libs', 'armeabi'))

            info('Copying java files')
            for filename in glob.glob(join(self.ctx.build_dir, 'java', '*')):
                shprint(sh.cp, '-a', filename, 'src')

            info('Filling private directory')
            if not exists(join('private', 'lib')):
                shprint(sh.cp, '-a', join('python-install', 'lib'), 'private')
            shprint(sh.mkdir, '-p', join('private', 'include', 'python2.7'))

            # AND: Copylibs stuff should go here
            shprint(sh.mv, join('libs', 'armeabi', 'libpymodules.so'),
                    'private/')
            shprint(sh.cp,
                    join('python-install', 'include', 'python2.7', 'pyconfig.h'),
                    join('private', 'include', 'python2.7/'))

            info('Removing some unwanted files')
            shprint(sh.rm, '-f', join('private', 'lib', 'libpython2.7.so'))
            shprint(sh.rm, '-rf', join('private', 'lib', 'pkgconfig'))

            with current_directory(join(self.dist_dir, 'private', 'lib',
                                        'python2.7')):
                removes = []
                # Fix: record the full relative path, not just the basename,
                # otherwise files inside subdirectories were never deleted
                # (rm -f silently ignored the missing names). The suffixes
                # also carry their dot so that e.g. a file named 'numpy'
                # is not caught by a bare 'py' match.
                for dirname, _subdirs, filens in walk('.'):
                    for filename in filens:
                        if filename.endswith(('.py', '.pyc', '.so.o',
                                              '.so.a', '.so.libs')):
                            removes.append(join(dirname, filename))
                shprint(sh.rm, '-f', *removes)

                info('Deleting some other stuff not used on android')
                # To quote the original distribute.sh, 'well...'
                shprint(sh.rm, '-rf', 'ctypes')
                shprint(sh.rm, '-rf', 'lib2to3')
                shprint(sh.rm, '-rf', 'idlelib')
                for filename in glob.glob('config/libpython*.a'):
                    shprint(sh.rm, '-f', filename)
                shprint(sh.rm, '-rf', 'config/python.o')
                shprint(sh.rm, '-rf', 'lib-dynload/_ctypes_test.so')
                shprint(sh.rm, '-rf', 'lib-dynload/_testcapi.so')

            info('Stripping libraries')
            env = ArchAndroid(self.ctx).get_env()
            strip = which('arm-linux-androideabi-strip', env['PATH'])
            if strip is None:
                # Fix: the old code warned and then crashed on
                # sh.Command(None); stripping is an optimisation, so skip it.
                warning('Can\'t find strip in PATH, skipping library stripping')
            else:
                strip = sh.Command(strip)
                filens = shprint(sh.find, join(self.dist_dir, 'private'),
                                 join(self.dist_dir, 'libs'),
                                 '-iname', '*.so',
                                 _env=env).stdout.decode('utf-8')
                logger.info('Stripping libraries in private dir')
                for filen in filens.split('\n'):
                    if not filen:
                        continue  # split() leaves a trailing empty entry
                    try:
                        strip(filen, _env=env)
                    except sh.ErrorReturnCode_1:
                        # Fix: log the actual filename; the old code logged
                        # the literal string 'filen'.
                        logger.debug('Failed to strip ' + filen)

        super(PygameBootstrap, self).run_distribute()


bootstrap = PygameBootstrap()
|
{
"content_hash": "fd53e45f1a610956f480852881a4fc05",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 131,
"avg_line_length": 46.81651376146789,
"alnum_prop": 0.5263570448755633,
"repo_name": "inclement/python-for-android-revamp",
"id": "eae552155854ed23c85c8ff47e9c7372deb8181f",
"size": "5103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonforandroid/bootstraps/pygame/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "81220"
},
{
"name": "C++",
"bytes": "491"
},
{
"name": "Java",
"bytes": "353371"
},
{
"name": "Makefile",
"bytes": "24210"
},
{
"name": "Python",
"bytes": "1068171"
},
{
"name": "Shell",
"bytes": "16734"
}
],
"symlink_target": ""
}
|
def is_decreasing(seq):
    """Return True if seq is strictly decreasing (each item greater than
    the next). Empty and single-element sequences count as decreasing.

    Uses all() over adjacent pairs, which short-circuits on the first
    violation instead of scanning the whole sequence like the original.
    """
    return all(a > b for a, b in zip(seq, seq[1:]))
|
{
"content_hash": "b919496a9c848465dfa3c79de7074e7d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 33,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.5508982035928144,
"repo_name": "stoilov/Programming101",
"id": "844b81ed9f38443afd6e534a4faeacd906b8d261",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "week0/monday/is_decreasing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "510"
},
{
"name": "C++",
"bytes": "4288"
},
{
"name": "Go",
"bytes": "357"
},
{
"name": "Haskell",
"bytes": "221"
},
{
"name": "Java",
"bytes": "512"
},
{
"name": "JavaScript",
"bytes": "785"
},
{
"name": "Python",
"bytes": "121139"
}
],
"symlink_target": ""
}
|
from m5.params import *
from m5.SimObject import SimObject
class RubySystem(SimObject):
    """Top-level parameters for the Ruby memory system model."""
    type = 'RubySystem'
    # Fix: dropped the stray C-style trailing semicolons, which were used
    # inconsistently (some param lines had them, others did not).
    random_seed = Param.Int(1234, "random seed used by the simulation")
    randomization = Param.Bool(False,
        "insert random delays on message enqueue times")
    clock = Param.Clock('1GHz', "")
    block_size_bytes = Param.Int(64,
        "default cache block size; must be a power of two")
    mem_size = Param.MemorySize("total memory size of the system")
    stats_filename = Param.String("ruby.stats",
        "file to which ruby dumps its stats")
    no_mem_vec = Param.Bool(False, "do not allocate Ruby's mem vector")
    # added by Ankit
    num_cpus = Param.Int("number of CPUs passed as options")
    # end added by Ankit
|
{
"content_hash": "d849c66eb1224195a4886fdbc03b35eb",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 42.166666666666664,
"alnum_prop": 0.6732542819499341,
"repo_name": "dpac-vlsi/SynchroTrace",
"id": "11663e27165ce04c8e8c1d8edc7c6e1f51177af9",
"size": "2348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mem/ruby/system/RubySystem.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "232078"
},
{
"name": "C",
"bytes": "1371174"
},
{
"name": "C++",
"bytes": "14015506"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "273593"
},
{
"name": "Hack",
"bytes": "5230"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Makefile",
"bytes": "13933"
},
{
"name": "Perl",
"bytes": "26383"
},
{
"name": "Python",
"bytes": "4265656"
},
{
"name": "Shell",
"bytes": "97839"
},
{
"name": "TeX",
"bytes": "19361"
},
{
"name": "Visual Basic",
"bytes": "5768"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import pytest
from django.core.exceptions import ValidationError
from machina.apps.forum.signals import forum_moved
from machina.core.db.models import get_model
from machina.test.context_managers import mock_signal_receiver
from machina.test.factories import PostFactory
from machina.test.factories import UserFactory
from machina.test.factories import build_category_forum
from machina.test.factories import build_link_forum
from machina.test.factories import create_category_forum
from machina.test.factories import create_forum
from machina.test.factories import create_link_forum
from machina.test.factories import create_topic
# Resolve the (swappable) machina models through the registry rather than
# importing the concrete classes directly.
Forum = get_model('forum', 'Forum')
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
@pytest.mark.django_db
class TestForum(object):
    """Unit tests for the machina ``Forum`` model: tree-structure validation
    rules, post/topic counters, last-post tracking and the forum_moved signal.
    """

    @pytest.fixture(autouse=True)
    def setup(self):
        # A single poster shared by all tests in this class.
        self.u1 = UserFactory.create()
        # Set up top-level forums: a category, a default forum and a link forum
        self.top_level_cat = create_category_forum()
        self.top_level_forum = create_forum()
        self.top_level_link = create_link_forum()

    def test_has_a_margin_level_two_times_greater_than_its_real_level(self):
        # Run
        sub_level_forum = create_forum(parent=self.top_level_forum)
        # Check
        assert self.top_level_forum.margin_level == 0
        assert sub_level_forum.margin_level == 2

    def test_category_cannot_be_the_child_of_another_category(self):
        # Run & check
        with pytest.raises(ValidationError):
            cat = build_category_forum(parent=self.top_level_cat)
            cat.full_clean()

    def test_can_not_be_the_child_of_a_forum_link(self):
        # Run & check
        # NOTE(review): `forum_type` is never used inside the loop body —
        # every iteration builds a *link* forum under the link parent;
        # confirm whether the intent was to build a forum of each type.
        for forum_type, _ in Forum.TYPE_CHOICES:
            with pytest.raises(ValidationError):
                forum = build_link_forum(parent=self.top_level_link)
                forum.full_clean()

    def test_must_have_a_link_in_case_of_a_link_forum(self):
        # Run & check: a FORUM_LINK without a link URL fails validation.
        with pytest.raises(ValidationError):
            forum = Forum(parent=self.top_level_forum, name='sub_link_forum', type=Forum.FORUM_LINK)
            forum.full_clean()

    def test_saves_its_numbers_of_posts_and_topics(self):
        # Run & check: counters only include approved posts/topics.
        topic = create_topic(forum=self.top_level_forum, poster=self.u1)
        PostFactory.create(topic=topic, poster=self.u1)
        PostFactory.create(topic=topic, poster=self.u1)
        assert self.top_level_forum.direct_posts_count == topic.posts.filter(approved=True).count()
        assert self.top_level_forum.direct_topics_count == self.top_level_forum.topics.count()
        topic2 = create_topic(forum=self.top_level_forum, poster=self.u1, approved=False)
        PostFactory.create(topic=topic2, poster=self.u1, approved=False)
        assert self.top_level_forum.direct_posts_count == \
            topic.posts.filter(approved=True).count() + topic2.posts.filter(approved=True).count()
        assert self.top_level_forum.direct_topics_count == \
            self.top_level_forum.topics.filter(approved=True).count()

    def test_can_indicate_its_appartenance_to_a_forum_type(self):
        # Run & check
        assert self.top_level_cat.is_category
        assert self.top_level_forum.is_forum
        assert self.top_level_link.is_link

    def test_stores_its_last_post_datetime(self):
        # Setup
        sub_level_forum = create_forum(parent=self.top_level_forum)
        topic = create_topic(forum=sub_level_forum, poster=self.u1)
        PostFactory.create(topic=topic, poster=self.u1)
        # Run
        p2 = PostFactory.create(topic=topic, poster=self.u1)
        # Check: the forum tracks the creation time of its most recent post.
        sub_level_forum.refresh_from_db()
        assert sub_level_forum.last_post_on == p2.created

    def test_can_reset_last_post_datetime_if_all_topics_have_been_deleted(self):
        # Setup
        sub_level_forum = create_forum(parent=self.top_level_forum)
        topic = create_topic(forum=sub_level_forum, poster=self.u1)
        PostFactory.create(topic=topic, poster=self.u1)
        # Run
        topic.delete()
        # Check
        sub_level_forum.refresh_from_db()
        assert sub_level_forum.last_post_on is None

    def test_can_send_a_specific_signal_when_a_forum_is_moved(self):
        # Setup
        topic = create_topic(forum=self.top_level_forum, poster=self.u1)
        PostFactory.create(topic=topic, poster=self.u1)
        PostFactory.create(topic=topic, poster=self.u1)
        # Run & check: re-parenting the forum must fire forum_moved exactly once.
        with mock_signal_receiver(forum_moved) as receiver:
            self.top_level_forum.parent = self.top_level_cat
            self.top_level_forum.save()
            assert receiver.call_count == 1

    def test_get_or_create(self):
        # Smoke test for the default manager's get_or_create on Forum.
        forum, created = Forum.objects.get_or_create(name="Test Forum", type=0)
        assert created is True
        assert isinstance(forum, Forum)
        assert forum.name == "Test Forum"
|
{
"content_hash": "4dee766250c1d6024b34d1c2293adfc2",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 100,
"avg_line_length": 42.25423728813559,
"alnum_prop": 0.672282390693943,
"repo_name": "reinbach/django-machina",
"id": "5d6cafe1ede88f4077f73be26d39fc16c1e48fed",
"size": "5011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/forum/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "28438"
},
{
"name": "HTML",
"bytes": "174154"
},
{
"name": "JavaScript",
"bytes": "7785"
},
{
"name": "Makefile",
"bytes": "592"
},
{
"name": "Python",
"bytes": "714564"
}
],
"symlink_target": ""
}
|
def test_stickers(appmarket):
    """Every duck card on the home page must carry exactly one sticker.

    On a violation the offending duck's image URL is printed before the
    assertion fires, to make the failing card identifiable.
    """
    wd = appmarket.wd
    appmarket.navigation.open_home_page()
    sticker_xpath = ".//*[starts-with(@class,'sticker')]"
    ducks = wd.find_elements_by_css_selector("div.image-wrapper")
    for duck in ducks:
        # Fix: query the stickers once per card; the old code repeated the
        # same (slow) WebDriver lookup for the check and again for the assert.
        stickers = duck.find_elements_by_xpath(sticker_xpath)
        if len(stickers) != 1:
            print(duck.find_element_by_css_selector("img").get_attribute("src"))
        assert len(stickers) == 1
        # Equivalent CSS form: duck.find_elements_by_css_selector("[class^=sticker]")
|
{
"content_hash": "d35c56fabe54f2e1e8365518c9e750dc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 176,
"avg_line_length": 35.05263157894737,
"alnum_prop": 0.53003003003003,
"repo_name": "Dob3r/python_seleniumwebdriver",
"id": "c0be027d9c25581864877d5201490bad9fb15772",
"size": "681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/test_stickers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29296"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import os
import sys
import astropy
# sphinx-astropy provides the Sphinx configuration shared by astropy and its
# affiliated packages; this star-import injects names used unqualified below
# (extensions, intersphinx_mapping, exclude_patterns, rst_epilog,
# check_sphinx_version, ...).
try:
    from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
    # The docs cannot build without the shared configuration, so bail out.
    print('ERROR: the documentation requires the sphinx-astropy package to be installed')
    sys.exit(1)
# Matplotlib rcParams applied to every plot directive in the docs.
plot_rcparams = {
    'figure.figsize': (6, 6),
    'savefig.facecolor': 'none',
    'savefig.bbox': 'tight',
    'axes.labelsize': 'large',
    'figure.subplot.hspace': 0.5,
}
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
check_sphinx_version("1.2.1")
# The intersphinx_mapping in astropy_helpers.sphinx.conf refers to astropy for
# the benefit of affiliated packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy']
# add any custom intersphinx for astropy
intersphinx_mapping['pytest'] = ('https://pytest.readthedocs.io/en/stable/', None)
intersphinx_mapping['ipython'] = ('https://ipython.readthedocs.io/en/stable/', None)
intersphinx_mapping['pandas'] = ('https://pandas.pydata.org/pandas-docs/stable/', None)
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None)
intersphinx_mapping['packagetemplate'] = ('http://docs.astropy.org/projects/package-template/en/latest/', None)
intersphinx_mapping['h5py'] = ('http://docs.h5py.org/en/stable/', None)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('_pkgtemplate.rst')
exclude_patterns.append('**/*.inc.rst') # .inc.rst mean *include* files, don't have sphinx process them
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
    templates_path = []
templates_path.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
# The {0.__minimum_python_version__}-style fields are filled from the astropy
# module object passed to .format() below.
rst_epilog += """
.. |minimum_python_version| replace:: {0.__minimum_python_version__}
.. |minimum_numpy_version| replace:: {0.__minimum_numpy_version__}
.. Astropy
.. _`Astropy mailing list`: https://mail.python.org/mailman/listinfo/astropy
.. _`astropy-dev mailing list`: http://groups.google.com/group/astropy-dev
""".format(astropy)
# -- Project information ------------------------------------------------------
project = u'Astropy'
author = u'The Astropy Developers'
copyright = u'2011–{0}, '.format(datetime.utcnow().year) + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = astropy.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = astropy.__version__
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ['astropy.']
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
#html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
    'to_be_indexed': ['stable', 'latest']
}
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
                    author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
              [author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = 'https://github.com/astropy/astropy/issues/'
edit_on_github_branch = 'master'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
nitpick_ignore = []
# Each non-comment line of 'nitpick-exceptions' is "<role> <target>"
# (e.g. "py:class some.private.Class"); blank lines and '#' lines are skipped.
for line in open('nitpick-exceptions'):
    if line.strip() == "" or line.startswith("#"):
        continue
    dtype, target = line.split(None, 1)
    target = target.strip()
    nitpick_ignore.append((dtype, target))
# -- Options for the Sphinx gallery -------------------------------------------
# The gallery is optional: if sphinx-gallery is missing the docs still build,
# with a warning, instead of failing.
try:
    import sphinx_gallery
    extensions += ["sphinx_gallery.gen_gallery"]
    sphinx_gallery_conf = {
        'backreferences_dir': 'generated/modules', # path to store the module using example template
        'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_"
        'examples_dirs': '..{}examples'.format(os.sep), # path to the examples scripts
        'gallery_dirs': 'generated/examples', # path to save gallery generated examples
        'reference_url': {
            'astropy': None,
            'matplotlib': 'https://matplotlib.org/',
            'numpy': 'http://docs.scipy.org/doc/numpy/',
        },
        'abort_on_example_error': True
    }
    # Filter out backend-related warnings as described in
    # https://github.com/sphinx-gallery/sphinx-gallery/pull/564
    # NOTE(review): `warnings` is not imported in this file; presumably it is
    # provided by the sphinx_astropy star-import above — confirm.
    warnings.filterwarnings("ignore", category=UserWarning,
                            message='Matplotlib is currently using agg, which is a'
                            ' non-GUI backend, so cannot show the figure.')
except ImportError:
    def setup(app):
        # Emit the warning in a way that works both before and after Sphinx
        # removed `app.warn`.
        msg = ('The sphinx_gallery extension is not installed, so the '
               'gallery will not be built. You will probably see '
               'additional warnings about undefined references due '
               'to this.')
        try:
            app.warn(msg)
        except AttributeError:
            # Sphinx 1.6+
            from sphinx.util import logging
            logger = logging.getLogger(__name__)
            logger.warning(msg)
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = ['https://journals.aas.org/manuscript-preparation/',
                    'https://maia.usno.navy.mil/',
                    'https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer',
                    'https://aa.usno.navy.mil/publications/docs/Circular_179.php',
                    r'https://github\.com/astropy/astropy/(?:issues|pull)/\d+']
linkcheck_timeout = 180
linkcheck_anchors = False
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['robots.txt']
|
{
"content_hash": "12de2f9dabb838a99fcf9b5095e1eabc",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 111,
"avg_line_length": 39.06808510638298,
"alnum_prop": 0.6614747848818211,
"repo_name": "stargaser/astropy",
"id": "0914d286a3f76a32a350bd26b26d3d592969e375",
"size": "10669",
"binary": false,
"copies": "2",
"ref": "refs/heads/placeholder",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898387"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import sqlite3
import gevent
from copy import copy
from gevent.event import Event
from gevent.queue import Queue
from shemutils.logger import Logger
from shemutils.database_errors import InvalidInitializer, InvalidSize
# Static SQL type-name constants used when declaring table columns.
# (The original used a bare string statement as a pseudo-comment, which is a
# useless no-op expression; a real comment replaces it.)
INTEGER = "INTEGER"
TEXT = "TEXT"
NULL = "NULL"
class Controller(object):
    """
    Wraps a sqlite3 (connection, cursor) pair, executes SQL and queues any
    non-empty result sets for later retrieval via :meth:`get`.
    """
    def __init__(self, objects):
        """
        :param objects: sequence providing at least (connection handle, cursor)
        :raises ValueError: if fewer than two objects are supplied.
            (The original code did ``del self`` here — a no-op on the local
            name — and then crashed with IndexError on the next line.)
        """
        if len(objects) < 2:
            raise ValueError("Controller requires a (handle, cursor) pair")
        self.handle = objects[0]
        self.cursor = objects[1]
        self.logger = None   # optional Logger; attached by Database(verbose=True)
        self.queue = Queue() # FIFO of non-empty result sets

    def _log(self, s):
        # Debug-log only when a logger has been attached.
        if self.logger:
            self.logger.debug(s)
        return 0

    def _error(self, s):
        # Error-log only when a logger has been attached.
        if self.logger:
            self.logger.error(s)
        return 0

    def execute(self, sql):
        """
        Execute a SQL query generated by other methods of this module and
        queue its result set (if any).

        :param sql: string containing the sql query
        :return: None on success, -1 if sql is not a string
        :raises sqlite3.OperationalError: if the query fails
        """
        if type(sql) is not str:
            return -1
        try:
            self._log("Executing SQL: {0}".format(sql))
            self.cursor.execute(sql)   # execute query passed to function
            d = self.cursor.fetchall() # actual data returned by the query
            n = len(d)                 # number of elements returned by the query
            if n > 0:
                self._log("Adding query result with {0} elements to queue ...".format(n))
                self.queue.put(d)
            else:
                self._log("Speficied query does not returned any result.")
        except sqlite3.OperationalError as e:
            self._error("Error executing query: {0}".format(e))
            # Fix: re-raise the original exception; the old code raised the
            # bare class, discarding the error message and traceback.
            raise
        self._log("Executed SQL successfully\n")
        return

    def get(self):
        """
        Pop the oldest queued result set.

        :return: list of result rows, [] if the queue is empty, or -1 on a
            gevent loop error
        """
        try:
            if self.queue.empty() is False:
                return self.queue.get()
            else:
                return []
        except gevent.hub.LoopExit:
            self._error("Error retrieving data from queue")
            return -1
class Database(object):
    """Owns the sqlite3 connection for one database file and exposes it
    through a :class:`Controller`."""

    def __init__(self, db_name, verbose=False):
        """
        Open (creating if needed) the database file and build the Controller.

        :param db_name: database name; the '.db' extension is appended if missing
        :param verbose: when truthy, attach this Database's logger to the
            Controller so queries are logged.
            (Fix: the old test was ``verbose is not False``, which treated any
            non-False value — including None and "" — as verbose.)
        """
        self.db_filename = self._parse_db_name(db_name)
        self.open = Event()  # set once the database file is usable
        self.logger = Logger("DATABASE", logfile="%s.log" % db_name)
        # Open the database and hand connection + cursor to the Controller.
        handle, cursor = self._open()
        self.controller = Controller((handle, cursor))
        if verbose:
            self.controller.logger = self.logger

    def _open(self):
        """Connect to the database file; return (connection, cursor).

        On failure, returns (None, None) — the old code returned a bare None,
        which made the caller's tuple-unpack raise TypeError.
        """
        try:
            handle = sqlite3.connect(self.db_filename)
            cursor = handle.cursor()
            self.open.set()
            return handle, cursor
        except Exception as e:
            self.logger.error("Error opening database: {0}".format(e))
            return None, None

    @staticmethod
    def _parse_db_name(db_filename, extension=".db"):
        """Append `extension` to the filename unless it is already present.

        Fix: the old slice ``db_filename[k-3:]`` hardcoded the 3-character
        length of '.db', silently breaking any other `extension` argument.
        """
        if not db_filename.endswith(extension):
            db_filename += extension
        return db_filename

    def save(self):
        """Commit any pending transaction."""
        return self.controller.handle.commit()

    def close(self):
        """Close the connection if one was opened."""
        return self.controller.handle.close() if self.controller.handle is not None else None
class Table(object):
    """
    Builds SQL statement strings for a single table.

    Describe the table with column-name -> column-type pairs:
        t1 = Table("TableName", {"Name": TEXT, "Age": INTEGER})

    SECURITY NOTE: values are interpolated directly into the SQL text, so the
    generated statements are NOT safe against SQL injection. Only use them
    with trusted input; sqlite3 parameter binding ('?') would be the safe fix.
    """
    def __init__(self, name, columns):
        self.name = str(name)
        # Fix: compare with == instead of `is` — identity on small ints is a
        # CPython implementation detail.
        self.columns = columns if self._validate(columns) == 0 else None
        self.num_col = len(self.columns) if self.columns is not None else None

    @staticmethod
    def _validate(columns):
        """Return 0 if columns is a dict; raise InvalidInitializer otherwise."""
        if type(columns) != dict:
            raise InvalidInitializer
        return 0

    def _colstr(self):
        """Render ", "-joined "name TYPE" pairs, closed with ')'.

        Returns -1 when no columns are set.
        """
        if not self.columns:
            return -1
        output = ""
        for column in self.columns.keys():
            # Fix: emit the column *name* and its declared type. The old code
            # indexed into the type string (p[0], p[1]), producing e.g. "T E"
            # for TEXT instead of "Name TEXT".
            output += "{0} {1}, ".format(column, self.columns[column])
        return output[:-2] + ")"

    def create(self):
        """
        :return: string containing SQL to construct the table
        """
        return "CREATE TABLE IF NOT EXISTS {0} (id INTEGER PRIMARY KEY AUTOINCREMENT, {1}".format(self.name,
                                                                                                  self._colstr())

    def remove_row(self, c, k):
        """
        :param c: string containing column name
        :param k: string to search through table (exact match)
        :return: string containing SQL query to do the desired operation
        """
        return "DELETE FROM {0} WHERE {1} = '{2}'".format(self.name, c, k)

    def remove_rows(self, c, k):
        """
        :param c: string containing column name
        :param k: substring to search through table (LIKE match)
        :return: string containing SQL query to do the desired operation
        """
        return "DELETE FROM {0} WHERE {1} LIKE '%{2}%'".format(self.name, c, k)

    def update_row(self, c2, k, c1, v):
        """
        :param c1: string containing column to be updated
        :param v: string containing new value for the row
        :param c2: string containing column name for the query condition
        :param k: string containing keyword value for the query condition
        :return: string containing SQL query to do the desired operation

        Fix: the old format call interpolated k as the new value and v as the
        condition keyword, contradicting this documented contract.
        """
        return "UPDATE {0} SET {1} = '{2}' WHERE {3} LIKE '%{4}%'".format(self.name, c1, v, c2, k)

    def search(self, t, k, c=None):
        """
        Generate a sql query to retrieve information from a database.

        :param t: String containing column name for the query condition
        :param k: String containing keyword value for the query condition
        :param c: None (all columns) or list of column names to retrieve
        :return: string containing SQL query to do the desired operation
        """
        if c is None:  # all columns
            cols = "*"
        else:  # if specified any column list
            if type(c) is not list:
                raise TypeError("Variable 'c' must be 'list' type.")
            # Fix: build the column list in a separate variable; the old code
            # overwrote the keyword `k` with a copy of the column list, so the
            # WHERE clause matched against the stringified list.
            cols = ",".join("{0}".format(p) for p in c)
        return "SELECT {0} FROM {1} WHERE {2} LIKE '%{3}%'".format(cols, self.name, t, k)

    def insert_data(self, data):
        """
        :param data: list with one value per non-id column of this table
        :return: string containing SQL query to do the desired operation
        """
        if type(data) != list:
            raise TypeError
        return "INSERT INTO {0} VALUES (NULL, {1})".format(self.name, self._format_data(data))

    def _format_data(self, data):
        """Quote and comma-join `data`; raise InvalidSize on length mismatch."""
        output = ""
        if type(data) != list:
            raise TypeError
        if len(data) != self.num_col:
            raise InvalidSize
        for d in data:
            output += "'{0}', ".format(d)
        return output[:-2]
|
{
"content_hash": "eee3e5479398ef60b49be1cd836a1b0d",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 113,
"avg_line_length": 34.74885844748859,
"alnum_prop": 0.5587385019710907,
"repo_name": "0x00-0x00/shemutils",
"id": "aa17695bea3532a798fc692418fe2d9589a64bbd",
"size": "7610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30346"
},
{
"name": "Shell",
"bytes": "836"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include
from django.conf.urls import url
from rest_framework import routers
from .viewsets import LessonViewset
# Router exposing the Lesson API endpoints under the "lesson" prefix.
router = routers.SimpleRouter()
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+; this file
# targets the older keyword — confirm the pinned DRF version before updating.
router.register(r"lesson", LessonViewset, base_name="lesson")
urlpatterns = [url(r"^", include(router.urls))]
|
{
"content_hash": "74a826dc04e3203903a41b995645deae",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 61,
"avg_line_length": 28.6,
"alnum_prop": 0.7832167832167832,
"repo_name": "mrpau/kolibri",
"id": "1429a7f6de132d40dc9fa2a3e3f8e342b9d630b0",
"size": "286",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "kolibri/core/lessons/api_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "1716299"
},
{
"name": "Dockerfile",
"bytes": "7303"
},
{
"name": "Gherkin",
"bytes": "278074"
},
{
"name": "HTML",
"bytes": "26440"
},
{
"name": "JavaScript",
"bytes": "1537923"
},
{
"name": "Makefile",
"bytes": "13308"
},
{
"name": "Python",
"bytes": "2298911"
},
{
"name": "Shell",
"bytes": "11777"
},
{
"name": "Vue",
"bytes": "1558714"
}
],
"symlink_target": ""
}
|
"""
Functions for examining different file types.
"""
import struct
import binascii
def assert_eq(a, b):
    """Raise AssertionError unless ``a == b``, showing both values.

    Uses an explicit raise instead of the `assert` statement so the check
    still runs under ``python -O`` (asserts are stripped there), while keeping
    the same AssertionError type and message that callers in this module
    catch and convert to TypeError.
    """
    if a != b:
        raise AssertionError("'%s' (%r) != '%s' (%r)" % (a, a, b, b))
class FlashBootImageFile(object):
    """
    FlashBootImage (.fbi) file.
    Used for firmware loaded from flash into main memory by the MiSoC/LiteX
    BIOS.
    Generate with something like;
      mkmscimg -f firmware.bin -o firmware.fbi
      python3 -m litex.soc.tools.mkmscimg -f firmware.bin -o firmware.fbi
    Consists of;
     * File Length - 32bits
     * File CRC - 32bits
     * File Data - bytes
    """
    # Big-endian header: payload length then CRC32 of the payload.
    header = struct.Struct(
        ">"  # big endian
        "I"  # flength
        "I"  # fcrc
        )

    def __init__(self, filename):
        """Parse and validate `filename`; raises TypeError on any mismatch
        (wrong extension, trailing bytes, or CRC failure)."""
        try:
            assert filename.endswith('.fbi'), "Filename should end in .fbi"
            f = open(filename, 'rb')
            # Read the header
            data = f.read(self.header.size)
            flength, fcrc = self.header.unpack_from(data)
            # The payload must be exactly flength bytes with nothing after it.
            fdata = f.read(flength)
            extradata = f.read()
            assert len(extradata) == 0, "Extra data found ({} bytes)".format(
                len(extradata))
            # Recompute the CRC32 and require it to match the stored one.
            ccrc = binascii.crc32(fdata)
            assert_eq(fcrc, ccrc)
            self.len = flength  # payload length from the header
            self.crc = ccrc     # validated CRC32 of the payload
        except AssertionError as e:
            # All validation failures are surfaced uniformly as TypeError.
            raise TypeError(e)

    def __str__(self):
        return "{}(len={}, crc=0x{:x})".format(
            self.__class__.__name__, self.len, self.crc)
class XilinxBitFile(object):
    """
    Xilinx .bit bitstream container; parses the metadata fields only.

    This page describes the format
    http://www.fpga-faq.com/FAQ_Pages/0026_Tell_me_about_bit_files.htm
    Field 1
    2 bytes length 0x0009 (big endian)
    9 bytes 0f f0 0f f0 0f f0 0f f0 00
    2 bytes 00 01
    Field 3
    1 byte key 0x61 (The letter "a")
    2 bytes length 0x000a (value depends on file name length)
    10 bytes string design name "xform.ncd" (including a trailing 0x00)
    Field 4
    1 byte key 0x62 (The letter "b")
    2 bytes length 0x000c (value depends on part name length)
    12 bytes string part name "v1000efg860" (including a trailing 0x00)
    Field 4
    1 byte key 0x63 (The letter "c")
    2 bytes length 0x000b
    11 bytes string date "2001/08/10" (including a trailing 0x00)
    Field 5
    1 byte key 0x64 (The letter "d")
    2 bytes length 0x0009
    9 bytes string time "06:55:04" (including a trailing 0x00)
    Field 6
    1 byte key 0x65 (The letter "e")
    4 bytes length 0x000c9090 (value depends on device type,
    and maybe design details)
    """
    # Fixed file preamble: a length-9 sync pattern then 0x0001.
    header = struct.Struct(
        ">"   # big endian
        "H"   # h1, beshort == 0x0009
        "9s"  # 0f f0 0f f0 0f f0 0f f0 00
        "2s"  # h4, null byte
        )
    # Per-field prefix: one ASCII key letter + big-endian string length.
    sfmt = struct.Struct(">ch")

    @classmethod
    def unpack_key(cls, f):
        """Read one (key letter, NUL-terminated ASCII string) field from f."""
        d = f.read(cls.sfmt.size)
        key, slen = cls.sfmt.unpack(d)
        # slen counts the trailing NUL, so read slen-1 chars then the NUL.
        s = f.read(slen - 1)
        null = f.read(1)
        assert_eq(null, b'\x00')
        return key.decode('ascii'), s.decode('ascii')

    def __init__(self, filename):
        """Parse the metadata fields; raises TypeError on a malformed file."""
        try:
            assert filename.endswith('.bit'), "Filename should end in .bit"
            f = open(filename, 'rb')
            # Read the header
            data = f.read(self.header.size)
            (h1, h2, h3) = self.header.unpack_from(data)
            assert_eq(h1, 0x0009)
            assert_eq(h2, b'\x0f\xf0\x0f\xf0\x0f\xf0\x0f\xf0\x00')
            assert_eq(h3, b'\x00\x01')
            self.ncdname = None  # design (.ncd) name from field 'a'
            self.part = None     # FPGA part name from field 'b'
            self.date = None     # build date + time from fields 'c' and 'd'
            # Walk the key/value fields; 'd' (time) or 'e' (data) ends the loop.
            while True:
                key, value = self.unpack_key(f)
                if key == 'a':
                    self.ncdname = value
                elif key == 'b':  # Part type
                    self.part = value
                elif key == 'c':  # Build date
                    self.date = value
                elif key == 'd':  # Build time
                    self.date += " " + value
                    break
                elif key == 'e':  # ????
                    break
            # All three metadata fields must have been present.
            assert self.ncdname
            assert self.part
            assert self.date
        except AssertionError as e:
            # All validation failures are surfaced uniformly as TypeError.
            raise TypeError(e)

    def __str__(self):
        return "{}(ncdname={!r}, part={!r}, date={!r})".format(
            self.__class__.__name__, self.ncdname, self.part, self.date)
class XilinxBinFile(object):
    """Minimal validator for Xilinx .bin bitstream files.

    Only checks the filename extension and the fixed preamble bytes.
    """

    # Pad words followed by the bitstream sync pattern.
    HEADER = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xaa\x99Uf0\xa1\x00\x07'  # noqa

    def __init__(self, filename):
        """:raises TypeError: wrong extension or preamble mismatch"""
        if not filename.endswith('.bin'):
            raise TypeError("Filename should end in .bin")
        # 'with' closes the handle promptly (the original left it to the GC).
        with open(filename, 'rb') as f:
            hdr = f.read(len(self.HEADER))
        if hdr != self.HEADER:
            raise TypeError("File doesn't start with required header.")
if __name__ == "__main__":
    # Ad-hoc smoke test: identify and summarise the file given as argv[1]
    # by its extension.
    import sys
    fname = sys.argv[1]
    if fname.endswith('.bin'):
        # XilinxBinFile defines no __str__, so this prints the default repr.
        print(XilinxBinFile(fname))
    elif fname.endswith('.bit'):
        print(XilinxBitFile(fname))
    elif fname.endswith('.fbi'):
        print(FlashBootImageFile(fname))
    else:
        # Unknown extension: signal failure to the shell.
        sys.exit(1)
|
{
"content_hash": "f3984d7b78d8e6d8992c63fc0460f81e",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 111,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.5192883895131086,
"repo_name": "timvideos/HDMI2USB-mode-switch",
"id": "b3e44502df274b46e55f11e4cb0a13b92b3afc26",
"size": "5398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdmi2usb/modeswitch/files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1808"
},
{
"name": "Makefile",
"bytes": "3452"
},
{
"name": "Python",
"bytes": "150202"
},
{
"name": "Shell",
"bytes": "1204"
}
],
"symlink_target": ""
}
|
"""This example pauses a line item.
Line items must be paused before they can be updated. To determine which line
items exist, run get_all_line_items.py
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the line item to pause.
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
def main(client):
  """Pause the line item with ID LINE_ITEM_ID via the Ad Manager API.

  Pages through all line items matching the statement, then issues a single
  PauseLineItems action covering the whole result set.
  """
  # Initialize appropriate service.
  line_item_service = client.GetService('LineItemService', version='v201808')

  # Create a statement to select the line item.
  # Change this to operate on more than one Line Item.
  statement = (ad_manager.StatementBuilder(version='v201808')
               .Where('id = :id')
               .OrderBy('id', ascending=True)
               .WithBindVariable('id', LINE_ITEM_ID))

  result_set_size = 0
  should_continue = True

  # Iterate over paged results from the statement.
  while should_continue:
    page = line_item_service.getLineItemsByStatement(statement.ToStatement())
    if 'results' in page and len(page['results']):
      result_set_size = page['totalResultSetSize']
      # Iterate over individual results in the page.
      for line_item in page['results']:
        # print() call form runs under both Python 2 and Python 3;
        # the original used Python-2-only print statements.
        print('Pausing line item with ID %d' % line_item['id'])
    # Update statement for next page.
    statement.offset += statement.limit
    should_continue = statement.offset < result_set_size

  print('Pausing %d line item(s)' % result_set_size)

  if result_set_size > 0:
    # Reset paging so the action applies to every matching line item at once.
    statement.offset = None
    statement.limit = None

    # Perform Pause action on all Line Items that match the statement.
    update_result = line_item_service.performLineItemAction(
        {'xsi_type': 'PauseLineItems'}, statement.ToStatement())
    if update_result and update_result['numChanges'] > 0:
      print('Updated %d line item(s)' % update_result['numChanges'])
    else:
      print('No line items were paused')
if __name__ == '__main__':
  # Initialize client object.
  # NOTE(review): LoadFromStorage reads stored credentials -- presumably a
  # googleads.yaml file; confirm the expected location in your environment.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client)
|
{
"content_hash": "6c307ae1d45a76d80013ab663252efe2",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 34.827586206896555,
"alnum_prop": 0.6905940594059405,
"repo_name": "Aloomaio/googleads-python-lib",
"id": "5a5a3873de185a9b490d7222c732724710e89890",
"size": "2642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ad_manager/v201808/line_item_service/pause_line_items.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "491015"
}
],
"symlink_target": ""
}
|
from dal.oracle.ae2 import db
from dal.oracle.common import execute_insert, execute_select
__author__ = 'Ahmed G. Ali'
def insert_publication(article):
    """Insert *article* (a publication metadata dict) into PUBLICATION.

    Returns the newly allocated id as a string.

    SECURITY NOTE(review): field values are spliced into the SQL text with
    str.format and only single quotes are stripped from a few fields, so
    this statement is injectable -- switch to bind variables if
    execute_insert supports them. TODO confirm.
    """
    # NOTE: 'id' shadows the builtin; new key comes from SEQ_PUBLICATION * 50.
    id = execute_select('select SEQ_PUBLICATION.nextval*50 as id from dual', db)[0].id
    # NOTE(review): both the ACC and PUBMEDID columns receive the same
    # {PUBMED} value below -- verify ACC is really meant to hold the PubMed id.
    sql = """INSERT INTO PUBLICATION
                (
                ID,
                ACC,
                DOI,
                AUTHORLIST,
                TITLE,
                EDITOR,
                ISSUE,
                PAGES,
                PUBLICATION,
                PUBLISHER,
                URI,
                VOLUME,
                PUBMEDID,
                YEAR
                )
                VALUES(
                '{id}',
                '{PUBMED}',
                '{DOI}',
                '{AUTHORLIST}',
                '{TITLE}',
                '{EDITOR}',
                '{ISSUE}',
                '{PAGES}',
                '{PUBLICATION}',
                '{PUBLISHER}',
                '{URI}',
                '{VOLUME}',
                '{PUBMED}',
                '{YEAR}'
                )""".format(
        id=str(id),
        PUBMED=article.get('id', ''),
        DOI=article.get('doi', None),
        AUTHORLIST=article.get('authorString', '').replace("'", '').encode('utf8'),
        TITLE=article.get('title', '').replace("'", '').encode('utf8'),
        EDITOR=article.get('editor', '').replace("'", '').encode('utf8'),
        ISSUE=article.get('issue', None),
        PAGES=article.get('pageInfo', None),
        PUBLICATION=article.get('journalTitle', '').encode('utf8'),
        PUBLISHER=article.get('publisher', '').encode('utf8'),
        URI=article.get('uri', '').encode('utf8'),
        VOLUME=article.get('journalVolume', ''),
        YEAR=article.get('pubYear', ''))
    execute_insert(sql, db)
    return str(id)
def retrieve_pub(acc, pubmed):
    """Fetch publication rows matching either PubMed id *pubmed* or accession *acc*."""
    query = """SELECT * FROM PUBLICATION WHERE PUBMEDID ='{pubmed}' OR ACC='{acc}'"""
    return execute_select(query.format(acc=str(acc), pubmed=str(pubmed)), db)
def retrieve_publication_by_acc(acc):
    """Fetch all publication rows for accession *acc*."""
    query = """SELECT * from PUBLICATION WHERE ACC = '%s'""" % str(acc)
    return execute_select(query, db)
def delete_publication_by_id(pub_id):
    """Delete the publication row with primary key *pub_id*."""
    statement = """DELETE FROM PUBLICATION WHERE ID = %s""" % str(pub_id)
    execute_insert(statement, db)
if __name__ == '__main__':
    # Ad-hoc manual check with hard-coded ids (Python 2 print statement).
    print retrieve_pub(acc=26273587, pubmed=20976176)
|
{
"content_hash": "efc0abf1f2e23a25caf03d4613d72b5a",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 125,
"avg_line_length": 36.05128205128205,
"alnum_prop": 0.44985775248933146,
"repo_name": "arrayexpress/ae_auto",
"id": "caed7e317d9c09eaa6b8fe51e71ae7c0992ee134",
"size": "2812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dal/oracle/ae2/publication.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "CSS",
"bytes": "596162"
},
{
"name": "HTML",
"bytes": "62396"
},
{
"name": "JavaScript",
"bytes": "605485"
},
{
"name": "Makefile",
"bytes": "1574"
},
{
"name": "PowerShell",
"bytes": "939"
},
{
"name": "Python",
"bytes": "2169498"
},
{
"name": "Ruby",
"bytes": "1030"
}
],
"symlink_target": ""
}
|
import os
import sys
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
def showURL( url ) :
	"""Open *url* with the platform's default handler.

	On macOS the system `open` command is used; elsewhere Qt's
	QDesktopServices performs the launch.
	"""
	if sys.platform == "darwin" :
		# Pass the URL as a separate argv element instead of interpolating it
		# into a shell string: the original os.system() call broke (and was
		# shell-injectable) for URLs containing quotes or metacharacters.
		import subprocess
		subprocess.call( [ "open", url ] )
	else :
		QtGui.QDesktopServices.openUrl( QtCore.QUrl( url, QtCore.QUrl.TolerantMode ) )
|
{
"content_hash": "036644eb82d9cad2acda6e6939d3d4db",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 21.285714285714285,
"alnum_prop": 0.6677852348993288,
"repo_name": "chippey/gaffer",
"id": "951287d2f41b43fd68ae8f0ac79cf50b799d2e63",
"size": "2158",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/GafferUI/ShowURL.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2258"
},
{
"name": "C++",
"bytes": "5420141"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Objective-C",
"bytes": "2228"
},
{
"name": "Python",
"bytes": "5348174"
},
{
"name": "Shell",
"bytes": "8370"
},
{
"name": "Slash",
"bytes": "41159"
}
],
"symlink_target": ""
}
|
from admin_tools.dashboard import modules
from linkcheck.views import get_status_message
try:
from django.urls import reverse
except ImportError: # Django < 1.10
from django.core.urlresolvers import reverse
# Admin-tools dashboard widget linking to the linkcheck report with the
# various status filters. pre_content is the get_status_message callable.
# NOTE(review): reverse() executes at import time, so the URLconf must be
# loaded before this module is imported -- confirm.
linkcheck_dashboard_module = modules.LinkList(
    title="Linkchecker",
    pre_content=get_status_message,
    children=(
        {'title': 'Valid links', 'url': reverse('linkcheck_report') + '?filters=show_valid'},
        {'title': 'Broken links', 'url': reverse('linkcheck_report')},
        {'title': 'Untested links', 'url': reverse('linkcheck_report') + '?filters=show_unchecked'},
        {'title': 'Ignored links', 'url': reverse('linkcheck_report') + '?filters=ignored'},
    )
)
|
{
"content_hash": "748cf87f7fa4ee5cc5225548657c9911",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 100,
"avg_line_length": 37.26315789473684,
"alnum_prop": 0.672316384180791,
"repo_name": "claudep/django-linkcheck",
"id": "4a7afcd7638bdfe53c003e377ff4f6cf301479e1",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linkcheck/dashboard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "11357"
},
{
"name": "Python",
"bytes": "81297"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
# URL routes for the book-upload workflow, keyed by upload id where present.
# NOTE(review): django.conf.urls.patterns() with dotted-string view paths is
# deprecated and removed in Django 1.10+ -- migrate to a plain list of url()
# entries with imported view callables before upgrading. TODO confirm the
# Django version this project targets.
urlpatterns = patterns(
    '',
    url(r'^uploads$', 'books.views.uploads'),
    url(r'^uploads/new$', 'books.views.new_upload'),
    url(r'^uploads/(\d+)/edit$', 'books.views.edit_upload'),
    url(r'^uploads/(\d+)/cover$', 'books.views.upload_cover'),
    url(r'^uploads/(\d+)/cover/upload$', 'books.views.upload_cover_upload'),
    url(r'^uploads/(\d+)/generate_torrents$', 'books.views.upload_generate_torrents'),
    url(r'^uploads/(\d+)/what/upload', 'books.views.upload_to_what'),
    url(r'^uploads/(\d+)/what/skip', 'books.views.skip_what'),
    url(r'^uploads/(\d+)/bibliotik/skip', 'books.views.skip_bibliotik'),
)
|
{
"content_hash": "697e9b5e168e0dddce783381b6c0b6c2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 47.92857142857143,
"alnum_prop": 0.639344262295082,
"repo_name": "davols/WhatManager2",
"id": "8cf78673e703fa4dcc49124318ccd095e5630267",
"size": "671",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "books/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202604"
},
{
"name": "JavaScript",
"bytes": "711008"
},
{
"name": "Python",
"bytes": "310036"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
}
|
from django.db import models
class ConnectionState(models.TextChoices):
    """Administrative state choices for a connection (stored value, label)."""
    ENABLED = "enabled", "Enabled"
    DISABLED = "disabled", "Disabled"
|
{
"content_hash": "20c4392181ea4d33dbbe9ae0659dba50",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 42,
"avg_line_length": 24.5,
"alnum_prop": 0.7210884353741497,
"repo_name": "respawner/peering-manager",
"id": "65e3e6b342701d252586618edddc9cc848b4b18b",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "net/enums.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "147540"
},
{
"name": "HTML",
"bytes": "199981"
},
{
"name": "JavaScript",
"bytes": "691695"
},
{
"name": "Python",
"bytes": "1075126"
},
{
"name": "Shell",
"bytes": "3446"
}
],
"symlink_target": ""
}
|
from ..AbstractTree import AbstractTree
from .Node import Node
__author__ = "Luka Avbreht"
class AvlTree(AbstractTree):
    """
    Class that implements an AVL tree on top of the abstract tree class.

    Fixes over the original version:
    * ``assert ValueError(...)`` never raised (an exception instance is
      truthy) -- those sites now actually ``raise``. Previously a ``remove``
      of a missing value fell through and deleted an unrelated leaf.
    * ``remove`` on an empty tree raised AttributeError; it now raises
      ValueError like the not-found case.
    """

    def __init__(self, data=None):
        """Build an empty tree, optionally inserting every item of *data*."""
        self.root = None
        if data is not None:
            for i in data:
                self.insert(i)
        super().__init__()

    def height(self):
        """
        Depth of tree (max depth); 0 for an empty tree.
        """
        if self.root:
            return self.root.depth
        else:
            return 0

    def recompute_heights(self, start_from_node):
        """Walk up from *start_from_node*, refreshing cached depths until stable."""
        changed = True
        node = start_from_node
        while node and changed:
            old_height = node.depth
            node.depth = node._depth()
            changed = node.depth != old_height
            node = node.parent

    def left_most(self, root_node):
        """returns the left most node in tree with root root_node """
        node = root_node
        while node.left is not None:
            node = node.left
        return node

    def insert(self, T):
        """Insert value *T*; equal values overwrite the stored value in place."""
        # So we insert into empty tree
        adding_to = self.root
        if adding_to is None:
            self.root = Node(T)
            self.root.depth = 1
            return
        else:
            # Standard BST descent to the insertion point.
            while True:
                if T == adding_to.value:
                    adding_to.value = T
                    break
                if T < adding_to.value:
                    if adding_to.left is not None:
                        adding_to = adding_to.left
                    else:
                        adding_to.left = Node(T, parent=adding_to)
                        if adding_to.depth == 0:
                            adding_to.depth = 1
                        adding_to = adding_to.left
                        break
                elif T > adding_to.value:
                    if adding_to.right is not None:
                        adding_to = adding_to.right
                    else:
                        adding_to.right = Node(T, parent=adding_to)
                        if adding_to.depth == 0:
                            adding_to.depth = 1
                        adding_to = adding_to.right
                        break
        # Walk back to the root, refreshing depths and rebalancing.
        while adding_to.parent is not None:
            adding_to._depth()
            adding_to = adding_to.parent
            self.rebalance(adding_to)
        adding_to._depth()

    def remove(self, T):
        """
        Removes item T from the AVL tree.

        :raise ValueError: if T is not present in the tree
        """
        if self.root is None:
            # Empty tree: previously this crashed with AttributeError.
            raise ValueError("no such element with value {0}".format(T))
        # Locate the node holding T.
        subroot = self.root
        i = 0
        while i < self.root.depth+1:
            if T == subroot.value:
                break
            elif T < subroot.value:
                if subroot.left is not None:
                    subroot = subroot.left
                else:
                    # Was 'assert ValueError(...)' which never fired, letting
                    # the code below delete this unrelated leaf.
                    raise ValueError("no such element with value {0}".format(T))
            else:
                if subroot.right is not None:
                    subroot = subroot.right
                else:
                    raise ValueError("no such element with value {0}".format(T))
        rotatefrom = None
        parentof = subroot.parent
        # camefrom: -1 left child, 1 right child, 0 the node is the root.
        if parentof is None:
            camefrom = 0
        elif parentof.right == subroot:
            camefrom = 1
        else:
            camefrom = -1
        if subroot.left is None:
            if subroot.right is None:
                # Leaf node: simply detach it from its parent.
                if camefrom == 1:
                    parentof.right = None
                    self.recompute_heights(parentof)
                    rotatefrom = parentof
                elif camefrom == -1:
                    parentof.left = None
                    self.recompute_heights(parentof)
                    rotatefrom = parentof
                elif camefrom == 0:
                    self.root = None
                else:
                    # Unreachable defensive branch (camefrom is -1/0/1).
                    raise ValueError("od nikjer nismo prsli....")
            else:
                # Only a right child: splice it into the parent link.
                if camefrom == 1:
                    parentof.right = subroot.right
                    subroot.right.parent = parentof
                    self.recompute_heights(parentof.right)
                    rotatefrom = parentof.right
                elif camefrom == -1:
                    parentof.left = subroot.right
                    subroot.right.parent = parentof
                    self.recompute_heights(parentof.left)
                    rotatefrom = parentof.left
                elif camefrom == 0:
                    self.root = subroot.right
                    self.root.parent = None
                    self.recompute_heights(self.root)
                    rotatefrom = self.root
                else:
                    raise ValueError("od nikjer nismo prsli....")
        else:
            # left is not None
            if subroot.right is None:
                # Only a left child: splice it into the parent link.
                # NOTE(review): unlike the mirror branch above, rotatefrom is
                # not set here, so no rebalancing happens afterwards -- confirm
                # whether that is intended.
                if camefrom == 0:
                    self.root = subroot.left
                    self.root.parent = None
                    self.recompute_heights(self.root)
                elif camefrom == 1:
                    parentof.right = subroot.left
                    subroot.left.parent = parentof
                    self.recompute_heights(parentof.right)
                elif camefrom == -1:
                    parentof.left = subroot.left
                    subroot.left.parent = parentof
                    self.recompute_heights(parentof.left)
                else:
                    raise ValueError("od nikjer nismo prsli....")
            else:
                # Two children: copy the in-order successor's value into this
                # node, then unlink the successor (left-most of right subtree).
                najbollev = self.left_most(subroot.right)
                if camefrom == 0:
                    self.root.value = najbollev.value
                elif camefrom == 1 or camefrom == -1:
                    subroot.value = najbollev.value
                else:
                    raise ValueError("od nikjer nismo prsli....")
                if najbollev.parent.left == najbollev:
                    rotatefrom = najbollev.parent
                    if najbollev.right:
                        najbollev.parent.left = najbollev.right
                        najbollev.right.parent = najbollev.parent
                    else:
                        najbollev.parent.left = None
                else:
                    rotatefrom = najbollev.parent
                    if najbollev.right:
                        najbollev.parent.right = najbollev.right
                        najbollev.right.parent = najbollev.parent
                    else:
                        najbollev.parent.right = None
        # Rebalance on the way back to the root.
        if rotatefrom is not None:
            while rotatefrom.parent is not None:
                rotatefrom = rotatefrom.parent
                rotatefrom._depth()
                self.rebalance(rotatefrom)
            self.rebalance(rotatefrom)
            rotatefrom._depth()

    def search(self, T):
        """
        Returns True if T is an item of the AVL tree.
        """
        if self.root is None:
            return False
        subroot = self.root
        i = 0
        # The path length is bounded by the tree depth.
        while i < self.root.depth+1:
            if T == subroot.value:
                return True
            elif T < subroot.value:
                if subroot.left is not None:
                    subroot = subroot.left
                else:
                    return False
            else:
                if subroot.right is not None:
                    subroot = subroot.right
                else:
                    return False
            i += 1
        return False

    def rebalance(self, RebalanceNode):
        """
        Rebalances the tree with the root in RebalanceNode, it calls the sub
        method depending on type of rebalancing required.
        """
        if RebalanceNode.balance() < -1:  # i.e. the balance is -2
            # left heavy
            if RebalanceNode.left.balance() < 1:
                # it is not right heavy
                self.RRRotation(RebalanceNode)
            else:
                # it is left heavy
                self.LRRotation(RebalanceNode)
        elif RebalanceNode.balance() > 1:  # i.e. the balance is 2
            if RebalanceNode.right.balance() > -1:  # its right child is 0 or 1
                self.LLRotation(RebalanceNode)
            else:
                self.RLRotation(RebalanceNode)

    def LLRotation(self, A):
        """
        Does the Left-Left rebalancing of the subtree with root node A.
        """
        parenttt = A.parent
        B = A.right
        C = B.right
        assert (A is not None and B is not None and C is not None)
        A.right = B.left
        if A.right:
            A.right.parent = A
        B.left = A
        A.parent = B
        if parenttt is None:
            # We are at the root
            self.root = B
            self.root.parent = None
        else:
            if parenttt.left == A:
                parenttt.left = B
            else:
                parenttt.right = B
            B.parent = parenttt
        self.recompute_heights(A)
        self.recompute_heights(B.parent)

    def LRRotation(self, A):
        """
        Does the Left-Right rebalancing of the subtree with root node A.
        """
        parenttt = A.parent
        B = A.left
        C = B.right
        assert (A is not None and B is not None and C is not None)
        A.left = C.right
        if A.left:
            A.left.parent = A
        B.right = C.left
        if B.right:
            B.right.parent = B
        C.left = B
        C.right = A
        A.parent = C
        B.parent = C
        if parenttt is None:
            self.root = C
        else:
            if parenttt.left == A:
                parenttt.left = C
            else:
                parenttt.right = C
            C.parent = parenttt
        self.recompute_heights(A)
        self.recompute_heights(B)

    def RLRotation(self, A):
        """
        Does the Right-Left rebalancing of the subtree with root node A.
        """
        parenttt = A.parent
        B = A.right
        C = B.left
        assert (A is not None and B is not None and C is not None)
        A.right = C.left
        if A.right:
            A.right.parent = A
        B.left = C.right
        if B.left:
            B.left.parent = B
        C.right = B
        C.left = A
        A.parent = C
        B.parent = C
        if parenttt is None:
            self.root = C
        else:
            if parenttt.left == A:
                parenttt.left = C
            else:
                parenttt.right = C
            C.parent = parenttt
        self.recompute_heights(A)
        self.recompute_heights(B)

    def RRRotation(self, A):
        """
        Does the Right-Right rebalancing of the subtree with root node A.
        """
        parenttt = A.parent
        B = A.left
        C = B.left
        assert (A is not None and B is not None and C is not None)
        A.left = B.right
        if A.left:
            A.left.parent = A
        B.right = A
        A.parent = B
        if parenttt is None:
            # We are at the root
            self.root = B
            self.root.parent = None
        else:
            if parenttt.right == A:
                parenttt.right = B
            else:
                parenttt.left = B
            B.parent = parenttt
        self.recompute_heights(A)
        self.recompute_heights(B.parent)

    def __str__(self):
        if self.root is None:
            return "None"
        return str(self.root)
|
{
"content_hash": "282b5a28c32447d90046aefa73014772",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 125,
"avg_line_length": 33.24418604651163,
"alnum_prop": 0.4681706890521161,
"repo_name": "jO-Osko/PSA2",
"id": "1f5c2dc0124d5a6421f50de24c34cb7fabc82aa3",
"size": "11436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "naloge/dn1/tree/LukaAvbreht/AvlTree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "32"
},
{
"name": "Python",
"bytes": "118929"
},
{
"name": "Shell",
"bytes": "33"
}
],
"symlink_target": ""
}
|
"""
Module holds base stuff regarding JMX format
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import traceback
from cssselect import GenericTranslator
from lxml import etree
from urllib import parse
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario
from bzt.utils import BetterDict, iteritems, numeric_types
from bzt.requests_model import has_variable_pattern
LOG = logging.getLogger("")
def try_convert(val, func=int, default=None):
    """Convert *val* with *func*, leaving None and JMeter property templates alone.

    If *val* looks like a JMeter property expression and *default* is given,
    the property's embedded default (or *default* itself) is converted;
    otherwise the template string is returned unconverted.
    """
    if val is None:
        res = val
    elif has_variable_pattern(val):  # it's property...
        if default is not None:
            # Fall back to *default* when the template embeds no default.
            val = get_prop_default(val) or default
            res = func(val)
        else:
            res = val
    else:
        res = func(val)
    return res
def get_prop_default(val):
    """Return the default embedded in a JMeter property template, or None."""
    comma_at = val.find(",")
    if comma_at < 0:
        return None
    if not (val.startswith("${__property(") or val.startswith("${__P(")):
        return None
    if not has_variable_pattern(val):
        return None
    # Everything between the comma and the trailing ')}' is the default.
    return val[comma_at + 1: -2]
def cond_int(val):
    """Truncate floats to int; return any other value unchanged."""
    return int(val) if isinstance(val, float) else val
def cond_float(val, rounding=None):
    """Coerce numeric values to float (optionally rounded); pass others through."""
    if not isinstance(val, numeric_types):
        return val
    result = float(val)
    if rounding is not None:
        result = round(result, rounding)
    return result
class JMX(object):
"""
A class to manipulate and generate JMX test plans for JMeter
:param original: path to existing JMX to load. If it is None, then creates
empty test plan
"""
TEST_PLAN_SEL = "jmeterTestPlan>hashTree>hashTree"
THR_GROUP_SEL = TEST_PLAN_SEL + ">hashTree[type=tg]"
THR_TIMER = "kg.apc.jmeter.timers.VariableThroughputTimer"
SET_VAR_ACTION = "kg.apc.jmeter.control.sampler.SetVariablesAction"
    def __init__(self, original=None, test_plan_name="BZT Generated Test Plan"):
        """Load *original* if given, otherwise build an empty test plan skeleton."""
        self.log = logging.getLogger(self.__class__.__name__)
        if original:
            self.load(original)
        else:
            # Minimal skeleton: jmeterTestPlan > hashTree > (TestPlan + empty
            # hashTree), plus the user-defined-variables arguments panel.
            root = etree.Element("jmeterTestPlan")
            self.tree = etree.ElementTree(root)
            test_plan = etree.Element("TestPlan", guiclass="TestPlanGui",
                                      testname=test_plan_name,
                                      testclass="TestPlan", enabled="true")
            htree = etree.Element("hashTree")
            htree.append(test_plan)
            htree.append(etree.Element("hashTree"))
            self.append("jmeterTestPlan", htree)
            element_prop = self._get_arguments_panel("TestPlan.user_defined_variables")
            self.append("jmeterTestPlan>hashTree>TestPlan", element_prop)
def load(self, original):
"""
Load existing JMX file
:param original: JMX file path
:raise TaurusInternalException: in case of XML parsing error
"""
try:
self.tree = etree.ElementTree()
self.tree.parse(original)
except BaseException as exc:
msg = "XML parsing failed for file %s: %s"
raise TaurusInternalException(msg % (original, exc))
    def get(self, selector):
        """
        Returns tree elements by CSS selector

        :type selector: str
        :return: list of matching lxml elements
        """
        # cssselect translates the CSS selector into an equivalent XPath query.
        expression = GenericTranslator().css_to_xpath(selector)
        nodes = self.tree.xpath(expression)
        return nodes
def append(self, selector, node):
"""
Add node to container specified by selector. If multiple nodes will
match the selector, first of them will be used as container.
:param selector: CSS selector for container
:param node: Element instance to add
:raise TaurusInternalException: if container was not found
"""
container = self.get(selector)
if not len(container):
msg = "Failed to find TestPlan node in file: %s"
raise TaurusInternalException(msg % selector)
container[0].append(node)
    def save(self, filename):
        """
        Save JMX into file

        :param filename: destination path; written as pretty-printed UTF-8 XML
        """
        self.log.debug("Saving JMX to: %s", filename)
        with open(filename, "wb") as fhd:
            self.tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
@staticmethod
def _flag(flag_name, bool_value):
"""
Generates element for JMX flag node
:param flag_name:
:param bool_value:
:return:
"""
elm = etree.Element(flag_name)
elm.text = "true" if bool_value else "false"
return elm
    @staticmethod
    def __jtl_writer(filename, label, flags):
        """
        Generates JTL writer

        :param filename: output .jtl path
        :param label: testname shown on the ResultCollector
        :param flags: mapping of SampleSaveConfiguration flag name -> bool
        :return: ResultCollector element
        """
        jtl = etree.Element("stringProp", {"name": "filename"})
        jtl.text = filename
        # objProp holds the saveConfig name plus the flag values.
        name = etree.Element("name")
        name.text = "saveConfig"
        value = etree.Element("value")
        value.set("class", "SampleSaveConfiguration")
        # Each flag becomes a <flagname>true/false</flagname> child.
        for key, val in iteritems(flags):
            value.append(JMX._flag(key, val))
        obj_prop = etree.Element("objProp")
        obj_prop.append(name)
        obj_prop.append(value)
        listener = etree.Element("ResultCollector",
                                 testname=label,
                                 testclass="ResultCollector",
                                 guiclass="SimpleDataWriter")
        listener.append(jtl)
        listener.append(obj_prop)
        return listener
    @staticmethod
    def new_kpi_listener(filename, flag_overrides=None):
        """
        Generates listener for writing basic KPI data in CSV format

        :param filename: destination .jtl/.csv path
        :param flag_overrides: optional dict merged over the default save flags
        :return: ResultCollector element
        """
        # Defaults select CSV output (xml=False) with the per-sample fields
        # needed for KPI reporting; payload/header dumps stay disabled.
        defaults = {
            "xml": False,
            "fieldNames": True,
            "time": True,
            "timestamp": True,
            "latency": True,
            "connectTime": True,
            "success": True,
            "label": True,
            "code": True,
            "message": True,
            "threadName": True,
            "dataType": False,
            "encoding": False,
            "assertions": False,
            "subresults": False,
            "responseData": False,
            "samplerData": False,
            "responseHeaders": False,
            "requestHeaders": False,
            "responseDataOnError": False,
            "saveAssertionResultsFailureMessage": False,
            "bytes": True,
            "hostname": True,
            "threadCounts": True,
            "url": False
        }
        flags = BetterDict.from_dict(defaults)
        if flag_overrides:
            flags.merge(flag_overrides)
        return JMX.__jtl_writer(filename, "KPI Writer", flags)
    @staticmethod
    def new_xml_listener(filename, is_full, user_flags):
        """
        Build an XML trace/error ResultCollector.

        :param is_full: bool -- True logs every sample ("Trace Writer"),
            False logs only failed ones ("Errors Writer")
        :param filename: str -- destination .jtl path
        :param user_flags: BetterDict -- overrides merged over the defaults
        :return: ResultCollector element
        """
        # Verbose XML output including assertions and headers; raw
        # request/response payloads remain off by default.
        default_flags = {
            "xml": True,
            "fieldNames": True,
            "time": True,
            "timestamp": True,
            "latency": True,
            "success": True,
            "label": True,
            "code": True,
            "message": True,
            "threadName": True,
            "dataType": True,
            "encoding": True,
            "assertions": True,
            "subresults": True,
            "responseData": False,
            "samplerData": False,
            "responseHeaders": True,
            "requestHeaders": True,
            "responseDataOnError": True,
            "saveAssertionResultsFailureMessage": True,
            "bytes": True,
            "threadCounts": True,
            "url": True
        }
        flags = BetterDict.from_dict(default_flags)
        flags.merge(user_flags)
        if is_full:
            writer = JMX.__jtl_writer(filename, "Trace Writer", flags)
        else:
            writer = JMX.__jtl_writer(filename, "Errors Writer", flags)
            # error_logging restricts the collector to failed samples only.
            writer.append(JMX._bool_prop("ResultCollector.error_logging", True))
        return writer
    @staticmethod
    def _get_arguments_panel(name):
        """
        Generates ArgumentsPanel node

        :param name: value for the elementProp 'name' attribute
        :return: lxml Element
        """
        return etree.Element("elementProp", name=name, elementType="Arguments",
                             guiclass="ArgumentsPanel", testclass="Arguments")
    @staticmethod
    def get_auth_manager(authorizations, clear_flag):
        """
        Build an HTTP Authorization Manager element.

        :param authorizations: iterable of dicts with url/name/password/domain/
            realm/mechanism keys
        :param clear_flag: when True, credentials reset on each iteration
        :return: AuthManager element
        """
        mgr = etree.Element("AuthManager", guiclass="AuthPanel", testclass="AuthManager",
                            testname="HTTP Authorization Manager")
        if clear_flag:
            mgr.append(JMX._bool_prop("AuthManager.clearEachIteration", True))
        auth_coll = JMX._collection_prop("AuthManager.auth_list")
        mgr.append(auth_coll)
        for authorization in authorizations:
            auth_element = JMX._element_prop(name="", element_type="Authorization")
            conf_url = authorization.get("url", "")
            conf_name = authorization.get("name", "")
            conf_pass = authorization.get("password", "")
            conf_domain = authorization.get("domain", "")
            conf_realm = authorization.get("realm", "")
            conf_mech = authorization.get("mechanism", "").upper()
            # Name, password and at least one of url/domain are mandatory;
            # incomplete entries are skipped with a warning.
            if not (conf_name and conf_pass and (conf_url or conf_domain)):
                LOG.warning("Wrong authorization: %s" % authorization)
                continue
            auth_element.append(JMX._string_prop("Authorization.url", conf_url))
            auth_element.append(JMX._string_prop("Authorization.username", conf_name))
            auth_element.append(JMX._string_prop("Authorization.password", conf_pass))
            auth_element.append(JMX._string_prop("Authorization.domain", conf_domain))
            auth_element.append(JMX._string_prop("Authorization.realm", conf_realm))
            if conf_mech == "KERBEROS":  # optional prop
                auth_element.append(JMX._string_prop("Authorization.mechanism", "KERBEROS"))
            auth_coll.append(auth_element)
        return mgr
    @staticmethod
    def _get_http_request(url, label, method, timeout, body, keepalive, files=(), encoding=None, follow_redirects=True,
                          use_random_host_ip=False, host_ips=()):
        """
        Generates HTTP request

        :type method: str
        :type label: str
        :type url: str
        :rtype: lxml.etree.Element
        """
        proxy = etree.Element("HTTPSamplerProxy", guiclass="HttpTestSampleGui", testclass="HTTPSamplerProxy")
        proxy.set("testname", label)
        args = JMX._get_arguments_panel("HTTPsampler.Arguments")
        # Body may be a raw string, a dict of form parameters, or absent.
        if isinstance(body, str):
            JMX.__add_body_from_string(args, body, proxy)
        elif isinstance(body, dict):
            JMX.__add_body_from_script(args, body, proxy)
        elif body:
            msg = "Cannot handle 'body' option of type %s: %s"
            raise TaurusInternalException(msg % (type(body), body))
        parsed_url = parse.urlparse(url)
        JMX.__add_hostnameport_2sampler(parsed_url, proxy, url)
        # Recombine path + params + query; scheme/host/port live in their own
        # sampler fields set by __add_hostnameport_2sampler.
        path = parsed_url.path
        if parsed_url.params:
            path += ";" + parsed_url.params
        if parsed_url.query:
            path += "?" + parsed_url.query
        proxy.append(JMX._string_prop("HTTPSampler.path", path))
        proxy.append(JMX._string_prop("HTTPSampler.method", method))
        proxy.append(JMX._bool_prop("HTTPSampler.use_keepalive", keepalive))
        proxy.append(JMX._bool_prop("HTTPSampler.follow_redirects", follow_redirects))
        proxy.append(JMX._bool_prop("HTTPSampler.auto_redirects", False))
        # The same timeout is applied to both connect and response waits.
        if timeout:
            proxy.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
            proxy.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
        if encoding is not None:
            proxy.append(JMX._string_prop("HTTPSampler.contentEncoding", encoding))
        proxy.extend(JMX.get_files_elements(files))
        # Spread requests across several source IPs via __chooseRandom when
        # more than one host IP is supplied.
        if use_random_host_ip and host_ips:
            if len(host_ips) > 1:
                expr = "${__chooseRandom(%s,randomAddr)}" % ",".join(host_ips)
            else:
                expr = host_ips[0]
            proxy.append(JMX._string_prop("HTTPSampler.ipSource", expr))
        return proxy
    @staticmethod
    def get_files_elements(files):
        """
        Build HTTPFileArgs elements for multipart file uploads.

        :param files: iterable of dicts with 'path', 'param' and 'mime-type'
        :return: list holding one HTTPsampler.Files elementProp, or [] if empty
        """
        elements = []
        if files:
            files_prop = JMX._element_prop("HTTPsampler.Files", "HTTPFileArgs")
            elements.append(files_prop)
            files_coll = JMX._collection_prop("HTTPFileArgs.files")
            for file_dict in files:
                file_elem = JMX._element_prop(file_dict.get("path", ""), "HTTPFileArg")
                file_elem.append(JMX._string_prop("File.path", file_dict.get("path", "")))
                file_elem.append(JMX._string_prop("File.paramname", file_dict.get("param", "")))
                file_elem.append(JMX._string_prop("File.mimetype", file_dict.get("mime-type", "")))
                files_coll.append(file_elem)
            files_prop.append(files_coll)
        return elements
    @staticmethod
    def get_keystore_config_elements(variable_name, start_index, end_index, preload):
        """
        Build a KeystoreConfig element for client-certificate setups.

        NOTE(review): unlike get_files_elements, this returns either the empty
        list or a single KeystoreConfig Element (the string props become its
        children), not a list of elements -- confirm callers accept both shapes.
        """
        elements = []
        if variable_name:
            elements = etree.Element("KeystoreConfig", guiclass="TestBeanGUI", testclass="KeystoreConfig",
                                     testname="Taurus-Keystore-Configuration")
            elements.append(JMX._string_prop("clientCertAliasVarName", variable_name))
            elements.append(JMX._string_prop("startIndex", start_index))
            elements.append(JMX._string_prop("endIndex", end_index))
            elements.append(JMX._string_prop("preload", preload))
        return elements
    @staticmethod
    def __add_body_from_string(args, body, proxy):
        # Raw-body mode: the whole payload goes into a single unnamed
        # HTTPArgument under the arguments panel.
        proxy.append(JMX._bool_prop("HTTPSampler.postBodyRaw", True))
        coll_prop = JMX._collection_prop("Arguments.arguments")
        header = JMX._element_prop("elementProp", "HTTPArgument")
        try:
            header.append(JMX._string_prop("Argument.value", body))
        except ValueError:
            # Presumably the body holds characters lxml cannot store as XML
            # text; substitute a stub rather than failing the conversion.
            LOG.warning("Failed to set body: %s", traceback.format_exc())
            header.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
        coll_prop.append(header)
        args.append(coll_prop)
        proxy.append(args)
@staticmethod
def __add_body_from_script(args, body, proxy):
    """
    Attach a dict *body* as HTTP form arguments to the sampler *proxy*.

    Any name or value that cannot be stored as XML text (ValueError from
    lxml) is replaced with a BINARY-STUB placeholder instead of failing
    the whole conversion.
    """
    http_args_coll_prop = JMX._collection_prop("Arguments.arguments")
    for arg_name, arg_value in body.items():
        try:
            http_element_prop = JMX._element_prop(arg_name, "HTTPArgument")
        except ValueError:
            LOG.warning("Failed to get element property: %s", traceback.format_exc())
            http_element_prop = JMX._element_prop('BINARY-STUB', "HTTPArgument")
        try:
            http_element_prop.append(JMX._string_prop("Argument.name", arg_name))
        except ValueError:
            LOG.warning("Failed to set arg name: %s", traceback.format_exc())
            http_element_prop.append(JMX._string_prop("Argument.name", "BINARY-STUB"))
        try:
            http_element_prop.append(
                JMX._string_prop("Argument.value", arg_value if arg_value is not None else ''))
        except ValueError:
            # fixed copy-paste: this failure concerns the value, not the name
            LOG.warning("Failed to set arg value: %s", traceback.format_exc())
            http_element_prop.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
        http_element_prop.append(JMX._bool_prop("HTTPArgument.always_encode", True))
        # emit the '=' separator only when the argument actually has a value
        use_equals = arg_value is not None
        http_element_prop.append(JMX._bool_prop("HTTPArgument.use_equals", use_equals))
        http_element_prop.append(JMX._string_prop("Argument.metadata", '=' if use_equals else ''))
        http_args_coll_prop.append(http_element_prop)
    args.append(http_args_coll_prop)
    proxy.append(args)
@staticmethod
def __add_hostnameport_2sampler(parsed_url, proxy, url):
    """
    Copy protocol, domain and port from *parsed_url* onto the sampler.

    *url* is used only in the debug message when the port is unparsable.
    """
    if parsed_url.scheme:
        proxy.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
    if not parsed_url.netloc:
        return
    netloc_parts = parsed_url.netloc.split(':')
    if netloc_parts[0]:
        proxy.append(JMX._string_prop("HTTPSampler.domain", netloc_parts[0]))
    explicit_port = netloc_parts[1] if len(netloc_parts) > 1 else ""
    if explicit_port:
        proxy.append(JMX._string_prop("HTTPSampler.port", explicit_port))
        return
    try:
        # parsed_url.port raises ValueError on non-numeric port strings
        port_value = parsed_url.port if parsed_url.port else ""
        proxy.append(JMX._string_prop("HTTPSampler.port", port_value))
    except ValueError:
        LOG.debug("Non-parsable port: %s", url)
        proxy.append(JMX._string_prop("HTTPSampler.port", ""))
@staticmethod
def _element_prop(name, element_type):
    """
    Build an <elementProp> node with the given name and elementType.

    :type name: str
    :type element_type: str
    :rtype: lxml.etree.Element
    """
    return etree.Element("elementProp", name=name, elementType=element_type)
@staticmethod
def _collection_prop(name):
    """
    Build a <collectionProp> node with the given name.

    :type name: str
    :rtype: lxml.etree.Element
    """
    return etree.Element("collectionProp", name=name)
@staticmethod
def _string_prop(name, value):
    """
    Build a <stringProp> node; *value* is coerced to str.

    :type name: str
    :rtype: lxml.etree.Element
    """
    prop = etree.Element("stringProp", name=name)
    prop.text = str(value)
    return prop
@staticmethod
def _long_prop(name, value):
    """
    Build a <longProp> node; *value* is coerced to str.

    :type name: str
    :rtype: lxml.etree.Element
    """
    prop = etree.Element("longProp", name=name)
    prop.text = str(value)
    return prop
@staticmethod
def _bool_prop(name, value):
    """
    Build a <boolProp> node with JMeter-style 'true'/'false' text.

    :type name: str
    :rtype: lxml.etree.Element
    """
    prop = etree.Element("boolProp", name=name)
    prop.text = "true" if value else "false"
    return prop
@staticmethod
def int_prop(name, value):
"""
JMX int property
:param name:
:param value:
:return:
"""
res = etree.Element("intProp", name=name)
res.text = str(value)
return res
@staticmethod
def get_thread_group(concurrency=None, rampup=0, hold=0, iterations=None,
                     testname="ThreadGroup", on_error="continue", thread_delay=False, scheduler_delay=None):
    """
    Generates ThreadGroup

    Expected values (by JMeter):
    ThreadGroup.num_threads (concurrency): int
    ThreadGroup.ramp_time (rampup): int
    ThreadGroup.scheduler (need to hold): boolean
    ThreadGroup.duration (rampup + hold): int
    LoopController.loops (iterations): int
    ThreadGroup.delayedStart: boolean

    Note: rampup/hold may also arrive as JMeter expression strings — the
    duration is then composed with the __intSum() function at run time.

    :return: etree element, ThreadGroup
    """
    # normalize falsy values to 0 before further math/formatting
    rampup = cond_int(rampup or 0)
    hold = cond_int(hold or 0)
    if concurrency is None:
        concurrency = 1
    # an explicitly non-positive concurrency disables the whole group
    if isinstance(concurrency, numeric_types) and concurrency <= 0:
        enabled = "false"
    else:
        enabled = "true"
    # total duration = rampup + hold; when either side is a non-numeric
    # (JMeter expression) string, delegate the addition to __intSum()
    if not hold:
        duration = rampup
    elif not rampup:
        duration = hold
    elif isinstance(rampup, numeric_types) and isinstance(hold, numeric_types):
        duration = hold + rampup
    else:
        duration = "${__intSum(%s,%s)}" % (rampup, hold)
    trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
                        testclass="ThreadGroup", testname=testname, enabled=enabled)
    # no explicit iterations: loop forever (-1) when a time limit exists,
    # otherwise run exactly once
    if not iterations:
        if duration:
            iterations = -1
        else:
            iterations = 1
    # the scheduler flag makes JMeter enforce the duration limit
    scheduler = False
    if hold or (rampup and (iterations == -1)):
        scheduler = True
    if on_error is not None:
        trg.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
    loop = etree.Element("elementProp",
                         name="ThreadGroup.main_controller",
                         elementType="LoopController",
                         guiclass="LoopControlPanel",
                         testclass="LoopController")
    # 'true' causes endless execution of TG in non-gui mode
    loop.append(JMX._bool_prop("LoopController.continue_forever", False))
    loop.append(JMX._string_prop("LoopController.loops", iterations))
    trg.append(loop)
    trg.append(JMX._string_prop("ThreadGroup.num_threads", concurrency))
    trg.append(JMX._string_prop("ThreadGroup.ramp_time", rampup))
    trg.append(JMX._string_prop("ThreadGroup.start_time", ""))
    trg.append(JMX._string_prop("ThreadGroup.end_time", ""))
    trg.append(JMX._bool_prop("ThreadGroup.scheduler", scheduler))
    trg.append(JMX._string_prop("ThreadGroup.duration", duration))
    if scheduler_delay:
        trg.append(JMX._string_prop("ThreadGroup.delay", scheduler_delay))
    if thread_delay:
        trg.append(JMX._bool_prop("ThreadGroup.delayedStart", thread_delay))
    return trg
def get_rps_shaper(self):
    """
    Create a throughput-shaping timer element with an empty load profile.

    Use add_rps_shaper_schedule() to append schedule steps afterwards.

    :return: etree.Element
    """
    shaper = etree.Element(self.THR_TIMER,
                           guiclass=self.THR_TIMER + "Gui",
                           testclass=self.THR_TIMER,
                           testname="Throughput_Limiter",
                           enabled="true")
    shaper.append(self._collection_prop("load_profile"))
    return shaper
def add_rps_shaper_schedule(self, shaper_etree, start_rps, end_rps, duration):
    """
    Append one (start_rps, end_rps, duration) step to a shaper's load profile.

    Expected values (by JMeter):
    <first> ('start_rps'): float
    <second> ('end_rps'): float
    <third> ('duration'): int
    """
    step = self._collection_prop("")
    step.append(self._string_prop("", cond_float(start_rps, 3)))
    step.append(self._string_prop("", cond_float(end_rps, 3)))
    step.append(self._string_prop("", cond_int(duration)))
    profile = shaper_etree.find(".//collectionProp[@name='load_profile']")
    profile.append(step)
@staticmethod
def get_set_var_action(udv_dict, testname="Variables from Taurus"):
    """
    Build a "set variables" action element from a name -> value mapping.

    Variables are emitted in sorted order for reproducible scripts.

    :type testname: str
    :type udv_dict: dict[str,str]
    :rtype: etree.Element
    """
    action = etree.Element(JMX.SET_VAR_ACTION, guiclass=JMX.SET_VAR_ACTION + "Gui",
                           testclass=JMX.SET_VAR_ACTION, testname=testname)
    arguments = etree.Element("elementProp", name="SetVariablesAction", guiclass="ArgumentsPanel",
                              testclass="Arguments", testname="User Defined Variables",
                              elementType="Arguments")
    action.append(arguments)
    coll = JMX._collection_prop("Arguments.arguments")
    arguments.append(coll)
    for var_name in sorted(udv_dict.keys(), key=str):
        var_prop = JMX._element_prop(name=str(var_name), element_type="Argument")
        coll.append(var_prop)
        var_prop.append(JMX._string_prop("Argument.name", var_name))
        var_prop.append(JMX._string_prop("Argument.value", udv_dict[var_name]))
        var_prop.append(JMX._string_prop("Argument.metadata", "="))
    return action
@staticmethod
def add_user_def_vars_elements(udv_dict, testname="Variables from Taurus"):
    """
    Build an Arguments element holding user-defined variables.

    Variables are emitted in sorted order for reproducible scripts.

    :type testname: str
    :type udv_dict: dict[str,str]
    :rtype: etree.Element
    """
    udv_element = etree.Element("Arguments", guiclass="ArgumentsPanel", testclass="Arguments",
                                testname=testname)
    coll = JMX._collection_prop("Arguments.arguments")
    for var_name in sorted(udv_dict.keys(), key=str):
        var_prop = JMX._element_prop(str(var_name), "Argument")
        for child in (JMX._string_prop("Argument.name", var_name),
                      JMX._string_prop("Argument.value", udv_dict[var_name]),
                      JMX._string_prop("Argument.desc", ""),
                      JMX._string_prop("Argument.metadata", "=")):
            var_prop.append(child)
        coll.append(var_prop)
    udv_element.append(coll)
    return udv_element
@staticmethod
def get_concurrency_thread_group(concurrency=None, rampup=0, hold=0, steps=None, on_error="continue",
                                 testname="ConcurrencyThreadGroup", iterations=""):
    """
    Generates ConcurrencyThreadGroup (blazemeter plugin)

    Expected values (by JMeter):
    TargetLevel (concurrency): int
    RampUp (rampup): float
    Steps (steps): int
    Hold (hold): float

    :return: etree element, Concurrency Thread Group
    """
    rampup = rampup or 0
    concurrency = 1 if concurrency is None else concurrency
    # an explicitly non-positive concurrency disables the whole group
    disabled = isinstance(concurrency, numeric_types) and concurrency <= 0
    if steps is None:  # zero means infinity of steps
        steps = 0
    name = 'com.blazemeter.jmeter.threads.concurrency.ConcurrencyThreadGroup'
    group = etree.Element(name, guiclass=name + "Gui", testclass=name, testname=testname,
                          enabled="false" if disabled else "true")
    group.append(etree.Element(
        "elementProp",
        name="ThreadGroup.main_controller",
        elementType="com.blazemeter.jmeter.control.VirtualUserController"))
    group.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
    group.append(JMX._string_prop("TargetLevel", str(concurrency)))
    group.append(JMX._string_prop("RampUp", str(cond_int(rampup))))
    group.append(JMX._string_prop("Steps", steps))
    group.append(JMX._string_prop("Hold", str(cond_int(hold))))
    group.append(JMX._string_prop("LogFilename", ""))
    group.append(JMX._string_prop("Iterations", iterations or ""))
    group.append(JMX._string_prop("Unit", "S"))
    return group
@staticmethod
def get_dns_cache_mgr():
    """
    Build a DNSCacheManager element with default parameters.

    :rtype: lxml.etree.Element
    """
    mgr = etree.Element("DNSCacheManager", guiclass="DNSCachePanel", testclass="DNSCacheManager",
                        testname="DNS Cache Manager")
    mgr.append(JMX._collection_prop("DNSCacheManager.servers"))
    for flag in ("DNSCacheManager.clearEachIteration", "DNSCacheManager.isCustomResolver"):
        mgr.append(JMX._bool_prop(flag, False))
    return mgr
@staticmethod
def _get_header_mgr(hdict):
    """
    Build a HeaderManager element from a header-name -> value mapping.

    :type hdict: dict[str,str]
    :rtype: lxml.etree.Element
    """
    mgr = etree.Element("HeaderManager", guiclass="HeaderPanel", testclass="HeaderManager", testname="Headers")
    headers_coll = etree.Element("collectionProp", name="HeaderManager.headers")
    mgr.append(headers_coll)
    for hname, hval in iteritems(hdict):
        entry = etree.Element("elementProp", name="", elementType="Header")
        entry.append(JMX._string_prop("Header.name", hname))
        entry.append(JMX._string_prop("Header.value", hval))
        headers_coll.append(entry)
    return mgr
@staticmethod
def _get_cache_mgr():
    """
    Build a CacheManager element (cleared each iteration, honouring expires).

    :rtype: lxml.etree.Element
    """
    mgr = etree.Element("CacheManager", guiclass="CacheManagerGui", testclass="CacheManager", testname="Cache")
    for flag in ("clearEachIteration", "useExpires"):
        mgr.append(JMX._bool_prop(flag, True))
    return mgr
@staticmethod
def _get_cookie_mgr(scenario=None):
    """
    Build a CookieManager element; when a scenario defines cookies,
    pre-populate the manager with them.

    :raises TaurusConfigError: on a non-dict cookie or a cookie missing
        one of the required "name"/"value"/"domain" keys
    :rtype: lxml.etree.Element
    """
    mgr = etree.Element("CookieManager", guiclass="CookiePanel", testclass="CookieManager", testname="Cookies")
    mgr.append(JMX._bool_prop("CookieManager.clearEachIteration", False))
    mgr.append(JMX._string_prop("CookieManager.implementation",
                                "org.apache.jmeter.protocol.http.control.HC4CookieHandler"))
    if scenario:
        cookies = scenario.get(Scenario.COOKIES)
        if cookies:
            cookies_coll = JMX._collection_prop("CookieManager.cookies")
            mgr.append(cookies_coll)
            for cookie in cookies:
                if not isinstance(cookie, dict):
                    raise TaurusConfigError("Cookie must be dictionary: %s" % cookie)
                # fixed: previously TaurusConfigError *instances* were passed
                # as dict.get() defaults and never raised, silently embedding
                # the error object into the generated JMX
                if "name" not in cookie:
                    raise TaurusConfigError("Name of cookie isn't found: %s" % cookie)
                if "value" not in cookie:
                    raise TaurusConfigError("Value of cookie isn't found: %s" % cookie)
                if "domain" not in cookie:
                    raise TaurusConfigError("Domain of cookie isn't found: %s" % cookie)
                c_name = cookie["name"]
                c_value = cookie["value"]
                c_domain = cookie["domain"]
                c_path = cookie.get("path", "")
                c_secure = cookie.get("secure", False)
                # follow params are hardcoded in JMeter
                c_expires = 0
                c_path_specified = True
                c_domain_specified = True
                c_elem = etree.Element("elementProp", name=c_name, elementType="Cookie", testname=c_name)
                c_elem.append(JMX._string_prop("Cookie.value", c_value))
                c_elem.append(JMX._string_prop("Cookie.domain", c_domain))
                c_elem.append(JMX._string_prop("Cookie.path", c_path))
                c_elem.append(JMX._bool_prop("Cookie.secure", c_secure))
                c_elem.append(JMX._long_prop("Cookie.expires", c_expires))
                c_elem.append(JMX._bool_prop("Cookie.path_specified", c_path_specified))
                c_elem.append(JMX._bool_prop("Cookie.domain_specified", c_domain_specified))
                cookies_coll.append(c_elem)
    return mgr
@staticmethod
def _get_http_defaults(default_address=None, timeout=None, retrieve_resources=None, concurrent_pool_size=4,
                       content_encoding=None, resources_regex=None):
    """
    Build an HTTP Request Defaults config element.

    :rtype: lxml.etree.Element
    """
    cfg = etree.Element("ConfigTestElement", guiclass="HttpDefaultsGui",
                        testclass="ConfigTestElement", testname="Defaults")
    if retrieve_resources:
        cfg.append(JMX._bool_prop("HTTPSampler.image_parser", True))
        cfg.append(JMX._bool_prop("HTTPSampler.concurrentDwn", True))
        if concurrent_pool_size:
            cfg.append(JMX._string_prop("HTTPSampler.concurrentPool", concurrent_pool_size))
    cfg.append(etree.Element("elementProp",
                             name="HTTPsampler.Arguments",
                             elementType="Arguments",
                             guiclass="HTTPArgumentsPanel",
                             testclass="Arguments", testname="user_defined"))
    if default_address:
        parsed_url = parse.urlsplit(default_address)
        if parsed_url.scheme:
            cfg.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
        if parsed_url.netloc:
            host = parsed_url.netloc
            if ':' in host:
                # split on the LAST colon: everything after it is the port
                host, _, port = host.rpartition(':')
                cfg.append(JMX._string_prop("HTTPSampler.port", port))
            cfg.append(JMX._string_prop("HTTPSampler.domain", host))
    if timeout:
        cfg.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
        cfg.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
    if content_encoding:
        cfg.append(JMX._string_prop("HTTPSampler.contentEncoding", content_encoding))
    if resources_regex:
        cfg.append(JMX._string_prop("HTTPSampler.embedded_url_re", resources_regex))
    return cfg
@staticmethod
def get_constant_timer(delay):
    """Build a ConstantTimer ("think time") element plus its hashTree tail."""
    ttype = "ConstantTimer"
    timer = etree.Element(ttype, guiclass=ttype + "Gui", testclass=ttype, testname="Think-Time")
    timer.append(JMX._string_prop(ttype + ".delay", delay))
    return [timer, etree.Element("hashTree")]
@staticmethod
def get_uniform_timer(maximum, offset):
    """Build a UniformRandomTimer: constant *offset* plus a random part bounded by *maximum*."""
    ttype = "UniformRandomTimer"
    timer = etree.Element(ttype, guiclass=ttype + "Gui", testclass=ttype, testname="Think-Time")
    timer.append(JMX._string_prop("ConstantTimer.delay", offset))
    timer.append(JMX._string_prop("RandomTimer.range", maximum))
    return [timer, etree.Element("hashTree")]
@staticmethod
def get_gaussian_timer(dev, offset):
    """Build a GaussianRandomTimer: constant *offset* plus random deviation *dev*."""
    ttype = "GaussianRandomTimer"
    timer = etree.Element(ttype, guiclass=ttype + "Gui", testclass=ttype, testname="Think-Time")
    timer.append(JMX._string_prop("ConstantTimer.delay", offset))
    timer.append(JMX._string_prop("RandomTimer.range", dev))
    return [timer, etree.Element("hashTree")]
@staticmethod
def get_poisson_timer(lam, delay):
    """Build a PoissonRandomTimer: constant *delay* plus random part parameterized by *lam*."""
    ttype = "PoissonRandomTimer"
    timer = etree.Element(ttype, guiclass=ttype + "Gui", testclass=ttype, testname="Think-Time")
    timer.append(JMX._string_prop("ConstantTimer.delay", delay))
    timer.append(JMX._string_prop("RandomTimer.range", lam))
    return [timer, etree.Element("hashTree")]
@staticmethod
def _get_extractor(varname, headers, regexp, template, match_no, default='NOT_FOUND', scope='', from_var=''):
    """
    Build a RegexExtractor post-processor.

    :type varname: str
    :type headers: str  # 'headers', 'http-code', 'url'; anything else means body
    :type regexp: str
    :type template: str|int
    :type match_no: int
    :type default: str
    :type scope: str
    :type from_var: str
    :rtype: lxml.etree.Element
    """
    if isinstance(template, int):
        template = '$%s$' % template
    # map the friendly source names onto JMeter's useHeaders values
    source_map = {'headers': 'true', 'http-code': 'code', 'url': 'URL'}
    source = source_map.get(headers.lower(), 'body')
    element = etree.Element("RegexExtractor", guiclass="RegexExtractorGui",
                            testclass="RegexExtractor", testname="Get %s" % varname, enabled="true")
    element.extend([
        JMX._string_prop("RegexExtractor.useHeaders", source),
        JMX._string_prop("RegexExtractor.refname", varname),
        JMX._string_prop("RegexExtractor.regex", regexp),
        JMX._string_prop("RegexExtractor.template", template),
        JMX._string_prop("RegexExtractor.default", default),
        JMX._string_prop("RegexExtractor.match_number", match_no),
    ])
    element.extend(JMX.get_scope_props(scope, from_var))
    return element
@staticmethod
def _get_boundary_extractor(varname, subject, left, right, match_no, defvalue='NOT_FOUND', scope='', from_var=''):
    """
    Build a BoundaryExtractor post-processor.

    :type varname: str
    :type subject: str  # one of the keys of the mapping below
    :type left: str  # left boundary
    :type right: str  # right boundary
    :type match_no: int
    :type defvalue: str
    :type scope: str
    :type from_var: str
    :rtype: lxml.etree.Element
    """
    subjects = {
        'body': 'false',
        'body-unescaped': 'unescaped',
        'body-as-document': 'as_document',
        'response-headers': 'true',
        'request-headers': 'request_headers',
        'url': 'URL',
        'code': 'code',
        'message': 'message',
    }
    # NOTE(review): an unknown subject yields None here ("None" in the JMX);
    # presumably inputs are pre-validated upstream - confirm
    subject = subjects.get(subject)
    element = etree.Element("BoundaryExtractor", guiclass="BoundaryExtractorGui",
                            testclass="BoundaryExtractor", testname="Get %s" % varname, enabled="true")
    element.append(JMX._string_prop("BoundaryExtractor.useHeaders", subject))
    element.append(JMX._string_prop("BoundaryExtractor.refname", varname))
    element.append(JMX._string_prop("BoundaryExtractor.lboundary", left))
    element.append(JMX._string_prop("BoundaryExtractor.rboundary", right))
    # fixed: these two previously used RegexExtractor.* property names,
    # which JMeter's BoundaryExtractor does not read, so the default value
    # and match number were silently dropped
    element.append(JMX._string_prop("BoundaryExtractor.default", defvalue))
    element.append(JMX._string_prop("BoundaryExtractor.match_number", match_no))
    element.extend(JMX.get_scope_props(scope, from_var))
    return element
@staticmethod
def _get_jquerycss_extractor(varname, selector, attribute, match_no, default="NOT_FOUND", scope='', from_var=''):
    """
    Build an HtmlExtractor (CSS/JQuery selector) post-processor.

    :type varname: str
    :type selector: str
    :type attribute: str
    :type match_no: int
    :type default: str
    :type scope: str
    :type from_var: str
    :rtype: lxml.etree.Element
    """
    element = etree.Element("HtmlExtractor", guiclass="HtmlExtractorGui", testclass="HtmlExtractor",
                            testname="Get %s" % varname)
    for prop_name, prop_value in (("refname", varname),
                                  ("expr", selector),
                                  ("attribute", attribute),
                                  ("match_number", match_no),
                                  ("default", default)):
        element.append(JMX._string_prop("HtmlExtractor." + prop_name, prop_value))
    element.extend(JMX.get_scope_props(scope, from_var))
    return element
@staticmethod
def _get_json_extractor(varname, jsonpath, default='NOT_FOUND', from_variable=None):
    """
    Build a JSONPath extractor (ATLANTBH jsonutils plugin flavour).

    :type varname: str
    :type jsonpath: str
    :type default: str
    :rtype: lxml.etree.Element
    """
    package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathextractor"
    element = etree.Element(package + ".JSONPathExtractor",
                            guiclass=package + ".gui.JSONPathExtractorGui",
                            testclass=package + ".JSONPathExtractor",
                            testname="Get %s" % varname)
    element.append(JMX._string_prop("VAR", varname))
    element.append(JMX._string_prop("JSONPATH", jsonpath))
    element.append(JMX._string_prop("DEFAULT", default))
    if from_variable:
        # read input from a JMeter variable instead of the sample body
        element.append(JMX._string_prop("VARIABLE", from_variable))
        element.append(JMX._string_prop("SUBJECT", "VAR"))
    return element
@staticmethod
def get_scope_props(scope, from_variable):
    """
    Build the Sample.scope / Scope.variable props shared by extractors.

    Returns an empty list when no explicit scope is requested.
    """
    if not scope:
        return []
    props = [JMX._string_prop("Sample.scope", scope)]
    if scope == "variable":
        props.append(JMX._string_prop("Scope.variable", from_variable))
    return props
@staticmethod
def _get_internal_json_extractor(varname, jsonpath, default, scope, from_variable, match_no, concat):
    """
    Build JMeter's built-in JSONPostProcessor extractor.

    :type varname: str
    :type default: str
    :rtype: lxml.etree.Element
    """
    proc = "JSONPostProcessor"
    element = etree.Element(proc, guiclass=proc + "Gui", testclass=proc, testname="Get %s" % varname)
    element.append(JMX._string_prop("JSONPostProcessor.referenceNames", varname))
    element.append(JMX._string_prop("JSONPostProcessor.jsonPathExprs", jsonpath))
    element.append(JMX._string_prop("JSONPostProcessor.match_numbers", match_no))
    if default:
        element.append(JMX._string_prop("JSONPostProcessor.defaultValues", default))
    element.extend(JMX.get_scope_props(scope, from_variable))
    if concat:
        element.append(JMX._bool_prop("JSONPostProcessor.compute_concat", True))
    return element
@staticmethod
def _get_json_path_assertion(jsonpath, expected_value, json_validation, expect_null, invert, regexp=True):
    """
    Build a JSONPath assertion (ATLANTBH jsonutils plugin flavour).

    :type jsonpath: str
    :type expected_value: str
    :type json_validation: bool
    :type expect_null: bool
    :type invert: bool
    :type regexp: bool
    :rtype: lxml.etree.Element
    """
    package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathassertion"
    element = etree.Element(package + ".JSONPathAssertion",
                            guiclass=package + ".gui.JSONPathAssertionGui",
                            testclass=package + ".JSONPathAssertion",
                            testname="JSon path assertion")
    element.extend([
        JMX._string_prop("JSON_PATH", jsonpath),
        JMX._string_prop("EXPECTED_VALUE", expected_value),
        JMX._bool_prop("JSONVALIDATION", json_validation),
        JMX._bool_prop("EXPECT_NULL", expect_null),
        JMX._bool_prop("INVERT", invert),
        JMX._bool_prop("ISREGEX", regexp),
    ])
    return element
@staticmethod
def _get_xpath_extractor(varname, xpath, default, validate_xml, ignore_whitespace, match_no, use_namespaces,
                         use_tolerant_parser, scope, from_var):
    """
    Build an XPathExtractor post-processor.

    :type varname: str
    :type xpath: str
    :type default: str
    :type validate_xml: bool
    :type ignore_whitespace: bool
    :type use_tolerant_parser: bool
    :type scope: str
    :type from_var: str
    :rtype: lxml.etree.Element
    """
    element = etree.Element("XPathExtractor",
                            guiclass="XPathExtractorGui",
                            testclass="XPathExtractor",
                            testname="Get %s" % varname)
    element.extend([
        JMX._string_prop("XPathExtractor.refname", varname),
        JMX._string_prop("XPathExtractor.xpathQuery", xpath),
        JMX._string_prop("XPathExtractor.default", default),
        JMX._bool_prop("XPathExtractor.validate", validate_xml),
        JMX._bool_prop("XPathExtractor.whitespace", ignore_whitespace),
        JMX._string_prop("XPathExtractor.matchNumber", match_no),
        JMX._bool_prop("XPathExtractor.namespace", use_namespaces),
        JMX._bool_prop("XPathExtractor.tolerant", use_tolerant_parser),
    ])
    element.extend(JMX.get_scope_props(scope, from_var))
    return element
@staticmethod
def _get_xpath_assertion(xpath, validate_xml, ignore_whitespace, use_tolerant_parser, invert):
    """
    Build an XPathAssertion element.

    :type xpath: str
    :type validate_xml: bool
    :type ignore_whitespace: bool
    :type use_tolerant_parser: bool
    :type invert: bool
    :rtype: lxml.etree.Element
    """
    element = etree.Element("XPathAssertion",
                            guiclass="XPathAssertionGui",
                            testclass="XPathAssertion",
                            testname="XPath Assertion")
    element.extend([
        JMX._string_prop("XPath.xpath", xpath),
        JMX._bool_prop("XPath.validate", validate_xml),
        JMX._bool_prop("XPath.whitespace", ignore_whitespace),
        JMX._bool_prop("XPath.tolerant", use_tolerant_parser),
        JMX._bool_prop("XPath.negate", invert),
    ])
    return element
@staticmethod
def _get_resp_assertion(field, contains, is_regexp, is_invert, assume_success=False):
    """
    Build a ResponseAssertion over headers, response code or body.

    :type field: str
    :type contains: list[str]
    :type is_regexp: bool
    :type is_invert: bool
    :rtype: lxml.etree.Element
    """
    tname = "Assert %s %s" % ("hasn't" if is_invert else "has",
                              "[" + ", ".join('"' + str(x) + '"' for x in contains) + "]")
    element = etree.Element("ResponseAssertion", guiclass="AssertionGui",
                            testclass="ResponseAssertion", testname=tname)
    if field == Scenario.FIELD_HEADERS:
        fld = "Assertion.response_headers"
    elif field == Scenario.FIELD_RESP_CODE:
        fld = "Assertion.response_code"
    else:
        fld = "Assertion.response_data"
    # match-type codes: 2/6 = regex contains / not-contains,
    # 16/20 = plain substring / not-substring
    if is_regexp:
        mtype = 6 if is_invert else 2
    else:
        mtype = 20 if is_invert else 16
    element.append(JMX._string_prop("Assertion.test_field", fld))
    element.append(JMX._string_prop("Assertion.test_type", mtype))
    element.append(JMX._bool_prop("Assertion.assume_success", assume_success))
    # "Asserion" spelling matches JMeter's own property name - do not "fix"
    coll_prop = etree.Element("collectionProp", name="Asserion.test_strings")
    for string in contains:
        coll_prop.append(JMX._string_prop("", string))
    element.append(coll_prop)
    return element
@staticmethod
def _get_jsr223_element(language, script_file, parameters, execute, script_text=None, cache_key='true'):
    """
    Build a JSR223 pre- or post-processor ('before' selects PreProcessor).

    Either *script_file* or *script_text* supplies the code.
    """
    proc = "JSR223PreProcessor" if execute == "before" else "JSR223PostProcessor"
    element = etree.Element(proc, guiclass="TestBeanGUI", testclass=proc, testname=proc)
    element.append(JMX._string_prop("filename", script_file or ''))
    element.append(JMX._string_prop("script", script_text or ''))
    element.append(JMX._string_prop("parameters", parameters))
    element.append(JMX._string_prop("scriptLanguage", language))
    element.append(JMX._string_prop("cacheKey", cache_key))
    return element
@staticmethod
def _get_csv_config(path, delimiter, loop, variable_names, is_quoted):
    """
    Build a CSVDataSet config element.

    :type path: str
    :type delimiter: str
    :type loop: bool  # recycle file; when False the thread stops at EOF
    :type variable_names: str
    :type is_quoted: bool
    :rtype: lxml.etree.Element
    """
    element = etree.Element("CSVDataSet", guiclass="TestBeanGUI",
                            testclass="CSVDataSet", testname="CSV %s" % os.path.basename(path))
    element.extend([
        JMX._string_prop("filename", path),
        JMX._string_prop("delimiter", delimiter),
        JMX._bool_prop("quotedData", is_quoted),
        JMX._bool_prop("recycle", loop),
        JMX._bool_prop("stopThread", not loop),
        JMX._string_prop("variableNames", variable_names),
    ])
    return element
@staticmethod
def _get_csv_config_random(path, delimiter, loop, variable_names):
    """
    Build a bzm Random CSV Data Set Config element.

    :type path: str
    :type delimiter: str
    :type loop: bool
    :type variable_names: str
    :rtype: lxml.etree.Element
    """
    element = etree.Element("com.blazemeter.jmeter.RandomCSVDataSetConfig",
                            guiclass="com.blazemeter.jmeter.RandomCSVDataSetConfigGui",
                            testclass="com.blazemeter.jmeter.RandomCSVDataSetConfig",
                            testname="bzm - Random CSV Data Set Config")
    element.extend([
        JMX._string_prop("filename", path),
        JMX._string_prop("fileEncoding", "UTF-8"),
        JMX._string_prop("delimiter", delimiter),
        JMX._string_prop("variableNames", variable_names),
        JMX._bool_prop("randomOrder", True),
        # the header line is data only when no explicit variable names given
        JMX._bool_prop("ignoreFirstLine", not variable_names),
        JMX._bool_prop("rewindOnTheEndOfList", loop),
        JMX._bool_prop("independentListPerThread", False),
    ])
    return element
def set_enabled(self, sel, state):
    """
    Toggle the "enabled" attribute on all elements matching a selector.

    :type sel: str
    :type state: bool
    """
    items = self.get(sel)
    self.log.debug("Enable %s elements %s: %s", state, sel, items)
    value = 'true' if state else 'false'
    for item in items:
        item.set("enabled", value)
def set_text(self, sel, text):
    """
    Set the text of every element matching a selector.

    :type sel: str
    :type text: str
    :return: number of elements changed
    :rtype: int
    """
    count = 0
    for item in self.get(sel):
        item.text = str(text)
        count += 1
    return count
@staticmethod
def _get_simple_controller(name):
    """Build a GenericController (simple controller) with the given test name."""
    attrs = {"guiclass": "LogicControllerGui", "testclass": "GenericController", "testname": name}
    return etree.Element("GenericController", **attrs)
def _add_results_tree(self):
    """Append a View Results Tree listener (plus its hashTree) to the test plan."""
    viewer = etree.Element("ResultCollector",
                           testname="View Results Tree",
                           testclass="ResultCollector",
                           guiclass="ViewResultsFullVisualizer")
    self.append(self.TEST_PLAN_SEL, viewer)
    self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
@staticmethod
def _get_results_tree():
    """Build a View Results Tree listener element."""
    return etree.Element("ResultCollector",
                         testname="View Results Tree",
                         testclass="ResultCollector",
                         guiclass="ViewResultsFullVisualizer")
@staticmethod
def _get_if_controller(condition):
    """Build an IfController evaluating *condition*."""
    condition_prop = JMX._string_prop("IfController.condition", condition)
    controller = etree.Element("IfController", guiclass="IfControllerPanel", testclass="IfController",
                               testname="If Controller")
    controller.append(condition_prop)
    return controller
@staticmethod
def _get_once_controller():
    """
    Generates Once Only Controller

    :return: etree element, OnceOnlyController
    """
    return etree.Element("OnceOnlyController", guiclass="OnceOnlyControllerGui",
                         testclass="OnceOnlyController", testname="Once Only Controller")
@staticmethod
def _get_loop_controller(loops):
    """
    Generates Loop Controller

    Expected values (by JMeter):
    LoopController.loops (iterations): int, -1 when *loops* is 'forever'
    LoopController.continue_forever: boolean

    :return: etree element, LoopController
    """
    iterations = -1 if loops == 'forever' else loops
    controller = etree.Element("LoopController", guiclass="LoopControlPanel", testclass="LoopController",
                               testname="Loop Controller")
    # kept True on purpose: per the original note, 'false' would let the
    # controller be called only one time (by its parent)
    controller.append(JMX._bool_prop("LoopController.continue_forever", True))
    controller.append(JMX._string_prop("LoopController.loops", str(iterations)))
    return controller
@staticmethod
def _get_foreach_controller(input_var, loop_var):
    """Build a ForEach Controller reading *input_var* entries into *loop_var*."""
    controller = etree.Element("ForeachController", guiclass="ForeachControlPanel", testclass="ForeachController",
                               testname="ForEach Controller")
    controller.extend([
        JMX._string_prop("ForeachController.inputVal", input_var),
        JMX._string_prop("ForeachController.returnVal", loop_var),
        JMX._bool_prop("ForeachController.useSeparator", True),
    ])
    return controller
@staticmethod
def _get_while_controller(condition):
    """Build a WhileController looping while *condition* holds."""
    condition_prop = JMX._string_prop("WhileController.condition", condition)
    controller = etree.Element("WhileController", guiclass="WhileControllerGui", testclass="WhileController",
                               testname="While Controller")
    controller.append(condition_prop)
    return controller
@staticmethod
def _get_transaction_controller(transaction_name, force_parent_sample=False, include_timers=False):
    """Build a TransactionController named *transaction_name*."""
    controller = etree.Element("TransactionController", guiclass="TransactionControllerGui",
                               testclass="TransactionController", testname=transaction_name)
    controller.extend([
        JMX._bool_prop("TransactionController.parent", force_parent_sample),
        JMX._bool_prop("TransactionController.includeTimers", include_timers),
    ])
    return controller
@staticmethod
def _get_functional_mode_prop(enabled):
    """Build the TestPlan.functional_mode boolean property."""
    return JMX._bool_prop("TestPlan.functional_mode", enabled)
@staticmethod
def _get_action_block(action_index, target_index, duration_ms):
    """Build a TestAction (flow control action) element."""
    action = etree.Element("TestAction", guiclass="TestActionGui", testclass="TestAction", testname="Test Action")
    action.extend([
        JMX.int_prop("ActionProcessor.action", action_index),
        JMX.int_prop("ActionProcessor.target", target_index),
        JMX._string_prop("ActionProcessor.duration", str(duration_ms)),
    ])
    return action
|
{
"content_hash": "a912cc6b17af8d2eb510b74977d71002",
"timestamp": "",
"source": "github",
"line_count": 1396,
"max_line_length": 119,
"avg_line_length": 39.56375358166189,
"alnum_prop": 0.5905017109956365,
"repo_name": "greyfenrir/taurus",
"id": "85543779f4707d908320aa3b1785f4aa43974409",
"size": "55231",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bzt/jmx/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4380"
},
{
"name": "C",
"bytes": "5131"
},
{
"name": "C#",
"bytes": "18482"
},
{
"name": "CSS",
"bytes": "5298"
},
{
"name": "Dockerfile",
"bytes": "4518"
},
{
"name": "Groovy",
"bytes": "3280"
},
{
"name": "HTML",
"bytes": "5136"
},
{
"name": "Java",
"bytes": "9586"
},
{
"name": "JavaScript",
"bytes": "27121"
},
{
"name": "PHP",
"bytes": "8787"
},
{
"name": "PLpgSQL",
"bytes": "3712"
},
{
"name": "Python",
"bytes": "2167783"
},
{
"name": "RobotFramework",
"bytes": "6383"
},
{
"name": "Ruby",
"bytes": "4184"
},
{
"name": "Scala",
"bytes": "15526"
},
{
"name": "Shell",
"bytes": "12083"
},
{
"name": "Smarty",
"bytes": "13606"
}
],
"symlink_target": ""
}
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Email, EqualTo, Length
class LoginForm(FlaskForm):
    """Sign-in form: email + password with an optional "remember me" flag."""
    # Field labels are user-facing Chinese strings: 邮箱 = email,
    # 密码 = password, 记住我 = remember me, 登录 = log in.
    email = StringField(u'邮箱', validators=[DataRequired(), Length(1, 64),
                                           Email()])
    password = PasswordField(u'密码', validators=[DataRequired()])
    remember_me = BooleanField(u'记住我')
    submit = SubmitField(u'登录')
class RegisterForm(FlaskForm):
    """Account registration form (labels are user-facing Chinese strings:
    邮箱 = email, 名称 = username, 密码 = password, 密码确认 = confirm
    password, 注册 = register)."""
    email = StringField(u'邮箱', validators=[DataRequired(), Length(1, 64),
                                           Email()])
    username = StringField(u'名称', validators=[DataRequired(), Length(1, 128)])
    password = PasswordField(u'密码', validators=[DataRequired()])
    # Bug fix: the confirmation field previously had no cross-field check, so
    # a mismatched password/confirmation pair passed validation silently.
    password_confirm = PasswordField(u'密码确认',
                                    validators=[DataRequired(),
                                                EqualTo('password')])
    submit = SubmitField(u'注册')
|
{
"content_hash": "7056f2798e9f84ca8722ba7a895fcdd7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 45.89473684210526,
"alnum_prop": 0.6502293577981652,
"repo_name": "goalong/flask-demo",
"id": "4bea5c90f16bafd0ed85ef3266af20789e882aea",
"size": "932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8031"
},
{
"name": "HTML",
"bytes": "304551"
},
{
"name": "JavaScript",
"bytes": "15796"
},
{
"name": "Python",
"bytes": "52466"
}
],
"symlink_target": ""
}
|
from page_sets.rendering import rendering_story
from page_sets.rendering import story_tags
from page_sets.system_health import platforms
class ToughWebglPage(rendering_story.RenderingStory):
  """Abstract base class for demanding WebGL benchmark stories.

  Enables WebGL draft extensions and explicitly disables the
  V8TurboFastApiCalls feature (MakeFastCallVariant later re-enables it for
  the *FastCall variants by editing the same args list).
  """
  ABSTRACT_STORY = True
  TAGS = [story_tags.REQUIRED_WEBGL, story_tags.TOUGH_WEBGL]
  def __init__(self,
               page_set,
               shared_page_state_class,
               name_suffix='',
               extra_browser_args=None):
    if extra_browser_args is None:
      extra_browser_args = []
    extra_browser_args.append("--enable-webgl-draft-extensions")
    extra_browser_args.append("--disable-features=V8TurboFastApiCalls")
    super(ToughWebglPage, self).__init__(
        page_set=page_set,
        shared_page_state_class=shared_page_state_class,
        name_suffix=name_suffix,
        extra_browser_args=extra_browser_args,
        make_javascript_deterministic=False)
  @property
  def skipped_gpus(self):
    # crbug.com/462729
    return ['arm', 'broadcom', 'hisilicon', 'imagination', 'vivante']
  def RunNavigateSteps(self, action_runner):
    # Navigate, wait for full page load, then settle for 2 s.
    super(ToughWebglPage, self).RunNavigateSteps(action_runner)
    action_runner.WaitForJavaScriptCondition(
        'document.readyState == "complete"')
    action_runner.Wait(2)
  def RunPageInteractions(self, action_runner):
    # Record a 10 s window of the running WebGL animation.
    with action_runner.CreateInteraction('WebGLAnimation'):
      action_runner.Wait(10)
class NvidiaVertexBufferObjectPage(ToughWebglPage):
  """NVIDIA vertex-buffer-object demo from the Khronos WebGL SDK."""
  BASE_NAME = 'nvidia_vertex_buffer_object'
  # pylint: disable=line-too-long
  URL = 'http://www.khronos.org/registry/webgl/sdk/demos/google/nvidia-vertex-buffer-object/index.html'
  TAGS = ToughWebglPage.TAGS + [story_tags.REPRESENTATIVE_WIN_DESKTOP]
class SansAngelesPage(ToughWebglPage):
  """San Angeles demo from the Khronos WebGL SDK.

  NOTE(review): class name reads 'SansAngeles' — presumably a typo for
  'SanAngeles'; left as-is since renaming could break external references.
  """
  BASE_NAME = 'san_angeles'
  # pylint: disable=line-too-long
  URL = 'http://www.khronos.org/registry/webgl/sdk/demos/google/san-angeles/index.html'
class ParticlesPage(ToughWebglPage):
  """Particles demo from the Khronos WebGL SDK."""
  BASE_NAME = 'particles'
  # pylint: disable=line-too-long
  URL = 'http://www.khronos.org/registry/webgl/sdk/demos/google/particles/index.html'
class EarthPage(ToughWebglPage):
  """Earth demo from the Khronos WebKit WebGL demos."""
  BASE_NAME = 'earth'
  URL = 'http://www.khronos.org/registry/webgl/sdk/demos/webkit/Earth.html'
class ManyPlanetsDeepPage(ToughWebglPage):
  """ManyPlanetsDeep demo from the Khronos WebKit WebGL demos."""
  BASE_NAME = 'many_planets_deep'
  # pylint: disable=line-too-long
  URL = 'http://www.khronos.org/registry/webgl/sdk/demos/webkit/ManyPlanetsDeep.html'
  TAGS = ToughWebglPage.TAGS + [story_tags.REPRESENTATIVE_WIN_DESKTOP]
class Aquarium(ToughWebglPage):
  """WebGL Aquarium from webglsamples.org (default fish count)."""
  BASE_NAME = 'aquarium'
  URL = 'http://webglsamples.org/aquarium/aquarium.html'
  TAGS = ToughWebglPage.TAGS + [story_tags.REPRESENTATIVE_WIN_DESKTOP]
class Aquarium20KFish(ToughWebglPage):
  """WebGL Aquarium stressed with 20,000 fish (numFish=20000)."""
  BASE_NAME = 'aquarium_20k'
  URL = 'http://webglsamples.org/aquarium/aquarium.html?numFish=20000'
  TAGS = ToughWebglPage.TAGS + [story_tags.REPRESENTATIVE_WIN_DESKTOP]
class Blob(ToughWebglPage):
  """Blob demo from webglsamples.org."""
  BASE_NAME = 'blob'
  URL = 'http://webglsamples.org/blob/blob.html'
class DynamicCubeMap(ToughWebglPage):
  """Dynamic cube-map demo from webglsamples.org."""
  BASE_NAME = 'dynamic_cube_map'
  URL = 'http://webglsamples.org/dynamic-cubemap/dynamic-cubemap.html'
class AnimometerWebGL(ToughWebglPage):
  """Animometer WebGL test with default parameters."""
  BASE_NAME = 'animometer_webgl'
  # pylint: disable=line-too-long
  URL = 'http://kenrussell.github.io/webgl-animometer/Animometer/tests/3d/webgl.html'
class AnimometerWebGLMultiDraw(ToughWebglPage):
  """Animometer using WebGL 2, UBOs and multi-draw (see URL query params)."""
  BASE_NAME = 'animometer_webgl_multi_draw'
  # pylint: disable=line-too-long
  URL = 'http://kenrussell.github.io/webgl-animometer/Animometer/tests/3d/webgl.html?webgl_version=2&use_ubos=1&use_multi_draw=1'
class AnimometerWebGLIndexed(ToughWebglPage):
  """Indexed/instanced Animometer, WebGL 2, 120k geometries; desktop only."""
  BASE_NAME = 'animometer_webgl_indexed'
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  # pylint: disable=line-too-long
  URL = 'http://kenrussell.github.io/webgl-animometer/Animometer/tests/3d/webgl-indexed-instanced.html?webgl_version=2&use_attributes=1&num_geometries=120000'
class AnimometerWebGLIndexedMultiDraw(ToughWebglPage):
  """Indexed/instanced Animometer with multi-draw enabled; desktop only."""
  BASE_NAME = 'animometer_webgl_indexed_multi_draw'
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  # pylint: disable=line-too-long
  URL = 'http://kenrussell.github.io/webgl-animometer/Animometer/tests/3d/webgl-indexed-instanced.html?webgl_version=2&use_attributes=1&use_multi_draw=1&num_geometries=120000'
class AnimometerWebGLIndexedBaseVertexBaseInstance(ToughWebglPage):
  """Indexed/instanced Animometer with multi-draw and base-vertex/
  base-instance enabled; desktop only."""
  BASE_NAME = 'animometer_webgl_indexed_multi_draw_base_vertex_base_instance'
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  # pylint: disable=line-too-long
  URL = 'http://kenrussell.github.io/webgl-animometer/Animometer/tests/3d/webgl-indexed-instanced.html?webgl_version=2&use_attributes=1&use_multi_draw=1&use_base_vertex_base_instance=1&num_geometries=120000'
class AnimometerWebGLAttribArrays(ToughWebglPage):
  """Animometer variant driven through vertex attribute arrays."""
  BASE_NAME = 'animometer_webgl_attrib_arrays'
  # pylint: disable=line-too-long
  URL = 'http://kenrussell.github.io/webgl-animometer/Animometer/tests/3d/webgl.html?use_attributes=1'
  TAGS = ToughWebglPage.TAGS + [
      story_tags.REPRESENTATIVE_MAC_DESKTOP
  ]
class CameraToWebGL(ToughWebglPage):
  """Texture-from-camera upload stress test; tagged to use a fake camera."""
  TAGS = ToughWebglPage.TAGS + [story_tags.USE_FAKE_CAMERA_DEVICE]
  BASE_NAME = 'camera_to_webgl'
  # pylint: disable=line-too-long
  URL = 'https://www.khronos.org/registry/webgl/sdk/tests/extra/texture-from-camera-stress.html?uploadsPerFrame=200'
class UnityPage(ToughWebglPage):
  """Abstract base for Unity-engine WebGL/Wasm stories.

  Waits longer than ToughWebglPage: an extra 10 s after navigation for
  loading screens, then records a 30 s interaction window.
  """
  ABSTRACT_STORY = True
  def RunNavigateSteps(self, action_runner):
    super(UnityPage, self).RunNavigateSteps(action_runner)
    # Wait an additional 10 seconds for any loading screens
    # or interaction to click "Play"
    action_runner.Wait(10)
  def RunPageInteractions(self, action_runner):
    with action_runner.CreateInteraction('WebGLAnimation'):
      action_runner.Wait(30)
class SkelebuddiesWasm2020(UnityPage):
  """Unity Skelebuddies Wasm release profiling build (2020-10-26)."""
  BASE_NAME = 'skelebuddies_wasm_2020'
  # pylint: disable=line-too-long
  URL = 'http://clb.confined.space/emunittest/Skelebuddies-Wasm-Release-2020-10-26-profiling/Skelebuddies.html?playback'
class TinyRacingV3Wasm2020(UnityPage):
  """Unity TinyRacing v3 Wasm release build (2020-03-17)."""
  BASE_NAME = 'tiny_racing_v3_wasm_2020'
  # pylint: disable=line-too-long
  URL = 'http://clb.confined.space/emunittest/llvm-tinyracing-wasm-release-2020-03-17/TinyRacing.html?playback'
class MicrogameFPS(UnityPage):
  """Unity FPS microgame Wasm release build."""
  BASE_NAME = 'microgame_fps'
  # pylint: disable=line-too-long
  URL = 'http://clb.confined.space/emunittest/microgame-fps_20190922_131915_wasm_release_profiling/index.html?playback'
class LostCrypt(UnityPage):
  """Unity LostCrypt Wasm release build; desktop only."""
  BASE_NAME = 'lost_crypt'
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  # pylint: disable=line-too-long
  URL = 'http://clb.confined.space/emunittest/LostCrypt_20191220_131436_wasm_release/index.html?playback'
def MakeFastCallVariant(cls):
  """Return a subclass of |cls| with the V8TurboFastApiCalls feature enabled.

  The generated class is named '<cls.__name__>FastCall', gets BASE_NAME
  '<cls.BASE_NAME>_fast_call', and restricts SUPPORTED_PLATFORMS to the
  desktop subset of |cls|'s platforms.
  """
  def __init__(self,
               page_set,
               shared_page_state_class,
               name_suffix='',
               extra_browser_args=None):
    if extra_browser_args is None:
      extra_browser_args = []
    super(cls, self).__init__(page_set=page_set,
                              shared_page_state_class=shared_page_state_class,
                              name_suffix=name_suffix,
                              extra_browser_args=extra_browser_args)
    # This has to be after the superclass init in order to override the args
    # added by ToughWebglPage.__init__
    # NOTE(review): relies on the superclass keeping a reference to this
    # exact list rather than copying it — TODO confirm in RenderingStory.
    extra_browser_args.remove("--disable-features=V8TurboFastApiCalls")
    extra_browser_args.append("--enable-features=V8TurboFastApiCalls")
  return type(
      cls.__name__ + 'FastCall', (cls,), {
          'BASE_NAME':
              cls.BASE_NAME + '_fast_call',
          'SUPPORTED_PLATFORMS':
              cls.SUPPORTED_PLATFORMS.intersection(platforms.DESKTOP_ONLY),
          '__init__':
              __init__,
      })
# Fast-call variants of representative WebGL/Wasm stories, used to compare
# behavior with the V8TurboFastApiCalls feature enabled (desktop only).
AnimometerWebGLFastCall = MakeFastCallVariant(AnimometerWebGL)
AnimometerWebGLIndexedFastCall = MakeFastCallVariant(AnimometerWebGLIndexed)
Aquarium20KFishFastCall = MakeFastCallVariant(Aquarium20KFish)
SkelebuddiesWasm2020FastCall = MakeFastCallVariant(SkelebuddiesWasm2020)
TinyRacingV3Wasm2020FastCall = MakeFastCallVariant(TinyRacingV3Wasm2020)
MicrogameFPSFastCall = MakeFastCallVariant(MicrogameFPS)
LostCryptFastCall = MakeFastCallVariant(LostCrypt)
|
{
"content_hash": "6782921a43f79d22c7e4f5cf7d2a9ec1",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 207,
"avg_line_length": 38.466981132075475,
"alnum_prop": 0.7347639484978541,
"repo_name": "scheib/chromium",
"id": "0cb555a1d75f9b8bb58af54fb600febf5ab2ee1a",
"size": "8318",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tools/perf/page_sets/rendering/tough_webgl_cases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import unittest
import mock
from ..views import PostingDetailView
from ..models import JobPosting
class PostingDetailViewTestCase(unittest.TestCase):
    """Unit tests for employer.views.PostingDetailView."""
    def test_model_should_reference_job_posting_model(self):
        """The view's ``model`` attribute must be the JobPosting class itself."""
        # setup
        view = PostingDetailView()
        # assert
        self.assertEqual(id(JobPosting), id(view.model))
    def test_should_call_template_response_with_template(self):
        """get() should render through response_class with the detail template."""
        # setup: stub out everything the generic detail view touches so only
        # the template selection is exercised
        view = PostingDetailView()
        request = mock.Mock()
        view.request = request
        view.get_context_data = mock.Mock()
        view.response_class = mock.Mock()
        view.get_object = mock.Mock(return_value=JobPosting())
        template_name = 'employer/posting_detail.html'
        # action
        view.get(request)
        # assert
        self.assertEqual(1, view.response_class.call_count)
        self.assertEqual(template_name,
                         view.response_class.call_args[1]['template'][0])
|
{
"content_hash": "24698a0e4b64dac71e309864c7cb5b37",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 63,
"avg_line_length": 29.5,
"alnum_prop": 0.6483050847457628,
"repo_name": "hellhound/dentexchange",
"id": "d63716c05b0c1dc4d67398d7e2711de151f06dd5",
"size": "967",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dentexchange/apps/employer/tests/test_posting_detail_view.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6611"
},
{
"name": "JavaScript",
"bytes": "23966"
},
{
"name": "Python",
"bytes": "563289"
},
{
"name": "Shell",
"bytes": "2274"
}
],
"symlink_target": ""
}
|
import datetime
from p1tr.helpers import clean_string, humanize_time
from p1tr.plugin import *
class Seen(Plugin):
    """
    Tracks all user's most recent time of activity.

    Each IRC event handler records a (timestamp, channel, activity) tuple
    per nick in persistent plugin storage; the "seen" command reads it back.
    """
    def initialize(self):
        Plugin.__init__(self)
        # Storage format: key = username,
        # value = (timestamp, channel, lastActivity)
        self.memory = self.load_storage('memory')
    @command
    def seen(self, server, channel, nick, params):
        """
        Usage: seen NICK - Shows how long ago the given nick was seen for the
        last time, and what they were doing then.
        """
        # No argument given: reply with the usage text above.
        if not params:
            return clean_string(self.seen.__doc__)
        subject = params[0]
        if subject not in self.memory:
            return 'I have not seen %s before.' % subject
        entry = self.memory[subject]
        return '%s was last seen %s ago in %s, %s.' % (subject,
                humanize_time(datetime.datetime.now() - entry[0]),
                entry[1], entry[2])
    def _remember(self, channel, nick, activity):
        """Helper for saving user activities to memory."""
        # nick arrives as "name!user@host"; key on the name part only.
        self.memory[nick.split('!')[0]] = (datetime.datetime.now(), channel,
                activity)
    def on_privmsg(self, server, channel, nick, message):
        self._remember(channel, nick, 'saying "%s"' % message)
    def on_useraction(self, server, channel, nick, message):
        self._remember(channel, nick,
                'saying "* %s %s"' % (nick.split('!')[0], message))
    def on_userjoin(self, server, channel, nick):
        self._remember(channel, nick, 'joining the channel')
    def on_userpart(self, server, channel, nick, message):
        activity = 'leaving the channel'
        if message:
            activity += ', saying "%s"' % message
        self._remember(channel, nick, activity)
    def on_userkicked(self, server, channel, nick, reason):
        activity = 'getting kicked'
        if reason:
            activity += ' because: %s' % reason
        self._remember(channel, nick, activity)
    def on_userrenamed(self, server, oldnick, newnick):
        self._remember('some channel', oldnick, 'changing his nick to %s' %
                newnick)
|
{
"content_hash": "a20e6040f2090a4a8ab6e280be7023a4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 36.60655737704918,
"alnum_prop": 0.5875503806538289,
"repo_name": "howard/p1tr-tng",
"id": "73795d81f8f5dd2bf7777b2e3676a7074230dbc1",
"size": "2233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/seen/seen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Elixir",
"bytes": "3175"
},
{
"name": "Python",
"bytes": "117228"
},
{
"name": "Shell",
"bytes": "2683"
}
],
"symlink_target": ""
}
|
from tastypie.resources import ModelResource
from tastypie.fields import ToOneField, ToManyField, CharField # pyflakes:ignore
from tastypie.constants import ALL, ALL_WITH_RELATIONS # pyflakes:ignore
from ietf import api
from ietf.doc.models import * # pyflakes:ignore
from ietf.name.resources import BallotPositionNameResource, DocTypeNameResource
class BallotTypeResource(ModelResource):
    """Tastypie REST resource for the BallotType model."""
    doc_type = ToOneField(DocTypeNameResource, 'doc_type', null=True)
    positions = ToManyField(BallotPositionNameResource, 'positions', null=True)
    class Meta:
        queryset = BallotType.objects.all()
        serializer = api.Serializer()
        #resource_name = 'ballottype'
        filtering = {
            "id": ALL,
            "slug": ALL,
            "name": ALL,
            "question": ALL,
            "used": ALL,
            "order": ALL,
            "doc_type": ALL_WITH_RELATIONS,
            "positions": ALL_WITH_RELATIONS,
        }
api.doc.register(BallotTypeResource())
from ietf.person.resources import PersonResource
from ietf.utils.resources import ContentTypeResource
class DeletedEventResource(ModelResource):
    """Tastypie REST resource for the DeletedEvent model."""
    content_type = ToOneField(ContentTypeResource, 'content_type')
    by = ToOneField(PersonResource, 'by')
    class Meta:
        queryset = DeletedEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'deletedevent'
        filtering = {
            "id": ALL,
            "json": ALL,
            "time": ALL,
            "content_type": ALL_WITH_RELATIONS,
            "by": ALL_WITH_RELATIONS,
        }
api.doc.register(DeletedEventResource())
class StateTypeResource(ModelResource):
    """Tastypie REST resource for the StateType model."""
    class Meta:
        queryset = StateType.objects.all()
        serializer = api.Serializer()
        #resource_name = 'statetype'
        filtering = {
            "slug": ALL,
            "label": ALL,
        }
api.doc.register(StateTypeResource())
class StateResource(ModelResource):
    """Tastypie REST resource for the State model."""
    type = ToOneField(StateTypeResource, 'type')
    # Self-referential relation, hence the lazy string reference.
    next_states = ToManyField('ietf.doc.resources.StateResource', 'next_states', null=True)
    class Meta:
        queryset = State.objects.all()
        serializer = api.Serializer()
        #resource_name = 'state'
        filtering = {
            "id": ALL,
            "slug": ALL,
            "name": ALL,
            "used": ALL,
            "desc": ALL,
            "order": ALL,
            "type": ALL_WITH_RELATIONS,
            "next_states": ALL_WITH_RELATIONS,
        }
api.doc.register(StateResource())
from ietf.person.resources import PersonResource, EmailResource
from ietf.group.resources import GroupResource
from ietf.name.resources import StdLevelNameResource, StreamNameResource, DocTypeNameResource, DocTagNameResource, IntendedStdLevelNameResource
class DocumentResource(ModelResource):
    """Tastypie REST resource for the Document model."""
    type = ToOneField(DocTypeNameResource, 'type', null=True)
    stream = ToOneField(StreamNameResource, 'stream', null=True)
    group = ToOneField(GroupResource, 'group', null=True)
    intended_std_level = ToOneField(IntendedStdLevelNameResource, 'intended_std_level', null=True)
    std_level = ToOneField(StdLevelNameResource, 'std_level', null=True)
    ad = ToOneField(PersonResource, 'ad', null=True)
    shepherd = ToOneField(EmailResource, 'shepherd', null=True)
    states = ToManyField(StateResource, 'states', null=True)
    tags = ToManyField(DocTagNameResource, 'tags', null=True)
    authors = ToManyField(EmailResource, 'authors', null=True)
    # Computed field: serialized from the model's rfc_number attribute.
    rfc = CharField(attribute='rfc_number', null=True)
    class Meta:
        queryset = Document.objects.all()
        serializer = api.Serializer()
        #resource_name = 'document'
        filtering = {
            "time": ALL,
            "title": ALL,
            "abstract": ALL,
            "rev": ALL,
            "pages": ALL,
            "order": ALL,
            "expires": ALL,
            "notify": ALL,
            "external_url": ALL,
            "note": ALL,
            "internal_comments": ALL,
            "name": ALL,
            "type": ALL_WITH_RELATIONS,
            "stream": ALL_WITH_RELATIONS,
            "group": ALL_WITH_RELATIONS,
            "intended_std_level": ALL_WITH_RELATIONS,
            "std_level": ALL_WITH_RELATIONS,
            "ad": ALL_WITH_RELATIONS,
            "shepherd": ALL_WITH_RELATIONS,
            "states": ALL_WITH_RELATIONS,
            "tags": ALL_WITH_RELATIONS,
            "authors": ALL_WITH_RELATIONS,
        }
api.doc.register(DocumentResource())
from ietf.person.resources import EmailResource
class DocumentAuthorResource(ModelResource):
    """Tastypie REST resource for the DocumentAuthor model."""
    document = ToOneField(DocumentResource, 'document')
    author = ToOneField(EmailResource, 'author')
    class Meta:
        queryset = DocumentAuthor.objects.all()
        serializer = api.Serializer()
        #resource_name = 'documentauthor'
        filtering = {
            "id": ALL,
            "order": ALL,
            "document": ALL_WITH_RELATIONS,
            "author": ALL_WITH_RELATIONS,
        }
api.doc.register(DocumentAuthorResource())
from ietf.person.resources import PersonResource
class DocEventResource(ModelResource):
    """Tastypie REST resource for the DocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    class Meta:
        queryset = DocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'docevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
        }
api.doc.register(DocEventResource())
from ietf.person.resources import PersonResource
class StateDocEventResource(ModelResource):
    """Tastypie REST resource for the StateDocEvent model (DocEvent subtype)."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    state_type = ToOneField(StateTypeResource, 'state_type')
    state = ToOneField(StateResource, 'state', null=True)
    class Meta:
        queryset = StateDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'statedocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
            "state_type": ALL_WITH_RELATIONS,
            "state": ALL_WITH_RELATIONS,
        }
api.doc.register(StateDocEventResource())
from ietf.person.resources import PersonResource, EmailResource
from ietf.group.resources import GroupResource
from ietf.name.resources import StdLevelNameResource, StreamNameResource, DocTypeNameResource, DocTagNameResource, IntendedStdLevelNameResource
class DocHistoryResource(ModelResource):
    """Tastypie REST resource for the DocHistory model (document snapshots)."""
    type = ToOneField(DocTypeNameResource, 'type', null=True)
    stream = ToOneField(StreamNameResource, 'stream', null=True)
    group = ToOneField(GroupResource, 'group', null=True)
    intended_std_level = ToOneField(IntendedStdLevelNameResource, 'intended_std_level', null=True)
    std_level = ToOneField(StdLevelNameResource, 'std_level', null=True)
    ad = ToOneField(PersonResource, 'ad', null=True)
    shepherd = ToOneField(EmailResource, 'shepherd', null=True)
    doc = ToOneField(DocumentResource, 'doc')
    states = ToManyField(StateResource, 'states', null=True)
    tags = ToManyField(DocTagNameResource, 'tags', null=True)
    authors = ToManyField(EmailResource, 'authors', null=True)
    class Meta:
        queryset = DocHistory.objects.all()
        serializer = api.Serializer()
        #resource_name = 'dochistory'
        filtering = {
            "id": ALL,
            "time": ALL,
            "title": ALL,
            "abstract": ALL,
            "rev": ALL,
            "pages": ALL,
            "order": ALL,
            "expires": ALL,
            "notify": ALL,
            "external_url": ALL,
            "note": ALL,
            "internal_comments": ALL,
            "name": ALL,
            "type": ALL_WITH_RELATIONS,
            "stream": ALL_WITH_RELATIONS,
            "group": ALL_WITH_RELATIONS,
            "intended_std_level": ALL_WITH_RELATIONS,
            "std_level": ALL_WITH_RELATIONS,
            "ad": ALL_WITH_RELATIONS,
            "shepherd": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "states": ALL_WITH_RELATIONS,
            "tags": ALL_WITH_RELATIONS,
            "authors": ALL_WITH_RELATIONS,
        }
api.doc.register(DocHistoryResource())
from ietf.person.resources import PersonResource
class ConsensusDocEventResource(ModelResource):
    """Tastypie REST resource for the ConsensusDocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    class Meta:
        queryset = ConsensusDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'consensusdocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "consensus": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
        }
api.doc.register(ConsensusDocEventResource())
class DocAliasResource(ModelResource):
    """Tastypie REST resource for the DocAlias model."""
    document = ToOneField(DocumentResource, 'document')
    class Meta:
        queryset = DocAlias.objects.all()
        serializer = api.Serializer()
        #resource_name = 'docalias'
        filtering = {
            "name": ALL,
            "document": ALL_WITH_RELATIONS,
        }
api.doc.register(DocAliasResource())
from ietf.person.resources import PersonResource
class TelechatDocEventResource(ModelResource):
    """Tastypie REST resource for the TelechatDocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    class Meta:
        queryset = TelechatDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'telechatdocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "telechat_date": ALL,
            "returning_item": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
        }
api.doc.register(TelechatDocEventResource())
from ietf.name.resources import DocReminderTypeNameResource
class DocReminderResource(ModelResource):
    """Tastypie REST resource for the DocReminder model."""
    event = ToOneField(DocEventResource, 'event')
    type = ToOneField(DocReminderTypeNameResource, 'type')
    class Meta:
        queryset = DocReminder.objects.all()
        serializer = api.Serializer()
        #resource_name = 'docreminder'
        filtering = {
            "id": ALL,
            "due": ALL,
            "active": ALL,
            "event": ALL_WITH_RELATIONS,
            "type": ALL_WITH_RELATIONS,
        }
api.doc.register(DocReminderResource())
from ietf.person.resources import PersonResource
class LastCallDocEventResource(ModelResource):
    """Tastypie REST resource for the LastCallDocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    class Meta:
        queryset = LastCallDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'lastcalldocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "expires": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
        }
api.doc.register(LastCallDocEventResource())
from ietf.person.resources import PersonResource
class NewRevisionDocEventResource(ModelResource):
    """Tastypie REST resource for the NewRevisionDocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    class Meta:
        queryset = NewRevisionDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'newrevisiondocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "rev": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
        }
api.doc.register(NewRevisionDocEventResource())
from ietf.person.resources import PersonResource
class WriteupDocEventResource(ModelResource):
    """Tastypie REST resource for the WriteupDocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    class Meta:
        queryset = WriteupDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'writeupdocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "text": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
        }
api.doc.register(WriteupDocEventResource())
from ietf.person.resources import PersonResource
class InitialReviewDocEventResource(ModelResource):
    """Tastypie REST resource for the InitialReviewDocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    class Meta:
        queryset = InitialReviewDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'initialreviewdocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "expires": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
        }
api.doc.register(InitialReviewDocEventResource())
from ietf.person.resources import EmailResource
class DocHistoryAuthorResource(ModelResource):
    """Tastypie REST resource for the DocHistoryAuthor model."""
    document = ToOneField(DocHistoryResource, 'document')
    author = ToOneField(EmailResource, 'author')
    class Meta:
        queryset = DocHistoryAuthor.objects.all()
        serializer = api.Serializer()
        #resource_name = 'dochistoryauthor'
        filtering = {
            "id": ALL,
            "order": ALL,
            "document": ALL_WITH_RELATIONS,
            "author": ALL_WITH_RELATIONS,
        }
api.doc.register(DocHistoryAuthorResource())
from ietf.person.resources import PersonResource
class BallotDocEventResource(ModelResource):
    """Tastypie REST resource for the BallotDocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    ballot_type = ToOneField(BallotTypeResource, 'ballot_type')
    class Meta:
        queryset = BallotDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'ballotdocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
            "ballot_type": ALL_WITH_RELATIONS,
        }
api.doc.register(BallotDocEventResource())
from ietf.name.resources import DocRelationshipNameResource
class RelatedDocumentResource(ModelResource):
    """Tastypie REST resource for the RelatedDocument model."""
    source = ToOneField(DocumentResource, 'source')
    target = ToOneField(DocAliasResource, 'target')
    relationship = ToOneField(DocRelationshipNameResource, 'relationship')
    class Meta:
        queryset = RelatedDocument.objects.all()
        serializer = api.Serializer()
        #resource_name = 'relateddocument'
        filtering = {
            "id": ALL,
            "source": ALL_WITH_RELATIONS,
            "target": ALL_WITH_RELATIONS,
            "relationship": ALL_WITH_RELATIONS,
        }
api.doc.register(RelatedDocumentResource())
from ietf.name.resources import DocRelationshipNameResource
class RelatedDocHistoryResource(ModelResource):
    """Tastypie REST resource for the RelatedDocHistory model."""
    source = ToOneField(DocHistoryResource, 'source')
    target = ToOneField(DocAliasResource, 'target')
    relationship = ToOneField(DocRelationshipNameResource, 'relationship')
    class Meta:
        queryset = RelatedDocHistory.objects.all()
        serializer = api.Serializer()
        #resource_name = 'relateddochistory'
        filtering = {
            "id": ALL,
            "source": ALL_WITH_RELATIONS,
            "target": ALL_WITH_RELATIONS,
            "relationship": ALL_WITH_RELATIONS,
        }
api.doc.register(RelatedDocHistoryResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import BallotPositionNameResource
class BallotPositionDocEventResource(ModelResource):
    """Tastypie REST resource for the BallotPositionDocEvent model."""
    by = ToOneField(PersonResource, 'by')
    doc = ToOneField(DocumentResource, 'doc')
    docevent_ptr = ToOneField(DocEventResource, 'docevent_ptr')
    ballot = ToOneField(BallotDocEventResource, 'ballot', null=True)
    ad = ToOneField(PersonResource, 'ad')
    pos = ToOneField(BallotPositionNameResource, 'pos')
    class Meta:
        queryset = BallotPositionDocEvent.objects.all()
        serializer = api.Serializer()
        #resource_name = 'ballotpositiondocevent'
        filtering = {
            "id": ALL,
            "time": ALL,
            "type": ALL,
            "desc": ALL,
            "discuss": ALL,
            "discuss_time": ALL,
            "comment": ALL,
            "comment_time": ALL,
            "by": ALL_WITH_RELATIONS,
            "doc": ALL_WITH_RELATIONS,
            "docevent_ptr": ALL_WITH_RELATIONS,
            "ballot": ALL_WITH_RELATIONS,
            "ad": ALL_WITH_RELATIONS,
            "pos": ALL_WITH_RELATIONS,
        }
api.doc.register(BallotPositionDocEventResource())
|
{
"content_hash": "e668d74d1c88224692ff05d389860558",
"timestamp": "",
"source": "github",
"line_count": 486,
"max_line_length": 143,
"avg_line_length": 38.19547325102881,
"alnum_prop": 0.5926305015353122,
"repo_name": "wpjesus/codematch",
"id": "93ecea04d7558f7c46a931ca1c734936a6ac10a0",
"size": "18640",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ietf/doc/resources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "139492"
},
{
"name": "CSS",
"bytes": "733662"
},
{
"name": "Groff",
"bytes": "2349"
},
{
"name": "HTML",
"bytes": "2149789"
},
{
"name": "JavaScript",
"bytes": "1003699"
},
{
"name": "Makefile",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "17323"
},
{
"name": "PostScript",
"bytes": "35"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "4536908"
},
{
"name": "Shell",
"bytes": "74113"
},
{
"name": "TeX",
"bytes": "2556"
}
],
"symlink_target": ""
}
|
"""
Primarily dashboards. Generic plotting is in sidpy
Submodules
----------
.. autosummary::
:toctree: _autosummary
"""
__all__ = ['']
|
{
"content_hash": "c42c61b78f3a26748ab983edd91bb745",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 50,
"avg_line_length": 12,
"alnum_prop": 0.5902777777777778,
"repo_name": "pycroscopy/pycroscopy",
"id": "f230cf7585b6f9dbd236dd1b6aa13b3df87b7f7a",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pycroscopy/viz/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "871343"
},
{
"name": "Python",
"bytes": "165921"
}
],
"symlink_target": ""
}
|
"""
Module to get information from Keystone DB
"""
__author__ = 'gpetralia'
import json
from contextlib import closing
import mysql.connector as MySQLdb
class KeystoneDb():
    """
    Exposes methods to get information regarding Keystone resources.
    It manages the connection to the Keystone DB
    """
    def __init__(self, host, usr, pwd, db):
        """Open a MySQL connection to the Keystone database.

        :param host: DB host
        :param usr: DB user
        :param pwd: DB password
        :param db: Keystone schema name
        """
        self.conn = None
        self.conn = MySQLdb.connect(host=host,
                                    user=usr,
                                    passwd=pwd,
                                    db=db)
    def get_controller_services(self, controller_ip, controller_hostname, service_type=None):
        """
        Return a dict containing the controller services registered to Keystone.
        If an service_type is given, it will return only the service with the given type
        :param controller_ip: IP of the OpenStack Controller
        :param controller_hostname: Hostname of the OpenStack controller
        :param service_type: Optional type of the desired service
        :return dict: contains controller services information
        """
        res = {}
        with closing(self.conn.cursor()) as cur:
            # NOTE(review): query is built by string concatenation; values are
            # expected to come from trusted config, but parameterized queries
            # would be safer (SQL injection risk if they are ever untrusted).
            query = 'select service.id, service.extra, service.type, endpoint.interface, endpoint.url ' \
                    'from service join endpoint on service.id = endpoint.service_id ' \
                    'where endpoint.url like "%' + controller_ip + '%" ' \
                    'and ( service.type = "volume" or service.type = "compute" ' \
                    'or service.type = "network" or service.type = "orchestration" or service.type = "image") ' \
                    'and service.enabled = 1'
            if service_type:
                # Bug fix: the appended filter was missing the "and" keyword,
                # which produced invalid SQL whenever service_type was given.
                query += ' and service.type = "' + service_type + '"'
            cur.execute(query)
            for row in cur.fetchall():
                service_id, extra_json, stype, interface, url = row
                # The "extra" column stores a JSON blob of service metadata.
                extra = json.loads(extra_json)
                if service_id in res:
                    # Service already seen: just attach this extra endpoint.
                    res[service_id]['attributes']['endpoints'][interface] = url
                else:
                    res[service_id] = {
                        'resource_type': 'service',
                        'type': 'controller-service',
                        'name': stype,
                        'hostname': controller_hostname,
                        'controller_service': stype,
                        'attributes': {
                            'extra': extra,
                            'endpoints': {interface: url},
                        },
                    }
        return res
    def get_nova_controller_uuid(self):
        """
        Return the UUID of the Nova controller
        :return string: UUID of Nova controller
        """
        return self._get_service_uuid('compute')
    def get_heat_controller_uuid(self):
        """
        Return the UUID of the Heat controller
        :return string: UUID of Heat controller
        """
        return self._get_service_uuid('orchestration')
    def _get_service_uuid(self, stype):
        """Return the id of the first service row of type ``stype``, or None.

        Consolidates the duplicated Nova/Heat lookups and fixes a cursor
        leak: the originals never closed their cursors.
        """
        # stype is always one of our own literals, never user input.
        with closing(self.conn.cursor()) as cur:
            cur.execute('select id from service where type = "%s"' % stype)
            row = cur.fetchone()
        return row[0] if row else None
|
{
"content_hash": "64a33a674bb59e54d26385d49b3a319e",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 113,
"avg_line_length": 37.75555555555555,
"alnum_prop": 0.5341377280753384,
"repo_name": "IntelLabsEurope/infrastructure-repository",
"id": "65968a13b402b2c8c5767ee806cb2fb47a839997",
"size": "3980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitoring_service/epa_database/openstack/keystone_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "263216"
}
],
"symlink_target": ""
}
|
"""RunAbove image service library."""
from base import Resource, BaseManagerWithList
class ImageManager(BaseManagerWithList):
    """Manage images available in RunAbove."""

    basepath = '/image'

    def get_by_id(self, image_id=None):
        """Fetch a single image from a RunAbove account.

        :param image_id: ID of the image to retrieve
        """
        encoded = self._api.encode_for_api(image_id)
        raw = self._api.get(self.basepath + '/' + encoded)
        return self._dict_to_obj(raw)

    def _dict_to_obj(self, key):
        """Build an ``Image`` object from an API response dict."""
        region_obj = self._handler.regions._name_to_obj(key['region'])
        return Image(self, key['id'], key.get('name'), region=region_obj)
class Image(Resource):
    """Represents one image."""

    def __init__(self, manager, id, name, region):
        self._manager = manager
        self.id, self.name, self.region = id, name, region
|
{
"content_hash": "eb98f0130712697eaa06bf800e868565",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 70,
"avg_line_length": 27.16216216216216,
"alnum_prop": 0.5691542288557214,
"repo_name": "NicolasLM/python-runabove",
"id": "e699eea88af9a21c9cb8b0f357add9bbddc2783f",
"size": "2381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runabove/image.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Functionality related to versioning. This makes project version management
much easier.
"""
from __future__ import absolute_import
import re
from os.path import exists
# Accepted version strings: MAJOR.MINOR with an optional .PATCH suffix.
RE_VERSION = re.compile(
    r'^(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<patch>\d+))?$'
)


def bump_version(version, component='patch'):
    """ Bump the given version component.

    :param str version:
        The current version. The format is: MAJOR.MINOR[.PATCH].
    :param str component:
        What part of the version should be bumped. Can be one of:

        - major
        - minor
        - patch

    :return str:
        Bumped version as a string.

    :raises ValueError:
        If *component* is not recognized or *version* is malformed.
    """
    if component not in ('major', 'minor', 'patch'):
        raise ValueError("Invalid version component: {}".format(component))
    match = RE_VERSION.match(version)
    if match is None:
        raise ValueError("Version must be in MAJOR.MINOR[.PATCH] format")
    major, minor, patch = match.group('major', 'minor', 'patch')
    if component == 'major':
        # Bumping major resets minor and drops any patch part.
        return '{}.0'.format(int(major) + 1)
    if component == 'minor':
        # Bumping minor drops any patch part.
        return '{}.{}'.format(major, int(minor) + 1)
    # Bumping patch: a missing patch component counts as 0.
    return '{}.{}.{}'.format(major, minor, int(patch or 0) + 1)
def bump_version_file(version_file, component='patch'):
    """ Bump version stored in a file.

    :param str version_file:
        Path to the file storing the current version.
    :param str component:
        Version component to bump. Same as in `bump_version`.
    :return tuple(str, str):
        A ``(old_version, new_version)`` pair.
    :raises ValueError:
        If *version_file* does not exist, or the stored version is
        malformed (via `bump_version`).
    """
    if not exists(version_file):
        # Fixed garbled message ("VERSION file for does not exist") and
        # include the offending path for easier debugging.
        raise ValueError("Version file {} does not exist".format(version_file))
    with open(version_file) as fp:
        old_ver = fp.read().strip()
    new_ver = bump_version(old_ver, component)
    with open(version_file, 'w') as fp:
        fp.write(new_ver)
    return old_ver, new_ver
|
{
"content_hash": "25cbdcb5e8ab14a89f0bc16ea4f63722",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 75,
"avg_line_length": 23.348314606741575,
"alnum_prop": 0.588065447545717,
"repo_name": "novopl/fabutils",
"id": "d695e113eee5404053adc9d3bfbefe06c719691a",
"size": "2102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fabutils/versioning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26565"
},
{
"name": "Shell",
"bytes": "738"
}
],
"symlink_target": ""
}
|
"""
pykube.config unittests
"""
import os
from pykube import config, exceptions
from . import TestCase
GOOD_CONFIG_FILE_PATH = os.path.sep.join(["test", "test_config.yaml"])
DEFAULTUSER_CONFIG_FILE_PATH = os.path.sep.join(["test", "test_config_default_user.yaml"])
class TestConfig(TestCase):
    """Unit tests for config.KubeConfig."""

    def setUp(self):
        self.cfg = config.KubeConfig.from_file(GOOD_CONFIG_FILE_PATH)

    def tearDown(self):
        self.cfg = None

    def test_init(self):
        """
        Test Config instance creation.
        """
        # A valid file loads and remembers its path
        self.assertEqual(GOOD_CONFIG_FILE_PATH, self.cfg.filename)
        # A missing file must raise PyKubeError
        with self.assertRaises(exceptions.PyKubeError):
            config.KubeConfig.from_file("doesnotexist")

    def test_set_current_context(self):
        """
        Verify set_current_context works as expected.
        """
        self.cfg.set_current_context("new_context")
        self.assertEqual("new_context", self.cfg.current_context)

    def test_clusters(self):
        """
        Verify clusters works as expected.
        """
        expected = {"server": "http://localhost"}
        self.assertEqual(expected, self.cfg.clusters.get("thecluster", None))

    def test_users(self):
        """
        Verify users works as expected.
        """
        self.assertEqual("data", self.cfg.users.get("admin", None))

    def test_contexts(self):
        """
        Verify contexts works as expected.
        """
        expected = {"cluster": "thecluster", "user": "admin"}
        self.assertEqual(expected, self.cfg.contexts.get("thename", None))

    def test_cluster(self):
        """
        Verify cluster works as expected.
        """
        # Without a current_context this must fail
        with self.assertRaises(exceptions.PyKubeError):
            self.cfg.cluster
        self.cfg.set_current_context("thename")
        self.assertEqual({"server": "http://localhost"}, self.cfg.cluster)

    def test_user(self):
        """
        Verify user works as expected.
        """
        # Without a current_context this must fail
        with self.assertRaises(exceptions.PyKubeError):
            self.cfg.user
        self.cfg.set_current_context("thename")
        self.assertEqual("data", self.cfg.user)

    def test_default_user(self):
        """
        User can sometimes be specified as 'default' with no corresponding definition
        """
        cfg = config.KubeConfig.from_file(DEFAULTUSER_CONFIG_FILE_PATH)
        cfg.set_current_context("a_context")
        self.assertIsNotNone(cfg.user)

    def test_namespace(self):
        """Namespace falls back to 'default' unless the context sets one."""
        self.cfg.set_current_context("thename")
        self.assertEqual("default", self.cfg.namespace)
        self.cfg.set_current_context("context_with_namespace")
        self.assertEqual("foospace", self.cfg.namespace)
|
{
"content_hash": "2cc6ba93952dca30b877eff467e329dc",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 90,
"avg_line_length": 29.00854700854701,
"alnum_prop": 0.572775486152033,
"repo_name": "gralfca/pykube",
"id": "dc519d16c9353e5f49fc599307fcec59364ce16a",
"size": "3394",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "59897"
}
],
"symlink_target": ""
}
|
from django.db import models
class CommentForListDestroyModelMixin(models.Model):
    # Minimal model used by the list/destroy mixin functional tests;
    # a single email field is the only stored attribute.
    email = models.EmailField()
|
{
"content_hash": "487a829009d731a0e40c95b0d095b5d3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 52,
"avg_line_length": 23.2,
"alnum_prop": 0.7931034482758621,
"repo_name": "chibisov/drf-extensions",
"id": "9b3e92ea3cac41a8f7a8a1ab984c9ae2b05bcdbf",
"size": "116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests_app/tests/functional/mixins/list_destroy_model_mixin/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "245"
},
{
"name": "Python",
"bytes": "259901"
}
],
"symlink_target": ""
}
|
import os
import psycopg2
import sys
from psycopg2 import extensions as psy_ext
from conary.dbstore.base_drv import BaseDatabase, BaseCursor, BaseKeywordDict
from conary.dbstore import _mangle
from conary.dbstore import sqlerrors
from conary.dbstore import sqllib
class KeywordDict(BaseKeywordDict):
    """SQL keyword mapping with PostgreSQL-specific type overrides."""

    keys = BaseKeywordDict.keys.copy()
    keys.update(dict(
        PRIMARYKEY='SERIAL PRIMARY KEY',
        BLOB='BYTEA',
        MEDIUMBLOB='BYTEA',
        PATHTYPE='BYTEA',
        STRING='VARCHAR',
    ))

    def binaryVal(self, len):
        # PostgreSQL has a single variable-length binary type, so the
        # requested length is ignored.
        return "BYTEA"
class Cursor(BaseCursor):
    """dbstore cursor wrapper over a psycopg2 cursor.

    Rewrites conary-style '?' placeholders into psycopg2 format and
    converts driver exceptions into sqlerrors types.
    """
    # psycopg2 exchanges binary data as Python 2 buffer objects.
    binaryClass = buffer
    driver = "psycopg2"
    # Bound values need no manual encoding before execution.
    _encodeRequired = False
    def _tryExecute(self, func, *params, **kw):
        """Invoke func, re-raising any failure as a sqlerrors type while
        preserving the original traceback (Python 2 three-argument raise)."""
        try:
            return func(*params, **kw)
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit
        # and converts them into CursorError -- confirm this is intended.
        except:
            e_type, e_value, e_tb = sys.exc_info()
            e_value = self._convertError(e_value)
            raise type(e_value), e_value, e_tb
    @staticmethod
    def _fixStatement(statement):
        # Swap '?' placeholders for psycopg2's format markers.
        return _mangle.swapPlaceholders(statement)
    @staticmethod
    def _convertError(exc_value):
        """Map a psycopg2 error (by SQLSTATE pgcode) to a sqlerrors type.

        23503 -> ConstraintViolation, 42P01 -> InvalidTable,
        23505 -> ColumnNotUnique; anything else -> CursorError.
        """
        pgcode = getattr(exc_value, 'pgcode', None)
        if pgcode == '23503':
            new_type = sqlerrors.ConstraintViolation
        elif pgcode == '42P01':
            new_type = sqlerrors.InvalidTable
        elif pgcode == '23505':
            new_type = sqlerrors.ColumnNotUnique
        else:
            new_type = sqlerrors.CursorError
        new_value = new_type(str(exc_value))
        new_value.err_code = pgcode
        return new_value
    def execute(self, sql, *args, **kw):
        """Execute a statement with positional OR named bind arguments
        (never both); returns self for chaining."""
        sql = self._fixStatement(sql)
        self._executeCheck(sql)
        # start_transaction is accepted for API compatibility but ignored.
        kw.pop("start_transaction", True)
        args, kw = self._executeArgs(args, kw)
        # if we have args, we can not have keywords
        if args:
            if kw:
                raise sqlerrors.CursorError(
                    "Do not pass both positional and named bind arguments",
                    args, kw)
            ret = self._tryExecute(self._cursor.execute, sql, args)
        elif kw:
            ret = self._tryExecute(self._cursor.execute, sql, kw)
        else:
            ret = self._tryExecute(self._cursor.execute, sql)
        return self
    def executemany(self, sql, argList, start_transaction=True):
        # start_transaction is accepted for API compatibility but ignored.
        sql = self._fixStatement(sql)
        self._executeCheck(sql)
        return self._tryExecute(self._cursor.executemany, sql, argList)
    def fields(self):
        # Column names of the last result set.
        return [x[0] for x in self._cursor.description]
    def lastid(self):
        """Return the most recently generated sequence value for this
        session (PostgreSQL lastval()), or None if unavailable."""
        cu = self.dbh.cursor()
        cu.execute("SELECT lastval()")
        row = cu.fetchone()
        if row is None:
            return None
        else:
            return int(row[0])
    lastrowid = property(lastid)
    def _row(self, data):
        "Convert a data tuple to a C{Row} object."
        assert self._cursor
        if data is None:
            return None
        # This implementation does not request the unicode extension, but the
        # underlying connection might be shared with one that does. Callers
        # won't be expecting unicodes though so re-encode it.
        data = [self.encode(x) for x in data]
        return sqllib.Row(data, self.fields())
class Database(BaseDatabase):
    """Conary dbstore driver for PostgreSQL, backed by psycopg2.

    Handles connection management, schema introspection (tables, views,
    indexes, sequences, triggers -- including this session's temp schema)
    and assorted maintenance helpers.
    """
    driver = "psycopg2"
    kind = "postgresql"
    alive_check = "select version() as version"
    cursorClass = Cursor
    keywords = KeywordDict()
    basic_transaction = "START TRANSACTION"
    poolmode = True

    def connect(self, **kwargs):
        """Open the psycopg2 connection and load the temp-table registry.

        :raises sqlerrors.DatabaseError: if the connection fails.
        :returns: True on success.
        """
        assert self.database
        cdb = self._connectData()
        # Drop unset parameters so psycopg2 applies its own defaults.
        cdb = dict((x, y) for (x, y) in cdb.iteritems() if y is not None)
        try:
            self.dbh = psycopg2.connect(**cdb)
        except psycopg2.DatabaseError:
            raise sqlerrors.DatabaseError("Could not connect to database", cdb)
        # Temporary tables live in this session's temp schema and are
        # tracked separately from regular tables.
        self.tempTables = sqllib.CaselessDict()
        c = self.cursor()
        c.execute("""
        select c.relname as tablename from pg_class c
        where c.relnamespace = pg_my_temp_schema()
          and c.relkind = 'r'::"char"
        """)
        for table, in c.fetchall():
            self.tempTables[table] = sqllib.Llist()
        self.closed = False
        return True

    def close_fork(self):
        """Close the connection in a forked child without notifying the
        server -- the parent process still owns the session."""
        if self.dbh:
            # Close socket without notifying the server.
            os.close(self.dbh.fileno())
            self.dbh = None
        self.close()

    def loadSchema(self):
        """Introspect the live schema into self.tables / views / sequences /
        triggers / tempTables. Returns the stored schema version."""
        BaseDatabase.loadSchema(self)
        c = self.cursor()
        # get tables (both regular and this session's temp tables)
        c.execute("""
        select tablename as name, schemaname as schema
        from pg_tables
        where schemaname not in ('pg_catalog', 'pg_toast', 'information_schema')
          and ( schemaname !~ '^pg_temp_' OR schemaname = (pg_catalog.current_schemas(true))[1])
        """)
        for table, schema in c.fetchall():
            if schema.startswith("pg_temp"):
                self.tempTables[table] = sqllib.Llist()
            else:
                self.tables[table] = sqllib.Llist()
        if not len(self.tables):
            # Empty schema: nothing else to discover.
            return self.version
        # views
        c.execute("""
        select viewname as name
        from pg_views
        where schemaname not in ('pg_catalog', 'pg_toast', 'information_schema')
        """)
        for name, in c.fetchall():
            self.views[name] = True
        # indexes, attached to the table they belong to
        c.execute("""
        select indexname as name, tablename as table, schemaname as schema
        from pg_indexes
        where schemaname not in ('pg_catalog', 'pg_toast', 'information_schema')
          and ( schemaname !~ '^pg_temp_' OR schemaname = (pg_catalog.current_schemas(true))[1])
        """)
        for (name, table, schema) in c.fetchall():
            if schema.startswith("pg_temp"):
                self.tempTables.setdefault(table, sqllib.Llist()).append(name)
            else:
                self.tables.setdefault(table, sqllib.Llist()).append(name)
        # sequences. I wish there was a better way...
        c.execute("""
        SELECT c.relname as name
        FROM pg_catalog.pg_class c
        LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
        WHERE c.relkind = 'S'
          AND n.nspname NOT IN ('pg_catalog', 'pg_toast', 'information_schema')
          AND pg_catalog.pg_table_is_visible(c.oid)
        """)
        for name, in c.fetchall():
            self.sequences[name] = True
        # triggers
        # AWKWARD: postgres 9.0 changed tgisconstraint to tgisinternal, so we
        # have to detect which it is to maintain compatibility :(
        # -- gxti 2010-11-01
        c.execute("""
        SELECT a.attname
        FROM pg_catalog.pg_attribute a
        LEFT JOIN pg_catalog.pg_class c ON a.attrelid = c.oid
        LEFT JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
        WHERE n.nspname = 'pg_catalog' AND c.relname = 'pg_trigger'
          AND a.attname in ('tgisconstraint', 'tgisinternal')
        """)
        colname, = c.fetchone()
        c.execute("""
        SELECT t.tgname, c.relname
        FROM pg_catalog.pg_trigger t, pg_class c, pg_namespace n
        WHERE t.tgrelid = c.oid AND c.relnamespace = n.oid
          AND NOT t.%(colname)s
          AND n.nspname NOT IN ('pg_catalog', 'pg_toast', 'information_schema')
          AND ( n.nspname !~ '^pg_temp_' OR n.nspname = (pg_catalog.current_schemas(true))[1])
        """ % dict(colname=colname))
        for (name, table) in c.fetchall():
            self.triggers[name] = table
        version = self.getVersion()
        return version

    # Transaction support
    def inTransaction(self, default=None):
        """
        Return C{True} if the connection currently has an active
        transaction.
        """
        return self.dbh.status == psy_ext.STATUS_IN_TRANSACTION

    def createTrigger(self, table, column, onAction):
        """Install a BEFORE INSERT/UPDATE trigger that stamps *column* with
        the current timestamp (as YYYYMMDDhhmmss). Returns False when the
        trigger already exists, True after creation."""
        onAction = onAction.lower()
        assert onAction in ('insert', 'update')
        # first create the trigger function
        triggerName = "%s_%s" % (table, onAction)
        if triggerName in self.triggers:
            return False
        funcName = "%s_func" % triggerName
        cu = self.dbh.cursor()
        cu.execute("""
        CREATE OR REPLACE FUNCTION %s()
        RETURNS trigger
        AS $$
        BEGIN
            NEW.%s := TO_NUMBER(TO_CHAR(CURRENT_TIMESTAMP, 'YYYYMMDDHH24MISS'), '99999999999999') ;
            RETURN NEW;
        END ; $$ LANGUAGE 'plpgsql';
        """ % (funcName, column))
        # now create the trigger based on the above function
        cu.execute("""
        CREATE TRIGGER %s
        BEFORE %s ON %s
        FOR EACH ROW
        EXECUTE PROCEDURE %s()
        """ % (triggerName, onAction, table, funcName))
        self.triggers[triggerName] = table
        return True

    def dropTrigger(self, table, onAction):
        """Remove the trigger (and its helper function) installed by
        createTrigger. Returns False when no such trigger is known."""
        onAction = onAction.lower()
        triggerName = "%s_%s" % (table, onAction)
        if triggerName not in self.triggers:
            return False
        funcName = "%s_func" % triggerName
        cu = self.dbh.cursor()
        cu.execute("DROP TRIGGER %s ON %s" % (triggerName, table))
        cu.execute("DROP FUNCTION %s()" % funcName)
        del self.triggers[triggerName]
        return True

    def getVersion(self):
        """Return the stored schema version; (0, 0) for an uninitialized
        database. A savepoint shields the surrounding transaction from the
        probe's possible failure."""
        cu = self.dbh.cursor()
        cu.execute("SAVEPOINT getversion_save")
        try:
            try:
                return BaseDatabase.getVersion(self, raiseOnError=True)
            except sqlerrors.InvalidTable:
                self.version = sqllib.DBversion(0, 0)
                return self.version
        finally:
            cu.execute("ROLLBACK TO SAVEPOINT getversion_save")

    def analyze(self, table=""):
        """Run ANALYZE on *table*, or on the whole database when empty."""
        cu = self.cursor()
        assert isinstance(table, basestring)
        cu.execute("ANALYZE " + table)

    def truncate(self, *tables):
        """TRUNCATE all the named tables in a single statement."""
        cu = self.cursor()
        cu.execute("TRUNCATE TABLE " + ", ".join(tables))

    def runAutoCommit(self, func, *args, **kwargs):
        """Call the given function in auto-commit mode. Needed to execute
        statements that cannot be run in a transaction, like CREATE
        DATABASE.

        WARNING: This will commit any open transaction!
        """
        old_level = self.dbh.isolation_level
        try:
            if self.inTransaction():
                self.dbh.commit()
            self.dbh.set_isolation_level(psy_ext.ISOLATION_LEVEL_AUTOCOMMIT)
            return func(*args, **kwargs)
        finally:
            self.dbh.set_isolation_level(old_level)

    # resetting the auto increment values of primary keys
    def setAutoIncrement(self, table, column, value=None):
        """Reset the sequence behind table.column.

        When *value* is None the sequence is set from the column's current
        MAX (or primed to 1 on an empty table). Returns True.
        """
        cu = self.cursor()
        seqName = "%s_%s_seq" % (table, column)
        usedVal = True
        if value is None:
            cu.execute("select max(%s) from %s" % (column, table))
            value = cu.fetchall()[0][0]
            if value is None:
                # Empty table: prime the sequence at 1, marked unused so
                # the next nextval() returns 1.
                usedVal = False
                value = 1
        else:
            # BUGFIX: this previously assigned to a stray "values" variable,
            # so a caller-supplied string was passed to setval un-coerced and
            # the assertion below compared an int against a string.
            value = int(value)
        cu.execute("select setval(?, ?, ?)", (seqName, value, usedVal))
        ret = cu.fetchall()
        assert ret[0][0] == value
        return True

    def use(self, dbName, **kwargs):
        """Switch this handle to *dbName* on the same server by reconnecting."""
        self.close()
        self.database = "/".join([self.database.rsplit("/", 1)[0], dbName])
        return self.connect(**kwargs)
|
{
"content_hash": "3ff252199df0b476dc03f29eb15c60b9",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 99,
"avg_line_length": 34.89296636085627,
"alnum_prop": 0.5741454864154251,
"repo_name": "fedora-conary/conary",
"id": "09853ba66a3872bd61de5fa74e499275f230de4f",
"size": "11997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conary/dbstore/psycopg2_drv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "481681"
},
{
"name": "C++",
"bytes": "8244"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Erlang",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "45629"
},
{
"name": "Python",
"bytes": "10586616"
},
{
"name": "Shell",
"bytes": "4657"
},
{
"name": "Standard ML",
"bytes": "2756"
}
],
"symlink_target": ""
}
|
"""Per-prefix data, mapping each prefix to a name.
Auto-generated file, do not edit by hand.
"""
from ..util import u
# Copyright (C) 2011-2022 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TIMEZONE_DATA = {}
from .data0 import data
TIMEZONE_DATA.update(data)
del data
TIMEZONE_LONGEST_PREFIX = 8
|
{
"content_hash": "afa2c66eeebc8e047e2099f3d8367e1d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 32.96,
"alnum_prop": 0.7572815533980582,
"repo_name": "daviddrysdale/python-phonenumbers",
"id": "944037a9fc9b86b1251d1106178a805582ab1f00",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "python/phonenumbers/tzdata/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3898"
},
{
"name": "Makefile",
"bytes": "9034"
},
{
"name": "Python",
"bytes": "22052087"
},
{
"name": "Ruby",
"bytes": "237"
}
],
"symlink_target": ""
}
|
import os
from urllib.parse import urlsplit
from astropy.time import TimeDelta
from astropy.time import Time
import astropy.units as u
from datetime import timedelta
from sunpy.time import parse_time, TimeRange
from ..client import GenericClient
from sunpy import config
TIME_FORMAT = config.get("general", "time_format")
__all__ = ['XRSClient']
class XRSClient(GenericClient):
    """Provides access to GOES XRS FITS files served from
    umbra.nascom.nasa.gov."""

    def _get_goes_sat_num(self, date):
        """
        Determines the satellite number for a given date.

        Parameters
        ----------
        date : `astropy.time.Time`
            The date to determine which satellite is active.

        Raises
        ------
        ValueError
            If no GOES satellite was operational on ``date``.
        """
        # Operational time span per GOES satellite number.
        goes_operational = {
            2: TimeRange('1981-01-01', '1983-04-30'),
            5: TimeRange('1983-05-02', '1984-07-31'),
            6: TimeRange('1983-06-01', '1994-08-18'),
            7: TimeRange('1994-01-01', '1996-08-13'),
            8: TimeRange('1996-03-21', '2003-06-18'),
            9: TimeRange('1997-01-01', '1998-09-08'),
            10: TimeRange('1998-07-10', '2009-12-01'),
            11: TimeRange('2006-06-20', '2008-02-15'),
            12: TimeRange('2002-12-13', '2007-05-08'),
            13: TimeRange('2006-08-01', '2006-08-01'),
            14: TimeRange('2009-12-02', '2010-10-04'),
            15: TimeRange('2010-09-01', parse_time('now')),
        }
        # Several satellites overlap in time; prefer the newest one.
        results = [sat_num for sat_num, span in goes_operational.items()
                   if date in span]
        if results:
            return max(results)
        # if no satellites were found then raise an exception
        raise ValueError('No operational GOES satellites on {}'.format(
            date.strftime(TIME_FORMAT)))

    def _get_time_for_url(self, urls):
        """Map each file URL to the one-day TimeRange its data covers,
        parsed from the yymmdd / yyyymmdd stamp in the filename."""
        times = []
        for uri in urls:
            uripath = urlsplit(uri).path
            # Extract the yymmdd or yyyymmdd timestamp; the first four
            # characters are the "goNN" satellite prefix.
            datestamp = os.path.splitext(os.path.split(uripath)[1])[0][4:]
            # Filenames switched to 4-digit years after 1999-01-15
            # (990115 as an integer).
            if int(datestamp) <= 990115:
                start = Time.strptime(datestamp, "%y%m%d")
            else:
                start = Time.strptime(datestamp, "%Y%m%d")
            almost_day = TimeDelta(1*u.day - 1*u.millisecond)
            times.append(TimeRange(start, start + almost_day))
        return times

    def _get_url_for_timerange(self, timerange, **kwargs):
        """
        Returns URLs to the GOES data for the specified date range.

        Parameters
        ----------
        timerange: sunpy.time.TimeRange
            time range for which data is to be downloaded.
        satellitenumber : int
            GOES satellite number; when omitted, the newest satellite
            operational on each day is used.
        data_type : str
            Documented for API compatibility; currently unused by this
            method.
        """
        base_url = 'https://umbra.nascom.nasa.gov/goes/fits/'
        start_time = Time(timerange.start.strftime('%Y-%m-%d'))
        # make sure we are counting a day even if only a part of it is in
        # the query range.
        day_range = TimeRange(timerange.start.strftime('%Y-%m-%d'),
                              timerange.end.strftime('%Y-%m-%d'))
        total_days = int(day_range.days.value) + 1
        # Loop-invariant values hoisted out of the per-day loop.
        yy_cutoff = parse_time('1999/01/15')
        explicit_sat = kwargs.get('satellitenumber')
        result = list()
        # Iterate over each day in the input timerange and generate a URL
        # for it.
        for day in range(total_days):
            # datetime arithmetic sidesteps leap-second days, where adding
            # a day to a Time could yield the same day.
            date = start_time.datetime + timedelta(days=day)
            regex = date.strftime('%Y') + "/go{sat:02d}"
            # Filenames use 2-digit years before 1999-01-15.
            if date < yy_cutoff:
                regex += date.strftime('%y%m%d') + '.fits'
            else:
                regex += date.strftime('%Y%m%d') + '.fits'
            # BUGFIX: only look up the operational satellite when the caller
            # did not supply one. The previous eager kwargs.get default
            # always evaluated _get_goes_sat_num(date), which could raise
            # ValueError even though an explicit satellitenumber was given.
            if explicit_sat is not None:
                satellitenumber = explicit_sat
            else:
                satellitenumber = self._get_goes_sat_num(date)
            result.append(base_url + regex.format(sat=satellitenumber))
        return result

    def _makeimap(self):
        """
        Helper function used to hold information about source.
        """
        self.map_['source'] = 'nasa'
        self.map_['instrument'] = 'goes'
        self.map_['physobs'] = 'irradiance'
        self.map_['provider'] = 'sdac'

    @classmethod
    def _can_handle_query(cls, *query):
        """
        Answers whether client can service the query.

        Parameters
        ----------
        query : list of query objects

        Returns
        -------
        boolean
            answer as to whether client can service the query
        """
        chkattr = ['Time', 'Instrument', 'SatelliteNumber']
        chklist = [x.__class__.__name__ in chkattr for x in query]
        for x in query:
            if x.__class__.__name__ == 'Instrument' and x.value.lower() in ('xrs', 'goes'):
                return all(chklist)
        return False
|
{
"content_hash": "f8dbe49206f2a3bef16bf7203bfebb96",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 91,
"avg_line_length": 36.40816326530612,
"alnum_prop": 0.5590433482810164,
"repo_name": "dpshelio/sunpy",
"id": "80791b52a344c5bfe0f76528c2de91edc1c34c5f",
"size": "5495",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sunpy/net/dataretriever/sources/goes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "73732"
},
{
"name": "IDL",
"bytes": "5746"
},
{
"name": "Python",
"bytes": "1922243"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
}
|
"""
EDITS: Joshua Beard
- comments added for clarity
This module defines the entry point into the IBEIS system
ibeis.opendb and ibeis.main are the main entry points
"""
from __future__ import absolute_import, division, print_function
from six.moves import builtins # For py2 & py3 compatability
import sys # for system calls
import multiprocessing # for using multi-core processors
PREINIT_MULTIPROCESSING_POOLS = '--preinit' in sys.argv
QUIET = '--quiet' in sys.argv
NOT_QUIET = not QUIET
USE_GUI = '--gui' in sys.argv or '--nogui' not in sys.argv
# Use the `profile` decorator injected into builtins by profiling tools
# (e.g. kernprof); otherwise fall back to a no-op decorator.
if hasattr(builtins, 'profile'):
    profile = builtins.profile
else:
    def profile(func):
        return func
# Define behavior for user-called exit
def _on_ctrl_c(signal, frame):
    """SIGINT handler: shut down the parallel worker pool, then exit 0.

    Note: the ``signal`` parameter shadows the stdlib module name; it is
    the delivered signal number and, like ``frame``, is unused here.
    """
    proc_name = multiprocessing.current_process().name
    print('[ibeis.main_module] Caught ctrl+c in %s' % (proc_name,))
    try:
        _close_parallel()
    except Exception as ex:
        print('Something very bad happened' + repr(ex))
    finally:
        # Always exit, even if closing the pool failed.
        print('[ibeis.main_module] sys.exit(0)')
        sys.exit(0)
#-----------------------
# private init functions
def _init_signals():
    # Route SIGINT (ctrl+c) to the graceful-shutdown handler above.
    from signal import SIGINT, signal as set_handler
    set_handler(SIGINT, _on_ctrl_c)
def _reset_signals():
import signal # reference library module
signal.signal(signal.SIGINT, signal.SIG_DFL) # reset ctrl+c behavior
'''IBEIS'''
def _parse_args():
    """Parse command line arguments into ibeis.params (module-level state)."""
    from ibeis import params
    params.parse_args()
@profile
def _init_matplotlib():
    """Initialize matplotlib via plottool's centralized setup module."""
    from plottool import __MPL_INIT__
    __MPL_INIT__.init_matplotlib()
@profile
def _init_gui(activate=True):
    """Create the Qt application and the main window backend.

    Args:
        activate (bool): when True, raise/activate the main window.

    Returns:
        the guiback.MainWindowBackend instance.
    """
    import guitool
    if NOT_QUIET:
        print('[main] _init_gui()')
    guitool.ensure_qtapp()
    # NOTE: an alternative newgui.IBEISGuiWidget backend existed historically;
    # only the guiback backend is used now.
    from ibeis.gui import guiback
    back = guiback.MainWindowBackend()
    if activate:
        guitool.activate_qwindow(back.mainwin)
    return back
@profile
def _init_ibeis(dbdir=None, verbose=None, use_cache=True, web=None, **kwargs):
    """
    Private function that creates an ibeis controller and optionally starts
    the web interface.

    Args:
        dbdir (str): database directory; when None no controller is built.
        verbose (bool): defaults to utool's VERBOSE flag.
        use_cache (bool): reuse a cached controller when available.
        web (bool): start the web app; when None, decided by command line flags.
    """
    import utool as ut
    from ibeis import params
    from ibeis.control import IBEISControl
    if verbose is None:
        verbose = ut.VERBOSE
    if verbose and NOT_QUIET:
        print('[main] _init_ibeis()')
    # Use command line dbdir unless user specifies it
    if dbdir is None:
        print('[main!] WARNING: args.dbdir is None')
        return None
    kwargs = kwargs.copy()
    ibs = IBEISControl.request_IBEISController(
        dbdir=dbdir, use_cache=use_cache,
        request_dbversion=kwargs.pop('request_dbversion', None),
        asproxy=kwargs.pop('asproxy', None))
    if web is None:
        web = ut.get_argflag(('--webapp', '--webapi', '--web', '--browser'),
                             help_='automatically launch the web app / web api')
    if web:
        from ibeis.web import app
        app.start_from_ibeis(ibs, port=params.args.webport, **kwargs)
    return ibs
def __import_parallel_modules():
    # Pre-import modules that parallel worker processes rely on, so they
    # are already loaded when the program forks.
    from ibeis import core_annots  # NOQA
def _init_parallel():
    """Configure utool's process pool from the --num-procs argument and,
    with --preinit, warm the pool immediately."""
    import utool as ut
    if ut.VERBOSE:
        print('_init_parallel')
    from utool import util_parallel
    from ibeis import params
    __import_parallel_modules()
    num_procs = params.args.num_procs
    util_parallel.set_num_procs(num_procs)
    if PREINIT_MULTIPROCESSING_POOLS:
        util_parallel.init_pool(num_procs)
def _close_parallel():
    """Terminate utool's worker pool; log and re-raise on failure."""
    try:
        from utool import util_parallel
        util_parallel.close_pool(terminate=True)
    except Exception as ex:
        import utool as ut
        ut.printex(ex, 'error closing parallel')
        raise
def _init_numpy():
    """Configure numpy floating point error handling (all errors ignored)."""
    import utool as ut
    import numpy as np
    if ut.VERBOSE:
        print('_init_numpy')
    # other choices: 'warn', 'raise', 'call', 'print', 'log'
    on_err = 'ignore'
    np.seterr(**{kind: on_err
                 for kind in ('divide', 'over', 'under', 'invalid')})
#-----------------------
# private loop functions
def _guitool_loop(main_locals, ipy=False):
    """Run the Qt event loop for the main window backend found in
    main_locals['back']."""
    import guitool
    from ibeis import params
    print('[main] guitool loop')
    back = main_locals.get('back', None)
    if back is None:
        if NOT_QUIET:
            print('WARNING: back was not expected to be None')
        return
    loop_freq = params.args.loop_freq
    ipy = ipy or params.args.cmd
    guitool.qtapp_loop(qwin=back.mainwin, ipy=ipy, frequency=loop_freq,
                       init_signals=False)
    if ipy:  # the qtapp loop does not block under IPython; refresh manually
        back.refresh_state()
def set_newfile_permissions():
    r"""
    Sets this process's default permission bits for newly created files by
    clearing the umask.

    Returns:
        int: the previous umask value.

    CommandLine:
        python -m ibeis.main_module --test-set_newfile_permissions

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.main_module import *  # NOQA
        >>> import os
        >>> import utool as ut
        >>> # write before umask
        >>> ut.delete('tempfile1.txt')
        >>> ut.write_to('tempfile1.txt', 'foo')
        >>> stat_result1 = os.stat('tempfile1.txt')
        >>> # apply umask
        >>> set_newfile_permissions()
        >>> ut.delete('tempfile2.txt')
        >>> ut.write_to('tempfile2.txt', 'foo')
        >>> stat_result2 = os.stat('tempfile2.txt')
        >>> # verify results
        >>> print('old masked all bits = %o' % (stat_result1.st_mode))
        >>> print('new masked all bits = %o' % (stat_result2.st_mode))
    """
    import os
    # 0o000 is the most permissive umask: no permission bits are stripped
    # from files this process creates.
    return os.umask(0o000)
@profile
def main(gui=True, dbdir=None, defaultdb='cache',
         allow_newdir=False, db=None,
         delete_ibsdir=False,
         **kwargs):
    """
    Program entry point.
    Inits the system environment, an IBEISControl, and a GUI if requested.

    Args:
        gui (bool): (default=True) If gui is False a gui instance will not be created
        dbdir (None): full directory of a database to load
        db (None): name of database to load relative to the workdir
        allow_newdir (bool): (default=False) if False an error is raised if a
            a new database is created
        defaultdb (str): codename of database to load if db and dbdir is None. a value
            of 'cache' will open the last database opened with the GUI.
        delete_ibsdir (bool): if True, delete the database directory first
            (requires allow_newdir).

    Returns:
        dict: main_locals -- {'ibs': controller, 'back': gui backend or None}
    """
    # Relax the umask so files the process creates are group-accessible.
    set_newfile_permissions()
    from ibeis.init import main_commands
    from ibeis.init import sysres
    # Display a visible intro message
    msg = '''
    _____ ______  _______ _____ _______
      |   |_____] |______   |   |______
    __|__ |_____] |______ __|__ ______|
    '''
    if NOT_QUIET:
        print(msg)
    # Init the only two main system api handles
    ibs = None
    back = None
    if NOT_QUIET:
        print('[main] ibeis.main_module.main()')
    _preload()
    DIAGNOSTICS = NOT_QUIET
    if DIAGNOSTICS:
        import os
        import utool as ut
        import ibeis
        print('[main] MAIN DIAGNOSTICS')
        print('[main] * username = %r' % (ut.get_user_name()))
        print('[main] * ibeis.__version__ = %r' % (ibeis.__version__,))
        print('[main] * computername = %r' % (ut.get_computer_name()))
        print('[main] * cwd = %r' % (os.getcwd(),))
        print('[main] * sys.argv = %r' % (sys.argv,))
    # Parse directory to be loaded from command line args
    # and explicit kwargs
    dbdir = sysres.get_args_dbdir(defaultdb, allow_newdir, db, dbdir, cache_priority=False)
    if delete_ibsdir is True:
        from ibeis.other import ibsfuncs
        # Deleting an existing database only makes sense if a fresh one
        # may be created afterwards.
        assert allow_newdir, 'must be making new directory if you are deleting everything!'
        ibsfuncs.delete_ibeis_database(dbdir)
    #limit = sys.getrecursionlimit()
    #if limit == 1000:
    #    print('Setting Recursion Limit to 3000')
    #    sys.setrecursionlimit(3000)
    # Execute preload commands
    main_commands.preload_commands(dbdir, **kwargs)  # PRELOAD CMDS
    try:
        # Build IBEIS Control object
        ibs = _init_ibeis(dbdir)
        if gui and USE_GUI:
            back = _init_gui(activate=kwargs.get('activate', True))
            back.connect_ibeis_control(ibs)
    except Exception as ex:
        print('[main()] IBEIS LOAD imageseted exception: %s %s' % (type(ex), ex))
        raise
    main_commands.postload_commands(ibs, back)  # POSTLOAD CMDS
    main_locals = {'ibs': ibs, 'back': back}
    return main_locals
def opendb_in_background(*args, **kwargs):
    """
    Spawns ``opendb`` in a separate background process.

    Kwargs:
        wait (int): seconds to sleep after spawning so the child process can
            initialize (popped from kwargs before forwarding; default 0).

    Returns:
        the background process handle returned by utool
    """
    import time
    import utool as ut
    startup_delay = kwargs.pop('wait', 0)
    should_wait = startup_delay != 0
    if should_wait:
        print('waiting %s seconds for startup' % (startup_delay,))
    background_proc = ut.spawn_background_process(opendb, *args, **kwargs)
    if should_wait:
        # give the child process a moment to initialize
        time.sleep(startup_delay)
    return background_proc
def opendb_bg_web(*args, **kwargs):
    """
    Wrapper around opendb_in_background, returns a nice web_ibs
    object to execute web calls using normal python-like syntax.

    Accepts domain and port as kwargs.  If no domain is given, a local test
    server is spawned in the background; otherwise the returned object
    simply targets the remote controller.

    Kwargs:
        port, domain

    CommandLine:
        python -m ibeis.main_module opendb_bg_web

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.main_module import *  # NOQA
        >>> print('Opening a web_ibs')
        >>> web_ibs = opendb_bg_web()
        >>> print('SUCCESS Opened a web_ibs!')
        >>> print(web_ibs)
        >>> print('Now kill the web_ibs')
        >>> web_ibs.terminate2()
    """
    import utool as ut
    # Copy so the pops below do not mutate the caller's dict.
    kwargs = kwargs.copy()
    domain = kwargs.pop('domain', ut.get_argval('--domain', type_=str, default=None))
    port = kwargs.pop('port', 5000)
    if 'wait' in kwargs:
        print('NOTE: No need to specify wait param anymore. '
              'This is automatically taken care of.')
    if domain is None:
        # Requesting a local test server
        _kw = dict(web=True, browser=False)
        _kw.update(kwargs)
        web_ibs = opendb_in_background(*args, **_kw)
    else:
        # Using a remote controller, no need to spin up anything
        web_ibs = ut.DynStruct()
        web_ibs.terminate2 = lambda: None
    # Augment web instance with useful test functions
    if domain is None:
        domain = 'http://127.0.1.1'
    if not domain.startswith('http://'):
        domain = 'http://' + domain
    baseurl = domain + ':' + str(port)
    web_ibs.domain = domain
    web_ibs.port = port
    web_ibs.baseurl = baseurl
    def send_ibeis_request(suffix, type_='post', **kwargs):
        """
        Posts a request to a url suffix
        """
        import requests
        import utool as ut
        if not suffix.endswith('/'):
            raise Exception('YOU PROBABLY WANT A / AT THE END OF YOUR URL')
        # JSON-encode every payload value before sending.
        payload = ut.map_dict_vals(ut.to_json, kwargs)
        # NOTE(review): json_content is only assigned for 'post'/'get'; any
        # other type_ would raise NameError below — confirm callers only
        # ever pass these two verbs.
        if type_ == 'post':
            resp = requests.post(baseurl + suffix, data=payload)
            # NOTE(review): reads the private resp._content here but the
            # public resp.content for 'get' — presumably equivalent; verify.
            json_content = resp._content
        elif type_ == 'get':
            resp = requests.get(baseurl + suffix, data=payload)
            json_content = resp.content
        try:
            content = ut.from_json(json_content)
        except ValueError:
            raise Exception('Expected JSON string but got json_content=%r' % (json_content,))
        else:
            # print('content = %r' % (content,))
            # Non-200 application status codes are surfaced as exceptions.
            if content['status']['code'] != 200:
                print(content['status']['message'])
                raise Exception(content['status']['message'])
        request_response = content['response']
        return request_response
    def wait_for_results(jobid, timeout=None, delays=[1, 3, 10]):
        """
        Waits for results from an engine
        """
        # NOTE(review): the mutable default ``delays`` is never mutated, so
        # the shared-default pitfall does not bite; ``timeout`` is unused.
        for _ in ut.delayed_retry_gen(delays):
            print('Waiting for jobid = %s' % (jobid,))
            status_response = web_ibs.send_ibeis_request('/api/engine/job/status/', jobid=jobid)
            if status_response['jobstatus'] == 'completed':
                break
        return status_response
    def read_engine_results(jobid):
        # Fetch the result payload of a finished engine job.
        result_response = web_ibs.send_ibeis_request('/api/engine/job/result/', jobid=jobid)
        return result_response
    def send_request_and_wait(suffix, type_='post', timeout=None, **kwargs):
        # Submit a job request, block until it completes, return its result.
        jobid = web_ibs.send_ibeis_request(suffix, type_=type_, **kwargs)
        status_response = web_ibs.wait_for_results(jobid, timeout)  # NOQA
        result_response = web_ibs.read_engine_results(jobid)
        #>>> cmdict = ut.from_json(result_response['json_result'])[0]
        return result_response
    # Attach the helpers so callers can use them as methods of web_ibs.
    web_ibs.send_ibeis_request = send_ibeis_request
    web_ibs.wait_for_results = wait_for_results
    web_ibs.read_engine_results = read_engine_results
    web_ibs.send_request_and_wait = send_request_and_wait
    def wait_until_started():
        """ waits until the web server responds to a request """
        import requests
        for count in ut.delayed_retry_gen([1], timeout=15):
            if ut.VERBOSE:
                print('Waiting for server to be up. count=%r' % (count,))
            try:
                web_ibs.send_ibeis_request('/api/test/heartbeat/', type_='get')
                break
            except requests.ConnectionError:
                pass
    wait_until_started()
    return web_ibs
def opendb(db=None, dbdir=None, defaultdb='cache', allow_newdir=False,
           delete_ibsdir=False, verbose=False, use_cache=True,
           web=None, **kwargs):
    """
    main without the preload (except for option to delete database before
    opening)

    Args:
        db (str): database name in your workdir used only if dbdir is None
        dbdir (None): full database path
        defaultdb (str): dbdir search strategy when db is None and dbdir is
            None
        allow_newdir (bool): (default=False) if False an error is raised
            when the requested database does not already exist
        delete_ibsdir (bool): BE CAREFUL! (default=False) if True deletes
            the entire database directory before opening
        verbose (bool): verbosity flag
        web (bool): starts webserver if True (default=param specification)
        use_cache (bool): if True will try to return a previously loaded
            controller

    Returns:
        ibeis.IBEISController: ibs

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.main_module import *  # NOQA
        >>> db = None
        >>> dbdir = None
        >>> defaultdb = 'cache'
        >>> allow_newdir = False
        >>> delete_ibsdir = False
        >>> verbose = False
        >>> use_cache = True
        >>> ibs = opendb(db, dbdir, defaultdb, allow_newdir, delete_ibsdir,
        >>>              verbose, use_cache)
        >>> result = str(ibs)
        >>> print(result)
    """
    from ibeis.init import sysres
    from ibeis.other import ibsfuncs
    # Resolve the database directory from explicit args / defaults.
    dbdir = sysres.get_args_dbdir(defaultdb, allow_newdir, db, dbdir,
                                  cache_priority=False)
    if delete_ibsdir is True:
        # Deleting only makes sense if we are allowed to recreate the dir.
        assert allow_newdir, (
            'must be making new directory if you are deleting everything!')
        ibsfuncs.delete_ibeis_database(dbdir)
    ibs = _init_ibeis(dbdir, verbose=verbose, use_cache=use_cache, web=web,
                      **kwargs)
    return ibs
def start(*args, **kwargs):
    """ alias for main(); forwards all arguments unchanged """  # + main.__doc__
    return main(*args, **kwargs)
def test_main(gui=True, dbdir=None, defaultdb='cache', allow_newdir=False,
              db=None):
    """ alias for main() """  # + main.__doc__
    # NOTE(review): the gui argument is accepted but unused here.
    from ibeis.init import sysres
    # Set up the python environment (args, logging, matplotlib, signals).
    _preload()
    # Resolve the database directory from explicit args / defaults.
    dbdir = sysres.get_args_dbdir(defaultdb, allow_newdir, db, dbdir, cache_priority=False)
    ibs = _init_ibeis(dbdir)
    return ibs
@profile
def _preload(mpl=True, par=True, logging=True):
    """
    Sets up the python environment for ibeis.

    Parses command line arguments and initializes logging, the matplotlib
    backend, numpy print settings, parallel worker processes, signal
    handlers and colored exceptions.  No-op in subprocesses (only runs in
    the MainProcess).

    Args:
        mpl (bool): if True initialize the matplotlib backend
        par (bool): if True initialize parallel worker processes
        logging (bool): if True (and --nologging is not given) start
            logging to the configured ibeis log dir
    """
    import utool as ut
    #from ibeis.init import main_helpers
    from ibeis import params
    # Guard: only configure the environment once, in the main process.
    if multiprocessing.current_process().name != 'MainProcess':
        return
    if ut.VERBOSE:
        print('[ibeis] _preload')  # fixed typo: was '[ibies]'
    _parse_args()
    # mpl backends
    if logging and not params.args.nologging:
        # Log in the configured ibeis log dir (which is maintained by utool)
        # fix this to be easier to figure out where the logs actually are
        ut.start_logging(appname='ibeis')
    if mpl:
        _init_matplotlib()
    # numpy print settings
    _init_numpy()
    # parallel server processes
    if par:
        _init_parallel()
    # ctrl+c
    _init_signals()
    # inject colored exceptions
    ut.util_inject.inject_colored_exceptions()
    # register type aliases for debugging
    #main_helpers.register_utool_aliases()
    #return params.args
@profile
def main_loop(main_locals, rungui=True, ipy=False, persist=True):
    """
    Runs the qt loop if the GUI was initialized and returns an executable string
    for embedding an IPython terminal if requested.

    If rungui is False the gui will not loop even if back has been created.

    The main locals dict must be called main_locals in the scope you call this
    function in.

    Args:
        main_locals (dict_):
        rungui (bool):
        ipy (bool):
        persist (bool):

    Returns:
        str: execstr
    """
    print('[main] ibeis.main_module.main_loop()')
    from ibeis import params
    import utool as ut
    #print('current process = %r' % (multiprocessing.current_process().name,))
    #== 'MainProcess':
    if rungui and not params.args.nogui:
        try:
            # Blocks here until the Qt event loop exits.
            _guitool_loop(main_locals, ipy=ipy)
        except Exception as ex:
            ut.printex(ex, 'error in main_loop')
            raise
    #if not persist or params.args.cmd:
    #    main_close()
    # Put locals in the exec namespace
    ipycmd_execstr = ut.ipython_execstr()
    locals_execstr = ut.execstr_dict(main_locals, 'main_locals')
    execstr = locals_execstr + '\n' + ipycmd_execstr
    return execstr
def main_close(main_locals=None):
    """ Shuts down parallel workers and restores default signal handlers. """
    # NOTE(review): main_locals is accepted but unused.
    #import utool as ut
    #if ut.VERBOSE:
    #    print('main_close')
    _close_parallel()
    _reset_signals()
#if __name__ == '__main__':
# multiprocessing.freeze_support()
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.main_module
        python -m ibeis.main_module --allexamples
        python -m ibeis.main_module --allexamples --noface --nosrc
    """
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Run this module's doctests when invoked directly.
    ut.doctest_funcs()
|
{
"content_hash": "e4d5a12f8e1794f84a7ccbecfff98ed9",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 96,
"avg_line_length": 32.95387149917628,
"alnum_prop": 0.6037094435834625,
"repo_name": "SU-ECE-17-7/ibeis",
"id": "134145f3fd9bb14437d8afdb3ad83394a5194c0f",
"size": "20027",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "ibeis/main_module.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "331"
},
{
"name": "CSS",
"bytes": "26792"
},
{
"name": "HTML",
"bytes": "33762203"
},
{
"name": "Inno Setup",
"bytes": "1585"
},
{
"name": "JavaScript",
"bytes": "227454"
},
{
"name": "Jupyter Notebook",
"bytes": "66346367"
},
{
"name": "Python",
"bytes": "6112508"
},
{
"name": "Shell",
"bytes": "58211"
}
],
"symlink_target": ""
}
|
"""
A micro wx App with a list of the things checked on the checklist.
"""
import datetime
import random
import wx
import hotmodel
import production
import hotwidgets
class ProcView(hotwidgets.MVCList):
    """
    A specialized version of MVCList that sets columns for the process
    operations and adds another column that indicates, whether the
    operation has been done.
    """
    def __init__(self, parent, id, model):
        # Three report-view columns: operation code, action text, done flag.
        super(ProcView, self).__init__(
            parent, id, style=wx.LC_REPORT,
            columns=[
                ("operation", "Op."),
                ("act", "Act"),
                ("done", "Done"),
            ],
        )
        self.model = model
    def indicate_operation_status(self, row, status):
        # Column 2 is the "Done" column; render the boolean as YES/NO.
        self.SetStringItem(row, 2, "YES" if status else "NO",)
    def update_indication(self, model, fqname, event_name, key):
        """
        Marks those operations that have been done as done and those that
        have not been done as not done.
        """
        # Operations recorded on the model are the ones already performed.
        done_ops = set([i.operation for i in self.model.operations])
        for (i, op) in enumerate(self.model.process):
            self.indicate_operation_status(
                i,
                op.operation in done_ops,
            )
class ProductionView(wx.Frame):
    """Main window: shows the current product and its process checklist."""
    def __init__(self, parent, dummy_app, title, model):
        """ Create the main frame. """
        wx.Frame.__init__(
            self, parent, -1,
            title,
        )
        # Layout: product label on top, process list below, buttons at bottom.
        self.box = wx.GridBagSizer(5, 5)
        self.product = wx.StaticText(self, -1, "")
        self.proc_view = ProcView(self, -1, model)
        self.box.Add(self.product, (0, 0), (1, 2), flag=wx.EXPAND)
        self.box.Add(self.proc_view, (1, 0), (1, 2), flag=wx.EXPAND)
        # NOTE(review): ``next`` shadows the builtin; local to __init__ only.
        next = wx.Button(self, -1, "Next Record")
        add_op = wx.Button(self, -1, "Add Operation")
        self.box.Add(next, (3, 0))
        self.box.Add(add_op, (3, 1))
        # Let the list row and both columns grow with the window.
        self.box.AddGrowableRow(1)
        self.box.AddGrowableCol(0)
        self.box.AddGrowableCol(1)
        self.SetSizerAndFit(self.box)
        self.Bind(wx.EVT_BUTTON, self.on_next, next)
        self.Bind(wx.EVT_BUTTON, self.on_add_op, add_op)
        self.model = model
        # Route model change notifications to the view update handlers.
        self.mapper = hotmodel.Mapper()
        self.proc_view.add_routes(self.mapper, "/process")
        self.mapper.add_route(
            "/process",
            "",
            self.proc_view.update_indication,
        )
        self.mapper.add_route(
            "/operations",
            "",
            self.proc_view.update_indication,
        )
        self.mapper.add_route("/", "", self.on_product,)
        self.model.add_listener(self.mapper)
        # Load an initial product once the event loop is running.
        wx.CallAfter(lambda: self.model.set_product("FIRST8", 1))
    def on_product(self, model, fqname, event_name, key):
        """
        An article or sn change handler.
        """
        self.product.SetLabel("%s %s" % (model.article, model.sn))
    def on_next(self, evt):
        """
        Button "Next" handler: Display the next product.
        """
        evt.Skip()
        self.model.set_product("AAAQA%s" % random.randint(0, 9), 1)
    def on_add_op(self, evt):
        """
        Button "add op" handler: Add a random operation to the operation
        list.
        """
        evt.Skip()
        # Pick a random process step and record it as performed now.
        proc_op = random.choice(self.model.process)
        self.model.operations.append(production.ProductOperation(
            operation=proc_op.operation,
            tm=datetime.datetime.now(),
            workplace=100,
        ))
if "__main__" == __name__:
    # Wire up a model backed by a simulated server, then run the wx loop.
    MODEL = production.ProductModel(production.Server(op_done_rate=10))
    APP = wx.App(redirect=False)
    FRAME = ProductionView(None, APP, "Sample Frame", MODEL)
    APP.SetTopWindow(FRAME)
    FRAME.Show(True)
    FRAME.Maximize(True)
    APP.MainLoop()
|
{
"content_hash": "48e7bd7008f2c4638d05f819eb3308c1",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 77,
"avg_line_length": 30.899224806201552,
"alnum_prop": 0.539136979427998,
"repo_name": "petrblahos/modellerkit",
"id": "108b804ff9d21e5b62bfc892b4ebf4ba266ad7ba",
"size": "3986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "step06/view03.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "203293"
}
],
"symlink_target": ""
}
|
"""
Name: 'OpenCTM (*.ctm)...'
Blender: 248
Group: 'Import'
Tooltip: 'Import an OpenCTM file'
"""
import bpy
import Blender
from Blender import Mesh, Scene, Window, sys, Image, Draw
import BPyMesh
import math
import ctypes
from ctypes import *
from ctypes.util import find_library
import os
__author__ = "Marcus Geelnard"
__version__ = "0.4"
__bpydoc__ = """\
This script imports OpenCTM files into Blender. It supports normals,
colours, and UV coordinates per vertex.
"""
# Copyright (C) 2009-2010: Marcus Geelnard
#
# This program is released to the public domain.
#
# Portions of this code are taken from ply_import.py in Blender
# 2.48.
#
# The script uses the OpenCTM shared library (.so, .dll, etc). If no
# such library can be found, the script will exit with an error
# message.
#
# v0.4, 2009-09-14
# - Updated to OpenCTM API version 0.8 (texture maps are now called UV maps)
#
# v0.3, 2009-08-09
# - Changed vertex color attribute name to "Color"
#
# v0.2, 2009-06-30
# - Better error reporting
#
# v0.1, 2009-05-31
# - First test version with an alpha version of the OpenCTM API
#
def file_callback(filename):
    """
    Load the OpenCTM file at ``filename`` through the OpenCTM shared
    library (via ctypes) and build a new Blender mesh/object from it.

    Called by Blender's FileSelector.  Shows a PupMenu and returns early
    on any load error.  Uses the Blender 2.4x Python API.
    """
    Window.WaitCursor(1)
    try:
        # Load the OpenCTM shared library
        if os.name == 'nt':
            libHDL = WinDLL('openctm.dll')
        else:
            libName = find_library('openctm')
            if not libName:
                Blender.Draw.PupMenu('Could not find the OpenCTM shared library')
                return
            libHDL = CDLL(libName)
            if not libHDL:
                Blender.Draw.PupMenu('Could not open the OpenCTM shared library')
                return
        # Get all the functions from the shared library that we need
        # (declaring argtypes/restype so ctypes marshals correctly)
        ctmNewContext = libHDL.ctmNewContext
        ctmNewContext.argtypes = [c_int]
        ctmNewContext.restype = c_void_p
        ctmFreeContext = libHDL.ctmFreeContext
        ctmFreeContext.argtypes = [c_void_p]
        ctmGetError = libHDL.ctmGetError
        ctmGetError.argtypes = [c_void_p]
        ctmGetError.restype = c_int
        ctmErrorString = libHDL.ctmErrorString
        ctmErrorString.argtypes = [c_int]
        ctmErrorString.restype = c_char_p
        ctmLoad = libHDL.ctmLoad
        ctmLoad.argtypes = [c_void_p, c_char_p]
        ctmGetInteger = libHDL.ctmGetInteger
        ctmGetInteger.argtypes = [c_void_p, c_int]
        ctmGetInteger.restype = c_int
        ctmGetString = libHDL.ctmGetString
        ctmGetString.argtypes = [c_void_p, c_int]
        ctmGetString.restype = c_char_p
        ctmGetIntegerArray = libHDL.ctmGetIntegerArray
        ctmGetIntegerArray.argtypes = [c_void_p, c_int]
        ctmGetIntegerArray.restype = POINTER(c_int)
        ctmGetFloatArray = libHDL.ctmGetFloatArray
        ctmGetFloatArray.argtypes = [c_void_p, c_int]
        ctmGetFloatArray.restype = POINTER(c_float)
        ctmGetNamedAttribMap = libHDL.ctmGetNamedAttribMap
        ctmGetNamedAttribMap.argtypes = [c_void_p, c_char_p]
        ctmGetNamedAttribMap.restype = c_int
        # Create an OpenCTM context
        ctm = ctmNewContext(0x0101) # CTM_IMPORT
        try:
            # Load the file
            ctmLoad(ctm, c_char_p(filename))
            err = ctmGetError(ctm)
            if err != 0:
                s = ctmErrorString(err)
                Blender.Draw.PupMenu('Could not load the file: ' + s)
                return
            # Get the mesh properties
            vertexCount = ctmGetInteger(ctm, 0x0301) # CTM_VERTEX_COUNT
            triangleCount = ctmGetInteger(ctm, 0x0302) # CTM_TRIANGLE_COUNT
            hasNormals = ctmGetInteger(ctm, 0x0303) # CTM_HAS_NORMALS
            texMapCount = ctmGetInteger(ctm, 0x0304) # CTM_UV_MAP_COUNT
            # Get indices
            pindices = ctmGetIntegerArray(ctm, 0x0601) # CTM_INDICES
            # Get vertices
            pvertices = ctmGetFloatArray(ctm, 0x0602) # CTM_VERTICES
            # Get normals
            if hasNormals == 1:
                pnormals = ctmGetFloatArray(ctm, 0x0603) # CTM_NORMALS
            else:
                pnormals = None
            # Get texture coordinates (only the first UV map is used)
            if texMapCount > 0:
                ptexCoords = ctmGetFloatArray(ctm, 0x0700) # CTM_UV_MAP_1
            else:
                ptexCoords = None
            # Get colors (per-vertex RGBA attribute map named 'Color')
            colorMap = ctmGetNamedAttribMap(ctm, c_char_p('Color'))
            if colorMap != 0:
                pcolors = ctmGetFloatArray(ctm, colorMap)
            else:
                pcolors = None
            # We will be creating vectors...
            Vector = Blender.Mathutils.Vector
            # Create Blender verts and faces
            verts = []
            for i in range(vertexCount):
                verts.append(Vector(pvertices[i * 3], pvertices[i * 3 + 1], pvertices[i * 3 + 2]))
            faces = []
            for i in range(triangleCount):
                faces.append((pindices[i * 3], pindices[i * 3 + 1], pindices[i * 3 + 2]))
            # Create a new Blender mesh from the loaded mesh data
            objName = Blender.sys.splitext(Blender.sys.basename(filename))[0]
            mesh = bpy.data.meshes.new(objName)
            mesh.verts.extend(verts)
            mesh.faces.extend(faces)
            # Add normals?
            if pnormals:
                i = 0
                for v in mesh.verts:
                    n = Vector(pnormals[i], pnormals[i + 1], pnormals[i + 2])
                    v.no = n
                    i += 3
            else:
                mesh.calcNormals()
            # Always use smooth normals - regardless if they are defined or calculated
            for f in mesh.faces:
                f.smooth = 1
            # Add texture coordinates?
            if ptexCoords:
                mesh.faceUV = 1
                for f in mesh.faces:
                    for j, v in enumerate(f.v):
                        k = v.index
                        if k < vertexCount:
                            uv = f.uv[j]
                            uv[0] = ptexCoords[k * 2]
                            uv[1] = ptexCoords[k * 2 + 1]
            # Add colors? (floats in [0,1] scaled and clamped to 0..255)
            if pcolors:
                mesh.vertexColors = 1
                for f in mesh.faces:
                    for j, v in enumerate(f.v):
                        k = v.index
                        if k < vertexCount:
                            col = f.col[j]
                            r = int(round(pcolors[k * 4] * 255.0))
                            if r < 0: r = 0
                            if r > 255: r = 255
                            g = int(round(pcolors[k * 4 + 1] * 255.0))
                            if g < 0: g = 0
                            if g > 255: g = 255
                            b = int(round(pcolors[k * 4 + 2] * 255.0))
                            if b < 0: b = 0
                            if b > 255: b = 255
                            col.r = r
                            col.g = g
                            col.b = b
            # Select all vertices in the mesh
            mesh.sel = True
            # Create a new object with the new mesh
            scn = bpy.data.scenes.active
            scn.objects.selected = []
            obj = scn.objects.new(mesh, objName)
            scn.objects.active = obj
        finally:
            # Free the OpenCTM context
            ctmFreeContext(ctm)
    finally:
        Window.WaitCursor(0)
        Blender.Redraw()
def main():
    """Open Blender's file selector to pick an OpenCTM file to import."""
    Blender.Window.FileSelector(file_callback, 'Import OpenCTM', '*.ctm')
if __name__=='__main__':
    main()
|
{
"content_hash": "492699a10d32175460d91731627d5226",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 86,
"avg_line_length": 27.915555555555557,
"alnum_prop": 0.6377965292150931,
"repo_name": "wibbe/pbr-test",
"id": "42ef5b49bf90d36cc091cbc8f8d0da61cee8d2b5",
"size": "6288",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/slg/3rdparty/openctm-1.0.3/plugins/blender/openctm_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "108"
},
{
"name": "C++",
"bytes": "72257"
}
],
"symlink_target": ""
}
|
import socket
import fcntl
import struct
def get_ip_address(ifname):
    """Return the IPv4 address assigned to network interface ``ifname``.

    Uses the Linux SIOCGIFADDR ioctl (0x8915) on a throw-away UDP socket;
    the interface name is truncated to 15 characters (IFNAMSIZ - 1).

    Args:
        ifname (str or bytes): interface name, e.g. 'eth0' or 'lo'.

    Returns:
        str: the interface address in dotted-quad notation.
    """
    if isinstance(ifname, str):
        # struct.pack('256s', ...) needs bytes on Python 3
        ifname = ifname.encode('ascii')
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        packed = fcntl.ioctl(s.fileno(), 0x8915,  # SIOCGIFADDR
                             struct.pack('256s', ifname[:15]))
        return socket.inet_ntoa(packed[20:24])
    finally:
        s.close()  # the original leaked this socket
# Publish this machine's eth0 address as a small JSON snippet for other
# components to read.
res = '{"key":"ClientIP","value":{"ip":"%s"}}' % get_ip_address('eth0')
# The original called ``output.close`` without parentheses, so the file was
# never explicitly closed; the with-block guarantees flush and close.
with open('/home/pi/getIPAddress.txt', 'w') as output:
    output.write(res)
|
{
"content_hash": "a655d1d73b68564c9f80e05ea832188d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 98,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.6933333333333334,
"repo_name": "XintingXu/AutoWaterSystem",
"id": "698c15be53c06677fb9e5a14656e2483e8c5584e",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "~PI/getIPAddress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "65579"
},
{
"name": "PHP",
"bytes": "40551"
},
{
"name": "Python",
"bytes": "756"
},
{
"name": "QMake",
"bytes": "933"
},
{
"name": "Shell",
"bytes": "6818"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
    """Clears legacy Meta options and renames the reputation models to
    their English names (ReputaceSpecial -> ReputationAdditional,
    ReputaceLog -> ReputationLog)."""
    dependencies = [
        ("ddcz", "0086_renames_followup"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="reputacelog",
            options={},
        ),
        migrations.AlterModelOptions(
            name="reputacespecial",
            options={},
        ),
        migrations.RenameModel(
            old_name="ReputaceSpecial",
            new_name="ReputationAdditional",
        ),
        migrations.RenameModel(
            old_name="ReputaceLog",
            new_name="ReputationLog",
        ),
    ]
|
{
"content_hash": "8a1e1901a6afea3d8c9032ba43bf26be",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 44,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.5349206349206349,
"repo_name": "dracidoupe/graveyard",
"id": "a9ac7e478c4d1f38d49fcc8ead589c6a82626bdb",
"size": "680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddcz/migrations/0087_reputation_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "API Blueprint",
"bytes": "4273"
},
{
"name": "CSS",
"bytes": "37578"
},
{
"name": "Dockerfile",
"bytes": "208"
},
{
"name": "HTML",
"bytes": "101149"
},
{
"name": "JavaScript",
"bytes": "2417"
},
{
"name": "Python",
"bytes": "766548"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
}
|
import json
import uuid
from examples import acquire_token_by_username_password
from office365.graph_client import GraphClient
def print_success(group):
    """
    Completion callback invoked once the team has been deleted.

    :type group: office365.directory.groups.group.Group
    """
    # was an f-string with no placeholders (F541); a plain literal is identical
    print("team has been deleted")
# Authenticate and create a throw-away team with a random unique name.
client = GraphClient(acquire_token_by_username_password)
team_name = "Team_" + uuid.uuid4().hex
team = client.teams.create(team_name).execute_query()
print(json.dumps(team.to_json(), indent=4))
# Team deletion is asynchronous; retry until it succeeds, then report.
team.delete_object().execute_query_retry(success_callback=print_success)  # clean up
|
{
"content_hash": "a8e603fd76ed99de01034acde7761f1b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 84,
"avg_line_length": 27.3,
"alnum_prop": 0.7435897435897436,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "caed63d1f74e8ef09ca84c49b85d86fcd138ff96",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/teams/create_team.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
}
|
import urwid
from mitmproxy.tools.console.grideditor import base
from mitmproxy.tools.console import signals
from mitmproxy.net.http import cookies
class Column(base.Column):
    """
    Grid-editor column whose cells are edited in a separate sub-grid
    editor rather than inline.
    """
    def __init__(self, heading, subeditor):
        super().__init__(heading)
        self.subeditor = subeditor
    def Edit(self, data):
        # Inline editing is not supported; edits go through the subeditor.
        raise RuntimeError("SubgridColumn should handle edits itself")
    def Display(self, data):
        return Display(data)
    def blank(self):
        # A blank cell is an empty attribute list.
        return []
    def keypress(self, key: str, editor):
        if key in "rRe":
            # Plain edit keys are rejected; the field must be entered first.
            signals.status_message.send(
                message="Press enter to edit this field."
            )
            return
        elif key == "m_select":
            # Enter: hand the current editor to the subeditor, switch views.
            self.subeditor.grideditor = editor
            editor.master.switch_view("edit_focus_setcookie_attrs")
        else:
            # Unhandled keys propagate back to the caller.
            return key
class Display(base.Cell):
    """Read-only cell rendering cookie attribute pairs, one per line."""
    def __init__(self, data):
        # NOTE(review): relies on the private cookies._format_pairs helper.
        p = cookies._format_pairs(data, sep="\n")
        w = urwid.Text(p)
        super().__init__(w)
    def get_data(self):
        # Display cells carry no editable data.
        pass
|
{
"content_hash": "63fbb230c8f56bbe13a0c7f8b8d34a74",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 70,
"avg_line_length": 26.121951219512194,
"alnum_prop": 0.5929038281979458,
"repo_name": "mhils/mitmproxy",
"id": "be4b4271b5535b72ca891f89cae176d163f63368",
"size": "1071",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "mitmproxy/tools/console/grideditor/col_subgrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "Dockerfile",
"bytes": "618"
},
{
"name": "HTML",
"bytes": "10672"
},
{
"name": "JavaScript",
"bytes": "134086"
},
{
"name": "Kaitai Struct",
"bytes": "3670"
},
{
"name": "Less",
"bytes": "21203"
},
{
"name": "PowerShell",
"bytes": "258"
},
{
"name": "Python",
"bytes": "2367991"
},
{
"name": "Shell",
"bytes": "3055"
},
{
"name": "TypeScript",
"bytes": "279053"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
# (stored value, human-readable label) pairs for the ``published`` fields.
PUBLISHED_CHOICES = (('private', 'private'), ('public', 'public'), ('shared', 'shared'))
# Default visibility is the first choice ('private'); [0][1] takes the
# label, which is identical to the stored value here.
PUBLISHED_DEFAULT = PUBLISHED_CHOICES[0][1]
@python_2_unicode_compatible
class Photo(models.Model):
    """A user-uploaded photo with ownership and publication status."""
    # Image files are stored under MEDIA_ROOT/media.
    image = models.ImageField(upload_to='media')
    title = models.CharField(max_length=250)
    description = models.TextField()
    # uploaded is set once on creation; modified on every save.
    uploaded = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    # None until the photo is actually published.
    date_published = models.DateTimeField(default=None, null=True)
    # Deleting a user deletes their photos (CASCADE).
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='photos'
    )
    # NOTE(review): unlike Album.published, no default is set here —
    # confirm that forms always supply a value.
    published = models.CharField(
        max_length=10,
        choices=PUBLISHED_CHOICES
    )
    def __str__(self):
        """Return title."""
        return self.title
@python_2_unicode_compatible
class Album(models.Model):
    """A user-owned collection of photos with publication status."""
    photos = models.ManyToManyField('Photo', related_name='album')
    owned_by = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='albums')
    title = models.CharField(max_length=255)
    description = models.TextField()
    # created is set once on creation; modified on every save.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    # None until the album is actually published.
    date_published = models.DateTimeField(default=None, null=True)
    # Optional cover image; may be absent.
    cover_photo = models.ForeignKey('Photo', related_name='cover', blank=True, default=None, null=True)
    published = models.CharField(
        max_length=10,
        choices=PUBLISHED_CHOICES,
        default=PUBLISHED_DEFAULT
    )
    def __str__(self):
        """Return title."""
        return self.title
|
{
"content_hash": "6883e2c834e023f340a54efb8f2ff935",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 103,
"avg_line_length": 31.339285714285715,
"alnum_prop": 0.6763532763532764,
"repo_name": "DZwell/django-imager",
"id": "b0b67faefd5b86a4236ef41159896a01d9ca62bc",
"size": "1755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_images/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16548"
}
],
"symlink_target": ""
}
|
from .world import world, res_filename
from bigml.fields import Fields, get_resource_type
from bigml.io import UnicodeReader
from nose.tools import eq_
#@step(r'I create a Fields object from the source with objective column "(.*)"')
def create_fields(step, objective_column):
    """Build a Fields object from ``world.source`` using the given
    objective column index and store it in ``world.fields``."""
    world.fields = Fields(world.source, objective_field=int(objective_column),
                          objective_field_present=True)
#@step(r'I create a Fields object from the dataset with objective column "(.*)"')
def create_fields_from_dataset(step, objective_column):
    """Build a Fields object from ``world.dataset`` using the given
    objective column index and store it in ``world.fields``."""
    world.fields = Fields(world.dataset, objective_field=int(objective_column),
                          objective_field_present=True)
#@step(r'the object id is "(.*)"')
def check_objective(step, objective_id):
    """Assert that the Fields objective field resolves to ``objective_id``."""
    found_id = world.fields.field_id(world.fields.objective_field)
    eq_(found_id, objective_id)
#@step(r'I import a summary fields file "(.*)" as a fields structure')
def import_summary_file(step, summary_file):
    """Build a new fields structure from a summary CSV resource file and
    store it in ``world.fields_struct``."""
    world.fields_struct = world.fields.new_fields_structure( \
        csv_attributes_file=res_filename(summary_file))
#@step(r'I check the new field structure has field "(.*)" as "(.*)"')
def check_field_type(step, field_id, field_type):
    """Assert ``field_id`` exists in the imported structure with the
    expected optype."""
    assert field_id in list(world.fields_struct['fields'].keys())
    eq_(world.fields_struct['fields'][field_id]["optype"], field_type)
#@step(r'I export a summary fields file "(.*)"')
def generate_summary(step, summary_file):
    """Export the fields summary CSV to ``summary_file``."""
    world.fields.summary_csv(res_filename(summary_file))
#@step(r'I check that the fields summary file is like "(.*)"')
def check_summary_like_expected(step, summary_file, expected_file):
    """Assert that the generated fields-summary CSV matches the expected
    CSV row for row."""
    def _read_rows(path):
        # Materialize the CSV rows so the two files can be compared directly.
        with UnicodeReader(res_filename(path)) as reader:
            return [row for row in reader]
    eq_(_read_rows(summary_file), _read_rows(expected_file))
#@step(r'I update the "<.*>" with the file "<.*>"')
def update_with_summary_file(step, resource, summary_file):
    """Update ``resource`` with field changes described in a summary CSV,
    wait for the API call to finish, and store the result on ``world``."""
    fields = Fields(resource)
    # Keep only the attributes that the API allows to be updated.
    changes = fields.filter_fields_update( \
        fields.new_fields_structure(res_filename(summary_file)))
    resource_type = get_resource_type(resource)
    resource = world.api.updaters[resource_type](resource, changes)
    world.api.ok(resource)
    setattr(world, resource_type, resource)
#@step(r'I check the source has field ".*" as ".*"')
def check_resource_field_type(step, resource, field_id, optype):
    """Assert the resource reports the expected optype for ``field_id``."""
    eq_(resource["object"]["fields"][field_id]["optype"], optype)
|
{
"content_hash": "53bad972407f9c82a334b3d59b08b7f0",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 81,
"avg_line_length": 38.82608695652174,
"alnum_prop": 0.6905561776782382,
"repo_name": "mmerce/python",
"id": "d9f209c33501bb383c146658bd83907f2bf23dc4",
"size": "3280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigml/tests/fields_steps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1531559"
}
],
"symlink_target": ""
}
|
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from bs4 import BeautifulSoup
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def html2js(value):
    """
    Template filter: parse ``value`` as HTML and return its ``repr`` so it
    can be embedded in JavaScript source.

    NOTE(review): BeautifulSoup is called without an explicit parser, and
    ``mark_safe`` is applied to the soup object rather than the final
    string — confirm both are intentional.
    """
    soup = mark_safe(BeautifulSoup(value))
    return repr(soup)
# Also register the same filter under the short alias 'h2j'.
register.filter('h2j', html2js)
@register.tag('htmltojs')
def htmltojs(parser, token):
    """
    {% htmltojs %}...{% endhtmltojs %} block tag: renders the enclosed
    template content and converts it with the html2js filter.
    """
    nodelist = parser.parse(('endhtmltojs',))
    parser.delete_first_token()
    return HtmlToJs(nodelist)
class HtmlToJs(template.Node):
    """Template node that renders its children and pipes the rendered
    HTML through the html2js filter."""
    def __init__(self, nodelist):
        self.nodelist = nodelist
    def render(self, context):
        output = self.nodelist.render(context)
        return html2js(output)
|
{
"content_hash": "fd8e407f381ba7532d144e219e2ed727",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 55,
"avg_line_length": 25.964285714285715,
"alnum_prop": 0.7290233837689133,
"repo_name": "Tarun12345/rat-notes",
"id": "b939813a9027b8dd48d4c4924b5d4594834745e7",
"size": "727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/note/templatetags/html2js.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70695"
},
{
"name": "JavaScript",
"bytes": "85493"
},
{
"name": "Python",
"bytes": "87237"
},
{
"name": "Shell",
"bytes": "6714"
}
],
"symlink_target": ""
}
|
"""
This module implements a TensorBoard log writer.
"""
import os
import tensorflow as tf
from niftynet.engine.application_variables import TF_SUMMARIES
from niftynet.engine.signal import \
TRAIN, VALID, ITER_STARTED, ITER_FINISHED, GRAPH_CREATED
from niftynet.io.misc_io import get_latest_subfolder
class TensorBoardLogger(object):
    """
    This class handles iteration events to log summaries to
    the TensorBoard log.
    """
    def __init__(self,
                 model_dir=None,
                 initial_iter=0,
                 tensorboard_every_n=0,
                 **_unused):
        """
        :param model_dir: base model folder; summaries go under
            ``<model_dir>/logs``
        :param initial_iter: starting iteration; 0 indicates a fresh run,
            so a new summary subfolder is created
        :param tensorboard_every_n: write a log entry every n iterations;
            values <= 0 disable TensorBoard logging entirely
        """
        self.tensorboard_every_n = tensorboard_every_n
        # creating new summary subfolder if it's not finetuning
        self.summary_dir = get_latest_subfolder(
            os.path.join(model_dir, 'logs'), create_new=initial_iter == 0)
        self.writer_train = None
        self.writer_valid = None
        # Writers are created lazily once the TF graph exists; the two
        # iteration signals drive reading and writing of summaries.
        GRAPH_CREATED.connect(self.init_writer)
        ITER_STARTED.connect(self.read_tensorboard_op)
        ITER_FINISHED.connect(self.write_tensorboard)
    def init_writer(self, _sender, **_unused_msg):
        """
        Initialise summary writers (one for training, one for validation).

        :param _sender: unused signal sender
        :param _unused_msg: unused signal payload
        :return:
        """
        # initialise summary writer
        if not self.summary_dir or self.tensorboard_every_n <= 0:
            return
        self.writer_train = tf.summary.FileWriter(
            os.path.join(self.summary_dir, TRAIN), tf.get_default_graph())
        self.writer_valid = tf.summary.FileWriter(
            os.path.join(self.summary_dir, VALID), tf.get_default_graph())
    def read_tensorboard_op(self, sender, **msg):
        """
        Get TensorBoard summary_op from application at the
        beginning of each iteration.

        :param sender: a niftynet.application instance
        :param msg: should contain an IterationMessage instance
        """
        _iter_msg = msg['iter_msg']
        # Inference runs produce no summaries.
        if _iter_msg.is_inference:
            return
        if not self._is_writing(_iter_msg.current_iter):
            return
        # Ask the application for its summary ops and schedule them to run.
        tf_summary_ops = sender.outputs_collector.variables(TF_SUMMARIES)
        _iter_msg.ops_to_run[TF_SUMMARIES] = tf_summary_ops
    def write_tensorboard(self, _sender, **msg):
        """
        Write to tensorboard when received the iteration finished signal.

        :param _sender: unused signal sender
        :param msg: should contain an IterationMessage instance
        """
        _iter_msg = msg['iter_msg']
        if not self._is_writing(_iter_msg.current_iter):
            return
        # Route the summary to the writer matching the iteration phase.
        if _iter_msg.is_training:
            _iter_msg.to_tf_summary(self.writer_train)
        elif _iter_msg.is_validation:
            _iter_msg.to_tf_summary(self.writer_valid)
    def _is_writing(self, c_iter):
        """
        Decide whether to save a TensorBoard log entry for a given iteration.

        :param c_iter: Integer of the current iteration number
        :return: boolean True if is writing at the current iteration
        """
        if self.writer_valid is None or self.writer_train is None:
            return False
        if not self.summary_dir:
            return False
        return c_iter % self.tensorboard_every_n == 0
|
{
"content_hash": "86c5c13c6d6c2f9847fbe19ee0a77fdd",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 33.36842105263158,
"alnum_prop": 0.6126182965299685,
"repo_name": "NifTK/NiftyNet",
"id": "b5bf693f4f8451ebbcfec7c368e2f1f4b72054e2",
"size": "3194",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "niftynet/engine/handler_tensorboard.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "381956"
},
{
"name": "C++",
"bytes": "182582"
},
{
"name": "CMake",
"bytes": "3500"
},
{
"name": "Cuda",
"bytes": "69664"
},
{
"name": "Python",
"bytes": "2340002"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
}
|
"""
=====================
Simple volume slicing
=====================
Here we present an example for visualizing slices from 3D images.
"""
from __future__ import division
import os
import nibabel as nib
from dipy.data import fetch_bundles_2_subjects
from dipy.viz import window, actor
"""
Let's download and load a T1.
"""
fetch_bundles_2_subjects()
fname_t1 = os.path.join(os.path.expanduser('~'), '.dipy',
'exp_bundles_and_maps', 'bundles_2_subjects',
'subj_1', 't1_warped.nii.gz')
img = nib.load(fname_t1)
data = img.get_data()
affine = img.get_affine()
"""
Create a Renderer object which holds all the actors which we want to visualize.
"""
renderer = window.Renderer()
renderer.background((1, 1, 1))
"""
Render slices from T1 with a specific value range
=================================================
The T1 has usually a higher range of values than what can be visualized in an
image. We can set the range that we would like to see.
"""
mean, std = data[data > 0].mean(), data[data > 0].std()
value_range = (mean - 0.5 * std, mean + 1.5 * std)
"""
The ``slice`` function will read data and resample the data using an affine
transformation matrix. The default behavior of this function is to show the
middle slice of the last dimension of the resampled data.
"""
slice_actor = actor.slicer(data, affine, value_range)
"""
The ``slice_actor`` contains an axial slice.
"""
renderer.add(slice_actor)
"""
The same actor can show any different slice from the given data using its
``display`` function. However, if we want to show multiple slices we need to
copy the actor first.
"""
slice_actor2 = slice_actor.copy()
"""
Now we have a new ``slice_actor`` which displays the middle slice of sagittal
plane.
"""
slice_actor2.display(slice_actor2.shape[0]//2, None, None)
renderer.add(slice_actor2)
renderer.reset_camera()
renderer.zoom(1.4)
"""
In order to interact with the data you will need to uncomment the line below.
"""
# window.show(renderer, size=(600, 600), reset_camera=False)
"""
Otherwise, you can save a screenshot using the following command.
"""
window.record(renderer, out_path='slices.png', size=(600, 600),
reset_camera=False)
"""
.. figure:: slices.png
:align: center
**Simple slice viewer**.
Render slices from FA with your colormap
========================================
It is also possible to set the colormap of your preference. Here we are loading
an FA image and showing it in a non-standard way using an HSV colormap.
"""
fname_fa = os.path.join(os.path.expanduser('~'), '.dipy',
                        'exp_bundles_and_maps', 'bundles_2_subjects',
                        'subj_1', 'fa_1x1x1.nii.gz')
img = nib.load(fname_fa)
# NOTE(review): get_data() is deprecated in newer nibabel (use
# get_fdata()) — confirm the targeted nibabel version.
fa = img.get_data()
"""
Notice here how the scale range is (0, 255) and not (0, 1) which is the usual
range of FA values.
"""
lut = actor.colormap_lookup_table(scale_range=(0, 255),
                                  hue_range=(0.4, 1.),
                                  saturation_range=(1, 1.),
                                  value_range=(0., 1.))
"""
This is because the lookup table is applied in the slice after interpolating
to (0, 255).
"""
fa_actor = actor.slicer(fa, affine, lookup_colormap=lut)
# Replace the T1 actors with the FA actor and re-render.
renderer.clear()
renderer.add(fa_actor)
renderer.reset_camera()
renderer.zoom(1.4)
# window.show(renderer, size=(600, 600), reset_camera=False)
window.record(renderer, out_path='slices_lut.png', size=(600, 600),
              reset_camera=False)
"""
.. figure:: slices_lut.png
:align: center
**Simple slice viewer with an HSV colormap**.
Create a mosaic
================
By using the ``copy`` and ``display`` method of the ``slice_actor`` becomes
easy and efficient to create a mosaic of all the slices.
So, let's clear the renderer and change the projection from perspective to
parallel.
"""
renderer.clear()
renderer.projection('parallel')
"""
Now we need to create two nested for loops which will set the positions of
the grid of the mosaic and add the new actors to the renderer. We are going
to use 15 columns and 10 rows but you can adjust those with your datasets.
"""
# Lay the axial slices out on a rows x cols grid, one copied actor per slice.
cnt = 0
X, Y, Z = slice_actor.shape[:3]
rows = 10
cols = 15
border = 10
for j in range(rows):
    for i in range(cols):
        slice_mosaic = slice_actor.copy()
        # Show slice index ``cnt`` along the last (z) axis.
        slice_mosaic.display(None, None, cnt)
        # Column i moves right; row j moves down from a vertical offset.
        slice_mosaic.SetPosition((X + border) * i,
                                 0.5 * cols * (Y + border) - (Y + border) * j,
                                 0)
        renderer.add(slice_mosaic)
        cnt += 1
        # Valid z indices are 0 .. Z - 1, so stop as soon as all Z slices
        # are placed (the original ``cnt > Z`` test displayed one
        # out-of-range slice before breaking).
        if cnt >= Z:
            break
    if cnt >= Z:
        break
renderer.reset_camera()
renderer.zoom(1.6)
# window.show(renderer, size=(900, 600), reset_camera=False)
"""
If you uncomment the ``window.show`` line above, you will be able to move the
mosaic up/down and left/right using the middle mouse button pressed. And zoom
in/out using the scroll wheel.
"""
window.record(renderer, out_path='mosaic.png', size=(900, 600),
reset_camera=False)
"""
.. figure:: mosaic.png
:align: center
**A mosaic of all the slices in the T1 volume**.
"""
|
{
"content_hash": "de871e11c1c687ce3141e2c919ae2d8c",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 79,
"avg_line_length": 24.37735849056604,
"alnum_prop": 0.6331269349845201,
"repo_name": "matthieudumont/dipy",
"id": "96664a3472938b8f9d1512dfc3d2953fedac1053",
"size": "5169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/examples/viz_slice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2844"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Python",
"bytes": "2944439"
}
],
"symlink_target": ""
}
|
'''
test_core.py: Python testing for core functions for Singularity in Python,
including defaults, utils, and shell functions.
Copyright (c) 2016-2017, Vanessa Sochat. All rights reserved.
"Singularity" Copyright (c) 2016, The Regents of the University of California,
through Lawrence Berkeley National Laboratory (subject to receipt of any
required approvals from the U.S. Dept. of Energy). All rights reserved.
This software is licensed under a customized 3-clause BSD license. Please
consult LICENSE file distributed with the sources of this project regarding
your rights to use or distribute this software.
NOTICE. This Software was developed under funding from the U.S. Department of
Energy and the U.S. Government consequently retains certain rights. As such,
the U.S. Government has been granted for itself and others acting on its
behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
to reproduce, distribute copies to the public, prepare derivative works, and
perform publicly and display publicly, and to permit other to do so.
'''
import os
import re
import sys
import tarfile
sys.path.append('..') # directory with client
from unittest import TestCase
import shutil
import tempfile
# Major Python version (2 or 3); some tests below branch on str/bytes
# behavior depending on this.
VERSION = sys.version_info[0]
print("*** PYTHON VERSION %s BASE TESTING START ***" %(VERSION))
class TestShell(TestCase):
    '''Unit tests for the image URI helpers in shell.py:
    get_image_uri, remove_image_uri and parse_image_uri.
    '''
    def setUp(self):
        # Test repo information
        self.registry = "registry"
        self.repo_name = "repo"
        self.namespace = "namespace"
        self.tag = "tag"
        # Default repo information
        self.REGISTRY = 'index.docker.io'
        self.NAMESPACE = 'library'
        self.REPO_TAG = 'latest'
        # Per-test scratch directory, also exported as the container rootfs.
        self.tmpdir = tempfile.mkdtemp()
        os.environ['SINGULARITY_ROOTFS'] = self.tmpdir
        print("\n---START----------------------------------------")
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        print("---END------------------------------------------")
    def test_get_image_uri(self):
        '''test_get_image_uri ensures that the correct uri is returned
        for a user specified uri, registry, namespace.
        '''
        from shell import get_image_uri
        print("Case 1: No image uri should return None")
        image_uri = get_image_uri('namespace/repo:tag')
        self.assertEqual(image_uri, None)
        print("Case 2: testing return of shub://")
        image_uri = get_image_uri('shub://namespace/repo:tag')
        self.assertEqual(image_uri, 'shub://')
        print("Case 3: testing return of docker uri")
        image_uri = get_image_uri('docker://namespace/repo:tag')
        self.assertEqual(image_uri, 'docker://')
        print("Case 4: weird capitalization should return lowercase")
        image_uri = get_image_uri('DocKer://namespace/repo:tag')
        self.assertEqual(image_uri, 'docker://')
    def test_remove_image_uri(self):
        '''test_remove_image_uri removes the uri
        '''
        from shell import remove_image_uri
        print("Case 1: No image_uri should estimate first")
        image = remove_image_uri('myuri://namespace/repo:tag')
        self.assertEqual(image, "namespace/repo:tag")
        print("Case 2: Missing image uri should return image")
        image = remove_image_uri('namespace/repo:tag')
        self.assertEqual(image, "namespace/repo:tag")
    def test_parse_image_uri(self):
        '''test_parse_image_uri ensures that the correct namespace,
        repo name, and tag (or unique id) is returned.
        '''
        from shell import parse_image_uri
        print("Case 1: Empty repo_name should return error")
        # parse_image_uri exits with code 1 on an empty image string.
        with self.assertRaises(SystemExit) as cm:
            image = parse_image_uri(image="")
        self.assertEqual(cm.exception.code, 1)
        print("Case 2: Checking for correct output tags in digest...")
        image_name = "%s/%s" %(self.namespace,self.repo_name)
        digest = parse_image_uri(image=image_name)
        for tag in ['registry','repo_name','repo_tag','namespace']:
            self.assertTrue(tag in digest)
        print("Case 3: Specifying only an image should return defaults")
        image = parse_image_uri(image="shub://lizardleezle",
                                uri = "shub://")
        self.assertTrue(isinstance(image,dict))
        self.assertEqual(image["namespace"],self.NAMESPACE)
        self.assertEqual(image["repo_tag"],self.REPO_TAG)
        self.assertEqual(image["repo_name"],'lizardleezle')
        self.assertEqual(image["registry"],self.REGISTRY)
        print("Case 4: Tag when speciifed should be returned.")
        image_name = "%s/%s:%s" %(self.namespace,self.repo_name,"pusheenasaurus")
        digest = parse_image_uri(image_name)
        self.assertTrue(digest['repo_tag'] == 'pusheenasaurus')
        print("Case 5: Repo name and tag without namespace...")
        image_name = "%s:%s" %(self.repo_name,self.tag)
        digest = parse_image_uri(image_name)
        self.assertTrue(digest['repo_tag'] == self.tag)
        self.assertTrue(digest['namespace'] == 'library')
        self.assertTrue(digest['repo_name'] == self.repo_name)
        print("Case 6: Changing default namespace should not use library.")
        image_name = "meow/%s:%s" %(self.repo_name,self.tag)
        digest = parse_image_uri(image_name)
        self.assertTrue(digest['namespace'] == 'meow')
        print("Case 7: Changing default registry should not use index.docker.io.")
        image_name = "meow/mix/%s:%s" %(self.repo_name,self.tag)
        digest = parse_image_uri(image_name)
        self.assertTrue(digest['registry'] == 'meow')
        self.assertTrue(digest['namespace'] == 'mix')
        print("Case 8: Custom uri should use it.")
        image_name = "catdog://meow/mix/%s:%s" %(self.repo_name,self.tag)
        digest = parse_image_uri(image_name,uri="catdog://")
        self.assertTrue(digest['registry'] == 'meow')
        self.assertTrue(digest['namespace'] == 'mix')
        print("Case 9: Digest version should be parsed")
        image_name = "catdog://meow/mix/%s:%s@sha:256xxxxxxxxxxxxxxx" %(self.repo_name,self.tag)
        digest = parse_image_uri(image_name,uri="catdog://")
        self.assertTrue(digest['registry'] == 'meow')
        self.assertTrue(digest['namespace'] == 'mix')
        self.assertTrue(digest['version'] == 'sha:256xxxxxxxxxxxxxxx')
class TestUtils(TestCase):
    '''Unit tests for the sutils helper module: URL normalisation,
    auth headers, subprocess wrapper, tar extraction, file read/write,
    and path helpers.
    '''
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        print("\n---START----------------------------------------")
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        print("---END------------------------------------------")
    def test_add_http(self):
        '''test_add_http ensures that http is added to a url
        '''
        from sutils import add_http
        url_http = 'http://registry.docker.io'
        url_https = 'https://registry.docker.io'
        print("Case 1: adding https to url with nothing specified...")
        # Default is https
        url = 'registry.docker.io'
        http = add_http(url)
        self.assertEqual(url_https,http)
        # http
        print("Case 2: adding http to url with nothing specified...")
        http = add_http(url,use_https=False)
        self.assertEqual(url_http,http)
        # This should not change. Note - is url is http, stays http
        print("Case 3: url already has https, should not change...")
        url = 'https://registry.docker.io'
        http = add_http(url)
        self.assertEqual(url_https,http)
        # This should not change. Note - is url is http, stays http
        print("Case 4: url already has http, should not change...")
        url = 'http://registry.docker.io'
        http = add_http(url,use_https=False)
        self.assertEqual(url_http,http)
        print("Case 5: url has http, should change to https")
        url = 'http://registry.docker.io'
        http = add_http(url)
        self.assertEqual(url_https,http)
        print("Case 6: url has https, should change to http")
        url = 'https://registry.docker.io'
        http = add_http(url,use_https=False)
        self.assertEqual(url_http,http)
        print("Case 7: url should have trailing slash stripped")
        url = 'https://registry.docker.io/'
        http = add_http(url,use_https=False)
        self.assertEqual(url_http,http)
    def test_headers(self):
        '''test_headers checks construction of the basic
        authentication header
        '''
        print("Testing utils header functions...")
        from sutils import basic_auth_header
        # Basic auth header
        print("Case 4: basic_auth_header - ask for custom authentication header")
        # Expected value is base64("vanessa:pancakes").
        auth = basic_auth_header(username='vanessa',
                                 password='pancakes')
        self.assertEqual(auth['Authorization'],
                         'Basic dmFuZXNzYTpwYW5jYWtlcw==')
    def test_run_command(self):
        '''test_run_command tests sending a command to commandline
        using subprocess
        '''
        print("Testing utils.run_command...")
        from sutils import run_command
        # An error should return None
        print("Case 1: Command errors returns None ")
        none = run_command(['exec','whaaczasd'])
        self.assertEqual(none,None)
        # A success should return console output
        print("Case 2: Command success returns output")
        hello = run_command(['echo','hello'])
        if not isinstance(hello,str): # python 3 support
            hello = hello.decode('utf-8')
        self.assertEqual(hello,'hello\n')
    def test_is_number(self):
        '''test_is_number should return True for any string or
        number that turns to a number, and False for everything else
        '''
        print("Testing utils.is_number...")
        from sutils import is_number
        print("Case 1: Testing string and float numbers returns True")
        self.assertTrue(is_number("4"))
        self.assertTrue(is_number(4))
        self.assertTrue(is_number("2.0"))
        self.assertTrue(is_number(2.0))
        print("Case 2: Testing repo names, tags, commits, returns False")
        self.assertFalse(is_number("vsoch/singularity-images"))
        self.assertFalse(is_number("vsoch/singularity-images:latest"))
        self.assertFalse(is_number("44ca6e7c6c35778ab80b34c3fc940c32f1810f39"))
    def test_extract_tar(self):
        '''test_extract_tar will test extraction of a tar.gz file
        '''
        print("Testing utils.extract_tar...")
        # First create a temporary tar file
        from sutils import extract_tar
        from glob import glob
        import tarfile
        # Create and close a temporary tar.gz
        print("Case 1: Testing tar.gz...")
        creation_dir = tempfile.mkdtemp()
        archive,files = create_test_tar(creation_dir)
        # Extract to different directory
        extract_dir = tempfile.mkdtemp()
        extract_tar(archive=archive,
                    output_folder=extract_dir)
        # Members were added with absolute /tmp paths, so strip the
        # extraction prefix before comparing against the originals.
        extracted_files = [x.replace(extract_dir,'') for x in glob("%s/tmp/*" %(extract_dir))]
        [self.assertTrue(x in files) for x in extracted_files]
        # Clean up
        for dirname in [extract_dir,creation_dir]:
            shutil.rmtree(dirname)
        print("Case 2: Testing tar...")
        creation_dir = tempfile.mkdtemp()
        archive,files = create_test_tar(creation_dir,compressed=False)
        # Extract to different directory
        extract_dir = tempfile.mkdtemp()
        extract_tar(archive=archive,
                    output_folder=extract_dir)
        extracted_files = [x.replace(extract_dir,'') for x in glob("%s/tmp/*" %(extract_dir))]
        [self.assertTrue(x in files) for x in extracted_files]
        print("Case 3: Testing that extract_tar returns None on error...")
        creation_dir = tempfile.mkdtemp()
        archive,files = create_test_tar(creation_dir,compressed=False)
        extract_dir = tempfile.mkdtemp()
        # Removing the output folder forces the extraction to fail.
        shutil.rmtree(extract_dir)
        output = extract_tar(archive=archive,
                             output_folder=extract_dir)
        self.assertEqual(output,None)
    def test_write_read_files(self):
        '''test_write_read_files will test the functions write_file and read_file
        '''
        print("Testing utils.write_file...")
        from sutils import write_file
        import json
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_file(tmpfile,"hello!")
        self.assertTrue(os.path.exists(tmpfile))
        print("Testing utils.read_file...")
        from sutils import read_file
        content = read_file(tmpfile)[0]
        self.assertEqual("hello!",content)
        from sutils import write_json
        print("Testing utils.write_json...")
        print("Case 1: Providing bad json")
        # A set ({True}) is not JSON-serializable, so this must raise.
        bad_json = {"Wakkawakkawakka'}":[{True},"2",3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        with self.assertRaises(TypeError) as cm:
            write_json(bad_json,tmpfile)
        print("Case 2: Providing good json")
        good_json = {"Wakkawakkawakka":[True,"2",3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_json(good_json,tmpfile)
        content = json.load(open(tmpfile,'r'))
        self.assertTrue(isinstance(content,dict))
        self.assertTrue("Wakkawakkawakka" in content)
    def test_clean_path(self):
        '''test_clean_path will test the clean_path function
        '''
        print("Testing utils.clean_path...")
        from sutils import clean_path
        ideal_path = '/home/vanessa/Desktop/stuff'
        self.assertEqual(clean_path('/home/vanessa/Desktop/stuff/'),ideal_path)
        self.assertEqual(clean_path('/home/vanessa/Desktop/stuff//'),ideal_path)
        self.assertEqual(clean_path('/home/vanessa//Desktop/stuff/'),ideal_path)
    def test_get_fullpath(self):
        '''test_get_fullpath will test the get_fullpath function
        '''
        print("Testing utils.get_fullpath...")
        from sutils import get_fullpath
        tmpfile = tempfile.mkstemp()[1]
        print("Case 1: File exists, should return full path")
        self.assertEqual(get_fullpath(tmpfile),tmpfile)
        print("Case 2: File doesn't exist, should return error")
        os.remove(tmpfile)
        with self.assertRaises(SystemExit) as cm:
            get_fullpath(tmpfile)
        self.assertEqual(cm.exception.code, 1)
        print("Case 3: File doesn't exist, but not required, should return None")
        self.assertEqual(get_fullpath(tmpfile,required=False),None)
    def test_write_singularity_infos(self):
        '''test_write_singularity_infos tests writing metadata info
        files into a base directory, with auto-incrementing numbers
        per prefix.
        '''
        print("Testing utils.write_singuarity_infos...")
        from sutils import write_singularity_infos
        base_dir = '%s/ROOTFS' %(self.tmpdir)
        prefix = 'docker'
        start_number = 0
        content = "export HELLO=MOTO"
        print("Case 1: Metadata base doesn't exist, should return error")
        with self.assertRaises(SystemExit) as cm:
            info_file = write_singularity_infos(base_dir=base_dir,
                                                prefix=prefix,
                                                start_number=start_number,
                                                content=content)
        self.assertEqual(cm.exception.code, 1)
        print("Case 2: Metadata base does exist, should return path.")
        os.mkdir(base_dir)
        info_file = write_singularity_infos(base_dir=base_dir,
                                            prefix=prefix,
                                            start_number=start_number,
                                            content=content)
        self.assertEqual(info_file,"%s/%s-%s" %(base_dir,start_number,prefix))
        print("Case 3: Adding another equivalent prefix should return next")
        info_file = write_singularity_infos(base_dir=base_dir,
                                            prefix=prefix,
                                            start_number=start_number,
                                            content=content)
        self.assertEqual(info_file,"%s/%s-%s" %(base_dir,start_number+1,prefix))
        print("Case 4: Files have correct content.")
        with open(info_file,'r') as filey:
            written_content = filey.read()
        self.assertEqual(content,written_content)
# Supporting Test Functions
def create_test_tar(tmpdir, compressed=True):
    '''Create a throwaway tar archive containing three temporary files.

    Parameters
    ==========
    tmpdir: directory in which to create the archive
    compressed: if True (default) write a gzipped ``.tar.gz``,
        otherwise an uncompressed ``.tar``

    Returns
    =======
    (archive_path, list of the member file paths that were added)
    '''
    if compressed:
        archive = "%s/toodles.tar.gz" % tmpdir
        mode = "w:gz"
    else:
        archive = "%s/toodles.tar" % tmpdir
        mode = "w"
    print("Creating %s" %(archive))
    members = [tempfile.mkstemp()[1] for _ in range(3)]
    # Context manager guarantees the archive is finalized/closed even
    # if adding a member fails.
    with tarfile.open(archive, mode) as tar:
        for member in members:
            tar.add(member)
    return archive, members
if __name__ == '__main__':
    # Bug fix: the module only does ``from unittest import TestCase``,
    # so the bare ``unittest.main()`` raised NameError when the file was
    # run directly. Import the module locally before dispatching.
    import unittest
    unittest.main()
|
{
"content_hash": "ef6bac22100c0131a51c8f731f7cc54f",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 96,
"avg_line_length": 38.348314606741575,
"alnum_prop": 0.604160562554937,
"repo_name": "Trophime/singularity",
"id": "88fc23aed76bed716e46b2c22adb32b6b0e85b4b",
"size": "17065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libexec/python/tests/test_core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "388098"
},
{
"name": "C++",
"bytes": "2141"
},
{
"name": "M4",
"bytes": "12781"
},
{
"name": "Makefile",
"bytes": "30870"
},
{
"name": "Python",
"bytes": "182419"
},
{
"name": "Shell",
"bytes": "190455"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.tests.testutils import *
# In-memory SQLite database shared by every test in this module.
db = Database('sqlite', ':memory:')
class Student(db.Entity):
    # autostrip=False preserves the deliberate whitespace padding in
    # names such as " Bob " so the strip/lstrip/rstrip tests have data.
    name = Required(unicode, autostrip=False)
    foo = Optional(unicode)
    bar = Optional(unicode)
db.generate_mapping(create_tables=True)
# Fixture rows, referenced by primary key (Student[1] .. Student[5]) below.
with db_session:
    Student(id=1, name="Jon", foo='Abcdef', bar='b%d')
    Student(id=2, name=" Bob ", foo='Ab%def', bar='b%d')
    Student(id=3, name=" Beth ", foo='Ab_def', bar='b%d')
    Student(id=4, name="Jonathan")
    Student(id=5, name="Pete")
class TestStringMethods(unittest.TestCase):
    '''Tests that Python string operations (slicing, indexing, ``in``,
    startswith/endswith, strip, upper/lower) used inside Pony ``select``
    generators are translated to SQL correctly.
    '''
    def setUp(self):
        # Each test runs inside a fresh db_session.
        rollback()
        db_session.__enter__()
    def tearDown(self):
        rollback()
        db_session.__exit__()
    def test_nonzero(self):
        result = set(select(s for s in Student if s.foo))
        self.assertEqual(result, {Student[1], Student[2], Student[3]})
    def test_add(self):
        name = 'Jonny'
        result = set(select(s for s in Student if s.name + "ny" == name))
        self.assertEqual(result, {Student[1]})
    def test_slice_1(self):
        result = set(select(s for s in Student if s.name[0:3] == "Jon"))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_slice_2(self):
        result = set(select(s for s in Student if s.name[:3] == "Jon"))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_slice_3(self):
        x = 3
        result = set(select(s for s in Student if s.name[:x] == "Jon"))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_slice_4(self):
        x = 3
        result = set(select(s for s in Student if s.name[0:x] == "Jon"))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_slice_5(self):
        result = set(select(s for s in Student if s.name[0:10] == "Jon"))
        self.assertEqual(result, {Student[1]})
    def test_slice_6(self):
        result = set(select(s for s in Student if s.name[0:] == "Jon"))
        self.assertEqual(result, {Student[1]})
    def test_slice_7(self):
        result = set(select(s for s in Student if s.name[:] == "Jon"))
        self.assertEqual(result, {Student[1]})
    def test_slice_8(self):
        result = set(select(s for s in Student if s.name[1:] == "on"))
        self.assertEqual(result, {Student[1]})
    def test_slice_9(self):
        x = 1
        result = set(select(s for s in Student if s.name[x:] == "on"))
        self.assertEqual(result, {Student[1]})
    def test_slice_10(self):
        x = 0
        result = set(select(s for s in Student if s.name[x:3] == "Jon"))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_slice_11(self):
        x = 1
        y = 3
        result = set(select(s for s in Student if s.name[x:y] == "on"))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_slice_12(self):
        # Slicing past the end of the string yields '' for every row.
        x = 10
        y = 20
        result = set(select(s for s in Student if s.name[x:y] == ''))
        self.assertEqual(result, {Student[1], Student[2], Student[3], Student[4], Student[5]})
    def test_getitem_1(self):
        result = set(select(s for s in Student if s.name[1] == 'o'))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_getitem_2(self):
        x = 1
        result = set(select(s for s in Student if s.name[x] == 'o'))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_getitem_3(self):
        result = set(select(s for s in Student if s.name[-1] == 'n'))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_getitem_4(self):
        x = -1
        result = set(select(s for s in Student if s.name[x] == 'n'))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_contains_1(self):
        result = set(select(s for s in Student if 'o' in s.name))
        self.assertEqual(result, {Student[1], Student[2], Student[4]})
    def test_contains_2(self):
        result = set(select(s for s in Student if 'on' in s.name))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_contains_3(self):
        x = 'on'
        result = set(select(s for s in Student if x in s.name))
        self.assertEqual(result, {Student[1], Student[4]})
    def test_contains_4(self):
        x = 'on'
        result = set(select(s for s in Student if x not in s.name))
        self.assertEqual(result, {Student[2], Student[3], Student[5]})
    def test_contains_5(self):
        # '%' is a LIKE wildcard and must be escaped in the generated SQL.
        result = set(select(s for s in Student if '%' in s.foo))
        self.assertEqual(result, {Student[2]})
    def test_contains_6(self):
        x = '%'
        result = set(select(s for s in Student if x in s.foo))
        self.assertEqual(result, {Student[2]})
    def test_contains_7(self):
        # '_' is a LIKE wildcard and must be escaped in the generated SQL.
        result = set(select(s for s in Student if '_' in s.foo))
        self.assertEqual(result, {Student[3]})
    def test_contains_8(self):
        x = '_'
        result = set(select(s for s in Student if x in s.foo))
        self.assertEqual(result, {Student[3]})
    def test_contains_9(self):
        result = set(select(s for s in Student if s.foo in 'Abcdef'))
        self.assertEqual(result, {Student[1], Student[4], Student[5]})
    def test_contains_10(self):
        result = set(select(s for s in Student if s.bar in s.foo))
        self.assertEqual(result, {Student[2], Student[4], Student[5]})
    def test_startswith_1(self):
        students = set(select(s for s in Student if s.name.startswith('J')))
        self.assertEqual(students, {Student[1], Student[4]})
    def test_startswith_2(self):
        students = set(select(s for s in Student if not s.name.startswith('J')))
        self.assertEqual(students, {Student[2], Student[3], Student[5]})
    def test_startswith_3(self):
        students = set(select(s for s in Student if not not s.name.startswith('J')))
        self.assertEqual(students, {Student[1], Student[4]})
    def test_startswith_4(self):
        students = set(select(s for s in Student if not not not s.name.startswith('J')))
        self.assertEqual(students, {Student[2], Student[3], Student[5]})
    def test_startswith_5(self):
        x = "Pe"
        students = select(s for s in Student if s.name.startswith(x))[:]
        self.assertEqual(students, [Student[5]])
    def test_endswith_1(self):
        students = set(select(s for s in Student if s.name.endswith('n')))
        self.assertEqual(students, {Student[1], Student[4]})
    def test_endswith_2(self):
        x = "te"
        students = select(s for s in Student if s.name.endswith(x))[:]
        self.assertEqual(students, [Student[5]])
    def test_strip_1(self):
        students = select(s for s in Student if s.name.strip() == 'Beth')[:]
        self.assertEqual(students, [Student[3]])
    def test_rstrip(self):
        students = select(s for s in Student if s.name.rstrip('n') == 'Jo')[:]
        self.assertEqual(students, [Student[1]])
    def test_lstrip(self):
        students = select(s for s in Student if s.name.lstrip('P') == 'ete')[:]
        self.assertEqual(students, [Student[5]])
    def test_upper(self):
        result = select(s for s in Student if s.name.upper() == "JON")[:]
        self.assertEqual(result, [Student[1]])
    def test_lower(self):
        result = select(s for s in Student if s.name.lower() == "jon")[:]
        self.assertEqual(result, [Student[1]])
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "5740c4da5703fd802c621a54d5b65fab",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 94,
"avg_line_length": 36.54502369668246,
"alnum_prop": 0.5761898586434963,
"repo_name": "Ahmad31/Web_Flask_Cassandra",
"id": "2d71d54941b2f3646490fcf8350d6f625f4b9355",
"size": "7711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/lib/python2.7/site-packages/pony/orm/tests/test_declarative_strings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "34860"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "86875"
},
{
"name": "JavaScript",
"bytes": "7232"
},
{
"name": "Jupyter Notebook",
"bytes": "181"
},
{
"name": "Python",
"bytes": "12265503"
},
{
"name": "Shell",
"bytes": "3248"
}
],
"symlink_target": ""
}
|
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
# Public API of the module.
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
           'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
           'unified_diff', 'HtmlDiff', 'Match']
import heapq
from collections import namedtuple as _namedtuple
# Lightweight record of a matching block: start index in each sequence
# (fields ``a`` and ``b``) and the length of the match (``size``).
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
    """Similarity ratio 2*M/T; defined as 1.0 when the total length is 0."""
    if not length:
        return 1.0
    return 2.0 * matches / length
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
    # For each element x in b, set b2j[x] to a list of the indices in
    # b where x appears; the indices are in increasing order; note that
    # the number of times x appears in b is len(b2j[x]) ...
    # when self.isjunk is defined, junk elements don't show up in this
    # map at all, which stops the central find_longest_match method
    # from starting any matching block at a junk element ...
    # b2j also does not contain entries for "popular" elements, meaning
    # elements that account for more than 1 + 1% of the total elements, and
    # when the sequence is reasonably large (>= 200 elements); this can
    # be viewed as an adaptive notion of semi-junk, and yields an enormous
    # speedup when, e.g., comparing program files with hundreds of
    # instances of "return NULL;" ...
    # note that this is only called when b changes; so for cross-product
    # kinds of matches, it's best to call set_seq2 once, then set_seq1
    # repeatedly
    def __chain_b(self):
        """(Private) Rebuild the b-derived indices: b2j, bjunk, bpopular."""
        # Because isjunk is a user-defined (not C) function, and we test
        # for junk a LOT, it's important to minimize the number of calls.
        # Before the tricks described here, __chain_b was by far the most
        # time-consuming routine in the whole module! If anyone sees
        # Jim Roskind, thank him again for profile.py -- I never would
        # have guessed that.
        # The first trick is to build b2j ignoring the possibility
        # of junk. I.e., we don't call isjunk at all yet. Throwing
        # out the junk later is much cheaper than building b2j "right"
        # from the start.
        b = self.b
        self.b2j = b2j = {}
        for i, elt in enumerate(b):
            indices = b2j.setdefault(elt, [])
            indices.append(i)
        # Purge junk elements
        self.bjunk = junk = set()
        isjunk = self.isjunk
        if isjunk:
            # Classify first, delete second: mutating b2j while iterating
            # its keys would be an error.
            for elt in b2j.keys():
                if isjunk(elt):
                    junk.add(elt)
            for elt in junk: # separate loop avoids separate list of keys
                del b2j[elt]
        # Purge popular elements that are not junk
        self.bpopular = popular = set()
        n = len(b)
        if self.autojunk and n >= 200:
            # "popular" == appears in more than 1% of b (plus one).
            ntest = n // 100 + 1
            for elt, idxs in b2j.items():
                if len(idxs) > ntest:
                    popular.add(elt)
            for elt in popular: # ditto; as fast for 1% deletion
                del b2j[elt]
    def find_longest_match(self, alo, ahi, blo, bhi):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi].
        If isjunk is not defined:
        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
            alo <= i <= i+k <= ahi
            blo <= j <= j+k <= bhi
        and for all (i',j',k') meeting those conditions,
            k >= k'
            i <= i'
            and if i == i', j <= j'
        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.
        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=0, b=4, size=5)
        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block.  Then that block is extended as
        far as possible by matching (only) junk elements on both sides.  So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.
        Here's the same example as before, but considering blanks to be
        junk.  That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly.  Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:
        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=1, b=0, size=4)
        If no blocks match, return (alo, blo, 0).
        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        Match(a=0, b=0, size=0)
        """
        # CAUTION: stripping common prefix or suffix would be incorrect.
        # E.g.,
        # ab
        # acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle. That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.
        # Bind hot attribute lookups to locals; isbjunk tests membership
        # in the precomputed junk set, so the user predicate is never
        # called here.
        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
        besti, bestj, bestsize = alo, blo, 0
        # find longest junk-free match
        # during an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j]
        j2len = {}
        nothing = []
        for i in range(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get
            newj2len = {}
            for j in b2j.get(a[i], nothing):
                # a[i] matches b[j]
                if j < blo:
                    continue
                if j >= bhi:
                    break
                k = newj2len[j] = j2lenget(j-1, 0) + 1
                if k > bestsize:
                    besti, bestj, bestsize = i-k+1, j-k+1, k
            # Roll the DP table forward one row of a.
            j2len = newj2len
        # Extend the best by non-junk elements on each end. In particular,
        # "popular" non-junk elements aren't in b2j, which greatly speeds
        # the inner loop above, but also means "the best" match so far
        # doesn't contain any junk *or* popular non-junk elements.
        while besti > alo and bestj > blo and \
              not isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              not isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize += 1
        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too. Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it. In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1
        return Match(besti, bestj, bestsize)
    def get_matching_blocks(self):
        """Return list of triples describing matching subsequences.
        Each triple is of the form (i, j, n), and means that
        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
        i and in j.  New in Python 2.5, it's also guaranteed that if
        (i, j, n) and (i', j', n') are adjacent triples in the list, and
        the second is not the last triple in the list, then i+n != i' or
        j+n != j'.  IOW, adjacent triples never describe adjacent equal
        blocks.
        The last triple is a dummy, (len(a), len(b), 0), and is the only
        triple with n==0.
        >>> s = SequenceMatcher(None, "abxcd", "abcd")
        >>> list(s.get_matching_blocks())
        [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
        """
        # Serve the cached result when neither sequence has changed.
        if self.matching_blocks is not None:
            return self.matching_blocks
        la, lb = len(self.a), len(self.b)
        # This is most naturally expressed as a recursive algorithm, but
        # at least one user bumped into extreme use cases that exceeded
        # the recursion limit on their box. So, now we maintain a list
        # ('queue`) of blocks we still need to look at, and append partial
        # results to `matching_blocks` in a loop; the matches are sorted
        # at the end.
        queue = [(0, la, 0, lb)]
        matching_blocks = []
        while queue:
            alo, ahi, blo, bhi = queue.pop()
            i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
            # a[alo:i] vs b[blo:j] unknown
            # a[i:i+k] same as b[j:j+k]
            # a[i+k:ahi] vs b[j+k:bhi] unknown
            if k: # if k is 0, there was no matching block
                matching_blocks.append(x)
                if alo < i and blo < j:
                    queue.append((alo, i, blo, j))
                if i+k < ahi and j+k < bhi:
                    queue.append((i+k, ahi, j+k, bhi))
        # Sort into ascending (i, j) order; the collapsing pass below
        # relies on adjacency being visible in sorted order.
        matching_blocks.sort()
        # It's possible that we have adjacent equal blocks in the
        # matching_blocks list now. Starting with 2.5, this code was added
        # to collapse them.
        i1 = j1 = k1 = 0
        non_adjacent = []
        for i2, j2, k2 in matching_blocks:
            # Is this block adjacent to i1, j1, k1?
            if i1 + k1 == i2 and j1 + k1 == j2:
                # Yes, so collapse them -- this just increases the length of
                # the first block by the length of the second, and the first
                # block so lengthened remains the block to compare against.
                k1 += k2
            else:
                # Not adjacent. Remember the first block (k1==0 means it's
                # the dummy we started with), and make the second block the
                # new block to compare against.
                if k1:
                    non_adjacent.append((i1, j1, k1))
                i1, j1, k1 = i2, j2, k2
        if k1:
            non_adjacent.append((i1, j1, k1))
        non_adjacent.append( (la, lb, 0) )
        self.matching_blocks = list(map(Match._make, non_adjacent))
        return self.matching_blocks
    def get_opcodes(self):
        """Return list of 5-tuples describing how to turn a into b.
        Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
        has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
        tuple preceding it, and likewise for j1 == the previous j2.
        The tags are strings, with these meanings:
        'replace':  a[i1:i2] should be replaced by b[j1:j2]
        'delete':   a[i1:i2] should be deleted.
                    Note that j1==j2 in this case.
        'insert':   b[j1:j2] should be inserted at a[i1:i1].
                    Note that i1==i2 in this case.
        'equal':    a[i1:i2] == b[j1:j2]
        >>> a = "qabxcd"
        >>> b = "abycdf"
        >>> s = SequenceMatcher(None, a, b)
        >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
        ...    print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
        ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
         delete a[0:1] (q) b[0:0] ()
          equal a[1:3] (ab) b[0:2] (ab)
        replace a[3:4] (x) b[2:3] (y)
          equal a[4:6] (cd) b[3:5] (cd)
         insert a[6:6] () b[5:6] (f)
        """
        # Serve the cached edit script when nothing has changed.
        if self.opcodes is not None:
            return self.opcodes
        i = j = 0
        self.opcodes = answer = []
        for ai, bj, size in self.get_matching_blocks():
            # invariant: we've pumped out correct diffs to change
            # a[:i] into b[:j], and the next matching block is
            # a[ai:ai+size] == b[bj:bj+size]. So we need to pump
            # out a diff to change a[i:ai] into b[j:bj], pump out
            # the matching block, and move (i,j) beyond the match
            tag = ''
            if i < ai and j < bj:
                tag = 'replace'
            elif i < ai:
                tag = 'delete'
            elif j < bj:
                tag = 'insert'
            if tag:
                answer.append( (tag, i, ai, j, bj) )
            i, j = ai+size, bj+size
            # the list of matching blocks is terminated by a
            # sentinel with size 0
            if size:
                answer.append( ('equal', ai, i, bj, j) )
        return answer
    def get_grouped_opcodes(self, n=3):
        """ Isolate change clusters by eliminating ranges with no changes.
        Return a generator of groups with up to n lines of context.
        Each group is in the same format as returned by get_opcodes().
        >>> from pprint import pprint
        >>> a = list(map(str, range(1,40)))
        >>> b = a[:]
        >>> b[8:8] = ['i']     # Make an insertion
        >>> b[20] += 'x'       # Make a replacement
        >>> b[23:28] = []      # Make a deletion
        >>> b[30] += 'y'       # Make another replacement
        >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
        [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
         [('equal', 16, 19, 17, 20),
          ('replace', 19, 20, 20, 21),
          ('equal', 20, 22, 21, 23),
          ('delete', 22, 27, 23, 23),
          ('equal', 27, 30, 23, 26)],
         [('equal', 31, 34, 27, 30),
          ('replace', 34, 35, 30, 31),
          ('equal', 35, 38, 31, 34)]]
        """
        # Each yielded group is a list of opcode 5-tuples whose leading and
        # trailing 'equal' runs are trimmed to at most n lines of context.
        codes = self.get_opcodes()
        if not codes:
            # Degenerate case: pretend both sequences share one line so a
            # (single, empty-ish) group structure still exists.
            codes = [("equal", 0, 1, 0, 1)]
        # Fixup leading and trailing groups if they show no changes.
        if codes[0][0] == 'equal':
            tag, i1, i2, j1, j2 = codes[0]
            codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
        if codes[-1][0] == 'equal':
            tag, i1, i2, j1, j2 = codes[-1]
            codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
        nn = n + n
        group = []
        for tag, i1, i2, j1, j2 in codes:
            # End the current group and start a new one whenever
            # there is a large range with no changes.
            if tag == 'equal' and i2-i1 > nn:
                group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
                yield group
                group = []
                i1, j1 = max(i1, i2-n), max(j1, j2-n)
            group.append((tag, i1, i2, j1 ,j2))
        if group and not (len(group)==1 and group[0][0] == 'equal'):
            yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
    """Use SequenceMatcher to return list of the best "good enough" matches.
    word is the sequence (typically a string) for which close matches are
    desired.
    possibilities is a list of sequences (typically strings) to match
    word against.
    At most n matches are returned (default 3; n must be > 0), and only
    possibilities scoring at least cutoff (a float in [0, 1], default
    0.6) are considered.  The result list is ordered by similarity,
    best match first.
    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
    ['apple', 'ape']
    >>> import keyword as _keyword
    >>> get_close_matches("wheel", _keyword.kwlist)
    ['while']
    >>> get_close_matches("Apple", _keyword.kwlist)
    []
    >>> get_close_matches("accept", _keyword.kwlist)
    ['except']
    """
    if not n > 0:
        raise ValueError("n must be > 0: %r" % (n,))
    if not 0.0 <= cutoff <= 1.0:
        raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
    matcher = SequenceMatcher()
    # The expensive preprocessing is keyed to the second sequence, so fix
    # word there and swap each candidate in as the first sequence.
    matcher.set_seq2(word)
    scored = []
    for candidate in possibilities:
        matcher.set_seq1(candidate)
        # Cheapest bounds first: each is an upper bound on the next, so a
        # failing bound lets us skip the expensive exact ratio().
        if (matcher.real_quick_ratio() >= cutoff and
                matcher.quick_ratio() >= cutoff and
                matcher.ratio() >= cutoff):
            scored.append((matcher.ratio(), candidate))
    # Keep the n highest-scoring pairs, best first, then drop the scores.
    best = heapq.nlargest(n, scored)
    return [candidate for score, candidate in best]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
    r"""
    Differ is a class for comparing sequences of lines of text, and
    producing human-readable differences or deltas.  Differ uses
    SequenceMatcher both to compare sequences of lines, and to compare
    sequences of characters within similar (near-matching) lines.

    Each line of a Differ delta begins with a two-letter code:

        '- '    line unique to sequence 1
        '+ '    line unique to sequence 2
        '  '    line common to both sequences
        '? '    line not present in either input sequence

    Lines beginning with '? ' attempt to guide the eye to intraline
    differences, and were not present in either input sequence.  These lines
    can be confusing if the sequences contain tab characters.

    Note that Differ makes no claim to produce a *minimal* diff.  To the
    contrary, minimal diffs are often counter-intuitive, because they synch
    up anywhere possible, sometimes accidental matches 100 pages apart.
    Restricting synch points to contiguous matches preserves some notion of
    locality, at the occasional cost of producing a longer diff.

    Example: Comparing two texts.

    First we set up the texts, sequences of individual single-line strings
    ending with newlines (such sequences can also be obtained from the
    `readlines()` method of file-like objects):

    >>> text1 = '''  1. Beautiful is better than ugly.
    ...   2. Explicit is better than implicit.
    ...   3. Simple is better than complex.
    ...   4. Complex is better than complicated.
    ... '''.splitlines(keepends=True)
    >>> len(text1)
    4
    >>> text1[0][-1]
    '\n'
    >>> text2 = '''  1. Beautiful is better than ugly.
    ...   3.   Simple is better than complex.
    ...   4. Complicated is better than complex.
    ...   5. Flat is better than nested.
    ... '''.splitlines(keepends=True)

    Next we instantiate a Differ object:

    >>> d = Differ()

    Note that when instantiating a Differ object we may pass functions to
    filter out line and character 'junk'.  See Differ.__init__ for details.

    Finally, we compare the two:

    >>> result = list(d.compare(text1, text2))

    'result' is a list of strings, so let's pretty-print it:

    >>> from pprint import pprint as _pprint
    >>> _pprint(result)
    ['    1. Beautiful is better than ugly.\n',
     '-   2. Explicit is better than implicit.\n',
     '-   3. Simple is better than complex.\n',
     '+   3.   Simple is better than complex.\n',
     '?     ++\n',
     '-   4. Complex is better than complicated.\n',
     '?            ^                     ---- ^\n',
     '+   4. Complicated is better than complex.\n',
     '?           ++++ ^                      ^\n',
     '+   5. Flat is better than nested.\n']

    As a single multi-line string it looks like this:

    >>> print(''.join(result), end="")
        1. Beautiful is better than ugly.
    -   2. Explicit is better than implicit.
    -   3. Simple is better than complex.
    +   3.   Simple is better than complex.
    ?     ++
    -   4. Complex is better than complicated.
    ?            ^                     ---- ^
    +   4. Complicated is better than complex.
    ?           ++++ ^                      ^
    +   5. Flat is better than nested.

    Methods:

    __init__(linejunk=None, charjunk=None)
        Construct a text differencer, with optional filters.

    compare(a, b)
        Compare two sequences of lines; generate the resulting delta.
    """

    def __init__(self, linejunk=None, charjunk=None):
        """
        Construct a text differencer, with optional filters.

        The two optional keyword parameters are for filter functions:

        - `linejunk`: A function that should accept a single string argument,
          and return true iff the string is junk. The module-level function
          `IS_LINE_JUNK` may be used to filter out lines without visible
          characters, except for at most one splat ('#').  It is recommended
          to leave linejunk None; as of Python 2.3, the underlying
          SequenceMatcher class has grown an adaptive notion of "noise" lines
          that's better than any static definition the author has ever been
          able to craft.

        - `charjunk`: A function that should accept a string of length 1. The
          module-level function `IS_CHARACTER_JUNK` may be used to filter out
          whitespace characters (a blank or tab; **note**: bad idea to include
          newline in this!).  Use of IS_CHARACTER_JUNK is recommended.
        """
        # Stored verbatim; compare() hands linejunk to SequenceMatcher and
        # _fancy_replace() hands charjunk to the intraline matcher.
        self.linejunk = linejunk
        self.charjunk = charjunk

    def compare(self, a, b):
        r"""
        Compare two sequences of lines; generate the resulting delta.

        Each sequence must contain individual single-line strings ending with
        newlines. Such sequences can be obtained from the `readlines()` method
        of file-like objects.  The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writeline()
        method of a file-like object.

        Example:

        >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
        ...                                'ore\ntree\nemu\n'.splitlines(True))),
        ...       end="")
        - one
        ?  ^
        + ore
        ?  ^
        - two
        - three
        ?  -
        + tree
        + emu
        """
        cruncher = SequenceMatcher(self.linejunk, a, b)
        # Each opcode tag selects a sub-generator; the outputs are chained
        # together in order to form the delta.
        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
            if tag == 'replace':
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            elif tag == 'delete':
                g = self._dump('-', a, alo, ahi)
            elif tag == 'insert':
                g = self._dump('+', b, blo, bhi)
            elif tag == 'equal':
                g = self._dump(' ', a, alo, ahi)
            else:
                raise ValueError('unknown tag %r' % (tag,))
            yield from g

    def _dump(self, tag, x, lo, hi):
        """Generate comparison results for a same-tagged range."""
        for i in range(lo, hi):
            yield '%s %s' % (tag, x[i])

    def _plain_replace(self, a, alo, ahi, b, blo, bhi):
        # Fallback for a 'replace' range with no sufficiently-similar pair:
        # emit all '+' and '-' lines with no intraline markup.
        assert alo < ahi and blo < bhi
        # dump the shorter block first -- reduces the burden on short-term
        # memory if the blocks are of very different sizes
        if bhi - blo < ahi - alo:
            first  = self._dump('+', b, blo, bhi)
            second = self._dump('-', a, alo, ahi)
        else:
            first  = self._dump('-', a, alo, ahi)
            second = self._dump('+', b, blo, bhi)

        for g in first, second:
            yield from g

    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
        r"""
        When replacing one block of lines with another, search the blocks
        for *similar* lines; the best-matching pair (if any) is used as a
        synch point, and intraline difference marking is done on the
        similar pair. Lots of work, but often worth it.

        Example:

        >>> d = Differ()
        >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
        ...                            ['abcdefGhijkl\n'], 0, 1)
        >>> print(''.join(results), end="")
        - abcDefghiJkl
        ?    ^  ^  ^
        + abcdefGhijkl
        ?    ^  ^  ^
        """
        # don't synch up unless the lines have a similarity score of at
        # least cutoff; best_ratio tracks the best score seen so far
        best_ratio, cutoff = 0.74, 0.75
        cruncher = SequenceMatcher(self.charjunk)
        eqi, eqj = None, None   # 1st indices of equal lines (if any)

        # search for the pair that matches best without being identical
        # (identical lines must be junk lines, & we don't  want to synch up
        # on junk -- unless we have to)
        for j in range(blo, bhi):
            bj = b[j]
            cruncher.set_seq2(bj)
            for i in range(alo, ahi):
                ai = a[i]
                if ai == bj:
                    if eqi is None:
                        eqi, eqj = i, j
                    continue
                cruncher.set_seq1(ai)
                # computing similarity is expensive, so use the quick
                # upper bounds first -- have seen this speed up messy
                # compares by a factor of 3.
                # note that ratio() is only expensive to compute the first
                # time it's called on a sequence pair; the expensive part
                # of the computation is cached by cruncher
                if cruncher.real_quick_ratio() > best_ratio and \
                      cruncher.quick_ratio() > best_ratio and \
                      cruncher.ratio() > best_ratio:
                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
        if best_ratio < cutoff:
            # no non-identical "pretty close" pair
            if eqi is None:
                # no identical pair either -- treat it as a straight replace
                yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
                return
            # no close pair, but an identical pair -- synch up on that
            best_i, best_j, best_ratio = eqi, eqj, 1.0
        else:
            # there's a close pair, so forget the identical pair (if any)
            eqi = None

        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
        # identical

        # pump out diffs from before the synch point
        yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)

        # do intraline marking on the synch pair
        aelt, belt = a[best_i], b[best_j]
        if eqi is None:
            # pump out a '-', '?', '+', '?' quad for the synched lines
            atags = btags = ""
            cruncher.set_seqs(aelt, belt)
            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
                la, lb = ai2 - ai1, bj2 - bj1
                if tag == 'replace':
                    atags += '^' * la
                    btags += '^' * lb
                elif tag == 'delete':
                    atags += '-' * la
                elif tag == 'insert':
                    btags += '+' * lb
                elif tag == 'equal':
                    atags += ' ' * la
                    btags += ' ' * lb
                else:
                    raise ValueError('unknown tag %r' % (tag,))
            yield from self._qformat(aelt, belt, atags, btags)
        else:
            # the synch pair is identical
            yield '  ' + aelt

        # pump out diffs from after the synch point
        yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)

    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
        """Recurse into the ranges on either side of a synch point.
        Falls back to plain '+'/'-' dumps when only one side is non-empty.
        """
        g = []
        if alo < ahi:
            if blo < bhi:
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            else:
                g = self._dump('-', a, alo, ahi)
        elif blo < bhi:
            g = self._dump('+', b, blo, bhi)

        yield from g

    def _qformat(self, aline, bline, atags, btags):
        r"""
        Format "?" output and deal with leading tabs.

        Example:

        >>> d = Differ()
        >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
        ...                      '  ^ ^  ^      ', '  ^ ^  ^      ')
        >>> for line in results: print(repr(line))
        ...
        '- \tabcDefghiJkl\n'
        '? \t ^ ^  ^\n'
        '+ \tabcdefGhijkl\n'
        '? \t ^ ^  ^\n'
        """

        # Can hurt, but will probably help most of the time.
        common = min(_count_leading(aline, "\t"),
                     _count_leading(bline, "\t"))
        common = min(common, _count_leading(atags[:common], " "))
        common = min(common, _count_leading(btags[:common], " "))
        atags = atags[common:].rstrip()
        btags = btags[common:].rstrip()

        yield "- " + aline
        if atags:
            yield "? %s%s\n" % ("\t" * common, atags)

        yield "+ " + bline
        if btags:
            yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
    r"""
    Return True for an ignorable line: iff `line` is blank or holds one '#'.
    Examples:
    >>> IS_LINE_JUNK('\n')
    True
    >>> IS_LINE_JUNK('  #   \n')
    True
    >>> IS_LINE_JUNK('hello\n')
    False
    """
    # The precompiled default matcher accepts optional whitespace, an
    # optional '#', more optional whitespace, then end of line.
    match = pat(line)
    return match is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
    r"""
    Return True for an ignorable character: iff `ch` is a space or tab.
    Examples:
    >>> IS_CHARACTER_JUNK(' ')
    True
    >>> IS_CHARACTER_JUNK('\t')
    True
    >>> IS_CHARACTER_JUNK('\n')
    False
    >>> IS_CHARACTER_JUNK('x')
    False
    """
    # Membership test against the default "junk" alphabet; note that
    # newline is deliberately NOT part of it.
    return ch in ws
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
                 tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a unified diff.

    Unified diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with ---, +++, or @@) are
    created with a trailing newline, so that inputs created with
    file.readlines() produce output suitable for file.writelines().
    For inputs without trailing newlines, pass lineterm="" so the output
    is uniformly newline free.

    The header names and modification times come from 'fromfile',
    'tofile', 'fromfiledate' and 'tofiledate' (ISO 8601 format is
    conventional for the dates).

    Example:

    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             '2005-01-26 23:30:50', '2010-04-02 10:20:52',
    ...             lineterm=''):
    ...     print(line)                 # doctest: +NORMALIZE_WHITESPACE
    --- Original        2005-01-26 23:30:50
    +++ Current         2010-04-02 10:20:52
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
    """
    wrote_header = False
    matcher = SequenceMatcher(None, a, b)
    for group in matcher.get_grouped_opcodes(n):
        if not wrote_header:
            # The file header is emitted lazily: identical inputs
            # produce no groups and therefore no output at all.
            wrote_header = True
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '+++ {}{}{}'.format(tofile, todate, lineterm)

        first, last = group[0], group[-1]
        range1 = _format_range_unified(first[1], last[2])
        range2 = _format_range_unified(first[3], last[4])
        yield '@@ -{} +{} @@{}'.format(range1, range2, lineterm)

        for tag, i1, i2, j1, j2 in group:
            if tag == 'equal':
                for line in a[i1:i2]:
                    yield ' ' + line
            else:
                # 'replace' emits deletions then insertions; 'delete'
                # and 'insert' emit only their own side.
                if tag != 'insert':
                    for line in a[i1:i2]:
                        yield '-' + line
                if tag != 'delete':
                    for line in b[j1:j2]:
                        yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
                 fromfiledate='', tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a context diff.

    Context diffs show line changes plus 'n' lines of context (default 3).

    By default the control lines (those with *** or ---) carry a trailing
    newline, so readlines() input yields writelines()-ready output.  Pass
    lineterm="" for inputs without trailing newlines.

    The header names and modification times come from 'fromfile',
    'tofile', 'fromfiledate' and 'tofiledate' (ISO 8601 format is
    conventional); unspecified fields default to blanks.

    Example:

    >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
    ...       'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
    ...       end="")
    *** Original
    --- Current
    ***************
    *** 1,4 ****
      one
    ! two
    ! three
      four
    --- 1,4 ----
    + zero
      one
    ! tree
      four
    """
    # One two-character prefix per opcode tag.
    prefix = dict(insert='+ ', delete='- ', replace='! ', equal='  ')
    wrote_header = False
    for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
        if not wrote_header:
            wrote_header = True
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '--- {}{}{}'.format(tofile, todate, lineterm)

        first, last = group[0], group[-1]
        yield '***************' + lineterm

        yield '*** {} ****{}'.format(
            _format_range_context(first[1], last[2]), lineterm)
        # The "from" half is printed only when something was removed or
        # replaced; pure insertions leave it empty.
        tags = {op[0] for op in group}
        if tags & {'replace', 'delete'}:
            for tag, i1, i2, _, _ in group:
                if tag != 'insert':
                    for line in a[i1:i2]:
                        yield prefix[tag] + line

        yield '--- {} ----{}'.format(
            _format_range_context(first[3], last[4]), lineterm)
        # Symmetrically, the "to" half appears only for inserts/replaces.
        if tags & {'replace', 'insert'}:
            for tag, _, _, j1, j2 in group:
                if tag != 'delete':
                    for line in b[j1:j2]:
                        yield prefix[tag] + line
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
    r"""
    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.

    Optional keyword parameters `linejunk` and `charjunk` are filter
    functions (or None):

    - linejunk: accepts one string and returns true iff it is junk.  The
      default None is recommended; an adaptive notion of "noise" lines is
      used that does a good job on its own.

    - charjunk: accepts a single character; the default
      IS_CHARACTER_JUNK filters out whitespace (blank or tab — do not
      include newline here).

    Tools/scripts/ndiff.py is a command-line front-end to this function.

    Example:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
    ...              'ore\ntree\nemu\n'.splitlines(keepends=True))
    >>> print(''.join(diff), end="")
    - one
    ?  ^
    + ore
    ?  ^
    - two
    - three
    ?  -
    + tree
    + emu
    """
    differ = Differ(linejunk, charjunk)
    return differ.compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
fromlines -- list of text lines to compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
change_re = re.compile('(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
side -- indice into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in sub_info[::-1]:
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
# marks with what the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
try:
lines.append(next(diff_lines_iterator))
except StopIteration:
lines.append('X')
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
# in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
raise StopIteration
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
until it has a matching pair from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
from_line, to_line, found_diff = next(line_iterator)
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
while True:
yield next(line_pair_iterator)
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
from_line, to_line, found_diff = next(line_pair_iterator)
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
while(lines_to_write):
from_line, to_line, found_diff = next(line_pair_iterator)
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
# Page scaffold used by HtmlDiff.make_file(); filled via %-interpolation
# with keys 'styles', 'table' and 'legend'.
# NOTE(review): upstream difflib pads some legend cells with &nbsp;
# entities; they appear as plain spaces here — confirm against upstream.
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=ISO-8859-1" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""

# CSS rules for the classes emitted in the generated table
# (diff_header, diff_next, diff_add, diff_chg, diff_sub).
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""

# Skeleton of the side-by-side table; keys 'prefix', 'header_row' and
# 'data_rows' are filled in HtmlDiff.make_table().
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""

# Static color/navigation legend appended to the page by make_file().
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
linejunk,charjunk -- keyword arguments passed into ndiff() (used to by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
return self._file_template % dict(
styles = self._styles,
legend = self._legend,
table = self.make_table(fromlines,tolines,fromdesc,todesc,
context=context,numlines=numlines))
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text=text.replace("&","&").replace(">",">").replace("<","<")
# make space non-breakable so they don't get compressed or line wrapped
text = text.replace(' ',' ').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
fromlist = ['<td></td><td> No Differences Found </td>']
tolist = fromlist
else:
fromlist = tolist = ['<td></td><td> Empty File </td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
# mdiff yields None on separator lines skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
replace('\t',' ')
# Drop the module-level binding: `re` was only needed while evaluating
# IS_LINE_JUNK's default argument at definition time (_mdiff re-imports
# it locally).
del re
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> diff = list(diff)
>>> print(''.join(restore(diff, 1)), end="")
one
two
three
>>> print(''.join(restore(diff, 2)), end="")
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    _test()
|
{
"content_hash": "f2c4f67d828f8c603b55d8ae8eea20ae",
"timestamp": "",
"source": "github",
"line_count": 2039,
"max_line_length": 83,
"avg_line_length": 40.05885237861697,
"alnum_prop": 0.5657443682664055,
"repo_name": "chidea/GoPythonDLLWrapper",
"id": "56d4852a375530dc6dae715eeea460a1a6f8fd31",
"size": "81680",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bin/lib/difflib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1345"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Go",
"bytes": "2169"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "152703"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "23244205"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "3770"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import sys
import time
import argparse
import pathlib
import logging
import subprocess
import docker_image
def main():
    """Parse CLI arguments and run the extractor inside its docker image.

    Requires --in-file (e.g. an Android image) and --out-dir; the output
    directory must already exist and be empty.  Exits with status 1 when
    either precondition fails.  The input file's parent directory and the
    output directory are bind-mounted into the container.
    """
    # FIX: the first positional argument of ArgumentParser is `prog`, not
    # the description — passing the text positionally replaced the program
    # name in usage output and left --help without a description.
    parser = argparse.ArgumentParser(
        description="Extract using docker extractor image")
    parser.add_argument("--in-file",
                        type=lambda p: pathlib.Path(p).absolute(),
                        required=True,
                        help="Input file (e.g. Android image)")
    parser.add_argument("--out-dir",
                        type=lambda p: pathlib.Path(p).absolute(),
                        required=True, help="Output directory")
    parser.add_argument('--force-cleanup-and-rebuild', action='store_true')
    args = parser.parse_args()

    logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
                        level=logging.DEBUG)

    # Abort if out dir does not exist or is non-empty
    if not args.out_dir.is_dir():
        logging.error("[!] %s not a directory, exiting", args.out_dir)
        sys.exit(1)
    if any(args.out_dir.iterdir()):
        logging.error("[!] %s not empty, exiting", args.out_dir)
        sys.exit(1)

    start_time = time.time()
    image_name = docker_image.check_rebuild_docker_image(
        args.force_cleanup_and_rebuild)
    logging.info("[+] Running extractor with docker image %s", image_name)
    subprocess.check_call([
        "docker",
        "run",
        "--privileged",
        "--mount",
        "type=bind,src=" + str(args.in_file.parents[0]) + ",dst=/in_dir",
        "--mount",
        "type=bind,src=" + str(args.out_dir) + ",dst=/out_dir",
        "--rm",
        image_name,
        "/in_dir/" + args.in_file.name,
        "--system-dir-output",
        "/out_dir/"
    ])
    duration = time.time() - start_time
    # Use logging's lazy %-formatting directly instead of wrapping a
    # pre-rendered f-string in "%s".
    logging.info("[+] Output saved to %s in %ss", args.out_dir, duration)
# Script entry point.
if __name__ == "__main__":
    main()
|
{
"content_hash": "143e6e281ecec6a60a6c5fb48f4035a7",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 134,
"avg_line_length": 32.86538461538461,
"alnum_prop": 0.6155646576945583,
"repo_name": "srlabs/extractor",
"id": "d3ca66d5899c9a26c245877f6ba47e7ab09024d4",
"size": "2399",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "extract-docker.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "787"
},
{
"name": "Python",
"bytes": "174835"
},
{
"name": "Shell",
"bytes": "371"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``avatar`` ImageField to Profile."""

    def forwards(self, orm):
        """Apply the migration: add the 'avatar' column."""
        # Adding field 'Profile.avatar'
        db.add_column(u'profiles_profile', 'avatar',
                      self.gf('django.db.models.fields.files.ImageField')(default='', max_length=500, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the 'avatar' column."""
        # Deleting field 'Profile.avatar'
        db.delete_column(u'profiles_profile', 'avatar')

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in history — auto-generated; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'profiles.category': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Category'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
        },
        u'profiles.interest': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Interest'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
        },
        u'profiles.profile': {
            'Meta': {'object_name': 'Profile'},
            'availability': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '500', 'blank': 'True'}),
            'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Category']", 'null': 'True', 'blank': 'True'}),
            'category_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'interests': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Interest']", 'symmetrical': 'False', 'blank': 'True'}),
            'interests_other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'blank': 'True'}),
            'quote': ('django.db.models.fields.TextField', [], {'max_length': '140', 'blank': 'True'}),
            'skills': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('us_ignite.common.fields.AutoUUIDField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '500', 'blank': 'True'})
        },
        u'profiles.profilelink': {
            'Meta': {'object_name': 'ProfileLink'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Profile']"}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '500'})
        }
    }

    complete_apps = ['profiles']
|
{
"content_hash": "b0adf517bc4b675ea90b0edd82cf9709",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 225,
"avg_line_length": 74.14285714285714,
"alnum_prop": 0.5549132947976878,
"repo_name": "us-ignite/us_ignite",
"id": "c09fcbc87606aa4c4b3753dcd03421ff3db951af",
"size": "7290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "us_ignite/profiles/migrations/0002_auto__add_field_profile_avatar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "590320"
},
{
"name": "HTML",
"bytes": "920235"
},
{
"name": "JavaScript",
"bytes": "109759"
},
{
"name": "Nginx",
"bytes": "3047"
},
{
"name": "Pascal",
"bytes": "48"
},
{
"name": "Puppet",
"bytes": "53455"
},
{
"name": "Python",
"bytes": "1321882"
},
{
"name": "Ruby",
"bytes": "370509"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
"""General tests for embeddings"""
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from itertools import product
import numpy as np
from numpy.testing import assert_raises, assert_allclose
from megaman.embedding import (Isomap, LocallyLinearEmbedding,
LTSA, SpectralEmbedding)
from megaman.geometry.geometry import Geometry
# All embedding estimators exercised by the generator tests below.
EMBEDDINGS = [Isomap, LocallyLinearEmbedding, LTSA, SpectralEmbedding]
# # TODO: make estimator_checks pass!
# def test_estimator_checks():
#     from sklearn.utils.estimator_checks import check_estimator
#     for Embedding in EMBEDDINGS:
#         yield check_estimator, Embedding
def test_embeddings_fit_vs_transform():
    """fit_transform output must match the stored embedding_ attribute."""
    rng = np.random.RandomState(42)
    data = rng.rand(100, 5)
    geom = Geometry(adjacency_kwds={'radius': 1.0},
                    affinity_kwds={'radius': 1.0})

    def check_embedding(Embedding, n_components):
        model = Embedding(n_components=n_components,
                          geom=geom, random_state=rng)
        result = model.fit_transform(data)
        assert model.embedding_.shape == (data.shape[0], n_components)
        assert_allclose(result, model.embedding_)

    for Embedding, n_components in product(EMBEDDINGS, (1, 2, 3)):
        yield check_embedding, Embedding, n_components
def test_embeddings_bad_arguments():
    """Each embedding raises ValueError for a missing radius or a bad geom."""
    rng = np.random.RandomState(32)
    data = rng.rand(100, 3)

    def check_bad_args(Embedding):
        # no radius set
        assert_raises(ValueError, Embedding().fit, data)
        # unrecognized geometry
        assert_raises(ValueError, Embedding(radius=2, geom='blah').fit, data)

    for Embedding in EMBEDDINGS:
        yield check_bad_args, Embedding
|
{
"content_hash": "d39d545a2d6a093be10c2119218b3b5c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 32.69090909090909,
"alnum_prop": 0.6690767519466073,
"repo_name": "jakevdp/Mmani",
"id": "290d2e06e5e373964867969ef82dbe1602cd5b1b",
"size": "1798",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "megaman/embedding/tests/test_embeddings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "5681"
},
{
"name": "Makefile",
"bytes": "645"
},
{
"name": "Matlab",
"bytes": "1207"
},
{
"name": "Python",
"bytes": "197549"
},
{
"name": "Shell",
"bytes": "879"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
from rdflib import URIRef, Literal
from rdflib.namespace import RDF
from settings import *
# functions
def check_permission(input_dir, output_dir):
    """Ensure *input_dir* is readable and *output_dir* is writeable.

    If a directory lacks the required permission, attempt to chmod it:
    the output directory gets 0o777, the input directory 0o744.
    Prints a confirmation line; OSError propagates if chmod fails.
    """
    if not os.access(output_dir, os.W_OK):
        # Fix: 0o777 is the portable octal literal; the original
        # ``int(0777)`` is Python-2-only syntax (SyntaxError on Python 3).
        os.chmod(output_dir, 0o777)
    if not os.access(input_dir, os.R_OK):
        os.chmod(input_dir, 0o744)
    print("Input: %s\t is OK\nOutput: %s\tis OK" % (input_dir, output_dir))
def explode(c_vals, ref_ids, c, c_parent):
    """Replace the grouped element *c* with one <cross-ref> per refid.

    For each refid a new element is inserted before *c*, tagged with its
    position via the "connected" attribute and given the matching value
    from *c_vals* as bracketed text; *c* itself is then removed from
    *c_parent* via remove_cross_refs().
    """
    for idx, refid in enumerate(ref_ids):
        node = et.Element(cross_ref_tag_name, refid=refid, nsmap=NS_MAP)
        node.set("connected", str(idx))
        node.text = "[" + c_vals[idx] + "]"
        c.addprevious(node)
    remove_cross_refs(c, c_parent)
def remove_preserve_tail(element):
    """Detach *element* from its tree without losing its tail text.

    lxml keeps the text that follows a node in the node's ``tail``, so a
    plain remove() would drop it. The tail is appended to the previous
    sibling's tail, or to the parent's text when *element* is the first
    child.
    """
    # Bug fix: ``parent`` was previously bound only inside the ``if`` body,
    # so removing an element without a tail raised NameError.
    parent = element.getparent()
    if element.tail:
        prev = element.getprevious()
        if prev is not None:
            prev.tail = (prev.tail or '') + element.tail
        else:
            parent.text = (parent.text or '') + element.tail
    parent.remove(element)
def remove_cross_refs(element, element_parent):
    """Strip the grouped cross-refs element from *element_parent*.

    Uses lxml's strip_elements; when that raises TypeError, falls back to
    remove_preserve_tail() so trailing text is not lost. Module-level
    counters track total removals and fallback removals.
    """
    global count_remove_all, count_remove_preserve
    count_remove_all += 1
    try:
        # NOTE(review): cross_refs_tag_name presumably comes from the
        # ``from settings import *`` above — confirm.
        et.strip_elements(element_parent, cross_refs_tag_name, with_tail=False)
    except TypeError:
        count_remove_preserve += 1
        remove_preserve_tail(element)
def build_textual_marker(p_number, ref_id):
    """Return the inline citation marker for a pointer/reference pair.

    Example output: [xxxcitxxx[[_'6'_][_'1'_]]xxxcitxxx]
    """
    return "[xxxcitxxx[[_'%s'_][_'%s'_]]xxxcitxxx]" % (p_number, ref_id)
def xml_to_rdf(files):
    """Convert each Elsevier XML paper in *files* to a Turtle RDF graph.

    Pipeline per file: expand grouped cross-references into individual
    <cross-ref> elements, number bibliographic references and their
    in-text pointers, extract the sentence surrounding each pointer, build
    c4o/doco/frbr triples, and serialize them to ``<eid>.<RDF_EXTENSION>``
    in ``output_dir``. Also appends a per-paper summary file and updates
    the module-level counters listed in the ``global`` statement.

    NOTE(review): relies heavily on names from ``from settings import *``
    (et, re, NS_MAP, NMSPCS, sent_detector, Graph, c4o, doco, frbr,
    ns_mgr, SEMLANCET_NS, NON_DECIMAL, input_dir, output_dir, ...) and on
    Python 2 print statements — confirm before porting.
    """
    global total_time, number_of_papers, count_remove_preserve, \
    count_remove_all, papers_with_block_detect_error, papers_with_no_crossrefs
    for f in files:
        # The eid (Elsevier id) is the file name minus an optional "-full"
        # suffix; it becomes the base of every URI minted below.
        f_name, f_extension = os.path.splitext(f)
        if(f_name[-5:] == "-full"):
            eid = f_name[0:-5]
        else:
            eid = f_name
        file_path = os.path.join(input_dir, f)
        start_time = time.time()
        print("Processing %s" %f)
        tree = et.parse(file_path)
        """
        STEP 1
        Expand cross-refs as multiple adjacent <cross-ref refid=''> elements
        - finds 'bib1' in bibliography, sets its positionNumber'1' ->
        finds all ce:cross-ref with refid = 'bib1' , set its positionNumberofBiblioref same as positionnumber'1'
        """
        xpath = "//ce:cross-refs"
        cross_refs = tree.xpath(xpath, namespaces=NS_MAP )
        for c in cross_refs:
            c_parent = c.getparent()
            c_val = c.text.strip("[]")
            if c_val:
                # Split "[1,2 and 3]"-style text into individual values.
                c_vals = re.split(',|and', c_val)
                ref_ids = c.attrib['refid'].strip().split()
                if (len(c_vals) == len(ref_ids)):
                    explode(c_vals, ref_ids, c, c_parent)
                else:
                    # Counts differ: expand range notation like "1-4" into
                    # the individual numbers before retrying.
                    new_c_vals = []
                    for element in c_vals:
                        if element.isdigit():
                            new_c_vals.append(str(element))
                        else:
                            toexpand = NON_DECIMAL.sub('f', element)
                            toexpand = toexpand.split("f")
                            try:
                                for i in range(int(toexpand[0]), int(toexpand[1]) + 1, 1):
                                    new_c_vals.append(str((i)))
                            except ValueError:
                                pass
                                # print "Problem!!"
                    if (len(new_c_vals) == len(ref_ids)):
                        explode(new_c_vals, ref_ids, c, c_parent)
                    else:
                        # pass in production, print for debugging
                        pass
                        # print "# refids NE new_c_vals.\n refids: %s \n c_vals: %s \n" %(ref_ids, new_c_vals)
        """
        STEP 2
        Count bib-reference(s)
        Add an attribute to each cross-ref with the number of the bibliographic reference it denotes.
        """
        xpath_bib_references ="//ce:bib-reference"
        bib_refrences = tree.xpath(xpath_bib_references, namespaces={'ce' : 'http://www.elsevier.com/xml/common/dtd'})
        bib_ref_pos = 1
        for b in bib_refrences:
            b.set("positionNumber", str(bib_ref_pos))
            b_ref_id = b.attrib['id'].split()[0]
            xpath_cross_ref_bib_pointers = "//ce:cross-ref[@refid='{0}']".format(b_ref_id)
            cross_ref_bib_pointers = tree.xpath(xpath_cross_ref_bib_pointers, namespaces={'ce': 'http://www.elsevier.com/xml/common/dtd'})
            for c in cross_ref_bib_pointers:
                c.set("positionNumberOfBibliographicReference", str(bib_ref_pos))
            bib_ref_pos += 1
        """
        STEP 3
        Identify InTextPointer (by checking @positionNumberOfBibliographicReference attribute)
        - Add position attribute
        - Normalize cross-ref content, by substituting content with a marker
        - invokes buildTextualMarker()
        """
        xpath = "//ce:cross-ref[@positionNumberOfBibliographicReference]"
        cross_refs = tree.xpath(xpath, namespaces={'ce': 'http://www.elsevier.com/xml/common/dtd'})
        if len(cross_refs) == 0 :
            # No pointers at all: record the file name for the final report.
            no_cross_refs = os.path.join(output_dir, NO_CROSS_REFS_LIST)
            with open(no_cross_refs, 'a') as ncf:
                ncf.write(f + '\n')
            ncf.close()
            # papers_with_no_crossrefs.append(f)
        current_cross_ref_pos = 1
        for c in cross_refs:
            c.set("positionNumber", str(current_cross_ref_pos))
            c_textual_marker = build_textual_marker(current_cross_ref_pos, c.attrib['positionNumberOfBibliographicReference'])
            try:
                c.text = c.text + c_textual_marker
            except TypeError:
                # c.text was None: sentence-block detection may fail here.
                c.text = "" + c_textual_marker
                papers_with_block_detect_error.append(f)
                print "Rare typeError Happened @ %s: \n" %file_path, c.get('refid'), c_textual_marker
            current_cross_ref_pos += 1
        """
        STEP 4
        Extract citation contexts and build info array
        """
        c_ref_info = []
        xpath = "//ce:cross-ref[@positionNumberOfBibliographicReference]"
        cross_refs = tree.xpath(xpath, namespaces={'ce': 'http://www.elsevier.com/xml/common/dtd'})
        count = 0
        for c in cross_refs:
            current_ref_id = c.attrib['positionNumberOfBibliographicReference']
            c_ref_info_being_added = {}
            c_ref_info_being_added['positionNumber'] = c.attrib['positionNumber']
            c_ref_info_being_added['positionNumberOfBibliographicReference'] = current_ref_id
            c_textual_marker_current = build_textual_marker(c_ref_info_being_added['positionNumber'], current_ref_id)
            # print c_textual_marker_current
            # Locate the enclosing text block (paragraph, table entry, ...)
            # that contains this pointer.
            xpath_block = "//*[self::ce:para or self::ce:entry or self::ce:note-para or self::ce:simple-para or self::ce:textref or self::xocs:item-toc-section-title or self::entry or self::ce:source or self::ce:section-title][descendant::ce:cross-ref[@positionNumberOfBibliographicReference and @positionNumber='{0}']]".format(c_ref_info_being_added['positionNumber'])
            connected = c.get("connected")
            block_containing_cross_ref = tree.xpath(xpath_block, namespaces=NMSPCS)
            # For exploded groups, only the first element ("connected"=="0")
            # extracts the block text; later siblings skip it.
            if not connected:
                try:
                    block_content = et.tostring(block_containing_cross_ref[0], method="text", encoding="unicode")
                except IndexError:
                    block_content = ""
            else:
                if connected == "0":
                    try:
                        block_content = et.tostring(block_containing_cross_ref[0], method="text", encoding="unicode")
                    except IndexError:
                        block_content = ""
            candidate_sentences = sent_detector.tokenize(block_content.strip())
            marker_regexp = "\[xxxcitxxx\[\[_'(?P<pos>.*?)'_\]\[_'.*?'_\]\]xxxcitxxx\]"
            for i in range(len(candidate_sentences)):
                if c_textual_marker_current in candidate_sentences[i]:
                    # Found the sentence holding this pointer: record it and
                    # strip all inline markers from the stored context.
                    citation_context = candidate_sentences[i]
                    ref_pointers = re.findall("\[_'.*?'_\]", citation_context)
                    first_ref_pointer = ref_pointers[0].strip("[]_'")
                    c_ref_info_being_added['sentenceid'] = "sentence-with-in-text-reference-pointer-"+first_ref_pointer
                    citation_context = re.sub(marker_regexp, "" , citation_context)
                    c_ref_info_being_added['citation_context'] = citation_context
            c_ref_info_being_added['DEBUG-blockContent'] = block_content
            c_ref_info.append(c_ref_info_being_added)
        """
        STEP 5
        Convert to RDF
        """
        graph_of_citation_contexts = Graph()
        graph_of_citation_contexts.namespace_manager = ns_mgr
        work_uri = SEMLANCET_NS + eid # http://www.semanticlancet.eu/resource/1-s2.0-S157082680300009X
        exp_uri = work_uri + "/version-of-record" # http://www.semanticlancet.eu/resource/1-s2.0-S157082680300009X/version-of-record
        exp_resource = URIRef(exp_uri)
        for c in c_ref_info:
            # http://www.semanticlancet.eu/resource/1-s2.0-S157082680300009X/version-of-record/reference-list/NUMBER/reference
            ref_uri = URIRef(exp_uri + "/reference-list/" + c['positionNumberOfBibliographicReference'] + "/reference")
            # http://www.semanticlancet.eu/resource/1-s2.0-S157082680300009X/version-of-record/in-text-reference-pointer/positionNumber
            in_text_pointer_uri = URIRef(exp_uri + "/in-text-reference-pointer-" + c['positionNumber'])
            # http://www.semanticlancet.eu/resource/1-s2.0-S157082680300009X/version-of-record/sentenceid
            citation_sentence_uri = URIRef(exp_uri + "/" + c['sentenceid'])
            graph_of_citation_contexts.add( (in_text_pointer_uri, RDF.type, c4o.InTextReferencePointer) )
            graph_of_citation_contexts.add( (in_text_pointer_uri, c4o.hasContent, Literal("[" + c['positionNumberOfBibliographicReference'] + "]") ) )
            graph_of_citation_contexts.add( (in_text_pointer_uri, c4o.denotes, ref_uri) )
            graph_of_citation_contexts.add( (citation_sentence_uri, RDF.type, doco.Sentence) )
            graph_of_citation_contexts.add( (citation_sentence_uri, c4o.hasContent, Literal(c['citation_context'])) )
            graph_of_citation_contexts.add( (citation_sentence_uri, frbr.partOf, exp_resource) )
            graph_of_citation_contexts.add( (citation_sentence_uri, frbr.part, in_text_pointer_uri) )
            graph_of_citation_contexts.add( (exp_resource, frbr.part, citation_sentence_uri))
            graph_of_citation_contexts.add( (in_text_pointer_uri, c4o.hasContext, citation_sentence_uri) )
            graph_of_citation_contexts.add( (in_text_pointer_uri, frbr.partOf, citation_sentence_uri) )
        """
        STEP 6
        Serialize in a file
        """
        g_citation_filename = os.path.join(output_dir, eid + "." + RDF_EXTENSION )
        graph_of_citation_contexts.serialize(destination=g_citation_filename, format='turtle')
        exec_t = time.time() - start_time
        print("--- %s processed in %s seconds ---\n" % (eid, exec_t))
        total_time = total_time + exec_t
        number_of_papers += 1
        """"
        Summary
        TODO : add more information
        """
        summary_file = os.path.join(output_dir, SUMMARY_FILENAME)
        with open(summary_file, 'a') as sf:
            sf.write("\n" + f + "\n")
            for c in c_ref_info:
                citation_contexts_summary = c['positionNumber'] + " | " + c['positionNumberOfBibliographicReference'] + " | " + c['sentenceid'] + " | " + c["citation_context"] + " | " + c['DEBUG-blockContent'] + "\n"
                citation_contexts_summary = citation_contexts_summary.encode('ascii', 'ignore')
                sf.write(citation_contexts_summary)
        sf.close()
        """
        In case we need to write the tree on a file
        """
        # tree.write("OUTPUT_FILE.XML", pretty_print=True)
if __name__ == "__main__":
    # set/check input & output directories
    try:
        arg1 = sys.argv[1]
        arg2 = sys.argv[2]
    except IndexError:
        print "Usage: \tpython ccex.py <input_directory_name> <output_directory_name>"
        sys.exit(1)
    # Both paths are resolved relative to this script's directory.
    input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), arg1)
    output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), arg2)
    if not os.path.isdir(input_dir):
        print(
            "Invalid input directory: '%s' is not a valid direcotry, please insert a valid directory name" % input_dir)
        sys.exit(1)
    if not os.path.exists(output_dir):
        print "Making directory %s as output" % output_dir
        os.makedirs(output_dir)
    check_permission(input_dir, output_dir)
    all_files = os.listdir(input_dir)
    files = []
    # keep only XML files
    for f in all_files:
        f_name, f_extension = os.path.splitext(f)
        if f_extension == '.xml':
            files.append(f)
    # counters — module-level state read and mutated by xml_to_rdf() via
    # its ``global`` statement; must be initialised before the call.
    total_time = 0
    number_of_papers = 0
    count_remove_preserve = 0
    count_remove_all = 0
    papers_with_block_detect_error = []
    papers_with_no_crossrefs = []
    # call xml_to_rdf function with list of all files
    xml_to_rdf(files)
    # reports
    print "Total execution time: %s seconds" %total_time
    print "Number of processed papers: %d" %number_of_papers
    print "Number of papers with no cross-ref: %d\n" %len(papers_with_no_crossrefs)
    for p in papers_with_no_crossrefs:
        print "\t", p
    if papers_with_block_detect_error:
        print "Block detection erros happened %d times" %len(papers_with_block_detect_error)
        for p in papers_with_block_detect_error:
            print "\t", p
    print "# Cross-refs Rremoved: %d \n# Preserve function used: %d times" %(count_remove_all, count_remove_preserve)
|
{
"content_hash": "1e4aa2df58d5dcd8aca92d853a7c948a",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 369,
"avg_line_length": 44.71383647798742,
"alnum_prop": 0.5811941768056825,
"repo_name": "sheshkovsky/CCeX",
"id": "cc9a4c6c34991dea607ef9952ca87218f8cd55b5",
"size": "14219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31150"
}
],
"symlink_target": ""
}
|
"""
===============================================================================
Original code copyright (C) 2009-2022 Rudolf Cardinal (rudolf@pobox.com).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
"""
|
{
"content_hash": "2efd593038e8c7e9c8b984e7b99197c9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 40.76190476190476,
"alnum_prop": 0.5934579439252337,
"repo_name": "RudolfCardinal/pythonlib",
"id": "246162835eb617aa025b983db8c317d824fe04a2",
"size": "919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cardinal_pythonlib/django/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1987146"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from admin_views.admin import AdminViews
from django.shortcuts import redirect
from example_project.example_app.models import TestModel
class TestAdmin(AdminViews):
    """Example ModelAdmin demonstrating django-admin-views entries.

    Each ``admin_views`` item is a (label, target) pair: the target is
    either a method name on this class (rendered as an admin view) or an
    absolute URL linked directly from the admin index.
    """

    admin_views = (
        ('Process This', 'process'),  # Admin view (method below)
        ('Go to LJW', 'http://www.ljworld.com'),  # Direct URL
    )

    def process(self, *args, **kwargs):
        """Admin-view target: redirect the admin user to cnn.com."""
        return redirect('http://www.cnn.com')


admin.site.register(TestModel, TestAdmin)
|
{
"content_hash": "1ed7c95b2f70287e501f5b21cb43599c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 30.625,
"alnum_prop": 0.6632653061224489,
"repo_name": "mikeumus/django-admin-views",
"id": "a86be8e537728919fff2d21a7587387ed527fecb",
"size": "490",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example_project/example_project/example_app/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3012"
},
{
"name": "Python",
"bytes": "18214"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Create the ContractorInfo and OrganizationContractorInfo models."""

    dependencies = [
        ('editorial', '0040_auto_20171115_1148'),
    ]

    # Auto-generated by ``makemigrations``; do not edit field options here —
    # change the models and generate a new migration instead.
    operations = [
        migrations.CreateModel(
            name='ContractorInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('resume', models.FileField(upload_to=b'resumes/%Y/%m/%d', blank=True)),
                ('address', models.TextField(help_text=b'Mailing address.', blank=True)),
                ('availability', models.TextField(help_text=b'Notes on when a contractor is available or not.', blank=True)),
                ('current_location', models.TextField(help_text=b'Contractors specific location.', blank=True)),
                ('gear', models.TextField(help_text=b'Gear that a contractor has access to and skills for.', blank=True)),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='OrganizationContractorInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('w9_on_file', models.BooleanField(default=False, help_text=b'Does the organization have a W9 on file.')),
                ('rates', models.TextField(help_text=b'The rates the contractor is paid by the org.', blank=True)),
                ('strengths', models.TextField(help_text=b'Internal notes on strengths of the contractor.', blank=True)),
                ('conflicts', models.TextField(help_text=b'Any conflicts of interest the contractor has.', blank=True)),
                ('editor_notes', models.TextField(help_text=b'Any notes for editors on things to know when working with this contractor.', blank=True)),
            ],
        ),
    ]
|
{
"content_hash": "cc375b6505dc1ae2be594b46b20b6359",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 152,
"avg_line_length": 53.5945945945946,
"alnum_prop": 0.6197680282400403,
"repo_name": "ProjectFacet/facet",
"id": "abb14129387741a11600ea88edefdf0eaf035c4c",
"size": "2007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/editorial/migrations/0041_contractorinfo_organizationcontractorinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4350483"
},
{
"name": "HTML",
"bytes": "1677386"
},
{
"name": "JavaScript",
"bytes": "1120019"
},
{
"name": "Python",
"bytes": "804022"
},
{
"name": "Ruby",
"bytes": "225"
},
{
"name": "Shell",
"bytes": "889"
}
],
"symlink_target": ""
}
|
import copy
import datetime as dt
import json
import os.path
import ddt
import mock
import yaml
from rally.cli.commands import task
from rally import consts
from rally import exceptions
from tests.unit import fakes
from tests.unit import test
@ddt.ddt
class TaskCommandsTestCase(test.TestCase):
    def setUp(self):
        """Create a fresh TaskCommands instance for each test."""
        super(TaskCommandsTestCase, self).setUp()
        self.task = task.TaskCommands()
    @mock.patch("rally.cli.commands.task.open", create=True)
    def test__load_task(self, mock_open):
        """_load_task renders the template with args from file, inline
        args, or both — inline args taking precedence over the file."""
        input_task = "{'ab': {{test}}}"
        input_args = "{'test': 2}"

        # NOTE(boris-42): Such order of files is because we are reading
        #                 file with args before file with template.
        mock_open.side_effect = [
            mock.mock_open(read_data="{'test': 1}").return_value,
            mock.mock_open(read_data=input_task).return_value
        ]
        task_conf = self.task._load_task(
            "in_task", task_args_file="in_args_path")
        self.assertEqual({"ab": 1}, task_conf)

        mock_open.side_effect = [
            mock.mock_open(read_data=input_task).return_value
        ]
        task_conf = self.task._load_task(
            "in_task", task_args=input_args)
        self.assertEqual(task_conf, {"ab": 2})

        # Inline args win when both inline args and an args file are given.
        mock_open.side_effect = [
            mock.mock_open(read_data="{'test': 1}").return_value,
            mock.mock_open(read_data=input_task).return_value
        ]
        task_conf = self.task._load_task(
            "in_task", task_args=input_args, task_args_file="any_file")
        self.assertEqual(task_conf, {"ab": 2})
    @mock.patch("rally.cli.commands.task.open", create=True)
    def test__load_task_wrong_task_args_file(self, mock_open):
        """Malformed YAML in the task-args file raises FailedToLoadTask."""
        mock_open.side_effect = [
            mock.mock_open(read_data="{'test': {}").return_value
        ]
        self.assertRaises(task.FailedToLoadTask,
                          self.task._load_task,
                          "in_task", task_args_file="in_args_path")
    @mock.patch("rally.cli.commands.task.open", create=True)
    def test__load_task_wrong_task_args_file_exception(self, mock_open):
        """An IOError opening the task-args file propagates to the caller."""
        mock_open.side_effect = IOError
        self.assertRaises(IOError, self.task._load_task,
                          "in_task", task_args_file="in_args_path")
    def test__load_task_wrong_input_task_args(self):
        """Inline task args must parse to a YAML mapping; broken YAML and
        non-dict values both raise FailedToLoadTask."""
        self.assertRaises(task.FailedToLoadTask,
                          self.task._load_task, "in_task",
                          "{'test': {}")
        self.assertRaises(task.FailedToLoadTask,
                          self.task._load_task, "in_task", "[]")
    @mock.patch("rally.cli.commands.task.open", create=True)
    def test__load_task_task_render_raise_exc(self, mock_open):
        """An undefined template variable raises FailedToLoadTask."""
        mock_open.side_effect = [
            mock.mock_open(read_data="{'test': {{t}}}").return_value
        ]
        self.assertRaises(task.FailedToLoadTask,
                          self.task._load_task, "in_task")
    @mock.patch("rally.cli.commands.task.open", create=True)
    def test__load_task_task_not_in_yaml(self, mock_open):
        """A task file that is not valid YAML raises FailedToLoadTask."""
        mock_open.side_effect = [
            mock.mock_open(read_data="{'test': {}").return_value
        ]
        self.assertRaises(task.FailedToLoadTask,
                          self.task._load_task, "in_task")
    def test_load_task_including_other_template(self):
        """A task template that Jinja-includes another file must render
        identically to loading the included file directly."""
        other_template_path = os.path.join(
            os.path.dirname(__file__),
            "..", "..", "..", "..", "samples/tasks/scenarios/nova/boot.json")
        input_task = "{%% include \"%s\" %%}" % os.path.basename(
            other_template_path)
        expect = self.task._load_task(other_template_path)
        with mock.patch("rally.cli.commands.task.open",
                        create=True) as mock_open:
            mock_open.side_effect = [
                mock.mock_open(read_data=input_task).return_value
            ]
            # Same directory as the included file, so the include resolves.
            input_task_file = os.path.join(
                os.path.dirname(other_template_path), "input_task.json")
            actual = self.task._load_task(input_task_file)
        self.assertEqual(expect, actual)
    @mock.patch("rally.cli.commands.task.os.path.isfile", return_value=True)
    @mock.patch("rally.cli.commands.task.api.Task.validate",
                return_value=fakes.FakeTask())
    @mock.patch("rally.cli.commands.task.TaskCommands._load_task",
                return_value={"uuid": "some_uuid"})
    def test__load_and_validate_task(self, mock__load_task,
                                     mock_task_validate, mock_os_path_isfile):
        """Happy path: the task is loaded then validated against the
        given deployment."""
        deployment = "some_deployment_uuid"
        self.task._load_and_validate_task("some_task", "task_args",
                                          "task_args_file", deployment)
        mock__load_task.assert_called_once_with("some_task", "task_args",
                                                "task_args_file")
        mock_task_validate.assert_called_once_with(
            deployment, mock__load_task.return_value, None)
    @mock.patch("rally.api.Task.validate")
    def test__load_and_validate_file(self, mock_task_validate):
        """A missing task file raises IOError before validation happens."""
        deployment = "some_deployment_uuid"
        self.assertRaises(IOError, self.task._load_and_validate_task,
                          "some_task", "task_args",
                          "task_args_file", deployment)
    @mock.patch("rally.cli.commands.task.os.path.isfile", return_value=True)
    @mock.patch("rally.cli.commands.task.api.Task.create",
                return_value=fakes.FakeTask(uuid="some_new_uuid", tag="tag"))
    @mock.patch("rally.cli.commands.task.TaskCommands.use")
    @mock.patch("rally.cli.commands.task.TaskCommands.detailed")
    @mock.patch("rally.cli.commands.task.TaskCommands._load_task",
                return_value={"some": "json"})
    @mock.patch("rally.cli.commands.task.api.Task.validate",
                return_value=fakes.FakeTask(some="json", uuid="some_uuid",
                                            temporary=True))
    @mock.patch("rally.cli.commands.task.api.Task.start")
    def test_start(self, mock_task_start, mock_task_validate, mock__load_task,
                   mock_detailed, mock_use, mock_task_create,
                   mock_os_path_isfile):
        """start() with do_use=True creates and starts the task, then
        selects it via use() and prints details for the new uuid."""
        deployment_id = "e0617de9-77d1-4875-9b49-9d5789e29f20"
        task_path = "path_to_config.json"
        self.task.start(task_path, deployment_id, do_use=True)
        mock_task_create.assert_called_once_with(
            deployment_id, None)
        mock_task_start.assert_called_once_with(
            deployment_id, mock__load_task.return_value,
            task=mock_task_validate.return_value, abort_on_sla_failure=False)
        mock__load_task.assert_called_once_with(task_path, None, None)
        mock_use.assert_called_once_with("some_new_uuid")
        mock_detailed.assert_called_once_with(task_id="some_new_uuid")
    @mock.patch("rally.cli.commands.task.os.path.isfile", return_value=True)
    @mock.patch("rally.cli.commands.task.api.Task.create",
                return_value=fakes.FakeTask(uuid="new_uuid", tag="some_tag"))
    @mock.patch("rally.cli.commands.task.TaskCommands.detailed")
    @mock.patch("rally.cli.commands.task.api.Task.start")
    @mock.patch("rally.cli.commands.task.TaskCommands._load_task",
                return_value="some_config")
    @mock.patch("rally.cli.commands.task.api.Task.validate",
                return_value=fakes.FakeTask(uuid="some_id"))
    def test_start_with_task_args(self, mock_task_validate, mock__load_task,
                                  mock_task_start, mock_detailed,
                                  mock_task_create, mock_os_path_isfile):
        """start() forwards task args, args file and tag through loading,
        validation, creation and the final detailed() report."""
        task_path = mock.MagicMock()
        task_args = mock.MagicMock()
        task_args_file = mock.MagicMock()
        self.task.start(task_path, deployment="any", task_args=task_args,
                        task_args_file=task_args_file, tag="some_tag")
        mock__load_task.assert_called_once_with(task_path, task_args,
                                                task_args_file)
        mock_task_validate.assert_called_once_with(
            "any", mock__load_task.return_value, {})
        mock_task_start.assert_called_once_with(
            "any", mock__load_task.return_value,
            task=mock_task_create.return_value, abort_on_sla_failure=False)
        mock_detailed.assert_called_once_with(
            task_id=mock_task_create.return_value["uuid"])
        mock_task_create.assert_called_once_with("any", "some_tag")
@mock.patch("rally.cli.commands.task.envutils.get_global")
def test_start_no_deployment_id(self, mock_get_global):
    """With no deployment id resolvable, start must propagate the error."""
    mock_get_global.side_effect = exceptions.InvalidArgumentsException
    self.assertRaises(
        exceptions.InvalidArgumentsException,
        self.task.start, "path_to_config.json", None)
@mock.patch("rally.cli.commands.task.os.path.isfile", return_value=True)
@mock.patch("rally.cli.commands.task.api.Task.create",
            return_value=fakes.FakeTask(temporary=False, tag="tag",
                                        uuid="uuid"))
@mock.patch("rally.cli.commands.task.TaskCommands._load_task",
            return_value={"some": "json"})
@mock.patch("rally.cli.commands.task.api.Task.validate")
@mock.patch("rally.cli.commands.task.api.Task.start",
            side_effect=exceptions.InvalidTaskException)
def test_start_invalid_task(self, mock_task_start, mock_task_validate,
                            mock__load_task, mock_task_create,
                            mock_os_path_isfile):
    """If Task.start raises InvalidTaskException the command returns 1."""
    result = self.task.start("task_path", "deployment", tag="tag")
    self.assertEqual(1, result)
    # Creation and the failing start attempt still happen before the error.
    mock_task_create.assert_called_once_with("deployment", "tag")
    mock_task_start.assert_called_once_with(
        "deployment", mock__load_task.return_value,
        task=mock_task_create.return_value, abort_on_sla_failure=False)
@mock.patch("rally.cli.commands.task.api")
def test_abort(self, mock_api):
    """Abort must delegate to api.Task.abort with soft=False, async=False."""
    test_uuid = "17860c43-2274-498d-8669-448eff7b073f"
    mock_api.Task.abort = mock.MagicMock()
    self.task.abort(test_uuid)
    # "async" became a reserved keyword in Python 3.7, so it can no longer
    # be written as a literal keyword argument; unpack it from a dict to
    # keep the assertion identical while staying parseable.
    mock_api.Task.abort.assert_called_once_with(test_uuid, False,
                                                **{"async": False})
@mock.patch("rally.cli.commands.task.envutils.get_global")
def test_abort_no_task_id(self, mock_get_global):
    """Abort without a task id must propagate InvalidArgumentsException."""
    mock_get_global.side_effect = exceptions.InvalidArgumentsException
    self.assertRaises(
        exceptions.InvalidArgumentsException, self.task.abort, None)
def test_status(self):
    """Status command must fetch the task by its uuid."""
    task_uuid = "a3e7cefb-bec2-4802-89f6-410cc31f71af"
    fake_value = {"task_id": "task", "status": "status"}
    with mock.patch("rally.cli.commands.task.api.Task") as mock_task:
        mock_task.get = mock.MagicMock(return_value=fake_value)
        self.task.status(task_uuid)
        mock_task.get.assert_called_once_with(task_uuid)
@mock.patch("rally.cli.commands.task.envutils.get_global")
def test_status_no_task_id(self, mock_get_global):
    """Status without a task id must propagate InvalidArgumentsException."""
    mock_get_global.side_effect = exceptions.InvalidArgumentsException
    self.assertRaises(
        exceptions.InvalidArgumentsException, self.task.status, None)
@mock.patch("rally.cli.commands.task.api.Task")
def test_detailed(self, mock_task):
    """Detailed output must work both with and without per-iteration data.

    The fixture mixes iterations carrying the modern "output" key with one
    carrying the legacy "scenario_output" key to cover both code paths.
    """
    test_uuid = "c0d874d4-7195-4fd5-8688-abe82bfad36f"
    mock_task.get_detailed.return_value = {
        "id": "task",
        "uuid": test_uuid,
        "status": "status",
        "results": [
            {
                "key": {
                    "name": "fake_name",
                    "pos": "fake_pos",
                    "kw": "fake_kw"
                },
                "info": {
                    "load_duration": 3.2,
                    "full_duration": 3.5,
                    "iterations_count": 4,
                    "atomic": {"foo": {}, "bar": {}}},
                "iterations": [
                    {"duration": 0.9,
                     "idle_duration": 0.1,
                     "output": {"additive": [], "complete": []},
                     "atomic_actions": {"foo": 0.6, "bar": 0.7},
                     "error": ["type", "message", "traceback"]
                     },
                    {"duration": 1.2,
                     "idle_duration": 0.3,
                     "output": {"additive": [], "complete": []},
                     "atomic_actions": {"foo": 0.6, "bar": 0.7},
                     "error": ["type", "message", "traceback"]
                     },
                    {"duration": 0.7,
                     "idle_duration": 0.5,
                     # Legacy result format: "scenario_output" instead of
                     # "output".
                     "scenario_output": {
                         "data": {"foo": 0.6, "bar": 0.7},
                         "errors": "some"
                     },
                     "atomic_actions": {"foo": 0.6, "bar": 0.7},
                     "error": ["type", "message", "traceback"]
                     },
                    {"duration": 0.5,
                     "idle_duration": 0.5,
                     "output": {"additive": [], "complete": []},
                     "atomic_actions": {"foo": 0.6, "bar": 0.7},
                     "error": ["type", "message", "traceback"]
                     }
                ]
            }
        ]
    }
    self.task.detailed(test_uuid)
    mock_task.get_detailed.assert_called_once_with(test_uuid,
                                                   extended_results=True)
    # Second invocation exercises the iterations_data branch.
    self.task.detailed(test_uuid, iterations_data=True)
@mock.patch("rally.cli.commands.task.sys.stdout")
@mock.patch("rally.cli.commands.task.api.Task")
@mock.patch("rally.cli.commands.task.logging")
@ddt.data({"debug": True},
          {"debug": False})
@ddt.unpack
def test_detailed_task_failed(self, mock_logging, mock_task,
                              mock_stdout, debug):
    """A failed task prints the traceback in debug mode, otherwise the
    error type/message plus a hint to rerun with -vd.
    """
    test_uuid = "test_task_id"
    value = {
        "id": "task",
        "uuid": test_uuid,
        "status": consts.TaskStatus.FAILED,
        "results": [],
        # verification_log is a JSON-encoded [type, message, traceback].
        "verification_log": json.dumps(["error_type", "error_message",
                                        "error_traceback"])
    }
    mock_task.get_detailed = mock.MagicMock(return_value=value)
    mock_logging.is_debug.return_value = debug
    self.task.detailed(test_uuid)
    verification = yaml.safe_load(value["verification_log"])
    if debug:
        # Debug mode: only the traceback (index 2) is shown.
        expected_calls = [mock.call("Task test_task_id: failed"),
                          mock.call("%s" % verification[2])]
        mock_stdout.write.assert_has_calls(expected_calls, any_order=True)
    else:
        # Normal mode: type and message plus the -vd hint.
        expected_calls = [mock.call("Task test_task_id: failed"),
                          mock.call("%s" % verification[0]),
                          mock.call("%s" % verification[1]),
                          mock.call("\nFor more details run:\nrally "
                                    "-vd task detailed %s" % test_uuid)]
        mock_stdout.write.assert_has_calls(expected_calls, any_order=True)
@mock.patch("rally.cli.commands.task.api.Task")
@mock.patch("rally.cli.commands.task.sys.stdout")
def test_detailed_task_status_not_in_finished_abort(self,
                                                    mock_stdout,
                                                    mock_task):
    """A task still in 'init' state must print a friendly notice instead
    of results.
    """
    test_uuid = "test_task_id"
    value = {
        "id": "task",
        "uuid": test_uuid,
        "status": consts.TaskStatus.INIT,
        "results": []
    }
    mock_task.get_detailed = mock.MagicMock(return_value=value)
    self.task.detailed(test_uuid)
    expected_calls = [mock.call("Task test_task_id: init"),
                      mock.call("\nThe task test_task_id marked as "
                                "'init'. Results available when it "
                                "is 'finished'.")]
    mock_stdout.write.assert_has_calls(expected_calls, any_order=True)
@mock.patch("rally.cli.commands.task.envutils.get_global")
def test_detailed_no_task_id(self, mock_get_global):
    """Detailed without a task id must propagate the lookup failure."""
    mock_get_global.side_effect = exceptions.InvalidArgumentsException
    self.assertRaises(
        exceptions.InvalidArgumentsException, self.task.detailed, None)
@mock.patch("rally.cli.commands.task.api.Task")
def test_detailed_wrong_id(self, mock_task):
    """An unknown uuid (None from the API) must not crash detailed()."""
    unknown_uuid = "eb290c30-38d8-4c8f-bbcc-fc8f74b004ae"
    mock_task.get_detailed = mock.MagicMock(return_value=None)
    self.task.detailed(unknown_uuid)
    mock_task.get_detailed.assert_called_once_with(
        unknown_uuid, extended_results=True)
@mock.patch("json.dumps")
@mock.patch("rally.cli.commands.task.api.Task.get")
def test_results(self, mock_task_get, mock_json_dumps):
    """Results of a finished task are dumped as pretty-printed JSON."""
    task_id = "foo_task_id"
    data = [
        {"key": "foo_key", "data": {"raw": "foo_raw", "sla": [],
                                    "load_duration": 1.0,
                                    "full_duration": 2.0}}
    ]
    # NOTE: a list comprehension instead of map() — on Python 3 map()
    # returns a lazy iterator, which is not a sequence and would break
    # assertSequenceEqual below.
    result = [{"key": x["key"],
               "result": x["data"]["raw"],
               "load_duration": x["data"]["load_duration"],
               "full_duration": x["data"]["full_duration"],
               "sla": x["data"]["sla"]} for x in data]
    fake_task = fakes.FakeTask({"status": consts.TaskStatus.FINISHED})
    fake_task.get_results = mock.MagicMock(return_value=data)
    mock_task_get.return_value = fake_task
    self.task.results(task_id)
    self.assertEqual(1, mock_json_dumps.call_count)
    self.assertEqual(1, len(mock_json_dumps.call_args[0]))
    self.assertSequenceEqual(result, mock_json_dumps.call_args[0][0])
    self.assertEqual({"sort_keys": True, "indent": 4},
                     mock_json_dumps.call_args[1])
    mock_task_get.assert_called_once_with(task_id)
@mock.patch("rally.cli.commands.task.sys.stdout")
@mock.patch("rally.cli.commands.task.api.Task.get")
def test_results_no_data(self, mock_task_get, mock_stdout):
    """A failed task has no results: return 1 and explain which states
    would have them.
    """
    task_id = "foo_task_id"
    mock_task_get.return_value = fakes.FakeTask(
        {"status": consts.TaskStatus.FAILED})
    self.assertEqual(1, self.task.results(task_id))
    mock_task_get.assert_called_once_with(task_id)
    valid_states = ", ".join((consts.TaskStatus.FINISHED,
                              consts.TaskStatus.ABORTED))
    expected_out = ("Task status is %s. Results "
                    "available when it is one of %s.") % (
        consts.TaskStatus.FAILED, valid_states)
    mock_stdout.write.assert_has_calls([mock.call(expected_out)])
def _make_result(self, keys):
    """Build a fake raw-results list with one entry per scenario key."""
    results = []
    for key in keys:
        results.append({"key": {"name": key, "pos": 0},
                        "data": {"raw": key + "_raw",
                                 "sla": key + "_sla",
                                 "load_duration": 1.2,
                                 "full_duration": 2.3}})
    return results
@mock.patch("rally.cli.commands.task.jsonschema.validate",
            return_value=None)
@mock.patch("rally.cli.commands.task.os.path")
@mock.patch("rally.cli.commands.task.open", create=True)
@mock.patch("rally.cli.commands.task.plot")
@mock.patch("rally.cli.commands.task.api.Task.get")
@mock.patch("rally.cli.commands.task.webbrowser")
def test_trends(self, mock_webbrowser, mock_task_get, mock_plot,
                mock_open, mock_os_path, mock_validate):
    """Trends must merge results from task uuids and result files, render
    the report and write it out without opening a browser.
    """
    # Only paths starting with "path_to_" are treated as existing files;
    # everything else is resolved as a task uuid via the API.
    mock_os_path.exists = lambda p: p.startswith("path_to_")
    mock_os_path.expanduser = lambda p: p + "_expanded"
    mock_os_path.realpath.side_effect = lambda p: "realpath_" + p
    results_iter = iter([self._make_result(["bar"]),
                         self._make_result(["spam"])])
    mock_task_get.return_value.get_results.side_effect = results_iter
    mock_plot.trends.return_value = "rendered_trends_report"
    mock_fd = mock.mock_open(
        read_data="[\"result_1_from_file\", \"result_2_from_file\"]")
    mock_open.side_effect = mock_fd
    ret = self.task.trends(tasks=["ab123456-38d8-4c8f-bbcc-fc8f74b004ae",
                                  "cd654321-38d8-4c8f-bbcc-fc8f74b004ae",
                                  "path_to_file"],
                           out="output.html", out_format="html")
    expected = [
        {"load_duration": 1.2, "full_duration": 2.3, "sla": "bar_sla",
         "key": {"name": "bar", "pos": 0}, "result": "bar_raw"},
        {"load_duration": 1.2, "full_duration": 2.3, "sla": "spam_sla",
         "key": {"name": "spam", "pos": 0}, "result": "spam_raw"},
        "result_1_from_file", "result_2_from_file"]
    mock_plot.trends.assert_called_once_with(expected)
    # The input file is read and the report written out.
    self.assertEqual([mock.call("path_to_file_expanded", "r"),
                      mock.call("output.html_expanded", "w+")],
                     mock_open.mock_calls)
    self.assertIsNone(ret)
    # File-based results are validated against the task result schema.
    self.assertEqual([mock.call("result_1_from_file",
                                task.api.Task.TASK_RESULT_SCHEMA),
                      mock.call("result_2_from_file",
                                task.api.Task.TASK_RESULT_SCHEMA)],
                     mock_validate.mock_calls)
    self.assertEqual([mock.call("ab123456-38d8-4c8f-bbcc-fc8f74b004ae"),
                      mock.call().get_results(),
                      mock.call("cd654321-38d8-4c8f-bbcc-fc8f74b004ae"),
                      mock.call().get_results()],
                     mock_task_get.mock_calls)
    # open_it was not requested, so no browser tab is opened.
    self.assertFalse(mock_webbrowser.open_new_tab.called)
    mock_fd.return_value.write.assert_called_once_with(
        "rendered_trends_report")
@mock.patch("rally.cli.commands.task.jsonschema.validate",
            return_value=None)
@mock.patch("rally.cli.commands.task.os.path")
@mock.patch("rally.cli.commands.task.open", create=True)
@mock.patch("rally.cli.commands.task.plot")
@mock.patch("rally.cli.commands.task.webbrowser")
def test_trends_single_file_and_open_webbrowser(
        self, mock_webbrowser, mock_plot, mock_open, mock_os_path,
        mock_validate):
    """With open_it=True the generated report is opened in a browser tab
    via a file:// URL of its real path.
    """
    mock_os_path.exists.return_value = True
    mock_os_path.expanduser = lambda path: path
    mock_os_path.realpath.side_effect = lambda p: "realpath_" + p
    mock_open.side_effect = mock.mock_open(read_data="[\"result\"]")
    ret = self.task.trends(tasks=["path_to_file"], open_it=True,
                           out="output.html", out_format="html")
    self.assertIsNone(ret)
    mock_webbrowser.open_new_tab.assert_called_once_with(
        "file://realpath_output.html")
@mock.patch("rally.cli.commands.task.os.path")
@mock.patch("rally.cli.commands.task.open", create=True)
@mock.patch("rally.cli.commands.task.plot")
@mock.patch("rally.cli.commands.task.api.Task.get")
def test_trends_task_id_is_not_uuid_like(self, mock_task_get, mock_plot,
                                         mock_open, mock_os_path):
    """A non-existing path that is also not a uuid must fail with 1."""
    mock_os_path.exists.return_value = False
    mock_task_get.return_value.get_results.return_value = (
        self._make_result(["foo"]))
    # A well-formed uuid is accepted even though the path does not exist.
    ret = self.task.trends(tasks=["ab123456-38d8-4c8f-bbcc-fc8f74b004ae"],
                           out="output.html", out_format="html")
    self.assertIsNone(ret)
    # Anything else is rejected.
    ret = self.task.trends(tasks=["this-is-not-uuid"],
                           out="output.html", out_format="html")
    self.assertEqual(1, ret)
@mock.patch("rally.cli.commands.task.os.path")
@mock.patch("rally.cli.commands.task.open", create=True)
@mock.patch("rally.cli.commands.task.plot")
def test_trends_wrong_results_format(self, mock_plot,
                                     mock_open, mock_os_path):
    """File contents failing schema validation must make trends return 1;
    with a permissive schema the same contents pass.
    """
    mock_os_path.exists.return_value = True
    mock_open.side_effect = mock.mock_open(read_data="[42]")
    ret = self.task.trends(tasks=["path_to_file"],
                           out="output.html", out_format="html")
    self.assertEqual(1, ret)
    # Swap in a schema that accepts numbers: now the same file validates.
    with mock.patch("rally.cli.commands.task.api.Task.TASK_RESULT_SCHEMA",
                    {"type": "number"}):
        ret = self.task.trends(tasks=["path_to_file"],
                               out="output.html", out_format="html")
        self.assertIsNone(ret)
def test_trends_no_tasks_given(self):
    """An empty task list is an error: trends must return 1."""
    self.assertEqual(
        1, self.task.trends(tasks=[], out="output.html",
                            out_format="html"))
@mock.patch("rally.cli.commands.task.jsonschema.validate",
            return_value=None)
@mock.patch("rally.cli.commands.task.os.path.realpath",
            side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cli.commands.task.open",
            side_effect=mock.mock_open(), create=True)
@mock.patch("rally.cli.commands.task.plot")
@mock.patch("rally.cli.commands.task.webbrowser")
@mock.patch("rally.cli.commands.task.api.Task.get")
def test_report_one_uuid(self, mock_task_get, mock_webbrowser,
                         mock_plot, mock_open, mock_realpath,
                         mock_validate):
    """Report generation from a single task uuid must support the html,
    junit and html_static output formats plus open_it.
    """
    task_id = "eb290c30-38d8-4c8f-bbcc-fc8f74b004ae"
    data = [
        {"key": {"name": "class.test", "pos": 0},
         "data": {"raw": "foo_raw", "sla": "foo_sla",
                  "load_duration": 0.1,
                  "full_duration": 1.2}},
        {"key": {"name": "class.test", "pos": 0},
         "data": {"raw": "bar_raw", "sla": "bar_sla",
                  "load_duration": 2.1,
                  "full_duration": 2.2}}]
    # Shape the raw data the way plot.plot() expects to receive it.
    results = [{"key": x["key"],
                "result": x["data"]["raw"],
                "sla": x["data"]["sla"],
                "load_duration": x["data"]["load_duration"],
                "full_duration": x["data"]["full_duration"]}
               for x in data]
    mock_results = mock.Mock(return_value=data)
    mock_task_get.return_value = mock.Mock(get_results=mock_results)
    mock_plot.plot.return_value = "html_report"

    def reset_mocks():
        # Clear call history between the format scenarios below.
        for m in mock_task_get, mock_webbrowser, mock_plot, mock_open:
            m.reset_mock()
    self.task.report(tasks=task_id, out="/tmp/%s.html" % task_id)
    mock_open.assert_called_once_with("/tmp/%s.html" % task_id, "w+")
    mock_plot.plot.assert_called_once_with(results, include_libs=False)
    mock_open.side_effect().write.assert_called_once_with("html_report")
    mock_task_get.assert_called_once_with(task_id)
    # JUnit
    reset_mocks()
    self.task.report(tasks=task_id, out="/tmp/%s.html" % task_id,
                     out_format="junit")
    mock_open.assert_called_once_with("/tmp/%s.html" % task_id, "w+")
    # JUnit output bypasses the HTML plot renderer entirely.
    self.assertFalse(mock_plot.plot.called)
    # HTML
    reset_mocks()
    self.task.report(task_id, out="output.html", open_it=True,
                     out_format="html")
    mock_webbrowser.open_new_tab.assert_called_once_with(
        "file://realpath_output.html")
    mock_plot.plot.assert_called_once_with(results, include_libs=False)
    # HTML with embedded JS/CSS
    reset_mocks()
    self.task.report(task_id, open_it=False, out="output.html",
                     out_format="html_static")
    self.assertFalse(mock_webbrowser.open_new_tab.called)
    mock_plot.plot.assert_called_once_with(results, include_libs=True)
@mock.patch("rally.cli.commands.task.jsonschema.validate",
            return_value=None)
@mock.patch("rally.cli.commands.task.os.path.realpath",
            side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cli.commands.task.open",
            side_effect=mock.mock_open(), create=True)
@mock.patch("rally.cli.commands.task.plot")
@mock.patch("rally.cli.commands.task.webbrowser")
@mock.patch("rally.cli.commands.task.api.Task.get")
def test_report_bunch_uuids(self, mock_task_get, mock_webbrowser,
                            mock_plot, mock_open, mock_realpath,
                            mock_validate):
    """Reporting several task uuids must concatenate all their results
    into a single rendered report.
    """
    tasks = ["eb290c30-38d8-4c8f-bbcc-fc8f74b004ae",
             "eb290c30-38d8-4c8f-bbcc-fc8f74b004af"]
    data = [
        {"key": {"name": "test", "pos": 0},
         "data": {"raw": "foo_raw", "sla": "foo_sla",
                  "load_duration": 0.1,
                  "full_duration": 1.2}},
        {"key": {"name": "test", "pos": 0},
         "data": {"raw": "bar_raw", "sla": "bar_sla",
                  "load_duration": 2.1,
                  "full_duration": 2.2}}]
    # Both uuids return the same fixture, so the expected input to
    # plot.plot() is the reshaped data repeated once per task.
    results = []
    for task_uuid in tasks:
        results.extend(
            map(lambda x: {"key": x["key"],
                           "result": x["data"]["raw"],
                           "sla": x["data"]["sla"],
                           "load_duration": x["data"]["load_duration"],
                           "full_duration": x["data"]["full_duration"]},
                data))
    mock_results = mock.Mock(return_value=data)
    mock_task_get.return_value = mock.Mock(get_results=mock_results)
    mock_plot.plot.return_value = "html_report"

    def reset_mocks():
        # Helper kept for symmetry with the other report tests.
        for m in mock_task_get, mock_webbrowser, mock_plot, mock_open:
            m.reset_mock()
    self.task.report(tasks=tasks, out="/tmp/1_test.html")
    mock_open.assert_called_once_with("/tmp/1_test.html", "w+")
    mock_plot.plot.assert_called_once_with(results, include_libs=False)
    mock_open.side_effect().write.assert_called_once_with("html_report")
    expected_get_calls = [mock.call(task) for task in tasks]
    mock_task_get.assert_has_calls(expected_get_calls, any_order=True)
@mock.patch("rally.cli.commands.task.json.load")
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.jsonschema.validate",
            return_value=None)
@mock.patch("rally.cli.commands.task.os.path.realpath",
            side_effect=lambda p: "realpath_%s" % p)
@mock.patch("rally.cli.commands.task.open", create=True)
@mock.patch("rally.cli.commands.task.plot")
def test_report_one_file(self, mock_plot, mock_open, mock_realpath,
                         mock_validate, mock_path_exists, mock_json_load):
    """A report can be built from a JSON results file instead of a uuid."""
    task_file = "/tmp/some_file.json"
    data = [
        {"key": {"name": "test", "pos": 0},
         "data": {"raw": "foo_raw", "sla": "foo_sla",
                  "load_duration": 0.1,
                  "full_duration": 1.2}},
        {"key": {"name": "test", "pos": 1},
         "data": {"raw": "bar_raw", "sla": "bar_sla",
                  "load_duration": 2.1,
                  "full_duration": 2.2}}]
    results = [{"key": x["key"],
                "result": x["data"]["raw"],
                "sla": x["data"]["sla"],
                "load_duration": x["data"]["load_duration"],
                "full_duration": x["data"]["full_duration"]}
               for x in data]
    mock_plot.plot.return_value = "html_report"
    mock_open.side_effect = mock.mock_open(read_data=results)
    mock_json_load.return_value = results

    def reset_mocks():
        # Helper kept for symmetry with the other report tests.
        for m in mock_plot, mock_open, mock_json_load, mock_validate:
            m.reset_mock()
    self.task.report(tasks=task_file, out="/tmp/1_test.html")
    # The results file is read and the rendered report is written.
    expected_open_calls = [mock.call(task_file, "r"),
                           mock.call("/tmp/1_test.html", "w+")]
    mock_open.assert_has_calls(expected_open_calls, any_order=True)
    mock_plot.plot.assert_called_once_with(results, include_libs=False)
    mock_open.side_effect().write.assert_called_once_with("html_report")
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.json.load")
@mock.patch("rally.cli.commands.task.open", create=True)
def test_report_exceptions(self, mock_open, mock_json_load,
                           mock_path_exists):
    """Schema-invalid results or a missing results file must both make
    report() return 1.
    """
    results = [
        {"key": {"name": "test", "pos": 0},
         "data": {"raw": "foo_raw", "sla": "foo_sla",
                  "load_duration": 0.1,
                  "full_duration": 1.2}}]
    mock_open.side_effect = mock.mock_open(read_data=results)
    mock_json_load.return_value = results
    ret = self.task.report(tasks="/tmp/task.json",
                           out="/tmp/tmp.hsml")
    # Expected-first argument order, consistent with the rest of the file.
    self.assertEqual(1, ret)
    for m in mock_open, mock_json_load:
        m.reset_mock()
    # Second scenario: the results file does not exist at all.
    mock_path_exists.return_value = False
    ret = self.task.report(tasks="/tmp/task.json",
                           out="/tmp/tmp.hsml")
    self.assertEqual(1, ret)
@mock.patch("rally.cli.commands.task.sys.stderr")
@mock.patch("rally.cli.commands.task.os.path.exists", return_value=True)
@mock.patch("rally.cli.commands.task.json.load")
@mock.patch("rally.cli.commands.task.open", create=True)
def test_report_invalid_format(self, mock_open, mock_json_load,
                               mock_path_exists, mock_stderr):
    """An unknown out_format must return 1 and report it on stderr."""
    result = self.task.report(tasks="/tmp/task.json", out="/tmp/tmp.html",
                              out_format="invalid")
    self.assertEqual(1, result)
    expected_out = "Invalid output format: invalid"
    mock_stderr.write.assert_has_calls([mock.call(expected_out)])
@mock.patch("rally.cli.commands.task.cliutils.print_list")
@mock.patch("rally.cli.commands.task.envutils.get_global",
            return_value="123456789")
@mock.patch("rally.cli.commands.task.api.Task.list",
            return_value=[fakes.FakeTask(uuid="a",
                                         created_at=dt.datetime.now(),
                                         updated_at=dt.datetime.now(),
                                         status="c",
                                         tag="d",
                                         deployment_name="some_name")])
def test_list(self, mock_task_list, mock_get_global, mock_print_list):
    """Listing by status must query the current deployment and print a
    table sorted by creation time.
    """
    self.task.list(status="running")
    mock_task_list.assert_called_once_with(
        deployment=mock_get_global.return_value,
        status=consts.TaskStatus.RUNNING)
    headers = ["uuid", "deployment_name", "created_at", "duration",
               "status", "tag"]
    mock_print_list.assert_called_once_with(
        mock_task_list.return_value, headers,
        sortby_index=headers.index("created_at"))
@mock.patch("rally.cli.commands.task.cliutils.print_list")
@mock.patch("rally.cli.commands.task.envutils.get_global",
            return_value="123456789")
@mock.patch("rally.cli.commands.task.api.Task.list",
            return_value=[fakes.FakeTask(uuid="a",
                                         created_at=dt.datetime.now(),
                                         updated_at=dt.datetime.now(),
                                         status="c",
                                         tag="d",
                                         deployment_name="some_name")])
def test_list_uuids_only(self, mock_task_list, mock_get_global,
                         mock_print_list):
    """uuids_only=True must print a bare uuid column without decoration."""
    self.task.list(status="running", uuids_only=True)
    mock_task_list.assert_called_once_with(
        deployment=mock_get_global.return_value,
        status=consts.TaskStatus.RUNNING)
    mock_print_list.assert_called_once_with(
        mock_task_list.return_value, ["uuid"],
        print_header=False, print_border=False)
def test_list_wrong_status(self):
    """An unrecognized status string must make list() return 1."""
    ret = self.task.list(deployment="fake",
                         status="wrong non existing status")
    self.assertEqual(1, ret)
@mock.patch("rally.cli.commands.task.api.Task.list", return_value=[])
def test_list_no_results(self, mock_task_list):
    """An empty task list must be handled for both the all-deployments
    query and a filtered one.
    """
    # all_deployments drops every filter from the API call.
    self.assertIsNone(
        self.task.list(deployment="fake", all_deployments=True))
    mock_task_list.assert_called_once_with()
    mock_task_list.reset_mock()
    # A specific deployment/status pair is forwarded as-is.
    self.assertIsNone(
        self.task.list(deployment="d", status=consts.TaskStatus.RUNNING)
    )
    mock_task_list.assert_called_once_with(
        deployment="d", status=consts.TaskStatus.RUNNING)
def test_delete(self):
    """Deleting a single uuid must pass the force flag through."""
    uuid = "8dcb9c5e-d60b-4022-8975-b5987c7833f7"
    with mock.patch("rally.cli.commands.task.api") as mock_api:
        mock_api.Task.delete = mock.Mock()
        self.task.delete(uuid, force=False)
        mock_api.Task.delete.assert_called_once_with(uuid, force=False)
@mock.patch("rally.cli.commands.task.api")
def test_delete_multiple_uuid(self, mock_api):
    """Deleting a list of uuids must issue one API delete per uuid."""
    task_uuids = ["4bf35b06-5916-484f-9547-12dce94902b7",
                  "52cad69d-d3e4-47e1-b445-dec9c5858fe8",
                  "6a3cb11c-ac75-41e7-8ae7-935732bfb48f",
                  "018af931-0e5a-40d5-9d6f-b13f4a3a09fc"]
    force = False
    self.task.delete(task_uuids, force=force)
    # assertEqual instead of assertTrue(a == b): on failure it reports
    # both values instead of just "False is not true".
    self.assertEqual(len(task_uuids), mock_api.Task.delete.call_count)
    expected_calls = [mock.call(task_uuid, force=force) for task_uuid
                      in task_uuids]
    self.assertEqual(expected_calls, mock_api.Task.delete.mock_calls)
@mock.patch("rally.cli.commands.task.cliutils.print_list")
@mock.patch("rally.cli.commands.task.api.Task.get")
def test_sla_check(self, mock_task_get, mock_print_list):
    """sla_check returns 1 when any SLA failed and 0 once all pass."""
    data = [{"key": {"name": "fake_name",
                     "pos": "fake_pos",
                     "kw": "fake_kw"},
             "data": {"scenario_duration": 42.0,
                      "raw": [],
                      "sla": [{"benchmark": "KeystoneBasic.create_user",
                               "criterion": "max_seconds_per_iteration",
                               "pos": 0,
                               "success": False,
                               "detail": "Max foo, actually bar"}]}}]
    # deepcopy so the mutation below does not affect this first call.
    mock_task_get().get_results.return_value = copy.deepcopy(data)
    result = self.task.sla_check(task_id="fake_task_id")
    self.assertEqual(1, result)
    mock_task_get.assert_called_with("fake_task_id")
    # Flip the single SLA to success: the check must now pass.
    data[0]["data"]["sla"][0]["success"] = True
    mock_task_get().get_results.return_value = data
    result = self.task.sla_check(task_id="fake_task_id", tojson=True)
    self.assertEqual(0, result)
@mock.patch("rally.cli.commands.task.os.path.isfile", return_value=True)
@mock.patch("rally.api.Task.validate")
@mock.patch("rally.cli.commands.task.open",
            side_effect=mock.mock_open(read_data="{\"some\": \"json\"}"),
            create=True)
def test_validate(self, mock_open, mock_task_validate,
                  mock_os_path_isfile):
    """Validate must load the config file and hand the parsed dict to
    api.Task.validate.
    """
    self.task.validate("path_to_config.json", "fake_id")
    mock_task_validate.assert_called_once_with("fake_id", {"some": "json"},
                                               None)
@mock.patch("rally.cli.commands.task.os.path.isfile", return_value=True)
@mock.patch("rally.cli.commands.task.TaskCommands._load_task",
            side_effect=task.FailedToLoadTask)
def test_validate_failed_to_load_task(self, mock__load_task,
                                      mock_os_path_isfile):
    """A task that fails to load makes validate() return 1."""
    task_args = mock.MagicMock()
    task_args_file = mock.MagicMock()
    result = self.task.validate("path_to_task", "fake_deployment_id",
                                task_args=task_args,
                                task_args_file=task_args_file)
    self.assertEqual(1, result)
    mock__load_task.assert_called_once_with(
        "path_to_task", task_args, task_args_file)
@mock.patch("rally.cli.commands.task.os.path.isfile", return_value=True)
@mock.patch("rally.cli.commands.task.TaskCommands._load_task")
@mock.patch("rally.api.Task.validate")
def test_validate_invalid(self, mock_task_validate, mock__load_task,
                          mock_os_path_isfile):
    """An InvalidTaskException during validation yields exit code 1."""
    mock_task_validate.side_effect = exceptions.InvalidTaskException
    self.assertEqual(1, self.task.validate("path_to_task", "deployment"))
    mock_task_validate.assert_called_once_with(
        "deployment", mock__load_task.return_value, None)
@mock.patch("rally.common.fileutils._rewrite_env_file")
@mock.patch("rally.cli.commands.task.api.Task.get", return_value=True)
def test_use(self, mock_task_get, mock__rewrite_env_file):
    """use() must persist the selected task uuid in the globals file."""
    task_id = "80422553-5774-44bd-98ac-38bd8c7a0feb"
    self.task.use(task_id)
    mock__rewrite_env_file.assert_called_once_with(
        os.path.expanduser("~/.rally/globals"),
        ["RALLY_TASK=%s\n" % task_id])
@mock.patch("rally.cli.commands.task.api.Task.get")
def test_use_not_found(self, mock_task_get):
    """Selecting a nonexistent task must propagate TaskNotFound."""
    missing_id = "ddc3f8ba-082a-496d-b18f-72cdf5c10a14"
    mock_task_get.side_effect = exceptions.TaskNotFound(uuid=missing_id)
    self.assertRaises(exceptions.TaskNotFound, self.task.use, missing_id)
@mock.patch("rally.task.exporter.TaskExporter.get")
def test_export(self, mock_task_exporter_get):
    """export() must resolve the exporter by scheme and invoke it."""
    exporter_client = mock.Mock()
    mock_task_exporter_get.return_value = mock.Mock(
        return_value=exporter_client)
    self.task.export("fake_uuid", "file-exporter:///fake_path.json")
    mock_task_exporter_get.assert_called_once_with("file-exporter")
    exporter_client.export.assert_called_once_with("fake_uuid")
@mock.patch("rally.task.exporter.TaskExporter.get")
def test_export_exception(self, mock_task_exporter_get):
    """An IOError from the exporter must not escape export()."""
    exporter_client = mock.Mock()
    mock_task_exporter_get.return_value = mock.Mock(
        return_value=exporter_client)
    exporter_client.export.side_effect = IOError
    self.task.export("fake_uuid", "file-exporter:///fake_path.json")
    mock_task_exporter_get.assert_called_once_with("file-exporter")
    exporter_client.export.assert_called_once_with("fake_uuid")
@mock.patch("rally.cli.commands.task.sys.stdout")
@mock.patch("rally.task.exporter.TaskExporter.get")
def test_export_InvalidConnectionString(self, mock_task_exporter_get,
                                        mock_stdout):
    """A bad connection string must be reported on stdout, not raised."""
    mock_exporter_class = mock.Mock(
        side_effect=exceptions.InvalidConnectionString)
    mock_task_exporter_get.return_value = mock_exporter_class
    self.task.export("fake_uuid", "file-exporter:///fake_path.json")
    mock_stdout.write.assert_has_calls([
        mock.call("The connection string is not valid: None. "
                  "Please check your connection string."),
        mock.call("\n")])
    mock_task_exporter_get.assert_called_once_with("file-exporter")
@mock.patch("rally.cli.commands.task.plot.charts")
@mock.patch("rally.cli.commands.task.sys.stdout")
@mock.patch("rally.cli.commands.task.api.Task")
@ddt.data({"error_type": "test_no_trace_type",
           "error_message": "no_trace_error_message",
           "error_traceback": None,
           },
          {"error_type": "test_error_type",
           "error_message": "test_error_message",
           "error_traceback": "test\nerror\ntraceback",
           })
@ddt.unpack
def test_show_task_errors_no_trace(self, mock_task, mock_stdout,
                                   mock_charts, error_type, error_message,
                                   error_traceback=None):
    """Iteration errors with and without a traceback must both print
    something sensible (a placeholder when the traceback is missing).
    """
    mock_charts.MainStatsTable.columns = ["Column 1", "Column 2"]
    test_uuid = "test_task_id"
    # A 2-element error list means "no traceback recorded".
    error_data = [error_type, error_message]
    if error_traceback:
        error_data.append(error_traceback)
    mock_task.get_detailed.return_value = {
        "id": "task",
        "uuid": test_uuid,
        "status": "finished",
        "results": [{
            "key": {
                "name": "fake_name",
                "pos": "fake_pos",
                "kw": "fake_kw"
            },
            "info": {
                "stat": {"cols": ["Column 1", "Column 2"],
                         "rows": [[11, 22], [33, 44]]},
                "load_duration": 3.2,
                "full_duration": 3.5,
                "iterations_count": 1,
                "iterations_failed": 1,
                "atomic": {"foo": {}, "bar": {}}},
            "iterations": [
                {"duration": 0.9,
                 "idle_duration": 0.1,
                 "output": {"additive": [], "complete": []},
                 "atomic_actions": {"foo": 0.6, "bar": 0.7},
                 "error": error_data
                 },
            ]},
        ],
        "verification_log": json.dumps([error_type, error_message,
                                        error_traceback])
    }
    self.task.detailed(test_uuid)
    mock_task.get_detailed.assert_called_once_with(test_uuid,
                                                   extended_results=True)
    # The placeholder is printed when no traceback was captured.
    mock_stdout.write.assert_has_calls([
        mock.call(error_traceback or "No traceback available.")
    ], any_order=False)
|
{
"content_hash": "8177e192719834a56d29b7b5377e180e",
"timestamp": "",
"source": "github",
"line_count": 969,
"max_line_length": 79,
"avg_line_length": 47.84004127966976,
"alnum_prop": 0.543909226222577,
"repo_name": "eayunstack/rally",
"id": "2e95fffc4e1cb42a0dcdc03f43b69a1db24a304d",
"size": "46987",
"binary": false,
"copies": "2",
"ref": "refs/heads/product",
"path": "tests/unit/cli/commands/test_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36716"
},
{
"name": "Mako",
"bytes": "17389"
},
{
"name": "Python",
"bytes": "2988245"
},
{
"name": "Shell",
"bytes": "41128"
}
],
"symlink_target": ""
}
|
from __future__ import print_function # for stderr
import ConfigParser
import cgi
import cgitb
import csv
import datetime
import glob
import gzip
import json
import os
import re
import suds
import suds.client
import sys
import time
from io import BytesIO
from lxml import etree as et
# Relative path to the configuration file; deliberately kept outside the
# web root so the web server cannot serve it.
config_file_name = "../../newspaper_statistics.py.cfg"  # outside web root.
encoding = "utf-8"  # Character encoding used for all generated output.
# ---
# Collect request parameters.  With command-line arguments present we run as
# a CLI tool; otherwise we assume we were invoked as a CGI script.  (May look
# at SCRIPT_* environment variables instead.)
commandLine = len(sys.argv) > 1
if commandLine:
    # parse command line arguments on form "fromDate=2015-03-03" as map
    parameters = {}
    for arg in sys.argv[1:]:
        keyvalue = arg.partition("=")
        # BUG FIX: the original tested `(keyvalue[2]) > 0`, comparing the
        # value *string* to 0 — under Python 2 ordering that is always
        # true, so even "key" without "=value" slipped through.  The
        # intended test is on the length of the value part.
        if len(keyvalue[2]) > 0:
            parameters[keyvalue[0]] = keyvalue[2]
else:
    # We are a cgi script
    cgitb.enable()
    fieldStorage = cgi.FieldStorage()
    parameters = dict((key, fieldStorage.getvalue(key)) for key in fieldStorage.keys())
# -- load configuration file. If not found, provide absolute path looked at,
# so the operator can see exactly where the lookup happened.
absolute_config_file_name = os.path.abspath(config_file_name)
if not os.path.exists(absolute_config_file_name):
    # Print to stderr so the message does not corrupt CGI output.
    # http://stackoverflow.com/a/14981125/53897
    print("Configuration file not found: ", absolute_config_file_name, file=sys.stderr)
    exit(1)
config = ConfigParser.SafeConfigParser()
config.read(config_file_name)
# -- create web service client from WSDL url. see https://fedorahosted.org/suds/wiki/Documentation
mediestream_wsdl = config.get("cgi", "mediestream_wsdl")
if not mediestream_wsdl:
    raise ValueError("no value for [cgi] mediestream_wsdl")
# FIXME: Explain below problem better.
# We need to disable the cache to avoid jumping through SELinux hoops but
# suds is a pain in the a** and has no way to properly disable caching
# This just crudely redefines the default ObjectCache() to be NoCache()
# noinspection PyUnusedLocal
def ObjectCache(**kw):
    """Drop-in replacement for suds' default cache factory: always NoCache."""
    # noinspection PyUnresolvedReferences
    return suds.cache.NoCache()
# Monkey-patch suds so every Client created below uses the no-op cache.
suds.client.ObjectCache = ObjectCache
mediestream_webservice = suds.client.Client(mediestream_wsdl)
# -- extract configuration and setup
if "type" in parameters:
    requiredType = parameters["type"]
else:
    # We cannot generically ask for any type in batch.
    raise ValueError("'type' must be a parameter.")
if "chunksize" in parameters:
    chunksize = int(parameters["chunksize"])
else:
    # raise ValueError("'chunksize' (maximum size of summa request) must be a numeric parameter.")
    chunksize = 100  # default to recommended by toes@kb.dk if none given (for backwards compatibility)
if "fromDate" in parameters:
    start_str = parameters["fromDate"]  # "2013-06-15"
else:
    start_str = "2017-06-01"
if "toDate" in parameters:
    end_str = parameters["toDate"]
else:
    end_str = "2018-07-01"
# Example: d68a0380-012a-4cd8-8e5b-37adf6c2d47f (optionally trailed by a ".fileending")
# FIX: raw string literal so "\." is not an invalid escape sequence
# (SyntaxWarning on modern Python); the pattern itself is unchanged.
re_doms_id_from_url = re.compile(r"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})(\.[a-zA-Z0-9]*)?$")
statistics_file_pattern = config.get("cgi", "statistics_file_name_pattern")
if not statistics_file_pattern:
    raise ValueError("no value for [cgi] statistics_file_name_pattern")
# http://stackoverflow.com/a/2997846/53897 - 10:00 is to avoid timezone issues in general.
start_date = datetime.date.fromtimestamp(time.mktime(time.strptime(start_str + " 10:00", '%Y-%m-%d %H:%M')))
end_date = datetime.date.fromtimestamp(time.mktime(time.strptime(end_str + " 10:00", '%Y-%m-%d %H:%M')))
# XML namespaces used when picking fields out of the Summa "shortformat" records.
namespaces = {
    "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
    "dc": "http://purl.org/dc/elements/1.1/"
}
downloadPDF = requiredType == "Download"
# -- go
# Titles for columns in CSV:
fieldnames = ["Timestamp", "Type", "AvisID", "Avis", "Adgangstype", "Udgivelsestidspunkt", "Udgivelsesnummer",
              "Sidenummer", "Sektion", "Klient", "schacHomeOrganization", "eduPersonPrimaryAffiliation",
              "eduPersonScopedAffiliation", "eduPersonPrincipalName", "eduPersonTargetedID",
              "SBIPRoleMapper", "MediestreamFullAccess", "UUID"]
if not commandLine:
    # Running as CGI: emit HTTP headers asking the browser to download CSV.
    filename = "newspaper_stat-" + start_str + "-" + end_str
    if requiredType != "":
        filename = filename + "-" + requiredType
    print("Content-type: text/csv")
    print("Content-disposition: attachment; filename=" + filename + ".csv")
    print("")
result_file = sys.stdout
result_dict_writer = csv.DictWriter(result_file, fieldnames, delimiter="\t")
# Writes out a row where each column name has been put in the corresponding column. If
# Danish characters show up in a header, these must be encoded too.
header = dict(zip(result_dict_writer.fieldnames, result_dict_writer.fieldnames))
result_dict_writer.writerow(header)
summa_resource_cache = {}
summa_resource_cache_max = 10000  # number of items to cache, when reached cache is flushed.
previously_seen_uniqueID = set()  # only process ticket/domsID combos once
def createOutputLine(response, group_xpath, json_entry):
    """Build one CSV row (a dict keyed by `fieldnames`) for a single log entry.

    response: parsed Summa XML response (lxml ElementTree).
    group_xpath: XPath prefix selecting this entry's result group.
    json_entry: decoded JSON object for the log line being reported.
    """
    try:
        shortFormat = response.xpath(
            group_xpath + "record/field[@name='shortformat']/shortrecord")[
            0]
    except IndexError:
        # No shortformat record for this group (narrowed from a bare
        # `except:`); use an empty element so the lookups below yield "".
        shortFormat = et.Element("empty")
    # -- ready to generate output
    # noinspection PyDictCreation
    outputLine = {}
    outputLine["Type"] = "info:fedora/doms:Newspaper_Collection"
    outputLine["Adgangstype"] = json_entry["resource_type"]
    outputLine["UUID"] = json_entry["resource_id"]
    outputLine["Timestamp"] = datetime.datetime.fromtimestamp(json_entry["dateTime"]).strftime(
        "%Y-%m-%d %H:%M:%S")
    outputLine["Klient"] = "-"  # disabled to conform to logging law - was: entry["remote_ip"]
    # print(ET.tostring(shortFormat))
    avisID_xpath = group_xpath + "record/field[@name='familyId']/text()"
    outputLine["AvisID"] = (response.xpath(avisID_xpath) or [
        ""])[0]
    outputLine["Avis"] = \
        (shortFormat.xpath("rdf:RDF/rdf:Description/newspaperTitle/text()", namespaces=namespaces) or [""])[
            0]
    outputLine["Udgivelsestidspunkt"] = \
        (shortFormat.xpath("rdf:RDF/rdf:Description/dateTime/text()", namespaces=namespaces) or [""])[0]
    outputLine["Udgivelsesnummer"] = \
        (shortFormat.xpath("rdf:RDF/rdf:Description/newspaperEdition/text()", namespaces=namespaces) or [
            ""])[0]
    outputLine["schacHomeOrganization"] = ", ".join(
        e for e in json_entry["userAttributes"].get("schacHomeOrganization", {}))
    outputLine["eduPersonPrimaryAffiliation"] = ", ".join(
        e for e in json_entry["userAttributes"].get("eduPersonPrimaryAffiliation", {}))
    outputLine["eduPersonScopedAffiliation"] = ", ".join(
        e for e in json_entry["userAttributes"].get("eduPersonScopedAffiliation", {}))
    outputLine["eduPersonPrincipalName"] = ", ".join(
        e for e in json_entry["userAttributes"].get("eduPersonPrincipalName", {}))
    outputLine["eduPersonTargetedID"] = ", ".join(
        e for e in json_entry["userAttributes"].get("eduPersonTargetedID", {}))
    # BUG FIX: this line read the *global* `entry` (leaked from the main
    # loop) instead of the json_entry parameter; it only worked by accident.
    outputLine["SBIPRoleMapper"] = ", ".join(e for e in json_entry["userAttributes"].get("SBIPRoleMapper", {}))
    outputLine["MediestreamFullAccess"] = ", ".join(
        e for e in json_entry["userAttributes"].get("MediestreamFullAccess", {}))
    if not downloadPDF:
        # Does not make sense on editions
        outputLine["Sektion"] = \
            (shortFormat.xpath("rdf:RDF/rdf:Description/newspaperSection/text()",
                               namespaces=namespaces) or [""])[0]
        outputLine["Sidenummer"] = \
            (shortFormat.xpath("rdf:RDF/rdf:Description/newspaperPage/text()", namespaces=namespaces) or [
                ""])[0]
    return outputLine
# ---
# Main loop: for each log file matched by the configured glob pattern, read it
# in chunks of `chunksize` entries, look the entries up in Summa in one batch
# request per chunk, and write one CSV row per unique ticket/domsID combo.
# https://stackoverflow.com/a/13335919/53897
for statistics_file_name in sorted(glob.iglob(statistics_file_pattern)):
    # Log files in production are named:
    # thumbnails.log
    # thumbnails.log.2017-10-30.gz
    # thumbnails.log.2017-10-31
    # They are rolled over at midnight so contains the previous days log.
    if not os.path.isfile(statistics_file_name):
        continue
    # Only process filenames with a YYYY-MM-DD date if they are in range.
    # Todays log was rolled over at midnight at the first entry of today,
    # and contains yesterday's entries so we need an additional day
    filenameDateMatch = re.search(r"\d\d\d\d-\d\d-\d\d", statistics_file_name)
    if filenameDateMatch:
        filename_date = datetime.date.fromtimestamp(
            time.mktime(time.strptime(filenameDateMatch.group() + " 10:00", '%Y-%m-%d %H:%M')))
        if not start_date <= filename_date <= (end_date + datetime.timedelta(days=1)):
            continue
    # Rolled-over logs are gzip-compressed; read those through gzip.
    if statistics_file_name.endswith(".gz"):
        statistics_file = gzip.open(statistics_file_name, "rb")
    else:
        statistics_file = open(statistics_file_name, "rb")
    # Read the file in chunks of "chunksize" lines and make a single Summa request for each.
    eof_seen = False
    while not eof_seen:
        query_keys = []  # for summa batch query
        lineInformation = []  # processed chunk: (query_key, entry) pairs
        while len(query_keys) < chunksize:
            line = statistics_file.readline()
            if not line:
                eof_seen = True
                break
            # Each line is "<human timestamp>: <json>", e.g.:
            # Mon Jun 22 15:28:02 2015: {"resource_id":"...","remote_ip":"...","userAttributes":{...},
            # "dateTime":1434979682,"ticket_id":"...","resource_type":"Download"}
            lineParts = line.partition(": ")
            loggedJson = lineParts[2]
            try:
                entry = json.loads(loggedJson)
            except:
                # Malformed line: report and skip, never abort the whole run.
                print("Bad JSON skipped from ", statistics_file_name, ": ", loggedJson, file=sys.stderr)
                continue
            # -- line to be considered?
            entryDate = datetime.date.fromtimestamp(entry["dateTime"])
            if not start_date <= entryDate <= end_date:
                continue
            if requiredType != "" and not requiredType == entry["resource_type"]:
                continue
            resource_id = entry["resource_id"]
            # -- only process each ticket/domsID once (deep zoom makes _many_ requests).
            uniqueID = resource_id + " " + entry["ticket_id"] + " " + str(downloadPDF)
            if uniqueID in previously_seen_uniqueID:
                continue
            else:
                previously_seen_uniqueID.add(uniqueID)
            # PDF downloads are edition-level records; everything else is page-level.
            if downloadPDF:
                query_key = "doms_aviser_edition:uuid:" + resource_id
            else:
                query_key = "doms_aviser_page:uuid:" + resource_id
            query_keys.append(query_key)
            # NOTE(review): `tuple` shadows the builtin here; left unchanged.
            tuple = (query_key, entry)
            lineInformation.append(tuple)
        # -- Anything to process?
        if len(query_keys) == 0:
            continue
        # -- Yes!  Build one grouped Summa query covering the whole chunk.
        query = {}
        if downloadPDF:
            query["search.document.query"] = "editionUUID:(\"%s\")" % "\" OR \"".join(query_keys)
            query["search.document.maxrecords"] = "%d" % (chunksize * 2)  # all + margin
            query["search.document.startindex"] = "0"
            query["search.document.resultfields"] = "editionUUID, pageUUID, shortformat, familyId"
            query["solrparam.facet"] = "false"
            query["group"] = "true"
            query["group.field"] = "editionUUID"
            query["search.document.collectdocids"] = "false"
        else:
            query["search.document.query"] = "pageUUID:(\"%s\")" % "\" OR \"".join(query_keys)
            query["search.document.maxrecords"] = "%d" % (chunksize * 2)  # all + margin
            query["search.document.startindex"] = "0"
            query["search.document.resultfields"] = "pageUUID, shortformat, familyId"
            query["solrparam.facet"] = "false"
            query["group"] = "true"
            query["group.field"] = "pageUUID"
            query["search.document.collectdocids"] = "false"
        queryJSON = json.dumps(query)
        # FIXME: May time out. Handle that gracefully.
        summa_resource_text = mediestream_webservice.service.directJSON(queryJSON)
        # Get the ElementTree for the returned XML string.
        summa_resource = et.parse(BytesIO(bytes(bytearray(summa_resource_text, encoding='utf-8'))))
        # reprocess each line: locate its group in the batched response and emit a row
        for query_key, entry in lineInformation:
            group_xpath = "/responsecollection/response/documentresult/group[@groupValue='" + query_key + "']/"
            result = createOutputLine(summa_resource, group_xpath, entry)
            encodedOutputLine = dict((key, result[key].encode(encoding)) for key in result.keys())
            result_dict_writer.writerow(encodedOutputLine)
        # --
    # end - while not eof
    statistics_file.close()
# result_file.close() - can't on sys.stdout.
# end - for statistics_name in ...
|
{
"content_hash": "de422523d395bc63366d2d686a2c6330",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 116,
"avg_line_length": 38.796407185628745,
"alnum_prop": 0.6409168081494058,
"repo_name": "statsbiblioteket/newspaper-fastcgi-ticket-checker",
"id": "46d676086f7e5a04ee4171b62c719c7433e9242b",
"size": "13294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newspaper-usage-statistics/src/main/scripts/statistics/statistics.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6975"
},
{
"name": "HTML",
"bytes": "2868"
},
{
"name": "Java",
"bytes": "26055"
},
{
"name": "Perl",
"bytes": "22418"
},
{
"name": "Python",
"bytes": "13294"
},
{
"name": "Shell",
"bytes": "1688"
}
],
"symlink_target": ""
}
|
"""
Module responsible for translating reference sequence data into GA4GH native
objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import json
import os
import random
import pysam
import ga4gh.datamodel as datamodel
import ga4gh.protocol as protocol
import ga4gh.exceptions as exceptions
# Fallback ReferenceSet name for BAM files whose @SQ header lacks the 'AS' tag.
DEFAULT_REFERENCESET_NAME = "Default"
"""
This is the name used for any reference set referred to in a BAM
file that does not provide the 'AS' tag in the @SQ header.
"""
class AbstractReferenceSet(datamodel.DatamodelObject):
    """
    Class representing ReferenceSets. A ReferenceSet is a set of
    References which typically comprise a reference assembly, such as
    GRCh38.
    """
    compoundIdClass = datamodel.ReferenceSetCompoundId

    def __init__(self, localId):
        super(AbstractReferenceSet, self).__init__(None, localId)
        # References are stored in insertion order via _referenceIds and
        # indexed both by id and by local name.
        self._referenceIdMap = {}
        self._referenceNameMap = {}
        self._referenceIds = []
        self._assemblyId = None
        self._description = None
        self._isDerived = False
        self._ncbiTaxonId = None
        self._sourceAccessions = []
        self._sourceUri = None

    def addReference(self, reference):
        """
        Adds the specified reference to this ReferenceSet.
        """
        id_ = reference.getId()
        self._referenceIdMap[id_] = reference
        self._referenceNameMap[reference.getLocalId()] = reference
        self._referenceIds.append(id_)

    def getReferences(self):
        """
        Returns the References in this ReferenceSet, in insertion order.
        """
        return [self._referenceIdMap[id_] for id_ in self._referenceIds]

    def getNumReferences(self):
        """
        Returns the number of references in this ReferenceSet.
        """
        return len(self._referenceIds)

    def getReferenceByIndex(self, index):
        """
        Returns the reference at the specified index in this ReferenceSet.
        """
        return self._referenceIdMap[self._referenceIds[index]]

    def getReferenceByName(self, name):
        """
        Returns the reference with the specified name, or raises a
        ReferenceNameNotFoundException if it does not exist.
        """
        if name not in self._referenceNameMap:
            raise exceptions.ReferenceNameNotFoundException(name)
        return self._referenceNameMap[name]

    def getReference(self, id_):
        """
        Returns the Reference with the specified ID or raises a
        ReferenceNotFoundException if it does not exist.
        """
        if id_ not in self._referenceIdMap:
            raise exceptions.ReferenceNotFoundException(id_)
        return self._referenceIdMap[id_]

    def getMd5Checksum(self):
        """
        Returns the MD5 checksum for this reference set. This checksum is
        calculated by making a list of `Reference.md5checksum` for all
        `Reference`s in this set. We then sort this list, and take the
        MD5 hash of all the strings concatenated together.
        """
        references = sorted(
            self.getReferences(),
            key=lambda ref: ref.getMd5Checksum())
        checksums = ''.join([ref.getMd5Checksum() for ref in references])
        # BUG FIX: hashlib.md5 requires bytes; with unicode_literals in
        # effect `checksums` is a text string.  Encode explicitly (the
        # checksums are ASCII hex, so utf-8 is lossless) - works on both
        # Python 2 and Python 3.
        md5checksum = hashlib.md5(checksums.encode('utf-8')).hexdigest()
        return md5checksum

    def getAssemblyId(self):
        """
        Returns the assembly ID for this reference set.
        This is the public id of this reference set, such as `GRCh37`
        """
        return self._assemblyId

    def getDescription(self):
        """
        Returns the free text description of this reference set.
        """
        return self._description

    def getIsDerived(self):
        """
        Returns True if this ReferenceSet is derived. A ReferenceSet
        may be derived from a source if it contains additional sequences,
        or some of the sequences within it are derived.
        """
        return self._isDerived

    def getSourceAccessions(self):
        """
        Returns the list of source accession strings. These are all known
        corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
        with a version number, e.g. `NC_000001.11`.
        """
        return self._sourceAccessions

    def getSourceUri(self):
        """
        Returns the sourceURI for this ReferenceSet.
        """
        return self._sourceUri

    def getNcbiTaxonId(self):
        """
        Returns the NCBI Taxon ID for this reference set. This is the
        ID from http://www.ncbi.nlm.nih.gov/taxonomy (e.g. 9606->human)
        indicating the species which this assembly is intended to model.
        Note that contained `Reference`s may specify a different
        `ncbiTaxonId`, as assemblies may contain reference sequences
        which do not belong to the modeled species, e.g. EBV in a
        human reference genome.
        """
        return self._ncbiTaxonId

    def toProtocolElement(self):
        """
        Returns the GA4GH protocol representation of this ReferenceSet.
        """
        ret = protocol.ReferenceSet()
        ret.assemblyId = self.getAssemblyId()
        ret.description = self.getDescription()
        ret.id = self.getId()
        ret.isDerived = self.getIsDerived()
        ret.md5checksum = self.getMd5Checksum()
        ret.ncbiTaxonId = self.getNcbiTaxonId()
        ret.referenceIds = self._referenceIds
        ret.sourceAccessions = self.getSourceAccessions()
        ret.sourceURI = self.getSourceUri()
        ret.name = self.getLocalId()
        return ret
class AbstractReference(datamodel.DatamodelObject):
    """
    Class representing References. A Reference is a canonical
    assembled contig, intended to act as a reference coordinate space
    for other genomic annotations. A single Reference might represent
    the human chromosome 1, for instance.
    """
    compoundIdClass = datamodel.ReferenceCompoundId

    def __init__(self, parentContainer, localId):
        super(AbstractReference, self).__init__(parentContainer, localId)
        self._length = -1
        self._md5checksum = ""
        self._sourceUri = None
        self._sourceAccessions = []
        self._isDerived = False
        self._sourceDivergence = None
        self._ncbiTaxonId = None

    def getLength(self):
        """
        Returns the length of this reference's sequence string.
        """
        return self._length

    def getName(self):
        """
        Returns the name of this reference, e.g., '22'.
        """
        return self.getLocalId()

    def getIsDerived(self):
        """
        Returns True if this Reference is derived. A sequence X is said to be
        derived from source sequence Y, if X and Y are of the same length and
        the per-base sequence divergence at A/C/G/T bases is sufficiently
        small. Two sequences derived from the same official sequence share the
        same coordinates and annotations, and can be replaced with the official
        sequence for certain use cases.
        """
        return self._isDerived

    def getSourceDivergence(self):
        """
        Returns the source divergence for this reference. The sourceDivergence
        is the fraction of non-indel bases that do not match the
        reference this record was derived from.
        """
        return self._sourceDivergence

    def getSourceAccessions(self):
        """
        Returns the list of source accession strings. These are all known
        corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
        with a version number, e.g. `NC_000001.11`.
        """
        return self._sourceAccessions

    def getSourceUri(self):
        """
        The URI from which the sequence was obtained. Specifies a FASTA format
        file/string with one name, sequence pair.
        """
        return self._sourceUri

    def getNcbiTaxonId(self):
        """
        Returns the NCBI Taxon ID for this reference. This is the
        ID from http://www.ncbi.nlm.nih.gov/taxonomy (e.g. 9606->human)
        indicating the species which this assembly is intended to model.
        Note that contained `Reference`s may specify a different
        `ncbiTaxonId`, as assemblies may contain reference sequences
        which do not belong to the modeled species, e.g. EBV in a
        human reference genome.
        """
        return self._ncbiTaxonId

    def getMd5Checksum(self):
        """
        Returns the MD5 checksum uniquely representing this `Reference` as a
        lower-case hexadecimal string, calculated as the MD5 of the upper-case
        sequence excluding all whitespace characters.
        """
        return self._md5checksum

    def toProtocolElement(self):
        """
        Returns the GA4GH protocol representation of this Reference.
        """
        reference = protocol.Reference()
        reference.id = self.getId()
        reference.isDerived = self.getIsDerived()
        reference.length = self.getLength()
        reference.md5checksum = self.getMd5Checksum()
        reference.name = self.getName()
        reference.ncbiTaxonId = self.getNcbiTaxonId()
        reference.sourceAccessions = self.getSourceAccessions()
        reference.sourceDivergence = self.getSourceDivergence()
        reference.sourceURI = self.getSourceUri()
        return reference

    def checkQueryRange(self, start, end):
        """
        Checks to ensure that the query range is valid within this reference.
        If not, raise ReferenceRangeErrorException.
        """
        condition = (
            (start < 0 or end > self.getLength()) or
            start > end)
        if condition:
            raise exceptions.ReferenceRangeErrorException(
                self.getId(), start, end)

    def getBases(self, start, end):
        """
        Returns the string representing the bases of this reference from
        start (inclusive) to end (exclusive).  Must be overridden by
        subclasses.
        """
        # BUG FIX: the original did `raise NotImplemented()`; NotImplemented
        # is a sentinel value, not an exception, and calling it is itself a
        # TypeError.  NotImplementedError is the correct exception.
        raise NotImplementedError()
##################################################################
#
# Simulated references
#
##################################################################
class SimulatedReferenceSet(AbstractReferenceSet):
    """
    A simulated referenceSet whose metadata and contained references are
    generated deterministically from randomSeed.
    """
    def __init__(self, localId, randomSeed=0, numReferences=1):
        super(SimulatedReferenceSet, self).__init__(localId)
        self._randomSeed = randomSeed
        self._randomGenerator = random.Random()
        self._randomGenerator.seed(self._randomSeed)
        self._description = "Simulated reference set"
        # BUG FIX: the original drew these values from the module-level
        # `random` instead of the seeded per-instance generator, so equal
        # seeds did not produce reproducible reference sets.
        self._assemblyId = str(self._randomGenerator.randint(0, 2**32))
        self._isDerived = bool(self._randomGenerator.randint(0, 1))
        self._ncbiTaxonId = self._randomGenerator.randint(0, 2**16)
        self._sourceAccessions = []
        for i in range(self._randomGenerator.randint(1, 3)):
            self._sourceAccessions.append("sim_accession_{}".format(
                self._randomGenerator.randint(1, 2**32)))
        self._sourceUri = "http://example.com/reference.fa"
        for i in range(numReferences):
            referenceSeed = self._randomGenerator.getrandbits(32)
            referenceLocalId = "srs{}".format(i)
            reference = SimulatedReference(
                self, referenceLocalId, referenceSeed)
            self.addReference(reference)
class SimulatedReference(AbstractReference):
    """
    A simulated reference. Stores a random sequence of a given length, and
    generates remaining attributes randomly.
    """
    def __init__(self, parentContainer, localId, randomSeed=0, length=200):
        super(SimulatedReference, self).__init__(parentContainer, localId)
        rng = random.Random()
        rng.seed(randomSeed)
        self._length = length
        bases = [rng.choice('ACGT') for _ in range(self._length)]
        self._bases = ''.join(bases)
        # BUG FIX: hashlib.md5 requires bytes; the sequence is ASCII so the
        # utf-8 encoding is lossless (works on Python 2 and 3).
        self._md5checksum = hashlib.md5(self._bases.encode('utf-8')).hexdigest()
        self._isDerived = bool(rng.randint(0, 1))
        self._sourceDivergence = 0
        if self._isDerived:
            self._sourceDivergence = rng.uniform(0, 0.1)
        # BUG FIX: use the seeded `rng` (not the module-level `random`) so
        # equal seeds yield reproducible references.
        self._ncbiTaxonId = rng.randint(0, 2**16)
        self._sourceAccessions = []
        for i in range(rng.randint(1, 3)):
            self._sourceAccessions.append("sim_accession_{}".format(
                rng.randint(1, 2**32)))
        self._sourceUri = "http://example.com/reference.fa"

    def getBases(self, start, end):
        """Return the bases from start (inclusive) to end (exclusive)."""
        self.checkQueryRange(start, end)
        return self._bases[start:end]
##################################################################
#
# References based on htslib's FASTA file handling.
#
##################################################################
class HtslibReferenceSet(datamodel.PysamDatamodelMixin, AbstractReferenceSet):
    """
    A referenceSet backed by FASTA files and JSON metadata on the local
    file system.
    """
    def __init__(self, localId, dataDir, backend):
        super(HtslibReferenceSet, self).__init__(localId)
        self._dataDir = dataDir
        self._setMetadata()
        self._scanDataFiles(dataDir, ["*.fa.gz"])

    def _setMetadata(self):
        """Load the reference-set level metadata from "<dataDir>.json"."""
        metadataFileName = '{}.json'.format(self._dataDir)
        with open(metadataFileName) as metadataFile:
            metadata = json.load(metadataFile)
        try:
            for key in ('assemblyId', 'description', 'isDerived',
                        'ncbiTaxonId', 'sourceAccessions', 'sourceUri'):
                setattr(self, '_' + key, metadata[key])
        except KeyError as err:
            raise exceptions.MissingReferenceSetMetadata(
                metadataFileName, str(err))

    def _addDataFile(self, path):
        """Register one FASTA file; its metadata sits in a sibling
        "<name>.json" file."""
        directory, fileName = os.path.split(path)
        localId = fileName.split(".")[0]
        metadataFileName = os.path.join(directory, "{}.json".format(localId))
        with open(metadataFileName) as metadataFile:
            metadata = json.load(metadataFile)
        self.addReference(HtslibReference(self, localId, path, metadata))
class HtslibReference(datamodel.PysamDatamodelMixin, AbstractReference):
    """
    A reference backed by a single-sequence FASTA file on the file system.
    """
    def __init__(self, parentContainer, localId, dataFile, metadata):
        super(HtslibReference, self).__init__(parentContainer, localId)
        self._fastaFilePath = dataFile
        fastaFile = self.getFileHandle(dataFile)
        referenceCount = len(fastaFile.references)
        # The FASTA must contain exactly one sequence, named like localId.
        if referenceCount != 1:
            raise exceptions.NotExactlyOneReferenceException(
                self._fastaFilePath, referenceCount)
        if fastaFile.references[0] != localId:
            raise exceptions.InconsistentReferenceNameException(
                self._fastaFilePath)
        self._length = fastaFile.lengths[0]
        try:
            for key in ("md5checksum", "sourceUri", "ncbiTaxonId",
                        "isDerived", "sourceDivergence", "sourceAccessions"):
                setattr(self, "_" + key, metadata[key])
        except KeyError as err:
            raise exceptions.MissingReferenceMetadata(dataFile, str(err))

    def getFastaFilePath(self):
        """
        Returns the fasta file that this reference is derived from.
        """
        return self._fastaFilePath

    def openFile(self, dataFile):
        # Invoked by the PysamDatamodelMixin file-handle machinery.
        return pysam.FastaFile(dataFile)

    def getBases(self, start, end):
        """Return the bases from start (inclusive) to end (exclusive)."""
        self.checkQueryRange(start, end)
        handle = self.getFileHandle(self._fastaFilePath)
        # TODO we should have some error checking here...
        return handle.fetch(self.getLocalId(), start, end)
|
{
"content_hash": "34aebc34c780ec2d06819a64ffee5cc7",
"timestamp": "",
"source": "github",
"line_count": 435,
"max_line_length": 79,
"avg_line_length": 36.48965517241379,
"alnum_prop": 0.6259056259056259,
"repo_name": "srblum/server",
"id": "6cc7f6c95be30aafba21bf62ae93cd57fe8ee78e",
"size": "15873",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "ga4gh/datamodel/references.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4289"
},
{
"name": "Python",
"bytes": "731439"
},
{
"name": "Shell",
"bytes": "1085"
}
],
"symlink_target": ""
}
|
__author__ = 'Seqian Wang'
import mysql.connector
import Config.DBConfig as dbConfig
"""
Manage the Database interaction (Initiate connection, read, write, update)
"""
class DbUtils:
    """
    Manage the database interaction (initiate connection, read, write,
    update) through a single mysql.connector connection with autocommit
    enabled.

    NOTE: connection parameter defaults come from Config.DBConfig and are
    evaluated once, at class-definition time.
    """
    # (Removed a commented-out `execute_query` variant that referenced the
    # nonexistent `MySQLdb.msqlDB` API - dead code.)

    def __init__(self, location=dbConfig.DBParams['host'], username=dbConfig.DBParams['user'], password=dbConfig.DBParams['userPass'], database=dbConfig.DBParams['dbName']):
        """Open the connection and create the shared cursor."""
        self.location = location
        self.username = username
        self.password = password
        self.database = database
        self.conn = mysql.connector.connect(host=self.location,
                                            database=self.database,
                                            user=self.username,
                                            password=self.password)
        self.conn.autocommit = True
        self.cursor = self.conn.cursor()

    def executeNoResult(self, sqlStr):
        """Execute a statement that returns no rows; commit on success,
        roll back and re-raise on failure.

        NOTE(review): sqlStr is executed verbatim - callers must not
        interpolate untrusted input into it.
        """
        try:
            self.cursor.execute(sqlStr)
            self.commit()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not intercepted; the original error is still re-raised.
            self.rollback()
            raise

    def executeAllResults(self, sqlStr):
        """Execute a query and return all result rows."""
        try:
            self.cursor.execute(sqlStr)
            return self.cursor.fetchall()
        except Exception:
            self.rollback()
            raise

    def executeSomeResults(self, sqlStr, numOfResults):
        """Execute a query and return at most numOfResults rows."""
        try:
            self.cursor.execute(sqlStr)
            return self.cursor.fetchmany(numOfResults)
        except Exception:
            self.rollback()
            raise

    def commit(self):
        """Commit the current transaction."""
        self.conn.commit()

    def rollback(self):
        """Roll back the current transaction."""
        self.conn.rollback()

    def close(self):
        """Close the underlying connection."""
        self.conn.close()
|
{
"content_hash": "411f6bb913130b14ad7143200ee1c57f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 173,
"avg_line_length": 29.26388888888889,
"alnum_prop": 0.5666824869482677,
"repo_name": "sulantha2006/Processing_Pipeline",
"id": "383584eb076c790db800562d2308d5da3dd56f43",
"size": "2107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Utils/DbUtils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "M",
"bytes": "713"
},
{
"name": "MATLAB",
"bytes": "13833"
},
{
"name": "Perl",
"bytes": "141400"
},
{
"name": "Python",
"bytes": "365424"
},
{
"name": "Shell",
"bytes": "201923"
}
],
"symlink_target": ""
}
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
def find_largest_input_or_output(all_args_list) -> dict:
    """Return the entry of *all_args_list* with the largest 'Size(MB)'.

    Falls back to a {'Size(MB)': 0} sentinel when the list is empty or no
    entry is strictly larger than zero; on ties the earliest wins.
    """
    sentinel = {'Size(MB)': 0}
    return max([sentinel] + list(all_args_list),
               key=lambda entry: entry.get('Size(MB)'))
def get_largest_inputs_and_outputs(inputs_and_outputs, largest_inputs_and_outputs, incident_id) -> None:
    """Append one incident's largest task input and largest task output
    (if any) to ``largest_inputs_and_outputs`` (mutated in place)."""
    urls = demisto.demistoUrls()
    server_url = urls.get('server', '')
    incident_url = os.path.join(server_url, '#', 'incident', incident_id)
    collected_inputs: list = []
    collected_outputs: list = []
    # In case no inputs and outputs are found, getInvPlaybookMetaData returns
    # a string - ignore the result and leave the accumulator untouched.
    if not inputs_and_outputs or isinstance(inputs_and_outputs, str):
        return
    for task in inputs_and_outputs:
        task_id = task.get('id')
        task_url = os.path.join(server_url, '#', 'WorkPlan', incident_id, task_id)

        def build_row(item, direction):
            # NOTE(review): 'size' appears to arrive in KB given the /1024
            # and the 'Size(MB)' label - confirm against getInvPlaybookMetaData.
            return {
                'IncidentID': f"[{incident_id}]({incident_url})",
                'TaskID': f"[{task_id}]({task_url})",
                'TaskName': task.get('name'),
                'Name': item.get('name'),
                'Size(MB)': float(item.get('size', 0)) / 1024,
                'InputOrOutput': direction,
            }

        if 'outputs' in task:
            collected_outputs.extend(
                build_row(output, 'Output') for output in task.get('outputs'))
        else:
            collected_inputs.extend(
                build_row(arg, 'Input') for arg in task.get('args'))
    if collected_inputs:
        largest_inputs_and_outputs.append(find_largest_input_or_output(collected_inputs))
    if collected_outputs:
        largest_inputs_and_outputs.append(find_largest_input_or_output(collected_outputs))
def get_extra_data_from_investigations(investigations: list) -> list:
    """For each investigation, fetch its playbook task metadata and collect
    the largest input/output rows. Raises DemistoException on command error."""
    results: List = []
    for investigation in investigations:
        incident_id = investigation.get('IncidentID')
        response = demisto.executeCommand(
            'getInvPlaybookMetaData', args={"incidentId": incident_id})
        if is_error(response):
            raise DemistoException(f'Failed to run getInvPlaybookMetaData:\n{get_error(response)}')
        tasks = response[0].get('Contents', {}).get('tasks')
        get_largest_inputs_and_outputs(tasks, results, incident_id)
    return results
def main():
    """Entry point: collect the largest playbook inputs/outputs per incident
    and return them as a markdown table or raw data."""
    try:
        args: Dict = demisto.args()
        if is_demisto_version_ge("6.2.0"):
            deprecate_msg = "Warning: This script has been deprecated. Please checkout the System Diagnostic page " \
                            "for an alternative."
            if argToBoolean(args.get('ignore_deprecated')):
                demisto.info(deprecate_msg)
            else:
                raise DemistoException(deprecate_msg)
        is_table_result = argToBoolean(args.get('table_result', False))
        raw_output = demisto.executeCommand(
            'GetLargestInvestigations',
            args={
                'from': args.get('from'),
                'to': args.get('to'),
                'table_result': 'true',
                'ignore_deprecated': 'true',
            })
        if is_error(raw_output):
            raise DemistoException(f'Failed to run GetLargestInvestigations:\n{get_error(raw_output)}')
        investigations = raw_output[0].get('Contents', {}).get('data')
        data = get_extra_data_from_investigations(investigations)
        if is_table_result:
            return_results(data)
        else:
            return_results(tableToMarkdown('Largest Inputs And Outputs In Incidents', data))
    except Exception as exc:
        return_error(f'Failed to execute GetLargestInputsAndOuputsInIncidents.\nError: {exc}', error=exc)


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
{
"content_hash": "0cf31a1fb827d77e7b34bb7abf3ede23",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 117,
"avg_line_length": 44,
"alnum_prop": 0.5223024638912489,
"repo_name": "VirusTotal/content",
"id": "6aec7a1e1893e96be4909d0fdcc9f84cc76e362c",
"size": "4708",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/CommonWidgets/Scripts/GetLargestInputsAndOuputsInIncidents/GetLargestInputsAndOuputsInIncidents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the corpus app: Dataset, Script, Line and Token.

    NOTE(review): this is a Python-2-era migration — the ``b''`` defaults
    below would put *bytes* defaults on text fields under Python 3; confirm
    before running on a py3 stack. Do not edit applied migrations.
    """
    # Initial migration: no dependencies on other apps.
    dependencies = [
    ]
    operations = [
        # A Dataset groups scripts under a human-readable name/description.
        migrations.CreateModel(
            name='Dataset',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=150)),
                ('description', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A Line is one numbered line of a Script; the FK to Script is added
        # later (AddField below) because Script is created after Line.
        migrations.CreateModel(
            name='Line',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('number', models.IntegerField(default=0)),
                ('text', models.TextField(default=b'', null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A Script belongs to a Dataset.
        migrations.CreateModel(
            name='Script',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=256)),
                ('last_modified', models.DateTimeField(default=django.utils.timezone.now)),
                ('dataset', models.ForeignKey(related_name='scripts', to='corpus.Dataset')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A Token is a span (st_col..ed_col) within a Line of a Script.
        migrations.CreateModel(
            name='Token',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('st_col', models.IntegerField(default=0)),
                ('ed_col', models.IntegerField(default=0)),
                ('type', models.CharField(default=b'', max_length=32, null=True, blank=True)),
                ('text', models.TextField(default=b'', null=True, blank=True)),
                ('line', models.ForeignKey(related_name='tokens', to='corpus.Line')),
                ('script', models.ForeignKey(related_name='tokens', to='corpus.Script')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Composite index to speed up token lookups by (script, type).
        migrations.AlterIndexTogether(
            name='token',
            index_together=set([('script', 'type')]),
        ),
        # Deferred FK from Line to Script (Script did not exist yet when
        # Line was created above).
        migrations.AddField(
            model_name='line',
            name='script',
            field=models.ForeignKey(related_name='lines', to='corpus.Script'),
            preserve_default=True,
        ),
        # Composite index to speed up line lookups by (script, number).
        migrations.AlterIndexTogether(
            name='line',
            index_together=set([('script', 'number')]),
        ),
    ]
|
{
"content_hash": "5ad70ed4d26ddb789443c04cc98193d9",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 114,
"avg_line_length": 37.07692307692308,
"alnum_prop": 0.5179806362378977,
"repo_name": "nanchenchen/script-analysis",
"id": "476f6528682fa8d682629597cca97d120e873dda",
"size": "2916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyanalysis/apps/corpus/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53354"
},
{
"name": "HTML",
"bytes": "92901"
},
{
"name": "JavaScript",
"bytes": "286195"
},
{
"name": "Python",
"bytes": "202204"
},
{
"name": "Ruby",
"bytes": "8396"
},
{
"name": "Shell",
"bytes": "13182"
}
],
"symlink_target": ""
}
|
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
pass # It's ok to not import because this is only necessary to upload results to BQ.
# Make ../gcp/utils importable (provides big_query_utils, used lazily below).
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
# Repo root, derived from this script's location; the whole script assumes
# it runs from the repo root, hence the chdir side effect at import time.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
# Environment forced onto every language wrapper script.
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}
# Poll engines exercised per platform; platforms not listed fall back to
# a single 'all' strategy (see CLanguage.test_specs).
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
    'mac': ['poll'],
}
# One row of BigQuery flakiness/cpu data: (test name, is flaky, cpu cost).
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
def get_bqtest_data(limit=None):
    """Fetch last week's flakiness and cpu usage per test from BigQuery.

    Args:
      limit: optional row cap appended to the query (None = no cap).
    Returns:
      A list of BigQueryTestData(name, flaky, cpu) tuples.
    """
    # Imported lazily so the module loads without the gcp utils on path.
    import big_query_utils
    bq = big_query_utils.create_big_query()
    query = """
SELECT
  filtered_test_name,
  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
  MAX(cpu_measured) + 0.01 as cpu
  FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
    result, cpu_measured
  FROM
  [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
    timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
    AND platform = '""" + platform_string() + """'
    AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
  filtered_test_name"""
    if limit:
        query += " limit {}".format(limit)
    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
    page = bq.jobs().getQueryResults(
        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
    # BigQuery rows come back as {'f': [{'v': ...}, ...]}; columns are
    # (filtered_test_name, flaky, cpu) in query order.
    test_data = [
        BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
                         float(row['f'][2]['v'])) for row in page['rows']
    ]
    return test_data
def platform_string():
    """Forward to jobset's platform detection (e.g. 'linux'/'mac'/'windows')."""
    return jobset.platform_string()
# Default per-test timeout (5 minutes); scaled by Config.timeout_multiplier.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
    """Run *cmd* through the shell; log and re-raise on a non-zero exit.

    Args:
      cmd: shell command string.
      env: optional environment dict for the child process.
      cwd: optional working directory for the child process.
    Raises:
      subprocess.CalledProcessError: when the command exits non-zero.
    """
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as err:
        # Surface the failing command and its captured output before
        # propagating the error to the caller.
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            err.cmd, err.returncode, err.output)
        raise
def max_parallel_tests_for_current_platform():
    """Cap on concurrently running test jobs for this platform."""
    # Too much test parallelization has only been seen to be a problem
    # so far on windows.
    return 64 if jobset.platform_string() == 'windows' else 1024
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """One build configuration (dbg/opt/asan/...) and its JobSpec factory.

    Attributes:
      build_config: configuration name, also exported as $CONFIG.
      environ: base environment applied to every job.
      tool_prefix: argv prefix prepended to every test command line.
      timeout_multiplier: scales every job's timeout.
      iomgr_platform: iomgr implementation the tests run against.
    """
    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        if environ is None:
            environ = {}
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        # Fixed: 'tool_prefix=[]' was a shared mutable default argument;
        # use a None sentinel instead (same observable behavior).
        self.tool_prefix = tool_prefix if tool_prefix is not None else []
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform
    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config

        Args:
          cmdline: a list of strings specifying the command line the test
                   would like to run
        """
        # Fixed: 'environ={}' was a mutable default argument; None keeps
        # identical semantics without sharing a dict across calls.
        if environ is None:
            environ = {}
        actual_environ = self.environ.copy()
        for k, v in environ.items():
            actual_environ[k] = v
        # flaky_tests / shortname_to_cpu / args are module-level globals
        # populated elsewhere in this script (BigQuery data + CLI parsing).
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            # timeout_seconds falsy (None/0) means "no timeout".
            timeout_seconds=(self.timeout_multiplier * timeout_seconds
                             if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
def get_c_tests(travis, test_lang):
    """Load C/C++ test targets from the generated tests.json manifest.

    Args:
      travis: when True, filter by 'ci_platforms' and drop flaky targets.
      test_lang: target language to keep ('c' or 'c++').
    Returns:
      A list of target dicts runnable on the current platform.
    """
    # Fixed: removed the unused local 'out = []' the original declared.
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]
def _check_compiler(compiler, supported_compilers):
    """Raise unless *compiler* is one of *supported_compilers*."""
    if compiler in supported_compilers:
        return
    raise Exception(
        'Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
    """Raise unless *arch* is one of *supported_archs*."""
    if arch in supported_archs:
        return
    raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
    """Returns True if running as a --use_docker child process.

    The docker parent signals the child by exporting RUN_TESTS_COMMAND.
    """
    # Fixed: 'True if x else False' collapsed to bool(x); also fixed the
    # duplicated word in the docstring.
    return bool(os.getenv('RUN_TESTS_COMMAND'))
# Per-platform knobs used by the _python/_pypy config generators below.
# NOTE(review): the namedtuple's typename ('_ConfigVars') differs from the
# variable name — harmless, but shows up in reprs.
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
    'shell',
    'builder',
    'builder_prefix_arguments',
    'venv_relative_python',
    'toolchain',
    'runner',
    'test_name',
    'iomgr_platform',
])
def _python_config_generator(name, major, minor, bits, config_vars):
    """Build a PythonConfig (build cmd, run cmd) for one CPython version."""
    # The iomgr platform is folded into the config/venv directory name.
    name += '_' + config_vars.iomgr_platform
    return PythonConfig(
        name, config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments + [
            _python_pattern_function(major=major, minor=minor, bits=bits)
        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
        # Run command invokes the venv's interpreter on the chosen test target.
        config_vars.shell + config_vars.runner + [
            os.path.join(name, config_vars.venv_relative_python[0]),
            config_vars.test_name
        ])
def _pypy_config_generator(name, major, config_vars):
    """Build a PythonConfig for a PyPy interpreter (no iomgr name suffix)."""
    return PythonConfig(
        name,
        config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments + [
            _pypy_pattern_function(major=major)
        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
        # NOTE(review): unlike the CPython generator, no test_name is passed
        # to the runner here — confirm that is intentional.
        config_vars.shell + config_vars.runner +
        [os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
    """Path/name of the CPython interpreter for a given version and bitness."""
    # Bit-ness is handled by the test machine's environment
    if os.name != "nt":
        return 'python{major}.{minor}'.format(major=major, minor=minor)
    if bits == "64":
        return '/c/Python{major}{minor}/python.exe'.format(
            major=major, minor=minor, bits=bits)
    return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
        major=major, minor=minor, bits=bits)
def _pypy_pattern_function(major):
    """Interpreter binary name for a PyPy major version ('2' or '3')."""
    interpreters = {'2': 'pypy', '3': 'pypy3'}
    if major not in interpreters:
        raise ValueError("Unknown PyPy major version")
    return interpreters[major]
class CLanguage(object):
    """Build/test driver for the core C and C++ targets.

    Expands the generated tests.json manifest into individual JobSpecs,
    one per test (and per polling strategy on platforms that have several).
    """
    def __init__(self, make_target, test_lang):
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang
    def configure(self, config, args):
        """Record config/args and derive cmake/docker/make settings."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(
                self.args.compiler,
                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._use_cmake = True
            self._make_options = []
        elif self.args.compiler == 'cmake':
            _check_arch(self.args.arch, ['default'])
            self._use_cmake = True
            self._docker_distro = 'jessie'
            self._make_options = []
        else:
            self._use_cmake = False
            self._docker_distro, self._make_options = self._compiler_options(
                self.args.use_docker, self.args.compiler)
        if args.iomgr_platform == "uv":
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            try:
                # pkg-config may be unavailable; cflags just keep the defines
                # and ldflags fall back to '-luv ' below.
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]
    def test_specs(self):
        """Build the sorted list of JobSpecs for every runnable C/C++ test."""
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                    _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                    polling_strategy,
                    'GRPC_VERBOSITY':
                    'DEBUG'
                }
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue
                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various sanitizers.
                        # scaling value is based on historical data analysis
                        timeout_scaling *= 3
                    elif polling_strategy == 'poll-cv':
                        # scale test timeout if running with poll-cv
                        # sanitizer and poll-cv scaling is not cumulative to ensure
                        # reasonable timeout values.
                        # TODO(jtattermusch): based on historical data and 5min default
                        # test timeout poll-cv scaling is currently not useful.
                        # Leaving here so it can be reintroduced if the default test timeout
                        # is decreased in the future.
                        timeout_scaling *= 1
                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (
                        _MSBUILD_CONFIG[self.config.build_config],
                        target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    # NOTE(review): list_test_command / filter_test_command
                    # are assigned but never used below.
                    list_test_command = None
                    filter_test_command = None
                    # these are the flag defined by gtest and benchmark framework to list
                    # and filter test runs. We use them to split each individual test
                    # into its own JobSpec, and thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.split('\n'):
                            test = line.strip()
                            if not test: continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                      ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' % (' '.join(cmdline),
                                                         shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary for each test, we then
                        # add a job to run, filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.split('\n'):
                            # gtest list format: suite lines flush-left, test
                            # lines indented; '#' starts a trailing comment.
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            if line[0] != ' ':
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                          ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' % (' '.join(cmdline),
                                                             shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        # Plain binary: one JobSpec for the whole executable.
                        cmdline = [binary] + target['args']
                        shortname = target.get('shortname', ' '.join(
                            pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
                                * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)
    def make_targets(self):
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]
    def make_options(self):
        return self._make_options
    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
                self._cmake_generator_option, self._cmake_arch_option
            ]]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
        else:
            return []
    def build_steps(self):
        return []
    def post_tests_steps(self):
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
    def makefile_name(self):
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'
    def _clang_make_options(self, version_suffix=''):
        # make variable overrides selecting a (possibly versioned) clang.
        return [
            'CC=clang%s' % version_suffix,
            'CXX=clang++%s' % version_suffix,
            'LD=clang%s' % version_suffix,
            'LDXX=clang++%s' % version_suffix
        ]
    def _gcc_make_options(self, version_suffix):
        # make variable overrides selecting a versioned gcc.
        return [
            'CC=gcc%s' % version_suffix,
            'CXX=g++%s' % version_suffix,
            'LD=gcc%s' % version_suffix,
            'LDXX=g++%s' % version_suffix
        ]
    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and make options to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            _check_compiler(compiler, ['default'])
        if compiler == 'gcc4.9' or compiler == 'default':
            return ('jessie', [])
        elif compiler == 'gcc4.8':
            return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.2':
            return ('ubuntu1710', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang3.4':
            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
            return ('ubuntu1404', self._clang_make_options())
        elif compiler == 'clang3.5':
            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
        elif compiler == 'clang3.6':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.7'))
        else:
            raise Exception('Compiler %s not supported.' % compiler)
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))
    def __str__(self):
        return self.make_target
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
    """Delegates Node testing to the grpc/grpc-node repo's helper scripts."""
    def __init__(self):
        self.platform = platform_string()
    def configure(self, config, args):
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        if self.args.compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        else:
            if self.args.compiler.startswith('electron'):
                # e.g. 'electron1.6' -> runtime 'electron', version '1.6'
                self.runtime = 'electron'
                self.node_version = self.args.compiler[8:]
            else:
                self.runtime = 'node'
                # Take off the word "node"
                self.node_version = self.args.compiler[4:]
    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        if self.platform == 'windows':
            return [
                self.config.job_spec(
                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
            ]
        else:
            return [
                # Second positional arg is timeout_seconds=None, i.e. no timeout.
                self.config.job_spec(
                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                    None,
                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        return []
    def make_options(self):
        return []
    def build_steps(self):
        return []
    def post_tests_steps(self):
        return []
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)
    def __str__(self):
        return 'grpc-node'
class PhpLanguage(object):
    """Build/test driver for the PHP5 extension and its test scripts."""
    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
    def test_specs(self):
        return [
            self.config.job_spec(
                ['src/php/bin/run_tests.sh'],
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        # The PHP extension links against the static/shared C core.
        return ['static_c', 'shared_c']
    def make_options(self):
        return self._make_options
    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]
    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
            self.args.arch)
    def __str__(self):
        return 'php'
class Php7Language(object):
    """PHP7 variant of PhpLanguage.

    NOTE(review): identical to PhpLanguage except for dockerfile_dir() and
    __str__() — a shared base class would remove the duplication.
    """
    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
    def test_specs(self):
        return [
            self.config.job_spec(
                ['src/php/bin/run_tests.sh'],
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        return ['static_c', 'shared_c']
    def make_options(self):
        return self._make_options
    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]
    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
            self.args.arch)
    def __str__(self):
        return 'php7'
class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """One Python runtime's commands: (config name, build cmd, run cmd)."""
class PythonLanguage(object):
    """Build/test driver for grpcio across multiple Python interpreters."""
    def configure(self, config, args):
        self.config = config
        self.args = args
        # One PythonConfig per interpreter selected by --compiler.
        self.pythons = self._get_pythons(self.args)
    def test_specs(self):
        """One JobSpec per (test suite, interpreter) pair."""
        # load list of known test suites
        with open(
                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        return [
            self.config.job_spec(
                config.run,
                timeout_seconds=5 * 60,
                # The suite filter is passed to the runner via the env.
                environ=dict(
                    list(environment.items()) + [(
                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
                shortname='%s.test.%s' % (config.name, suite_name),
            ) for suite_name in tests_json for config in self.pythons
        ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        return []
    def make_options(self):
        return []
    def build_steps(self):
        return [config.build for config in self.pythons]
    def post_tests_steps(self):
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/python_%s_%s' % (
            self.python_manager_name(), _docker_arch_suffix(self.args.arch))
    def python_manager_name(self):
        """Which docker image family provides the requested interpreter."""
        if self.args.compiler in ['python3.5', 'python3.6']:
            return 'pyenv'
        elif self.args.compiler == 'python_alpine':
            return 'alpine'
        else:
            return 'jessie'
    def _get_pythons(self, args):
        """Map --arch/--compiler/--iomgr_platform to PythonConfig tuples."""
        if args.arch == 'x86':
            bits = '32'
        else:
            bits = '64'
        if os.name == 'nt':
            # Windows builds go through msys2's bash.
            shell = ['bash']
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
            ]
            builder_prefix_arguments = ['MINGW{}'.format(bits)]
            venv_relative_python = ['Scripts/python.exe']
            toolchain = ['mingw32']
        else:
            shell = []
            builder = [
                os.path.abspath(
                    'tools/run_tests/helper_scripts/build_python.sh')
            ]
            builder_prefix_arguments = []
            venv_relative_python = ['bin/python']
            toolchain = ['unix']
        test_command = 'test_lite'
        if args.iomgr_platform == 'gevent':
            test_command = 'test_gevent'
        runner = [
            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
        ]
        config_vars = _PythonConfigVars(
            shell, builder, builder_prefix_arguments, venv_relative_python,
            toolchain, runner, test_command, args.iomgr_platform)
        python27_config = _python_config_generator(
            name='py27',
            major='2',
            minor='7',
            bits=bits,
            config_vars=config_vars)
        python34_config = _python_config_generator(
            name='py34',
            major='3',
            minor='4',
            bits=bits,
            config_vars=config_vars)
        python35_config = _python_config_generator(
            name='py35',
            major='3',
            minor='5',
            bits=bits,
            config_vars=config_vars)
        python36_config = _python_config_generator(
            name='py36',
            major='3',
            minor='6',
            bits=bits,
            config_vars=config_vars)
        pypy27_config = _pypy_config_generator(
            name='pypy', major='2', config_vars=config_vars)
        pypy32_config = _pypy_config_generator(
            name='pypy3', major='3', config_vars=config_vars)
        if args.compiler == 'default':
            if os.name == 'nt':
                return (python35_config,)
            else:
                return (
                    python27_config,
                    python34_config,
                )
        elif args.compiler == 'python2.7':
            return (python27_config,)
        elif args.compiler == 'python3.4':
            return (python34_config,)
        elif args.compiler == 'python3.5':
            return (python35_config,)
        elif args.compiler == 'python3.6':
            return (python36_config,)
        elif args.compiler == 'pypy':
            return (pypy27_config,)
        elif args.compiler == 'pypy3':
            return (pypy32_config,)
        elif args.compiler == 'python_alpine':
            return (python27_config,)
        elif args.compiler == 'all_the_cpythons':
            return (
                python27_config,
                python34_config,
                python35_config,
                python36_config,
            )
        else:
            raise Exception('Compiler %s not supported.' % args.compiler)
    def __str__(self):
        return 'python'
class RubyLanguage(object):
    """Build/test driver for the Ruby gem's unit and end-to-end tests."""
    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
    def test_specs(self):
        tests = [
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby.sh'],
                timeout_seconds=10 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
        tests.append(
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
                timeout_seconds=10 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return tests
    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
    def make_targets(self):
        return []
    def make_options(self):
        return []
    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]
    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
            self.args.arch)
    def __str__(self):
        return 'ruby'
class CSharpLanguage(object):
    """Build/test driver for C# on mono and coreclr."""
    def __init__(self):
        self.platform = platform_string()
    def configure(self, config, args):
        self.config = config
        self.args = args
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, ['coreclr', 'default'])
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
            self._make_options = []
        else:
            _check_compiler(self.args.compiler, ['default', 'coreclr'])
            self._docker_distro = 'jessie'
            if self.platform == 'mac':
                # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
                self._make_options = ['EMBED_OPENSSL=true']
                if self.args.compiler != 'coreclr':
                    # On Mac, official distribution of mono is 32bit.
                    self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
            else:
                self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
    def test_specs(self):
        """One JobSpec per test (or per assembly when collecting coverage)."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)
        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']
        assembly_subdir = 'bin/%s' % msbuild_config
        assembly_extension = '.exe'
        if self.args.compiler == 'coreclr':
            assembly_subdir += '/netcoreapp1.0'
            runtime_cmd = ['dotnet', 'exec']
            assembly_extension = '.dll'
        else:
            assembly_subdir += '/net45'
            if self.platform == 'windows':
                runtime_cmd = []
            else:
                runtime_cmd = ['mono']
        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
                                                       assembly_subdir,
                                                       assembly,
                                                       assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]
                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(
                    self.config.job_spec(
                        cmdline,
                        shortname='csharp.coverage.%s' % assembly,
                        cpu_cost=run_exclusive,
                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs
    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        else:
            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
    def make_targets(self):
        return ['grpc_csharp_ext']
    def make_options(self):
        return self._make_options
    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/build_csharp.sh']]
    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        else:
            return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))
    def __str__(self):
        return 'csharp'
class ObjCLanguage(object):
    """Build/test driver for Objective-C: unit tests plus example builds."""
    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
    def test_specs(self):
        # cpu_cost=1e6 makes each Xcode job run exclusively (see jobset).
        return [
            self.config.job_spec(
                ['src/objective-c/tests/run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-plugin-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-helloworld',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'HelloWorld',
                    'EXAMPLE_PATH': 'examples/objective-c/helloworld'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-routeguide',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'RouteGuideClient',
                    'EXAMPLE_PATH': 'examples/objective-c/route_guide'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-authsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'AuthSample',
                    'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
                }),
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-sample-frameworks',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'YES'
                }),
            # NOTE(review): 'switftsample' is misspelled, but the shortname
            # is an externally visible job id — renaming would break result
            # history, so it is left as-is.
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=10 * 60,
                shortname='objc-build-example-switftsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'SwiftSample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
                }),
        ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        return ['interop_server']
    def make_options(self):
        return []
    def build_steps(self):
        return [['src/objective-c/tests/build_tests.sh']]
    def post_tests_steps(self):
        return []
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        # Objective-C tests run directly on a mac worker, not in docker.
        return None
    def __str__(self):
        return 'objc'
class Sanity(object):
    """Runs the repo sanity checks listed in sanity_tests.yaml."""
    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
    def test_specs(self):
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                # Inside the docker child, the clang tools run directly
                # rather than spawning nested docker containers.
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
            return [
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated/unsafe in PyYAML >= 5.1; the input is a trusted
                # in-repo file, but yaml.safe_load would be preferable.
                self.config.job_spec(
                    cmd['script'].split(),
                    timeout_seconds=30 * 60,
                    environ=environ,
                    cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)
            ]
    def pre_build_steps(self):
        return []
    def make_targets(self):
        return ['run_dep_checks']
    def make_options(self):
        return []
    def build_steps(self):
        return []
    def post_tests_steps(self):
        return []
    def makefile_name(self):
        return 'Makefile'
    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'
    def __str__(self):
        return 'sanity'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
    # configs.json holds a literal list of config dicts; key the table by name
    _CONFIGS = {
        cfg['config']: Config(**cfg)
        for cfg in ast.literal_eval(f.read())
    }

# every runnable language, keyed by its command-line name
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

# build config name -> msbuild configuration (used on windows builds)
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
    """Checks that architecture option is valid; exits the process if not."""
    if platform_string() == 'windows':
        # reuse the msbuild mapping purely for validation (it exits on error)
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print('Architecture %s does not match current runtime architecture.'
                  % arch)
            sys.exit(1)
    else:
        # bug fix: this branch previously consulted the module-level
        # 'args.arch' instead of the 'arch' parameter actually passed in,
        # making the function silently depend on global state.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
    """Auxilary function to parse the "runs_per_test" flag.

    Returns:
      A positive integer or 0, the latter indicating an infinite number of
      runs.

    Raises:
      argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            raise ValueError
        return n
    except ValueError:
        # narrowed from a bare 'except:': only int() and the explicit raise
        # above produce ValueError; a bare except also swallowed
        # KeyboardInterrupt and SystemExit.
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
    """argparse type function: parse a percentage in [0, 100] as a float."""
    pct = float(arg_str)
    if pct < 0 or pct > 100:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % pct)
    return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """True when a and b differ by at most the relative/absolute tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
# build configuration (keys of tools/run_tests/generated/configs.json)
argp.add_argument(
    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
# test selection / parallelism
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
    '-p',
    '--sample_percent',
    default=100.0,
    type=percent_type,
    help='Run a random sample with that percentage of tests')
argp.add_argument(
    '-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
    '-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
    '--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
    '-l',
    '--language',
    choices=['all'] + sorted(_LANGUAGES.keys()),
    nargs='+',
    default=['all'])
argp.add_argument(
    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
argp.add_argument(
    '--use_docker',
    default=False,
    action='store_const',
    const=True,
    help='Run all the tests under docker. That provides ' +
    'additional isolation and prevents the need to install ' +
    'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
# toolchain selection
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc7.2',
        'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'python2.7',
        'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
        'all_the_cpythons', 'electron1.3', 'electron1.6', 'coreclr', 'cmake',
        'cmake_vs2015', 'cmake_vs2017'
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
    '--iomgr_platform',
    choices=['native', 'uv', 'gevent'],
    default='native',
    help='Selects iomgr platform to build on')
argp.add_argument(
    '--build_only',
    default=False,
    action='store_const',
    const=True,
    help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
    '--measure_cpu_costs',
    default=False,
    action='store_const',
    const=True,
    help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
# reporting
argp.add_argument(
    '-x',
    '--xml_report',
    default=None,
    type=str,
    help='Generates a JUnit-compatible XML report')
argp.add_argument(
    '--report_suite_name',
    default='tests',
    type=str,
    help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
# polling engine control
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epollsig,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
    '--bq_result_table',
    default='',
    type=str,
    nargs='?',
    help='Upload test results to a specified BQ table.')
argp.add_argument(
    '--disable_auto_set_flakes',
    default=False,
    const=True,
    action='store_const',
    help='Disable rerunning historically flaky tests')
args = argp.parse_args()

# historically flaky tests (and their measured cpu costs) fetched from BQ
flaky_tests = set()
shortname_to_cpu = {}
if not args.disable_auto_set_flakes:
    try:
        for test in get_bqtest_data():
            if test.flaky:
                flaky_tests.add(test.name)
            if test.cpu > 0:
                shortname_to_cpu[test.name] = test.cpu
    except Exception:
        # best effort only: flake data is an optimization, not a requirement.
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        print(
            "Unexpected error getting flaky tests: %s" % traceback.format_exc())

if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # each spec is SUBMODULE_NAME[:BRANCH]; maxsplit=1 so at most 2 parts
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    def git(cmd, cwd=cwd):
        # cwd is bound as a default so each loop iteration keeps its own value
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    # submodules with a gen_build_yaml script feed the project generator
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            ' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

if 'all' in args.language:
    lang_list = _LANGUAGES.keys()
else:
    lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
    for bad in ['objc', 'sanity']:
        if bad in lang_list:
            # NOTE: relies on keys() returning a list (Python 2 semantics)
            lang_list.remove(bad)

languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
        # together, and is only used under gcov. All other configs should build languages individually.
        language_make_options = list(
            set([
                make_option
                for lang in languages
                for make_option in lang.make_options()
            ]))

if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        time.sleep(5)

    # all selected languages must agree on a single docker image
    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        if 'gcov' in args.config:
            dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
            print(
                'Using multilang_jessie_x64 docker image for code coverage for '
                'all languages.')
        else:
            print(
                'Languages to be tested require running under different docker '
                'images.')
            sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    # re-invoke this script inside the docker container, minus --use_docker
    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    sys.exit(0)

_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Build the list of JobSpecs that run the native build for *targets*."""
    if platform_string() == 'windows':
        # one 'cmake --build' invocation per target
        return [
            jobset.JobSpec(
                [
                    'cmake', '--build', '.', '--target',
                    '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
                ],
                cwd=os.path.dirname(makefile),
                timeout_seconds=None) for target in targets
        ]
    if not targets:
        return []
    if makefile.startswith('cmake/build/'):
        # With cmake, we've passed all the build configuration in the pre-build step already
        return [
            jobset.JobSpec(
                [os.getenv('MAKE', 'make'), '-j', '%d' % args.jobs] + targets,
                cwd='cmake/build',
                timeout_seconds=None)
        ]
    # classic makefile build
    return [
        jobset.JobSpec(
            [
                os.getenv('MAKE', 'make'), '-f', makefile, '-j',
                '%d' % args.jobs,
                'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
                args.slowdown,
                'CONFIG=%s' % cfg, 'Q='
            ] + language_make_options +
            ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
            timeout_seconds=None)
    ]
# union of make targets per makefile, across all selected languages
make_targets = {}
for language in languages:
    mk = language.makefile_name()
    make_targets.setdefault(mk, set()).update(language.make_targets())
def build_step_environ(cfg):
    """Environment variables shared by every build step for config *cfg*."""
    env = {'CONFIG': cfg}
    if cfg in _MSBUILD_CONFIG:
        # windows builds additionally need the msbuild configuration name
        env['MSBUILD_CONFIG'] = _MSBUILD_CONFIG[cfg]
    return env
# pre-build steps, deduplicated across languages; flake_retries tolerates
# transient failures during the build
build_steps = list(
    set(
        jobset.JobSpec(
            cmdline, environ=build_step_environ(build_config), flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
# language-specific build scripts run after make
build_steps.extend(
    set(
        jobset.JobSpec(
            cmdline,
            environ=build_step_environ(build_config),
            timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))

post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
    """Ask a legacy server on *legacy_server_port* to shut itself down.

    Probes http://localhost:<port>/version_number; when something answers,
    requests /quitquitquit so the port is freed for a fresh server.
    """
    try:
        version = int(
            urllib.request.urlopen(
                'http://localhost:%d/version_number' % legacy_server_port,
                timeout=10).read())
    except Exception:
        # nothing listening (or unreadable response): nothing to shut down.
        # Narrowed from a bare 'except:', which also caught KeyboardInterrupt.
        pass
    else:
        urllib.request.urlopen(
            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    """Failure categories returned by _build_and_run."""
    # distinct sentinel objects; compared by identity/equality membership
    BUILD = object()
    TEST = object()
    POST_TEST = object()
def _has_epollexclusive():
    """True when the check_epollexclusive helper binary exists and succeeds."""
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        return False
    try:
        subprocess.check_call(binary)
        return True
    except subprocess.CalledProcessError:
        # the helper ran and reported EPOLLEXCLUSIVE as unavailable.
        # (fix: 'except E, e:' is Python-2-only syntax and the bound
        # exception was never used)
        return False
    except OSError:
        # For languages other than C and Windows the binary won't exist
        return False
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests.

    Returns a list of BuildAndRunError values; empty means full success.
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(
        build_steps,
        maxjobs=1,
        stop_on_failure=True,
        newline_on_success=newline_on_success,
        travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # drop the epollex polling strategy when the helper binary says the
    # kernel lacks EPOLLEXCLUSIVE support
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
    )]:
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # start antagonists
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # collect every spec matching --regex and not matching --regex_exclude
        one_run = set(
            spec for language in languages for spec in language.test_specs()
            if (re.search(args.regex, spec.shortname) and
                (args.regex_exclude == '' or
                 not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want out test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(
                one_run)  # random.sample needs an indexable seq.
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
            # jobs.
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                       ) > 0, 'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run)
                         if infinite_runs else itertools.repeat(
                             massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        if resultset:
            # summarize flakes vs hard failures per test shortname
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message(
                            'FLAKE',
                            '%s [%d/%d runs flaked]' % (k, num_failures,
                                                        num_runs),
                            do_newline=True)
    finally:
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_results_to_bq(resultset, args.bq_result_table, args,
                                 platform_string())
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)

    number_failures, _ = jobset.run(
        post_tests_steps,
        maxjobs=1,
        stop_on_failure=False,
        newline_on_success=newline_on_success,
        travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
if forever:
    # watch-and-rerun mode: rebuild and rerun whenever sources change
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # NOTE(review): '== 0' compares the returned *list* of errors with 0,
        # which is always False, so 'errors' is always falsy here and
        # 'success' is never reassigned -- looks like a leftover from when
        # _build_and_run returned a count; confirm before changing behavior.
        errors = _build_and_run(
            check_cancelled=have_files_changed,
            newline_on_success=False,
            build_only=args.build_only) == 0
        if not previous_success and not errors:
            jobset.message(
                'SUCCESS',
                'All tests are now passing properly',
                do_newline=True)
        jobset.message('IDLE', 'No change detected')
        while not have_files_changed():
            time.sleep(1)
else:
    # single-pass mode
    errors = _build_and_run(
        check_cancelled=lambda: False,
        newline_on_success=args.newline_on_success,
        xml_report=args.xml_report,
        build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # encode the error categories as an exit-code bitmask
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
|
{
"content_hash": "9263963cb8a73c23663b784cb7f2c875",
"timestamp": "",
"source": "github",
"line_count": 1845,
"max_line_length": 135,
"avg_line_length": 34.87208672086721,
"alnum_prop": 0.5418020174388785,
"repo_name": "kpayson64/grpc",
"id": "4146eec42df6e10d58d134ecb5112e6bde7c79a9",
"size": "64938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/run_tests/run_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26669"
},
{
"name": "C",
"bytes": "1460088"
},
{
"name": "C#",
"bytes": "1652475"
},
{
"name": "C++",
"bytes": "28950022"
},
{
"name": "CMake",
"bytes": "512326"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Go",
"bytes": "27069"
},
{
"name": "Java",
"bytes": "6907"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "M4",
"bytes": "44036"
},
{
"name": "Makefile",
"bytes": "1093815"
},
{
"name": "Objective-C",
"bytes": "274212"
},
{
"name": "Objective-C++",
"bytes": "22096"
},
{
"name": "PHP",
"bytes": "414581"
},
{
"name": "Python",
"bytes": "2263734"
},
{
"name": "Ruby",
"bytes": "820366"
},
{
"name": "Shell",
"bytes": "421626"
},
{
"name": "Swift",
"bytes": "3435"
}
],
"symlink_target": ""
}
|
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class RecommendationRequestJson(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'credential_id': 'int',
        'credential_name': 'str',
        'region': 'str',
        'platform_variant': 'str',
        'filters': 'dict(str, str)',
        'availability_zone': 'str',
        'blueprint_name': 'str',
        'blueprint_id': 'int'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'credential_id': 'credentialId',
        'credential_name': 'credentialName',
        'region': 'region',
        'platform_variant': 'platformVariant',
        'filters': 'filters',
        'availability_zone': 'availabilityZone',
        'blueprint_name': 'blueprintName',
        'blueprint_id': 'blueprintId'
    }

    def __init__(self, credential_id=None, credential_name=None, region=None, platform_variant=None, filters=None, availability_zone=None, blueprint_name=None, blueprint_id=None):
        """
        RecommendationRequestJson - a model defined in Swagger
        """
        initial_values = (
            ('credential_id', credential_id),
            ('credential_name', credential_name),
            ('region', region),
            ('platform_variant', platform_variant),
            ('filters', filters),
            ('availability_zone', availability_zone),
            ('blueprint_name', blueprint_name),
            ('blueprint_id', blueprint_id),
        )
        for attr_name, attr_value in initial_values:
            # create the backing slot first, then route any non-None value
            # through the property setter, exactly as the per-attribute
            # generated code did
            setattr(self, '_' + attr_name, None)
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def credential_id(self):
        """
        Id of the credential resource used for the request.

        :return: The credential_id of this RecommendationRequestJson.
        :rtype: int
        """
        return self._credential_id

    @credential_id.setter
    def credential_id(self, credential_id):
        """
        Set the credential resource id used for the request.

        :param credential_id: The credential_id of this RecommendationRequestJson.
        :type: int
        """
        self._credential_id = credential_id

    @property
    def credential_name(self):
        """
        Name of the credential resource used for the request.

        :return: The credential_name of this RecommendationRequestJson.
        :rtype: str
        """
        return self._credential_name

    @credential_name.setter
    def credential_name(self, credential_name):
        """
        Set the credential resource name used for the request.

        :param credential_name: The credential_name of this RecommendationRequestJson.
        :type: str
        """
        self._credential_name = credential_name

    @property
    def region(self):
        """
        Related region.

        :return: The region of this RecommendationRequestJson.
        :rtype: str
        """
        return self._region

    @region.setter
    def region(self, region):
        """
        Set the related region.

        :param region: The region of this RecommendationRequestJson.
        :type: str
        """
        self._region = region

    @property
    def platform_variant(self):
        """
        Cloud provider API variant.

        :return: The platform_variant of this RecommendationRequestJson.
        :rtype: str
        """
        return self._platform_variant

    @platform_variant.setter
    def platform_variant(self, platform_variant):
        """
        Set the cloud provider API variant.

        :param platform_variant: The platform_variant of this RecommendationRequestJson.
        :type: str
        """
        self._platform_variant = platform_variant

    @property
    def filters(self):
        """
        Filter applied to resources.

        :return: The filters of this RecommendationRequestJson.
        :rtype: dict(str, str)
        """
        return self._filters

    @filters.setter
    def filters(self, filters):
        """
        Set the filter applied to resources.

        :param filters: The filters of this RecommendationRequestJson.
        :type: dict(str, str)
        """
        self._filters = filters

    @property
    def availability_zone(self):
        """
        Related availability zone.

        :return: The availability_zone of this RecommendationRequestJson.
        :rtype: str
        """
        return self._availability_zone

    @availability_zone.setter
    def availability_zone(self, availability_zone):
        """
        Set the related availability zone.

        :param availability_zone: The availability_zone of this RecommendationRequestJson.
        :type: str
        """
        self._availability_zone = availability_zone

    @property
    def blueprint_name(self):
        """
        Name identifying an existing blueprint.

        :return: The blueprint_name of this RecommendationRequestJson.
        :rtype: str
        """
        return self._blueprint_name

    @blueprint_name.setter
    def blueprint_name(self, blueprint_name):
        """
        Set the name identifying an existing blueprint.

        :param blueprint_name: The blueprint_name of this RecommendationRequestJson.
        :type: str
        """
        self._blueprint_name = blueprint_name

    @property
    def blueprint_id(self):
        """
        Id identifying an existing blueprint.

        :return: The blueprint_id of this RecommendationRequestJson.
        :rtype: int
        """
        return self._blueprint_id

    @blueprint_id.setter
    def blueprint_id(self, blueprint_id):
        """
        Set the id identifying an existing blueprint.

        :param blueprint_id: The blueprint_id of this RecommendationRequestJson.
        :type: int
        """
        self._blueprint_id = blueprint_id

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, RecommendationRequestJson):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
{
"content_hash": "97da6675903882a33e1a587070b25d1c",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 984,
"avg_line_length": 31.70219435736677,
"alnum_prop": 0.6175220013843568,
"repo_name": "Chaffelson/whoville",
"id": "d060150f80c369c9d6f7a1abca764a09643bcb04",
"size": "10130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whoville/cloudbreak/models/recommendation_request_json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6961"
},
{
"name": "HTML",
"bytes": "72038"
},
{
"name": "Python",
"bytes": "3729355"
},
{
"name": "Shell",
"bytes": "95963"
},
{
"name": "TSQL",
"bytes": "345"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from skbio.util import classproperty, overrides
from ._nucleotide_sequence import NucleotideSequence
from ._iupac_sequence import IUPACSequence
class RNA(NucleotideSequence):
    """Store RNA sequence data and optional associated metadata.

    Only characters in the IUPAC RNA character set [1]_ are supported.

    Parameters
    ----------
    sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
        Characters representing the RNA sequence itself.
    metadata : dict, optional
        Arbitrary metadata which applies to the entire sequence.
    positional_metadata : Pandas DataFrame consumable, optional
        Arbitrary per-character metadata. For example, quality data from
        sequencing reads. Must be able to be passed directly to the Pandas
        DataFrame constructor.
    validate : bool, optional
        If ``True``, validation will be performed to ensure that all sequence
        characters are in the IUPAC RNA character set. If ``False``, validation
        will not be performed. Turning off validation will improve runtime
        performance. If invalid characters are present, however, there is
        **no guarantee that operations performed on the resulting object will
        work or behave as expected.** Only turn off validation if you are
        certain that the sequence characters are valid. To store sequence data
        that is not IUPAC-compliant, use ``Sequence``.
    lowercase : bool, optional
        If ``True``, lowercase sequence characters will be converted to
        uppercase characters in order to be valid IUPAC RNA characters.

    Attributes
    ----------
    values
    metadata
    positional_metadata
    alphabet
    gap_chars
    nondegenerate_chars
    degenerate_chars
    degenerate_map
    complement_map

    See Also
    --------
    DNA

    References
    ----------
    .. [1] Nomenclature for incompletely specified bases in nucleic acid
       sequences: recommendations 1984.
       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
       A Cornish-Bowden

    Examples
    --------
    >>> from skbio import RNA
    >>> RNA('ACCGAAU')
    RNA
    -----------------------------
    Stats:
        length: 7
        has gaps: False
        has degenerates: False
        has non-degenerates: True
    -----------------------------
    0 ACCGAAU

    Convert lowercase characters to uppercase:

    >>> RNA('AcCGaaU', lowercase=True)
    RNA
    -----------------------------
    Stats:
        length: 7
        has gaps: False
        has degenerates: False
        has non-degenerates: True
    -----------------------------
    0 ACCGAAU

    """

    @classproperty
    @overrides(NucleotideSequence)
    def complement_map(cls):
        # IUPAC complement table: degenerate codes map to the code for the
        # complements of the bases they stand for (e.g. R = A/G -> Y = U/C)
        comp_map = {
            'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
            'H': 'D', 'V': 'B', 'N': 'N'
        }
        # gap characters complement to themselves
        comp_map.update({c: c for c in cls.gap_chars})
        return comp_map

    @classproperty
    @overrides(IUPACSequence)
    def nondegenerate_chars(cls):
        # the four concrete RNA bases
        return set("ACGU")

    @classproperty
    @overrides(IUPACSequence)
    def degenerate_map(cls):
        # IUPAC degenerate code -> set of concrete bases it may represent
        return {
            "R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
            "W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
            "H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
        }
|
{
"content_hash": "59fabaf0f75952f8256a98596af101ea",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 79,
"avg_line_length": 31.5625,
"alnum_prop": 0.5864214992927864,
"repo_name": "Achuth17/scikit-bio",
"id": "8e914233c731b8819e1022872e18443b6a0d1384",
"size": "3889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skbio/sequence/_rna.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "259"
},
{
"name": "Makefile",
"bytes": "585"
},
{
"name": "Python",
"bytes": "1852175"
}
],
"symlink_target": ""
}
|
from __future__ import division
from joint_dependency.simulation import (create_world, create_lockbox,
Controller,
ActionMachine)
from joint_dependency.recorder import Record
from joint_dependency.inference import (model_posterior, same_segment,
exp_cross_entropy, random_objective,
exp_neg_entropy, heuristic_proximity)
try:
from joint_dependency.ros_adapter import (RosActionMachine,
create_ros_lockbox)
except ImportError:
print("Disable ROS.")
from joint_dependency.utils import rand_max
try:
import bayesian_changepoint_detection.offline_changepoint_detection as bcd
except:
bcd = None
print("Disable Changepoint Detection")
from functools import partial
import datetime
try:
import dill as cPickle
except ImportError:
import pickle as cPickle
import multiprocessing
import multiprocessing.dummy
import argparse
import numpy as np
import pandas as pd
from scipy.stats import entropy
from progressbar import ProgressBar, Bar, Percentage
from blessings import Terminal
from copy import deepcopy
import time
term = Terminal()
class Writer(object):
    """File-like object that prints to a fixed position on the terminal.

    This is the glue between blessings and progressbar: instances can be
    handed to ProgressBar as its output stream, and every ``write`` first
    moves the cursor to the location fixed at construction time.
    """

    def __init__(self, location):
        """
        Input: location - tuple of ints (x, y), the position
               of the bar in the terminal
        """
        self.location = location

    def write(self, string):
        # Reposition the cursor to our slot, then print there.
        with term.location(*self.location):
            print(string)
def generate_filename(metadata):
    """Build the output pickle filename from run metadata.

    The run date has its separators (space, '/', ':') normalized to '-'
    and is combined with the objective name, e.g.
    ``data_2020-01-02-03-04-05_entropy.pkl``.
    """
    stamp = str(metadata["Date"])
    for separator in (" ", "/", ":"):
        stamp = stamp.replace(separator, "-")
    return "data_" + stamp + "_" + metadata['Objective'] + ".pkl"
def init(world):
    """Create the initial change-point priors and experience stores.

    Returns one length-360 probability array (uniform 0.1 per degree) and
    one empty experience list per joint in *world*.
    """
    n_joints = len(world.joints)
    priors = [np.array([.1] * 360) for _ in range(n_joints)]
    experience_lists = [[] for _ in range(n_joints)]
    return priors, experience_lists
def compute_p_same(p_cp):
    """Map each joint's change-point distribution through same_segment."""
    return [same_segment(dist) for dist in p_cp]
def get_best_point(objective_fnc, experiences, p_same, alpha_prior,
                   model_prior, N_samples, world, locked_states,
                   action_sampling_fnc,
                   idx_last_successes=None, idx_last_failures=None,
                   use_joint_positions=False):
    """Sample candidate actions and return the one maximizing the objective.

    Each candidate is scored by ``objective_fnc`` against a randomly
    chosen joint to check; ties between equally scored candidates are
    broken at random by ``rand_max``.

    Returns a ``(positions, checked_joint, moved_joint, value)`` tuple.
    """
    # Fix for the mutable-default-argument pitfall: `[]` as a default is
    # created once at definition time and shared across calls.  `None`
    # sentinels preserve the original behavior for callers that omit them.
    if idx_last_successes is None:
        idx_last_successes = []
    if idx_last_failures is None:
        idx_last_failures = []

    actions = action_sampling_fnc(N_samples, world, locked_states)

    action_values = []
    for action in actions:
        # Score the candidate with respect to a randomly selected joint.
        check_joint = np.random.randint(0, len(world.joints))
        value = objective_fnc(experiences[check_joint],
                              action[1],
                              np.asarray(p_same),
                              alpha_prior,
                              model_prior[check_joint],
                              None,
                              idx_last_successes,
                              action[0],
                              idx_last_failures,
                              world,
                              use_joint_positions)
        action_values.append((action[1], check_joint, action[0], value))

    best_action = rand_max(action_values, lambda x: x[3])
    return best_action
def small_joint_state_sampling(_, world, locked_states):
    """Enumerate actions that push a single joint to one of its limits.

    For every joint, propose moving it to its minimum and to its maximum
    limit while the other joints stay where they are.  Proposals where the
    joint already sits within 0.9 of the target limit are dropped.

    Returns a list of ``(joint_index, positions)`` tuples.
    """
    proposals = []
    for idx, joint in enumerate(world.joints):
        #if locked_states[idx] == 0:
        for limit in (joint.min_limit, joint.max_limit):
            current = [jnt.get_q() for jnt in world.joints]
            if abs(current[idx] - limit) < 0.9:
                # Already (nearly) at this limit: nothing worth doing.
                continue
            current[idx] = limit
            #TODO deepcopy needed?
            proposals.append((idx, deepcopy(current)))
    return proposals
def large_joint_state_sampling(N_samples, world, locked_states):
    """Sample ``N_samples`` random full joint configurations.

    Locked joints (locked_states[j] == 1) keep their current integer
    position; every unlocked joint gets a uniformly random integer
    position within its limits.

    Returns a list of ``(joint_index, positions)`` tuples.
    """
    actions = []
    for i in range(N_samples):
        pos = np.ndarray((len(world.joints),))
        for j, joint in enumerate(world.joints):
            if locked_states[j] == 1:
                pos[j] = int(joint.get_q())
            else:
                pos[j] = np.random.randint(joint.min_limit, joint.max_limit)
        # NOTE(review): ``j`` below is the loop variable leaked from the
        # inner for-loop, so every action is tagged with the LAST joint
        # index regardless of which joints actually moved.  Compare with
        # large_joint_state_one_joint_moving_sampling, which records the
        # joint it moved — confirm whether this is intended.
        actions.append((j, deepcopy(pos)))
    return actions
def large_joint_state_one_joint_moving_sampling(N_samples, world,
                                                locked_state):
    """Sample configurations in which exactly one unlocked joint moves.

    Every sample keeps each joint at its current (integer) position and
    re-draws a single randomly chosen *unlocked* joint uniformly within
    its limits.

    Returns a list of ``(moved_joint_index, positions)`` tuples.
    """
    samples = []
    unlocked = np.where(np.asarray(locked_state) == 0)[0]
    for _ in range(N_samples):
        positions = np.asarray([int(jnt.get_q()) for jnt in world.joints])
        moved = np.random.choice(unlocked)
        jnt = world.joints[moved]
        positions[moved] = np.random.randint(jnt.min_limit, jnt.max_limit)
        #print((moved, positions))
        samples.append((moved, deepcopy(positions)))
    return samples
def get_probability_over_degree(P, qs):
    """Accumulate change-point probabilities into 360 one-degree bins.

    ``P[i]`` is added to the bin of ``int(qs[i]) % 360`` (the last two
    entries of ``qs`` are ignored), and each bin is averaged over its hit
    count.  Bins that were never hit would be NaN after the division and
    are replaced by a small uniform prior instead.

    Returns ``(probs, count)``, both length-360 arrays.
    """
    probs = np.zeros((360,))
    count = np.zeros((360,))
    for i, pos in enumerate(qs[:-2]):
        degree = int(pos) % 360
        probs[degree] += P[i]
        count[degree] += 1
    # Empty bins divide 0/0 -> NaN here; those are patched below.
    probs = probs / count
    prior = 10e-8
    probs = np.array([prior if np.isnan(p) else p for p in probs])
    return probs, count
def update_p_cp(world, use_ros):
    """Estimate per-degree change-point probabilities for every joint.

    Replays this process' recorded joint trajectories through offline
    Bayesian change-point detection (bcd) and bins the resulting
    change-point probabilities over joint positions in degrees.

    Returns a list with one length-360 probability array per joint.
    """
    P_cp = []
    # Records are kept per worker process, keyed by PID.
    pid = multiprocessing.current_process().pid
    for j, joint in enumerate(world.joints):
        if use_ros:
            # On the real robot the velocity cannot be measured directly;
            # reconstruct it from consecutive position samples.
            q = Record.records[pid]["q_" + str(j)].as_matrix()
            af = Record.records[pid]["applied_force_" + str(j)][0:].as_matrix()
            v = q[1:] - q[0:-1]  # we can't measure the velocity directly
            vn = v[:] + af[1:]
            d = np.zeros((v.shape[0] + 1,))
            # Normalized change in squared velocity after applying force;
            # large deviations hint at a change point (lock/limit hit).
            d[1:] = abs((vn**2 - v[:]**2)/(0.1 * vn))
        else:
            # In simulation the velocity is recorded directly.
            v = Record.records[pid]["v_" + str(j)][0:].as_matrix()
            af = Record.records[pid]["applied_force_" + str(j)][0:].as_matrix()
            vn = v[:-1] + af[:-1]
            d = np.zeros(v.shape)
            d[1:] = abs((vn**2 - v[1:]**2)/(0.1 * vn))
        # Interpolate over NaNs (e.g. from division by ~zero velocities)
        # before running the change-point detector.
        nans, x = nan_helper(d)
        d[nans] = np.interp(x(nans), x(~nans), d[~nans])
        Q, P, Pcp = bcd.offline_changepoint_detection(
            data=d,
            prior_func=partial(bcd.const_prior, l=(len(d)+1)),
            observation_log_likelihood_function=
            bcd.gaussian_obs_log_likelihood,
            truncate=-50)
        # NOTE(review): the [:1] and [-1:] slices keep only a single sample
        # of the change-point probabilities / positions — confirm this is
        # intended and not meant to cover the full trajectory.
        p_cp, count = get_probability_over_degree(
            np.exp(Pcp).sum(0)[:1],
            Record.records[pid]['q_' + str(j)][-1:].as_matrix())
        P_cp.append(p_cp)
    return P_cp
def nan_helper(y):
    """Helper to handle indices and logical indices of NaNs.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices= index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x= nan_helper(y)
        >>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
    """
    mask = np.isnan(y)

    def to_indices(logical):
        # Translate a boolean mask into positional indices.
        return logical.nonzero()[0]

    return mask, to_indices
def calc_posteriors(world, experiences, P_same, alpha_prior, model_prior):
    """Compute the dependency-model posterior for every joint."""
    p_same_arr = np.asarray(P_same)
    return [model_posterior(experiences[i], p_same_arr, alpha_prior,
                            np.asarray(model_prior[i]))
            for i, _ in enumerate(world.joints)]
def dependency_learning(N_actions, N_samples, world, objective_fnc,
                        use_change_points, alpha_prior, model_prior,
                        action_machine, location, action_sampling_fnc,
                        use_ros, use_joint_positions=False):
    """Run the active exploration loop and record its results.

    After an init phase (optionally actuating every joint through its
    limits once to build a change-point-based prior), performs
    ``N_actions`` exploration steps: sample ``N_samples`` candidate
    actions, execute the one that maximizes ``objective_fnc``, record the
    observed locking states, and update the model posteriors.  The
    intermediate results are pickled to disk after every step.

    Parameters
    ----------
    N_actions : int
        Number of exploration actions to perform.
    N_samples : int
        Number of candidate actions sampled per step.
    world : lockbox (simulated or ROS) holding the joints.
    objective_fnc : callable
        Scores candidate actions (see get_best_point).
    use_change_points : bool
        If True, estimate change-point priors from an initial sweep.
    alpha_prior, model_prior : priors forwarded to the inference code.
    action_machine : executes joint-space actions on the world.
    location : unused (kept for the disabled terminal Writer).
    action_sampling_fnc : callable producing candidate actions.
    use_ros : bool, forwarded to update_p_cp.
    use_joint_positions : bool, forwarded to the objective.

    Returns
    -------
    (pandas.DataFrame, dict)
        Per-action log and run metadata.
    """
    #writer = Writer(location)
    widgets = [Bar(), Percentage(),
               " (Run #{}, PID {})".format(
                   0, multiprocessing.current_process().pid)]
    progress = ProgressBar(maxval=N_actions + 2,  # fd=writer,
                           widgets=widgets).start()
    progress.update(0)

    # init phase: initialize the probability distributions
    P_cp, experiences = init(world)

    # get locking state of all joints by actuating them once
    jpos = np.array([int(j.get_q()) for j in world.joints])
    locked_states = [None] * len(world.joints)
    locked_states_before = [None] * len(world.joints)

    if use_change_points:
        # Sweep every joint to both of its limits so the recorded
        # trajectories contain the information for change-point detection.
        for i, joint in enumerate(world.joints):
            print(action_machine)
            action_pos = np.array(jpos)
            action_pos[i] = world.joints[i].max_limit
            action_machine.run_action(action_pos)
            action_pos[i] = world.joints[i].min_limit
            action_machine.run_action(action_pos)
        P_cp = update_p_cp(world, use_ros)
        P_same = compute_p_same(P_cp)
    else:
        P_same = compute_p_same(P_cp)

    # for j, joint in enumerate(world.joints):
    #     locked_states[j] = action_machine.check_state(j)
    #     # add the experiences
    #     new_experience = {'data': jpos, 'value': locked_states[j]}
    #     experiences[j].append(new_experience)

    # perform actions as long the entropy of all model distributions is still
    # big
    # while (np.array([entropy(p) for p in posteriors]) > .25).any():
    data = pd.DataFrame()
    progress.update(1)

    metadata = {'ChangePointDetection': use_change_points,
                'Date': datetime.datetime.now(),
                'Objective': objective_fnc.__name__,
                #'World': world,
                'ModelPrior': model_prior,
                'AlphaPrior': alpha_prior,
                'P_cp': P_cp,
                'P_same': P_same}

    idx_last_successes = []
    idx_last_failures = []

    # store empty data frame so file is available
    filename = generate_filename(metadata)
    # BUGFIX: pickle emits bytes, so the file must be opened in binary mode
    # ("wb"); text mode raises a TypeError on Python 3.  This also matches
    # how run_experiment() writes the very same file.
    with open(filename, "wb") as _file:
        cPickle.dump((data, metadata), _file)

    for idx in range(N_actions):
        current_data = pd.DataFrame(index=[idx])

        # get best action according to objective function
        pos, checked_joint, moved_joint, value = \
            get_best_point(objective_fnc,
                           experiences,
                           P_same,
                           alpha_prior,
                           model_prior,
                           N_samples,
                           world,
                           locked_states,
                           action_sampling_fnc,
                           idx_last_successes,
                           idx_last_failures,
                           use_joint_positions)

        if moved_joint is None:
            print("We finished the exploration")
            print("This usually happens when you use the heuristic_proximity "
                  "that has as objective to estimate the dependency structure "
                  "and not to reduce the entropy")
            break

        for n, p in enumerate(pos):
            current_data["DesiredPos" + str(n)] = [p]
        current_data["CheckedJoint"] = [checked_joint]

        # save the joint and locked states before the action
        locked_states_before = [joint.is_locked()
                                for joint in world.joints]
        jpos_before = np.array([int(j.get_q()) for j in world.joints])

        action_outcome = True
        if np.all(np.abs(pos - jpos_before) < .1):
            # if we want a no-op don't actually call the robot
            jpos = pos
        else:
            # run best action, i.e. move joints to desired position
            action_outcome = action_machine.run_action(pos, moved_joint)
            # get real position after action (PD-controllers aren't perfect)
            jpos = np.array([int(j.get_q()) for j in world.joints])

        for n, p in enumerate(jpos):
            current_data["RealPos" + str(n)] = [p]

        # save the locked states after the action:
        # test whether the joints are locked or not
        locked_states = [joint.is_locked()
                         for joint in world.joints]
        for n, p in enumerate(locked_states):
            current_data["LockingState" + str(n)] = [p]

        # if the locked states changed the action was successful, if not,
        # it was a failure
        # CORRECTION: it could be that a joint moves but it does not unlock a
        # mechanism. Then it won't be a failure nor a success. We just do not
        # add it to any list
        if action_outcome:
            idx_last_failures = []
            idx_last_successes.append(moved_joint)
        else:
            idx_last_failures.append(moved_joint)

        # add new experience
        new_experience = {'data': jpos, 'value': locked_states[moved_joint]}
        experiences[moved_joint].append(new_experience)

        # calculate model posterior
        posteriors = calc_posteriors(world, experiences, P_same, alpha_prior,
                                     model_prior)
        for n, p in enumerate(posteriors):
            current_data["Posterior" + str(n)] = [p]
            current_data["Entropy" + str(n)] = [entropy(p)]

        data = data.append(current_data)
        progress.update(idx + 1)

        filename = generate_filename(metadata)
        # BUGFIX: binary mode for pickle (see above).
        with open(filename, "wb") as _file:
            cPickle.dump((data, metadata), _file)

    progress.finish()
    return data, metadata
def build_model_prior_simple(world, independent_prior):
    """Dependency prior proportional to 1/|i - j| over joint indices.

    Row ``y`` gives the prior that joint ``y`` depends on joint ``x``
    (columns ``0..n-1``; the diagonal is 0) or on nothing at all (last
    column, fixed to ``independent_prior``).  The dependency part of each
    row is normalized to sum to ``1 - independent_prior``.
    """
    n = len(world.joints)
    rows = []
    for y in range(n):
        row = []
        for x in range(n + 1):
            if x == y:
                row.append(0)
            elif x == n:
                row.append(independent_prior)
            else:
                row.append(1 / abs(x - y))
        rows.append(row)
    model_prior = np.array(rows)
    # Normalize the dependency columns so every full row sums to one.
    model_prior[:, :-1] = ((model_prior.T[:-1, :] /
                            np.sum(model_prior[:, :-1], 1)).T *
                           (1 - independent_prior))
    return model_prior
def build_model_prior_3d(world, independent_prior):
    """Dependency prior proportional to 1/Euclidean distance in 3D.

    Like build_model_prior_simple, but the proximity of two joints is
    measured from their ``position`` attributes instead of their indices.
    """
    joints = world.joints
    n = len(joints)

    def proximity(x, y):
        # Inverse distance between the two joints' 3D positions.
        delta = np.asarray(joints[x].position) - np.asarray(joints[y].position)
        return 1 / np.linalg.norm(delta)

    model_prior = np.array([[0 if x == y
                             else independent_prior if x == n
                             else proximity(x, y)
                             for x in range(n + 1)]
                            for y in range(n)])
    # Normalize the dependency columns so every full row sums to one.
    model_prior[:, :-1] = ((model_prior.T[:-1, :] /
                            np.sum(model_prior[:, :-1], 1)).T *
                           (1 - independent_prior))
    return model_prior
def run_experiment(args):
    """Build the world, action machine and priors, then run one learning run.

    The resulting data frame and metadata are pickled to a timestamped
    file (see generate_filename).
    """
    # reset all things for every new experiment
    pid = multiprocessing.current_process().pid
    # NOTE(review): np.random.seed() receives a time.struct_time here, which
    # numpy treats as an array of ints.  Fields such as tm_isdst can be -1,
    # which np.random.seed rejects — confirm this never fires in practice.
    seed = time.gmtime()
    np.random.seed(seed)
    if bcd:
        # Clear any cached data from a previous run of the detector.
        bcd.offline_changepoint_detection.data = None
    # Fresh per-process record storage for this run.
    Record.records[pid] = pd.DataFrame()
    if args.use_ros:
        world = create_ros_lockbox()
        action_machine = RosActionMachine(world)
    else:
        world = create_lockbox(
            use_joint_positions=args.use_joint_positions,
            use_simple_locking_state=args.use_simple_locking_state)
        # One PD controller per joint drives the simulated lockbox.
        controllers = []
        for j, _ in enumerate(world.joints):
            controllers.append(Controller(world, j))
        action_machine = ActionMachine(world, controllers, .1)
    alpha_prior = np.array([.1, .1])
    independent_prior = .7

    # the model prior is proportional to 1/distance between the joints
    #if args.use_joint_positions:
    model_prior = build_model_prior_3d(world, independent_prior)
    # else:
    #     model_prior = build_model_prior_simple(world, independent_prior)
    #     # normalize
    #     model_prior[:, :-1] = ((model_prior.T[:-1, :] /
    #                             np.sum(model_prior[:, :-1], 1)).T *
    #                            (1-independent_prior))

    # Map the CLI objective name to its implementation.
    if args.objective == "random":
        objective = random_objective
    elif args.objective == "entropy":
        objective = exp_neg_entropy
    elif args.objective == "cross_entropy":
        objective = exp_cross_entropy
    elif args.objective == "heuristic_proximity":
        objective = heuristic_proximity
    else:
        raise Exception("You tried to choose an objective that doesn't exist: "+args.objective)
    if args.joint_state == "small":
        action_sampling_fnc = small_joint_state_sampling
    elif args.joint_state == "large":
        action_sampling_fnc = large_joint_state_one_joint_moving_sampling
    else:
        raise Exception("No proper action sampling function chosen.")
    data, metadata = dependency_learning(
        N_actions=args.queries,
        N_samples=args.samples,
        world=world,
        objective_fnc=objective,
        use_change_points=args.changepoint,
        alpha_prior=alpha_prior,
        model_prior=model_prior,
        action_machine=action_machine,
        location=None,
        action_sampling_fnc=action_sampling_fnc,
        use_ros=args.use_ros,
        use_joint_positions=args.use_joint_positions)
    metadata['Seed'] = seed
    # Persist the final result (binary mode, as pickle requires).
    filename = generate_filename(metadata)
    with open(filename, "wb") as _file:
        cPickle.dump((data, metadata), _file)
def main():
    """Parse command-line options and launch a single experiment run."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-o", "--objective", required=True,
        help="The objective to optimize for exploration",
        choices=['random', 'entropy', 'cross_entropy', 'heuristic_proximity'])
    parser.add_argument(
        "-c", "--changepoint", action='store_true',
        help="Should change points used as prior")
    parser.add_argument(
        "-t", "--threads", type=int, default=multiprocessing.cpu_count(),
        help="Number of threads used")
    parser.add_argument(
        "-q", "--queries", type=int, default=20,
        help="How many queries should the active learner make")
    parser.add_argument(
        "-s", "--samples", type=int, default=4000,
        help="How many samples should be drawn for optimization")
    parser.add_argument(
        "-r", "--runs", type=int, default=20,
        help="Number of runs")
    parser.add_argument(
        "-p", "--prob-file", type=str, default=None,
        help="The file with the probability distributions")
    parser.add_argument(
        "--use_ros", action='store_true',
        help="Enable ROS/real robot usage.")
    parser.add_argument(
        "--joint_state", type=str, default='large',
        help="Should we use a large or a small joint state (large/small).")
    parser.add_argument(
        "--use_joint_positions", action='store_true',
        help="Don't assume a linear sequence of joints but 3d positions.")
    parser.add_argument(
        "--use_simple_locking_state", action='store_true',
        help="Don't randomize the locking configuration, but "
             "have joint limits lock other joints")
    args = parser.parse_args()

    # Clear the terminal before and after the run so the progress bar
    # output starts from a clean screen.
    print(term.clear)
    run_experiment(args)
    print(term.clear)


if __name__ == '__main__':
    main()
|
{
"content_hash": "129fe976c2e32c32ef66f4c268b69c8a",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 95,
"avg_line_length": 35.92962962962963,
"alnum_prop": 0.5509225853004844,
"repo_name": "hildensia/joint_dependency",
"id": "e04ebf32b5bc5c9a97aee67944042cd2fef3f9fc",
"size": "19402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "joint_dependency/experiments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78895"
}
],
"symlink_target": ""
}
|
import text_stats
# Sample text taken from http://en.wikipedia.org/wiki/Electric_charge,
# used as the input fixture for main() below.
wiki_text = """ Electric charge is the physical property of matter that causes
it to experience a force when placed in an electromagnetic
field. There are two types of electric charges: positive and
negative. Positively charged substances are repelled from
other positively charged substances, but attracted to
negatively charged substances; negatively charged substances
are repelled from negative and attracted to positive. An
object is negatively charged if it has an excess of electrons,
and is otherwise positively charged or uncharged. The SI
derived unit of electric charge is the coulomb (C), although
in electrical engineering it is also common to use the
ampere-hour (Ah), and in chemistry it is common to use the
elementary charge (e) as a unit. The symbol Q is often used to
denote charge. The early knowledge of how charged substances
interact is now called classical electrodynamics, and is still
very accurate if quantum effects do not need to be considered.
The electric charge is a fundamental conserved property of
some subatomic particles, which determines their
electromagnetic interaction. Electrically charged matter is
influenced by, and produces, electromagnetic fields. The
interaction between a moving charge and an electromagnetic
field is the source of the electromagnetic force, which is one
of the four fundamental forces (See also: magnetic field).
Twentieth-century experiments demonstrated that electric
charge is quantized; that is, it comes in integer multiples
of individual small units called the elementary charge, e,
approximately equal to 1.602×10−19 coulombs (except for
particles called quarks, which have charges that are integer
multiples of e/3). The proton has a charge of +e, and the
electron has a charge of −e. The study of charged particles,
and how their interactions are mediated by photons, is called
quantum electrodynamics.
"""
#TODO add option for input parameters, maybe use file or url
def main():
    """Compute and print word-frequency statistics for ``wiki_text``."""
    stats = text_stats.text_stats()
    stats.extract_stats(wiki_text)
    stats.print_sorted_dictionary(stats.freq_table)


if __name__ == '__main__':
    main()
|
{
"content_hash": "87afc59293ec7223637246df595bcd83",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 61.88636363636363,
"alnum_prop": 0.6496511200881381,
"repo_name": "convolu/string_stats",
"id": "f1635f8f1eca749098ed3851b411e2fcef8fcd81",
"size": "2728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11194"
}
],
"symlink_target": ""
}
|
"""
Django Feedme
Digest.py
Author: Derek Stegelman
"""
from __future__ import unicode_literals
from django.template import loader, Context
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import EmailMultiAlternatives
from .models import FeedItem
def send_digest():
    """
    Send the daily digest for all users. This sends
    both a .txt and .html version of the email.

    For each user with feed items from yesterday, renders the text and
    HTML templates and sends a multipart email; users without items are
    skipped.

    :return:
    """
    text_template = loader.get_template('feedme/mail/digest.txt')
    html_template = loader.get_template('feedme/mail/digest.html')
    subject = 'Daily FeedMe Digest'
    # The sender address is configuration, not per-user state: resolve it
    # once instead of re-reading settings on every loop iteration.
    from_email = getattr(settings, 'FEEDME_FROM_EMAIL', 'test@test.com')
    for user in User.objects.all():
        items = FeedItem.objects.my_feed_items(user).yesterday()
        if items:
            context_dict = {'items': items}
            context = Context(context_dict)
            text_content = text_template.render(context)
            html_content = html_template.render(context)
            msg = EmailMultiAlternatives(subject, text_content, from_email,
                                         [user.email])
            msg.attach_alternative(html_content, "text/html")
            msg.send()
|
{
"content_hash": "ae4877218b5c5cac65fa2a04d8fd220b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 89,
"avg_line_length": 32.4054054054054,
"alnum_prop": 0.6688907422852377,
"repo_name": "dstegelman/django-feedme",
"id": "b457dc2c1d10879ec864446b7d98592d73783ff2",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feedme/digest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7050"
},
{
"name": "JavaScript",
"bytes": "643"
},
{
"name": "Python",
"bytes": "83577"
}
],
"symlink_target": ""
}
|
import codecs
import os
from setuptools import setup, find_packages
from debinterface import __version__
HERE = os.path.abspath(os.path.dirname(__file__))


def read(*parts):
    """
    Return the UTF-8 decoded contents of the file at ``HERE/*parts``.

    ``*parts`` are joined onto the directory containing this file, so
    project files (e.g. README.rst) are found regardless of the current
    working directory.
    """
    path = os.path.join(HERE, *parts)
    with codecs.open(path, "rb", "utf-8") as handle:
        return handle.read()
# No hard runtime dependencies; the package relies on the stdlib only.
REQUIREMENTS = []
VERSION = __version__
URL = 'https://github.com/nMustaki/debinterface'

setup(
    name="debinterface",
    version=VERSION,
    description=("A simple Python library for dealing with "
                 "the /etc/network/interfaces file in most "
                 "Debian based distributions."),
    long_description=read("README.rst"),
    license="BSD",
    maintainer='Nathan Mustaki',
    maintainer_email='feydaykyn@gmail.com',
    author="Douglas Greenbaum",
    author_email="dggreenbaum@greenbad.org",
    url=URL,
    packages=find_packages(exclude=["test"]),
    package_data={
        # PEP 561 marker so type checkers pick up inline annotations.
        'debinterface': ['py.typed'],
    },
    install_requires=REQUIREMENTS,
    extras_require={
        'dev': ['check-manifest', 'twine']
    },
    test_suite="test",
    download_url='{0}/archive/v{1}.zip'.format(URL, VERSION),
    keywords=['debian', 'network', 'system', 'configuration'],
    # Fixed: the 'Development Status :: 5 - Production/Stable' classifier
    # was listed twice; also use a list, which is what setuptools expects.
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: BSD License',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Topic :: System :: Systems Administration',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
|
{
"content_hash": "584b7535aca4b63f6083b55b88014029",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 30.80952380952381,
"alnum_prop": 0.622359608449253,
"repo_name": "nMustaki/debinterface",
"id": "035db01be9d386ce2744ace665b6e2b68ea23d86",
"size": "1963",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "95832"
},
{
"name": "Shell",
"bytes": "461"
}
],
"symlink_target": ""
}
|
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from __future__ import annotations
from collections import abc
from typing import (
TYPE_CHECKING,
Any,
Hashable,
Sequence,
cast,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
)
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
dict_compat,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core import (
algorithms,
common as com,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
range_to_ndarray,
sanitize_array,
)
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
TimedeltaIndex,
default_index,
ensure_index,
get_objs_combined_axis,
union_indexes,
)
from pandas.core.internals.array_manager import (
ArrayManager,
SingleArrayManager,
)
from pandas.core.internals.blocks import (
BlockPlacement,
ensure_block_shape,
new_block_2d,
)
from pandas.core.internals.managers import (
BlockManager,
SingleBlockManager,
create_block_manager_from_blocks,
create_block_manager_from_column_arrays,
)
if TYPE_CHECKING:
from numpy.ma.mrecords import MaskedRecords
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
    arrays,
    columns: Index,
    index,
    *,
    dtype: DtypeObj | None = None,
    verify_integrity: bool = True,
    typ: str | None = None,
    consolidate: bool = True,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.

    Needs to handle a lot of exceptional cases.

    Parameters
    ----------
    arrays : sequence of column arrays
    columns : Index
    index : Index or None
        Inferred from ``arrays`` when None (requires ``verify_integrity``).
    dtype : DtypeObj, optional
        Target dtype used while homogenizing the columns.
    verify_integrity : bool, default True
        When True, infer/validate the index and homogenize the arrays.
        When False (the DataFrame._from_arrays fast path), the arrays are
        only shape-checked, not converted.
    typ : {"block", "array"}
        Which Manager implementation to build.
    consolidate : bool, default True
        Forwarded to the BlockManager constructor.

    Returns
    -------
    Manager
    """
    if verify_integrity:
        # figure out the index, if necessary
        if index is None:
            index = _extract_index(arrays)
        else:
            index = ensure_index(index)

        # don't force copy because getting jammed in an ndarray anyway
        arrays = _homogenize(arrays, index, dtype)
        # _homogenize ensures
        #  - all(len(x) == len(index) for x in arrays)
        #  - all(x.ndim == 1 for x in arrays)
        #  - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
        #  - all(type(x) is not PandasArray for x in arrays)
    else:
        index = ensure_index(index)
        arrays = [extract_array(x, extract_numpy=True) for x in arrays]

        # Reached via DataFrame._from_arrays; we do validation here
        for arr in arrays:
            if (
                not isinstance(arr, (np.ndarray, ExtensionArray))
                or arr.ndim != 1
                or len(arr) != len(index)
            ):
                raise ValueError(
                    "Arrays must be 1-dimensional np.ndarray or ExtensionArray "
                    "with length matching len(index)"
                )

    columns = ensure_index(columns)
    if len(columns) != len(arrays):
        raise ValueError("len(arrays) must match len(columns)")

    # from BlockManager perspective
    axes = [columns, index]

    if typ == "block":
        return create_block_manager_from_column_arrays(
            arrays, axes, consolidate=consolidate
        )
    elif typ == "array":
        return ArrayManager(arrays, [index, columns])
    else:
        raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
def rec_array_to_mgr(
    data: MaskedRecords | np.recarray | np.ndarray,
    index,
    columns,
    dtype: DtypeObj | None,
    copy: bool,
    typ: str,
):
    """
    Extract from a masked rec array and create the manager.

    Masked entries are replaced by each column's (possibly upcast) fill
    value before the manager is built; ``copy`` forces a defensive copy
    of the resulting manager.
    """
    # essentially process a record array then fill it
    fdata = ma.getdata(data)
    if index is None:
        index = _get_names_from_index(fdata)
    else:
        index = ensure_index(index)

    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)

    # fill if needed
    if isinstance(data, np.ma.MaskedArray):
        # GH#42200 we only get here with MaskedRecords, but check for the
        # parent class MaskedArray to avoid the need to import MaskedRecords
        data = cast("MaskedRecords", data)
        new_arrays = fill_masked_arrays(data, arr_columns)
    else:
        # error: Incompatible types in assignment (expression has type
        # "List[ExtensionArray]", variable has type "List[ndarray]")
        new_arrays = arrays  # type: ignore[assignment]

    # create the manager

    # error: Argument 1 to "reorder_arrays" has incompatible type "List[ndarray]";
    # expected "List[Union[ExtensionArray, ndarray]]"
    arrays, arr_columns = reorder_arrays(
        new_arrays, arr_columns, columns, len(index)  # type: ignore[arg-type]
    )
    if columns is None:
        columns = arr_columns

    mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ)

    if copy:
        mgr = mgr.copy()
    return mgr
def fill_masked_arrays(data: MaskedRecords, arr_columns: Index) -> list[np.ndarray]:
    """
    Convert numpy MaskedRecords to ensure mask is softened.

    For every column, masked entries are replaced by the column's fill
    value (upcasting the dtype first when necessary so the fill value
    fits); fully unmasked columns are passed through untouched.
    """
    filled = []
    for name in arr_columns:
        column = data[name]
        fill_value = column.fill_value

        mask = ma.getmaskarray(column)
        if mask.any():
            # Upcast so the fill value is representable, then overwrite
            # the masked slots in the (copied) array.
            column, fill_value = maybe_upcast(column, fill_value=fill_value,
                                              copy=True)
            column[mask] = fill_value

        filled.append(column)
    return filled
def mgr_to_mgr(mgr, typ: str, copy: bool = True):
    """
    Convert to specific type of Manager. Does not copy if the type is already
    correct. Does not guarantee a copy otherwise. `copy` keyword only controls
    whether conversion from Block->ArrayManager copies the 1D arrays.
    """
    new_mgr: Manager
    if typ == "block":
        if isinstance(mgr, BlockManager):
            # Already the requested type: returned as-is, no copy.
            new_mgr = mgr
        else:
            if mgr.ndim == 2:
                # ArrayManager -> BlockManager via the generic constructor.
                new_mgr = arrays_to_mgr(
                    mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block"
                )
            else:
                # 1D (Series-backing) case.
                new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)
    elif typ == "array":
        if isinstance(mgr, ArrayManager):
            # Already the requested type: returned as-is, no copy.
            new_mgr = mgr
        else:
            if mgr.ndim == 2:
                # Split the consolidated blocks into one 1D array per column.
                arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]
                if copy:
                    arrays = [arr.copy() for arr in arrays]
                new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])
            else:
                array = mgr.internal_values()
                if copy:
                    array = array.copy()
                new_mgr = SingleArrayManager([array], [mgr.index])
    else:
        raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
    return new_mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def ndarray_to_mgr(
    values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str
) -> Manager:
    """
    Create a Manager ("block" or "array") from 2D-coercible input.

    Handles Series input (reusing/reindexing its index), 1D-only
    extension dtypes (delegating to arrays_to_mgr), 2D-capable extension
    arrays, and plain ndarrays, applying ``dtype`` coercion and object ->
    datetimelike inference where appropriate.
    """
    # used in DataFrame.__init__
    # input must be a ndarray, list, Series, Index, ExtensionArray
    if isinstance(values, ABCSeries):
        if columns is None:
            if values.name is not None:
                columns = Index([values.name])
        if index is None:
            index = values.index
        else:
            values = values.reindex(index)

        # zero len case (GH #2234)
        if not len(values) and columns is not None and len(columns):
            values = np.empty((0, 1), dtype=object)

    vdtype = getattr(values, "dtype", None)
    if is_1d_only_ea_dtype(vdtype) or isinstance(dtype, ExtensionDtype):
        # GH#19157
        if isinstance(values, np.ndarray) and values.ndim > 1:
            # GH#12513 a EA dtype passed with a 2D array, split into
            # multiple EAs that view the values
            values = [values[:, n] for n in range(values.shape[1])]
        else:
            values = [values]

        if columns is None:
            columns = Index(range(len(values)))
        else:
            columns = ensure_index(columns)

        # 1D-only EAs are handled column-wise by the arrays path.
        return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)

    elif is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype):
        # i.e. Datetime64TZ
        values = extract_array(values, extract_numpy=True)
        if copy:
            values = values.copy()
        if values.ndim == 1:
            values = values.reshape(-1, 1)

    else:
        # by definition an array here
        # the dtypes will be coerced to a single dtype
        values = _prep_ndarray(values, copy=copy)

    if dtype is not None and not is_dtype_equal(values.dtype, dtype):
        # Coerce to the requested dtype via sanitize_array on the
        # flattened data, then restore the 2D shape.
        shape = values.shape
        flat = values.ravel()

        # GH#40110 see similar check inside sanitize_array
        rcf = not (is_integer_dtype(dtype) and values.dtype.kind == "f")

        values = sanitize_array(
            flat, None, dtype=dtype, copy=copy, raise_cast_failure=rcf
        )

        values = values.reshape(shape)

    # _prep_ndarray ensures that values.ndim == 2 at this point
    index, columns = _get_axes(
        values.shape[0], values.shape[1], index=index, columns=columns
    )

    _check_values_indices_shape_match(values, index, columns)

    if typ == "array":
        if issubclass(values.dtype.type, str):
            values = np.array(values, dtype=object)

        if dtype is None and is_object_dtype(values.dtype):
            # Infer datetimelike content column-by-column for object data.
            arrays = [
                ensure_wrapped_if_datetimelike(
                    maybe_infer_to_datetimelike(values[:, i])
                )
                for i in range(values.shape[1])
            ]
        else:
            if is_datetime_or_timedelta_dtype(values.dtype):
                values = ensure_wrapped_if_datetimelike(values)
            arrays = [values[:, i] for i in range(values.shape[1])]

        return ArrayManager(arrays, [index, columns], verify_integrity=False)

    # Block path works row-major over columns, hence the transpose.
    values = values.T

    # if we don't have a dtype specified, then try to convert objects
    # on the entire block; this is to convert if we have datetimelike's
    # embedded in an object type
    if dtype is None and is_object_dtype(values.dtype):
        obj_columns = list(values)
        maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns]
        # don't convert (and copy) the objects if no type inference occurs
        if any(x is not y for x, y in zip(obj_columns, maybe_datetime)):
            # At least one column was inferred: build one block per column.
            dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime]
            block_values = [
                new_block_2d(dvals_list[n], placement=BlockPlacement(n))
                for n in range(len(dvals_list))
            ]
        else:
            # No inference happened: keep a single consolidated block.
            bp = BlockPlacement(slice(len(columns)))
            nb = new_block_2d(values, placement=bp)
            block_values = [nb]
    else:
        bp = BlockPlacement(slice(len(columns)))
        nb = new_block_2d(values, placement=bp)
        block_values = [nb]

    if len(columns) == 0:
        block_values = []

    return create_block_manager_from_blocks(
        block_values, [columns, index], verify_integrity=False
    )
def _check_values_indices_shape_match(
values: np.ndarray, index: Index, columns: Index
) -> None:
"""
Check that the shape implied by our axes matches the actual shape of the
data.
"""
if values.shape[1] != len(columns) or values.shape[0] != len(index):
# Could let this raise in Block constructor, but we get a more
# helpful exception message this way.
if values.shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
passed = values.shape
implied = (len(index), len(columns))
raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
def dict_to_mgr(
    data: dict,
    index,
    columns,
    *,
    dtype: DtypeObj | None = None,
    typ: str = "block",
    copy: bool = True,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.

    Used in DataFrame.__init__

    Parameters
    ----------
    data : dict
        Mapping of column label -> column contents (scalars, arrays,
        Series, other iterables).
    index : list-like or None
        Row labels; inferred from the data when None.
    columns : list-like or None
        Column labels; when given, selects/reorders entries from ``data``
        and missing entries become all-NaN columns.
    dtype : DtypeObj, optional
        Dtype applied downstream in arrays_to_mgr.
    typ : str, default "block"
        Which Manager implementation arrays_to_mgr should build.
    copy : bool, default True
        Whether column data should be copied rather than viewed.

    Returns
    -------
    Manager
    """
    arrays: Sequence[Any] | Series

    if columns is not None:
        # imported at call time (presumably to avoid an import cycle — TODO confirm)
        from pandas.core.series import Series

        # Re-key ``data`` by ``columns``: labels absent from the dict show
        # up as NaN entries in this object-dtype Series.
        arrays = Series(data, index=columns, dtype=object)
        missing = arrays.isna()
        if index is None:
            # GH10856
            # raise ValueError if only scalars in dict
            index = _extract_index(arrays[~missing])
        else:
            index = ensure_index(index)

        # no obvious "empty" int column
        if missing.any() and not is_integer_dtype(dtype):
            nan_dtype: DtypeObj

            if dtype is None or (
                isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.flexible)
            ):
                # GH#1783
                nan_dtype = np.dtype("object")
            else:
                nan_dtype = dtype
            # fill every requested-but-absent column with an all-NaN array
            val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
            arrays.loc[missing] = [val] * missing.sum()

        arrays = list(arrays)
        columns = ensure_index(columns)

    else:
        # no columns given: take them from the dict keys, in insertion order
        keys = list(data.keys())
        columns = Index(keys)
        arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
        # GH#24096 need copy to be deep for datetime64tz case
        # TODO: See if we can avoid these copies
        arrays = [arr if not isinstance(arr, Index) else arr._data for arr in arrays]
        arrays = [
            arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
        ]

    if copy:
        # arrays_to_mgr (via form_blocks) won't make copies for EAs
        # dtype attr check to exclude EADtype-castable strs
        arrays = [
            x
            if not hasattr(x, "dtype") or not isinstance(x.dtype, ExtensionDtype)
            else x.copy()
            for x in arrays
        ]
        # TODO: can we get rid of the dt64tz special case above?

    return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
def nested_data_to_arrays(
    data: Sequence,
    columns: Index | None,
    index: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index, Index]:
    """
    Break a nested sequence (list of rows/Series/records) into per-column
    arrays, also resolving the columns and row index.
    """
    # Caller guarantees treat_as_nested(data) holds, so data[0] exists.
    first = data[0]

    if columns is None and is_named_tuple(first):
        # named-tuple records carry their own field names
        columns = ensure_index(first._fields)

    arrays, columns = to_arrays(data, columns, dtype=dtype)
    columns = ensure_index(columns)

    if index is None:
        if isinstance(first, ABCSeries):
            index = _get_names_from_index(data)
        elif isinstance(first, Categorical):
            # GH#38845 hit in test_constructor_categorical
            index = default_index(len(first))
        else:
            index = default_index(len(data))

    return arrays, columns, index
def treat_as_nested(data) -> bool:
    """
    Decide whether ``data`` should be unpacked via nested_data_to_arrays.

    True for a non-empty sequence whose first element is a 1-dimensional
    list-like — except 2D ExtensionArrays, which are handled elsewhere as
    a single unit.
    """
    if len(data) == 0:
        return False
    first = data[0]
    if not is_list_like(first):
        return False
    if getattr(first, "ndim", 1) != 1:
        return False
    # a 2D ExtensionArray looks nested but must not be split up here
    if isinstance(data, ExtensionArray) and data.ndim == 2:
        return False
    return True
# ---------------------------------------------------------------------
def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
    """
    Coerce ``values`` into a 2D np.ndarray suitable for block construction.

    1D input gains a trailing column axis; anything that ends up neither
    1D nor 2D raises ValueError.
    """
    if isinstance(values, TimedeltaArray) or (
        isinstance(values, DatetimeArray) and values.tz is None
    ):
        # On older numpy, np.asarray below apparently does not call __array__,
        # so nanoseconds get dropped.
        values = values._ndarray

    if not isinstance(values, (np.ndarray, ABCSeries, Index)):
        if len(values) == 0:
            # empty list-like -> empty 2D object array
            return np.empty((0, 0), dtype=object)
        elif isinstance(values, range):
            arr = range_to_ndarray(values)
            # promote the 1D range array to a single-column 2D array
            return arr[..., np.newaxis]

        def convert(v):
            # leave scalars and DataFrames untouched; other list-likes get
            # object conversion with platform dtype preservation
            if not is_list_like(v) or isinstance(v, ABCDataFrame):
                return v

            v = extract_array(v, extract_numpy=True)
            res = maybe_convert_platform(v)
            return res

        # we could have a 1-dim or 2-dim list here
        # this is equiv of np.asarray, but does object conversion
        # and platform dtype preservation
        if is_list_like(values[0]):
            values = np.array([convert(v) for v in values])
        elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
            # GH#21861 see test_constructor_list_of_lists
            values = np.array([convert(v) for v in values])
        else:
            values = convert(values)

    else:
        # drop subclass info
        values = np.array(values, copy=copy)

    if values.ndim == 1:
        # reshape 1D data into a single column
        values = values.reshape((values.shape[0], 1))
    elif values.ndim != 2:
        raise ValueError(f"Must pass 2-d input. shape={values.shape}")

    return values
def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]:
    """
    Align each column in ``data`` to ``index``, coercing to ``dtype``.

    Series are reindexed to ``index``; dicts are looked up label-by-label
    against the index (NaN where absent); everything else is passed through
    sanitize_array.  Returns one array per input column.
    """
    # object-dtype view of ``index``; built lazily, only if a dict is seen
    oindex = None
    homogenized = []

    for val in data:
        if isinstance(val, ABCSeries):
            if dtype is not None:
                val = val.astype(dtype, copy=False)
            if val.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                val = val.reindex(index, copy=False)

            val = val._values
        else:
            if isinstance(val, dict):
                # GH#41785 this _should_ be equivalent to (but faster than)
                # val = create_series_with_explicit_dtype(val, index=index)._values
                if oindex is None:
                    oindex = index.astype("O")

                if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
                    # see test_constructor_dict_datetime64_index
                    val = dict_compat(val)
                else:
                    # see test_constructor_subclass_dict
                    val = dict(val)
                # fetch the value for every index label, NaN where missing
                val = lib.fast_multiget(val, oindex._values, default=np.nan)

            val = sanitize_array(
                val, index, dtype=dtype, copy=False, raise_cast_failure=False
            )
            com.require_length_match(val, index)

        homogenized.append(val)

    return homogenized
def _extract_index(data) -> Index:
    """
    Try to infer an Index from the passed data, raise ValueError on failure.

    Series contribute their index, dicts contribute their keys, and other
    1D list-likes only constrain the length.  Raises when the pieces are
    inconsistent (mixed lengths, dicts mixed with raw arrays) or when only
    scalars were passed.
    """
    index = None
    if len(data) == 0:
        index = Index([])
    else:
        raw_lengths = []
        indexes: list[list[Hashable] | Index] = []

        # classify each column and collect index / length information
        have_raw_arrays = False
        have_series = False
        have_dicts = False

        for val in data:
            if isinstance(val, ABCSeries):
                have_series = True
                indexes.append(val.index)
            elif isinstance(val, dict):
                have_dicts = True
                indexes.append(list(val.keys()))
            elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
                have_raw_arrays = True
                raw_lengths.append(len(val))
            elif isinstance(val, np.ndarray) and val.ndim > 1:
                raise ValueError("Per-column arrays must each be 1-dimensional")

        if not indexes and not raw_lengths:
            raise ValueError("If using all scalar values, you must pass an index")

        elif have_series:
            index = union_indexes(indexes)
        elif have_dicts:
            # dict keys keep insertion order -> do not sort the union
            index = union_indexes(indexes, sort=False)

        if have_raw_arrays:
            lengths = list(set(raw_lengths))
            if len(lengths) > 1:
                raise ValueError("All arrays must be of the same length")

            if have_dicts:
                raise ValueError(
                    "Mixing dicts with non-Series may lead to ambiguous ordering."
                )

            if have_series:
                assert index is not None  # for mypy
                if lengths[0] != len(index):
                    msg = (
                        f"array length {lengths[0]} does not match index "
                        f"length {len(index)}"
                    )
                    raise ValueError(msg)
            else:
                index = default_index(lengths[0])

    # error: Argument 1 to "ensure_index" has incompatible type "Optional[Index]";
    # expected "Union[Union[Union[ExtensionArray, ndarray], Index, Series],
    # Sequence[Any]]"
    return ensure_index(index)  # type: ignore[arg-type]
def reorder_arrays(
    arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int
) -> tuple[list[ArrayLike], Index]:
    """
    Pre-emptively (cheaply) reindex ``arrays`` to match ``columns``.

    Columns missing from ``arr_columns`` become all-NaN object arrays of
    the given ``length``.  No-op when ``columns`` is None or already equal
    to ``arr_columns``.
    """
    if columns is None or columns.equals(arr_columns):
        # nothing to reorder
        return arrays, arr_columns

    indexer = arr_columns.get_indexer(columns)
    reordered: list[ArrayLike] = []
    for pos in indexer:
        if pos == -1:
            # by convention a missing column defaults to all-NaN object dtype
            filler = np.empty(length, dtype=object)
            filler.fill(np.nan)
            reordered.append(filler)
        else:
            reordered.append(arrays[pos])

    return reordered, columns
def _get_names_from_index(data) -> Index:
    """
    Build a row Index from the ``name`` attributes of the given objects.

    Unnamed entries are labelled "Unnamed 0", "Unnamed 1", ... in order of
    appearance; if nothing is named at all, a default RangeIndex results.
    """
    names = [getattr(s, "name", None) for s in data]
    if all(n is None for n in names):
        return default_index(len(data))

    labels: list[Hashable] = []
    unnamed_count = 0
    for n in names:
        if n is None:
            labels.append(f"Unnamed {unnamed_count}")
            unnamed_count += 1
        else:
            labels.append(n)

    return Index(labels)
def _get_axes(
    N: int, K: int, index: Index | None, columns: Index | None
) -> tuple[Index, Index]:
    """
    Resolve the (index, columns) pair for an N x K frame.

    Missing axes fall back to default RangeIndexes of matching length;
    provided ones are passed through ensure_index.
    """
    index = default_index(N) if index is None else ensure_index(index)
    columns = default_index(K) if columns is None else ensure_index(columns)
    return index, columns
def dataclasses_to_dicts(data):
    """
    Convert a list of dataclass instances into a list of dictionaries.

    Parameters
    ----------
    data : List[Type[dataclass]]

    Returns
    --------
    list_dict : List[dict]

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int
    >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
    [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
    """
    from dataclasses import asdict

    return [asdict(instance) for instance in data]
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(
    data, columns: Index | None, dtype: DtypeObj | None = None
) -> tuple[list[ArrayLike], Index]:
    """
    Return list of arrays, columns.

    Returns
    -------
    list[ArrayLike]
        These will become columns in a DataFrame.
    Index
        This will become frame.columns.

    Notes
    -----
    Ensures that len(result_arrays) == len(result_index).
    """
    if isinstance(data, ABCDataFrame):
        # see test_from_records_with_index_data, test_from_records_bad_index_column
        if columns is not None:
            # keep only the requested columns, in the frame's own order
            arrays = [
                data._ixs(i, axis=1).values
                for i, col in enumerate(data.columns)
                if col in columns
            ]
        else:
            columns = data.columns
            arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]

        return arrays, columns

    if not len(data):
        # empty input: structured arrays still contribute their field names
        if isinstance(data, np.ndarray):
            if data.dtype.names is not None:
                # i.e. numpy structured array
                columns = ensure_index(data.dtype.names)
                arrays = [data[name] for name in columns]

                if len(data) == 0:
                    # GH#42456 the indexing above results in list of 2D ndarrays
                    # TODO: is that an issue with numpy?
                    for i, arr in enumerate(arrays):
                        if arr.ndim == 2:
                            arrays[i] = arr[:, 0]

                return arrays, columns
        return [], ensure_index([])

    elif isinstance(data[0], Categorical):
        # GH#38845 deprecate special case
        warnings.warn(
            "The behavior of DataFrame([categorical, ...]) is deprecated and "
            "in a future version will be changed to match the behavior of "
            "DataFrame([any_listlike, ...]). "
            "To retain the old behavior, pass as a dictionary "
            "DataFrame({col: categorical, ..})",
            FutureWarning,
            stacklevel=4,
        )
        if columns is None:
            columns = default_index(len(data))
        elif len(columns) > len(data):
            raise ValueError("len(columns) > len(data)")
        elif len(columns) < len(data):
            # doing this here is akin to a pre-emptive reindex
            data = data[: len(columns)]
        return data, columns

    elif isinstance(data, np.ndarray) and data.dtype.names is not None:
        # e.g. recarray
        columns = Index(list(data.dtype.names))
        arrays = [data[k] for k in columns]
        return arrays, columns

    # dispatch on the type of the first record
    if isinstance(data[0], (list, tuple)):
        arr = _list_to_arrays(data)
    elif isinstance(data[0], abc.Mapping):
        arr, columns = _list_of_dict_to_arrays(data, columns)
    elif isinstance(data[0], ABCSeries):
        arr, columns = _list_of_series_to_arrays(data, columns)
    else:
        # last ditch effort
        data = [tuple(x) for x in data]
        arr = _list_to_arrays(data)

    content, columns = _finalize_columns_and_data(arr, columns, dtype)
    return content, columns
def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
    """
    Convert a non-empty list of row records (all tuples or all lists)
    into a 2D object-dtype ndarray.
    """
    # Caller has already verified len(data) > 0, so data[0] is safe.
    if isinstance(data[0], tuple):
        return lib.to_object_array_tuples(data)
    # list of lists
    return lib.to_object_array(data)
def _list_of_series_to_arrays(
    data: list,
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    """
    Stack a list of Series-like rows into a 2D ndarray.

    Each entry becomes one row, reindexed against the combined ``columns``.
    Returns the stacked values and the resolved columns.
    """
    # returned np.ndarray has ndim == 2
    if columns is None:
        # We know pass_data is non-empty because data[0] is a Series
        pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
        columns = get_objs_combined_axis(pass_data, sort=False)

    # cache indexers keyed by id(index): many Series often share one Index
    indexer_cache: dict[int, np.ndarray] = {}

    aligned_values = []
    for s in data:
        index = getattr(s, "index", None)
        if index is None:
            # plain list-like row: align positionally
            index = default_index(len(s))

        if id(index) in indexer_cache:
            indexer = indexer_cache[id(index)]
        else:
            indexer = indexer_cache[id(index)] = index.get_indexer(columns)

        values = extract_array(s, extract_numpy=True)
        aligned_values.append(algorithms.take_nd(values, indexer))

    # error: Argument 1 to "vstack" has incompatible type "List[ExtensionArray]";
    # expected "Sequence[Union[Union[int, float, complex, str, bytes, generic],
    # Sequence[Union[int, float, complex, str, bytes, generic]],
    # Sequence[Sequence[Any]], _SupportsArray]]"
    content = np.vstack(aligned_values)  # type: ignore[arg-type]

    return content, columns
def _list_of_dict_to_arrays(
    data: list[dict],
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    """
    Convert list of dicts to numpy arrays

    if `columns` is not passed, column names are inferred from the records

    - for OrderedDict and dicts, the column names match
      the key insertion-order from the first record to the last.
    - For other kinds of dict-likes, the keys are lexically sorted.

    Parameters
    ----------
    data : iterable
        collection of records (OrderedDict, dict)
    columns: iterables or None

    Returns
    -------
    content : np.ndarray[object, ndim=2]
    columns : Index
    """
    if columns is None:
        # sort keys only when no record is a plain dict (plain dicts keep
        # insertion order)
        needs_sort = not any(isinstance(record, dict) for record in data)
        key_lists = (list(record.keys()) for record in data)
        inferred = lib.fast_unique_multiple_list_gen(key_lists, sort=needs_sort)
        columns = ensure_index(inferred)

    # normalize derived mapping classes to plain dict for the cython routine
    plain_records = [record if type(record) is dict else dict(record) for record in data]

    return lib.dicts_to_array(plain_records, list(columns)), columns
def _finalize_columns_and_data(
    content: np.ndarray,  # ndim == 2
    columns: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index]:
    """
    Ensure we have valid columns, cast object dtypes if possible.
    """
    # one 1D array per column
    column_arrays = list(content.T)

    try:
        columns = _validate_or_indexify_columns(column_arrays, columns)
    except AssertionError as err:
        # GH#26429 do not raise user-facing AssertionError
        raise ValueError(err) from err

    if column_arrays and column_arrays[0].dtype == np.object_:
        column_arrays = _convert_object_array(column_arrays, dtype=dtype)

    return column_arrays, columns
def _validate_or_indexify_columns(
content: list[np.ndarray], columns: Index | None
) -> Index:
"""
If columns is None, make numbers as column names; Otherwise, validate that
columns have valid length.
Parameters
----------
content : list of np.ndarrays
columns : Index or None
Returns
-------
Index
If columns is None, assign positional column index value as columns.
Raises
------
1. AssertionError when content is not composed of list of lists, and if
length of columns is not equal to length of content.
2. ValueError when content is list of lists, but length of each sub-list
is not equal
3. ValueError when content is list of lists, but length of sub-list is
not equal to length of content
"""
if columns is None:
columns = default_index(len(content))
else:
# Add mask for data which is composed of list of lists
is_mi_list = isinstance(columns, list) and all(
isinstance(col, list) for col in columns
)
if not is_mi_list and len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError(
f"{len(columns)} columns passed, passed data had "
f"{len(content)} columns"
)
elif is_mi_list:
# check if nested list column, length of each sub-list should be equal
if len({len(col) for col in columns}) > 1:
raise ValueError(
"Length of columns passed for MultiIndex columns is different"
)
# if columns is not empty and length of sublist is not equal to content
elif columns and len(columns[0]) != len(content):
raise ValueError(
f"{len(columns[0])} columns passed, passed data had "
f"{len(content)} columns"
)
return columns
def _convert_object_array(
    content: list[np.ndarray], dtype: DtypeObj | None
) -> list[ArrayLike]:
    """
    Internal function to convert object array.

    Parameters
    ----------
    content: List[np.ndarray]
    dtype: np.dtype or ExtensionDtype

    Returns
    -------
    List[ArrayLike]
    """

    # provide soft conversion of object dtypes
    def _soft_convert(arr):
        # skip inference entirely when object dtype was explicitly requested
        if dtype != np.dtype("O"):
            arr = lib.maybe_convert_objects(arr)
            arr = maybe_cast_to_datetime(arr, dtype)
        return arr

    return [_soft_convert(arr) for arr in content]
|
{
"content_hash": "fd43a40c63b3eb2f898a1bd08454f66c",
"timestamp": "",
"source": "github",
"line_count": 1053,
"max_line_length": 88,
"avg_line_length": 31.440645773979107,
"alnum_prop": 0.5808439302866464,
"repo_name": "jorisvandenbossche/pandas",
"id": "159c20382dcfb665d1b3fa29f54f1d5aff9f2bbb",
"size": "33107",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/core/internals/construction.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360342"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1083849"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17541583"
},
{
"name": "Shell",
"bytes": "10719"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured

# Identity marker: tags strings for later translation without translating
# them at import time.
gettext = lambda s: s

# Each setting reads an optional override from the Django settings module
# and falls back to the userena default.

# Sign-up / sign-in flow
USERENA_SIGNIN_AFTER_SIGNUP = getattr(settings, 'USERENA_SIGNIN_AFTER_SIGNUP', False)
USERENA_REDIRECT_ON_SIGNOUT = getattr(settings, 'USERENA_REDIRECT_ON_SIGNOUT', None)
USERENA_SIGNIN_REDIRECT_BASE = getattr(settings, 'USERENA_SIGNIN_REDIRECT_BASE', '/')
USERENA_SIGNIN_REDIRECT_URL = getattr(settings,
                                      'USERENA_SIGNIN_REDIRECT_URL',
                                      '/accounts/%(username)s/')

# Account activation / moderation
USERENA_ACTIVATION_REQUIRED = getattr(settings, 'USERENA_ACTIVATION_REQUIRED', True)
USERENA_ADMIN_MODERATION = getattr(settings, 'USERENA_ADMIN_MODERATION', False)
USERENA_ACTIVATION_DAYS = getattr(settings, 'USERENA_ACTIVATION_DAYS', 7)
USERENA_ACTIVATION_NOTIFY = getattr(settings, 'USERENA_ACTIVATION_NOTIFY', True)
USERENA_ACTIVATION_NOTIFY_DAYS = getattr(settings, 'USERENA_ACTIVATION_NOTIFY_DAYS', 5)
USERENA_ACTIVATION_REJECTED = getattr(settings,
                                      'USERENA_ACTIVATION_REJECTED',
                                      'ACTIVATION_REJECTED')
USERENA_ACTIVATED = getattr(settings, 'USERENA_ACTIVATED', 'ALREADY_ACTIVATED')

# Session / username policy
USERENA_REMEMBER_ME_DAYS = getattr(settings,
                                   'USERENA_REMEMBER_ME_DAYS',
                                   (gettext('a month'), 30))
USERENA_FORBIDDEN_USERNAMES = getattr(settings,
                                      'USERENA_FORBIDDEN_USERNAMES',
                                      ('signup', 'signout', 'signin',
                                       'activate', 'me', 'password'))

DEFAULT_USERENA_USE_HTTPS = False

# NOTE: It is only for internal use. All those settings should be refactored to only defaults
# as specified in #452
_USERENA_USE_HTTPS = getattr(settings, 'USERENA_USE_HTTPS', DEFAULT_USERENA_USE_HTTPS)

# Mugshot (avatar) handling
USERENA_MUGSHOT_GRAVATAR = getattr(settings, 'USERENA_MUGSHOT_GRAVATAR', True)
USERENA_MUGSHOT_GRAVATAR_SECURE = getattr(settings,
                                          'USERENA_MUGSHOT_GRAVATAR_SECURE',
                                          _USERENA_USE_HTTPS)
USERENA_MUGSHOT_DEFAULT = getattr(settings, 'USERENA_MUGSHOT_DEFAULT', 'identicon')
USERENA_MUGSHOT_SIZE = getattr(settings, 'USERENA_MUGSHOT_SIZE', 80)
USERENA_MUGSHOT_CROP_TYPE = getattr(settings, 'USERENA_MUGSHOT_CROP_TYPE', 'smart')
USERENA_MUGSHOT_PATH = getattr(settings, 'USERENA_MUGSHOT_PATH', 'mugshots/')

# Profiles and misc behavior
USERENA_DEFAULT_PRIVACY = getattr(settings, 'USERENA_DEFAULT_PRIVACY', 'registered')
USERENA_DISABLE_PROFILE_LIST = getattr(settings, 'USERENA_DISABLE_PROFILE_LIST', False)
USERENA_DISABLE_SIGNUP = getattr(settings, 'USERENA_DISABLE_SIGNUP', False)
USERENA_USE_MESSAGES = getattr(settings, 'USERENA_USE_MESSAGES', True)
USERENA_LANGUAGE_FIELD = getattr(settings, 'USERENA_LANGUAGE_FIELD', 'language')
USERENA_WITHOUT_USERNAMES = getattr(settings, 'USERENA_WITHOUT_USERNAMES', False)
USERENA_MODERATE_REGISTRATION = getattr(settings, 'USERENA_MODERATE_REGISTRATION', True)
USERENA_PROFILE_DETAIL_TEMPLATE = getattr(
    settings, 'USERENA_PROFILE_DETAIL_TEMPLATE', 'userena/profile_detail.html')
USERENA_PROFILE_LIST_TEMPLATE = getattr(
    settings, 'USERENA_PROFILE_LIST_TEMPLATE', 'userena/profile_list.html')
USERENA_HIDE_EMAIL = getattr(settings, 'USERENA_HIDE_EMAIL', False)
USERENA_PENDING_MODERATION = getattr(settings, 'USERENA_PENDING_MODERATION', 'PENDING_MODERATION')
USERENA_HTML_EMAIL = getattr(settings, 'USERENA_HTML_EMAIL', False)
USERENA_USE_PLAIN_TEMPLATE = getattr(settings, 'USERENA_USE_PLAIN_TEMPLATE', not USERENA_HTML_EMAIL)
USERENA_REGISTER_PROFILE = getattr(settings, 'USERENA_REGISTER_PROFILE', True)
USERENA_REGISTER_USER = getattr(settings, 'USERENA_REGISTER_USER', True)

# Sanity checks for the django-guardian anonymous-user configuration.
if hasattr(settings, 'ANONYMOUS_USER_ID'):
    raise ImproperlyConfigured('settings.ANONYMOUS_USER_ID is deprecated for settings.ANONYMOUS_USER_NAME. See https://django-guardian.readthedocs.io/en/stable/configuration.html')

try:
    # PEP 8: compare to None with ``is``, not ``==`` (was ``== None``)
    if settings.ANONYMOUS_USER_NAME is None:
        raise ImproperlyConfigured('settings.ANONYMOUS_USER_NAME must not be None.')
except AttributeError:
    raise ImproperlyConfigured('ANONYMOUS_USER_NAME must be set in settings. See https://django-guardian.readthedocs.io/en/stable/configuration.html')
|
{
"content_hash": "126224bd64adc9a83bc8a9faa47f86eb",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 180,
"avg_line_length": 41.927536231884055,
"alnum_prop": 0.5243691669547182,
"repo_name": "bioinformatics-ua/django-userena",
"id": "49db7f440190db0a7f465fabeb8848cd45f2dc35",
"size": "5881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userena/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "21556"
},
{
"name": "Python",
"bytes": "246254"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from os.path import exists
from twisted.python import log, failure
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.internet import defer, error
from txtorcon import TorControlProtocol, TorProtocolFactory, TorState
from txtorcon import ITorControlProtocol
from txtorcon.torcontrolprotocol import parse_keywords, DEFAULT_VALUE
from txtorcon.util import hmac_sha256
import types
import functools
import tempfile
import base64
class CallbackChecker:
    """
    Callable test helper: records that it was invoked and verifies the
    value it was called back with, raising RuntimeError on a mismatch.
    """

    def __init__(self, expected):
        # the value we expect as the first positional argument of __call__
        self.expected_value = expected
        self.called_back = False

    def __call__(self, *args, **kwargs):
        v = args[0]
        if v != self.expected_value:
            # was ``print "WRONG"`` (a SyntaxError on Python 3); the
            # parenthesized form behaves identically on Python 2 and 3
            print("WRONG")
            raise RuntimeError(
                'Expected "%s" but got "%s"' % (self.expected_value, v)
            )
        self.called_back = True
        return v
class InterfaceTests(unittest.TestCase):
    """
    TorControlProtocol must declare and provide ITorControlProtocol.
    """

    def test_implements(self):
        # the class itself advertises the interface
        self.assertTrue(ITorControlProtocol.implementedBy(TorControlProtocol))

    def test_object_implements(self):
        # ...and so does a concrete instance
        self.assertTrue(ITorControlProtocol.providedBy(TorControlProtocol()))
class LogicTests(unittest.TestCase):
    """
    Miscellaneous argument-validation checks for TorControlProtocol.
    """

    def setUp(self):
        self.protocol = TorControlProtocol()
        # stub connectionMade so makeConnection doesn't start the
        # PROTOCOLINFO handshake
        self.protocol.connectionMade = lambda: None
        self.transport = proto_helpers.StringTransport()
        self.protocol.makeConnection(self.transport)

    def test_set_conf_wrong_args(self):
        # set_conf takes key/value pairs, so an odd number of arguments
        # must fail synchronously with an "even number" error
        ctl = TorControlProtocol()
        d = ctl.set_conf('a')
        self.assertTrue(d.called)
        self.assertTrue(d.result)
        self.assertTrue('even number' in d.result.getErrorMessage())
        # ignore the error so trial doesn't get unhappy
        d.addErrback(lambda foo: True)
        return d
class FactoryTests(unittest.TestCase):
    """
    Smoke test for TorProtocolFactory.
    """

    def test_create(self):
        # buildProtocol must succeed even without an address argument
        TorProtocolFactory().buildProtocol(None)
class AuthenticationTests(unittest.TestCase):
    """
    Exercises the PROTOCOLINFO -> AUTHENTICATE handshake for the COOKIE
    and HASHEDPASSWORD methods, driving the protocol by hand through a
    StringTransport.

    NOTE(review): the ``.encode("hex")`` calls below are Python 2 only.
    """

    def setUp(self):
        self.protocol = TorControlProtocol()
        self.transport = proto_helpers.StringTransport()

    def send(self, line):
        # feed one CRLF-terminated control-port line to the protocol
        self.protocol.dataReceived(line.strip() + "\r\n")

    def test_authenticate_cookie(self):
        # connecting immediately sends PROTOCOLINFO
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), 'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        cookie_data = 'cookiedata!cookiedata!cookiedata'
        # NOTE(review): writes "authcookie" into the working directory and
        # never deletes it — consider tempfile + cleanup
        with open('authcookie', 'w') as f:
            f.write(cookie_data)
        self.send('250-PROTOCOLINFO 1')
        self.send('250-AUTH METHODS=COOKIE,HASHEDPASSWORD COOKIEFILE="authcookie"')
        self.send('250-VERSION Tor="0.2.2.34"')
        self.send('250 OK')

        # the cookie file contents must be sent back hex-encoded
        self.assertEqual(
            self.transport.value(),
            'AUTHENTICATE %s\r\n' % cookie_data.encode("hex")
        )

    def test_authenticate_password(self):
        # a plain callable password function is used synchronously
        self.protocol.password_function = lambda: 'foo'
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), 'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        self.send('250-PROTOCOLINFO 1')
        self.send('250-AUTH METHODS=HASHEDPASSWORD')
        self.send('250-VERSION Tor="0.2.2.34"')
        self.send('250 OK')

        self.assertEqual(self.transport.value(), 'AUTHENTICATE %s\r\n' % "foo".encode("hex"))

    def test_authenticate_password_deferred(self):
        # the password function may also return a Deferred
        d = defer.Deferred()
        self.protocol.password_function = lambda: d
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), 'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        self.send('250-PROTOCOLINFO 1')
        self.send('250-AUTH METHODS=HASHEDPASSWORD')
        self.send('250-VERSION Tor="0.2.2.34"')
        self.send('250 OK')

        # make sure we haven't tried to authenticate before getting
        # the password callback
        self.assertEqual(self.transport.value(), '')
        d.callback('foo')

        # now make sure we DID try to authenticate
        self.assertEqual(
            self.transport.value(),
            'AUTHENTICATE %s\r\n' % "foo".encode("hex")
        )

    def test_authenticate_password_deferred_but_no_password(self):
        # a Deferred that fires with None must abort bootstrapping
        d = defer.Deferred()
        self.protocol.password_function = lambda: d
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), 'PROTOCOLINFO 1\r\n')
        self.transport.clear()
        self.send('250-PROTOCOLINFO 1')
        self.send('250-AUTH METHODS=HASHEDPASSWORD')
        self.send('250-VERSION Tor="0.2.2.34"')
        self.send('250 OK')
        d.callback(None)
        return self.assertFailure(self.protocol.post_bootstrap, RuntimeError)

    def confirmAuthFailed(self, *args):
        # errback hook used by test_authenticate_no_password
        self.auth_failed = True

    def test_authenticate_no_password(self):
        # with HASHEDPASSWORD required but no password function at all,
        # post_bootstrap must errback
        self.protocol.post_bootstrap.addErrback(self.confirmAuthFailed)
        self.auth_failed = False
        self.protocol.makeConnection(self.transport)
        self.assertEqual(self.transport.value(), 'PROTOCOLINFO 1\r\n')
        self.send('250-PROTOCOLINFO 1')
        self.send('250-AUTH METHODS=HASHEDPASSWORD')
        self.send('250-VERSION Tor="0.2.2.34"')
        self.send('250 OK')
        self.assertTrue(self.auth_failed)
class DisconnectionTests(unittest.TestCase):
    """
    The on_disconnect Deferred must fire whether the connection ends
    cleanly or with an error.
    """

    def setUp(self):
        self.protocol = TorControlProtocol()
        # stub connectionMade so no handshake traffic is generated
        self.protocol.connectionMade = lambda: None
        self.transport = proto_helpers.StringTransportWithDisconnection()
        self.protocol.makeConnection(self.transport)
        # why doesn't makeConnection do this?
        self.transport.protocol = self.protocol

    def tearDown(self):
        self.protocol = None

    def test_disconnect_callback(self):
        """
        see that we get our callback on_disconnect if the transport
        goes away cleanly (ConnectionDone)
        """
        def it_was_called(*args):
            it_was_called.yes = True
            return None
        it_was_called.yes = False
        # attach to both sides so either path records the call
        self.protocol.on_disconnect.addCallback(it_was_called)
        self.protocol.on_disconnect.addErrback(it_was_called)

        f = failure.Failure(error.ConnectionDone("It's all over"))
        self.protocol.connectionLost(f)
        self.assertTrue(it_was_called.yes)

    def test_disconnect_errback(self):
        """
        see that on_disconnect also fires (via errback) when the
        transport goes away with a non-clean failure
        """
        def it_was_called(*args):
            it_was_called.yes = True
            return None
        it_was_called.yes = False
        # attach to both sides so either path records the call
        self.protocol.on_disconnect.addCallback(it_was_called)
        self.protocol.on_disconnect.addErrback(it_was_called)

        f = failure.Failure(RuntimeError("The thing didn't do the stuff."))
        self.protocol.connectionLost(f)
        self.assertTrue(it_was_called.yes)
class ProtocolTests(unittest.TestCase):
def setUp(self):
self.protocol = TorControlProtocol()
self.protocol.connectionMade = lambda: None
self.transport = proto_helpers.StringTransport()
self.protocol.makeConnection(self.transport)
def tearDown(self):
self.protocol = None
def send(self, line):
self.protocol.dataReceived(line.strip() + "\r\n")
def test_statemachine_broadcast_no_code(self):
try:
self.protocol._broadcast_response("foo")
self.fail()
except RuntimeError, e:
self.assertTrue('No code set yet' in str(e))
def test_statemachine_broadcast_unknown_code(self):
try:
self.protocol.code = 999
self.protocol._broadcast_response("foo")
self.fail()
except RuntimeError, e:
self.assertTrue('Unknown code' in str(e))
def test_statemachine_is_finish(self):
self.assertTrue(not self.protocol._is_finish_line(''))
self.assertTrue(self.protocol._is_finish_line('.'))
self.assertTrue(self.protocol._is_finish_line('300 '))
self.assertTrue(not self.protocol._is_finish_line('250-'))
def test_statemachine_singleline(self):
self.assertTrue(not self.protocol._is_single_line_response('foo'))
def test_statemachine_continuation(self):
try:
self.protocol.code = 250
self.protocol._is_continuation_line("123 ")
self.fail()
except RuntimeError, e:
self.assertTrue('Unexpected code' in str(e))
def test_statemachine_multiline(self):
try:
self.protocol.code = 250
self.protocol._is_multi_line("123 ")
self.fail()
except RuntimeError, e:
self.assertTrue('Unexpected code' in str(e))
def auth_failed(self, msg):
self.assertEqual(str(msg.value), '551 go away')
self.got_auth_failed = True
def test_authenticate_fail(self):
self.got_auth_failed = False
self.protocol._auth_failed = self.auth_failed
self.protocol.password_function = lambda: 'foo'
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=HASHEDPASSWORD
VERSION Tor="0.2.2.35"
OK''')
self.send('551 go away\r\n')
self.assertTrue(self.got_auth_failed)
def test_authenticate_no_auth_line(self):
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
FOOAUTH METHODS=COOKIE,SAFECOOKIE COOKIEFILE="/dev/null"
VERSION Tor="0.2.2.35"
OK''')
self.assertTrue(False)
except RuntimeError, e:
self.assertTrue('find AUTH line' in str(e))
def test_authenticate_not_enough_cookie_data(self):
with tempfile.NamedTemporaryFile() as cookietmp:
cookietmp.write('x' * 35) # too much data
cookietmp.flush()
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=COOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % cookietmp.name)
self.assertTrue(False)
except RuntimeError, e:
self.assertTrue('cookie to be 32' in str(e))
def test_authenticate_not_enough_safecookie_data(self):
with tempfile.NamedTemporaryFile() as cookietmp:
cookietmp.write('x' * 35) # too much data
cookietmp.flush()
try:
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % cookietmp.name)
self.assertTrue(False)
except RuntimeError, e:
self.assertTrue('cookie to be 32' in str(e))
def test_authenticate_safecookie(self):
with tempfile.NamedTemporaryFile() as cookietmp:
cookiedata = str(bytearray([0] * 32))
cookietmp.write(cookiedata)
cookietmp.flush()
self.protocol._do_authenticate('''PROTOCOLINFO 1
AUTH METHODS=SAFECOOKIE COOKIEFILE="%s"
VERSION Tor="0.2.2.35"
OK''' % cookietmp.name)
self.assertTrue(
'AUTHCHALLENGE SAFECOOKIE ' in self.transport.value()
)
client_nonce = base64.b16decode(self.transport.value().split()[-1])
self.transport.clear()
server_nonce = str(bytearray([0] * 32))
server_hash = hmac_sha256(
"Tor safe cookie authentication server-to-controller hash",
cookiedata + client_nonce + server_nonce
)
self.send(
'250 AUTHCHALLENGE SERVERHASH=%s SERVERNONCE=%s' %
(base64.b16encode(server_hash), base64.b16encode(server_nonce))
)
self.assertTrue('AUTHENTICATE ' in self.transport.value())
    def test_authenticate_safecookie_wrong_hash(self):
        """A server hash that doesn't match the computed HMAC is rejected."""
        cookiedata = str(bytearray([0] * 32))
        server_nonce = str(bytearray([0] * 32))
        server_hash = str(bytearray([0] * 32))
        # pretend we already did PROTOCOLINFO and read the cookie
        # file
        self.protocol.cookie_data = cookiedata
        self.protocol.client_nonce = server_nonce # all 0's anyway
        try:
            self.protocol._safecookie_authchallenge(
                '250 AUTHCHALLENGE SERVERHASH=%s SERVERNONCE=%s' %
                (base64.b16encode(server_hash), base64.b16encode(server_nonce))
            )
            self.assertTrue(False)
        except RuntimeError, e:
            self.assertTrue('hash not expected' in str(e))
    def confirm_version_events(self, arg):
        """Callback helper: verify version and valid_events set by bootstrap."""
        self.assertEqual(self.protocol.version, 'foo')
        events = 'GUARD STREAM CIRC NS NEWCONSENSUS ORCONN NEWDESC ADDRMAP STATUS_GENERAL'.split()
        self.assertEqual(len(self.protocol.valid_events), len(events))
        self.assertTrue(all(x in self.protocol.valid_events for x in events))
    def test_bootstrap_callback(self):
        """post_bootstrap fires after all bootstrap requests are answered."""
        d = self.protocol.post_bootstrap
        d.addCallback(CallbackChecker(self.protocol))
        d.addCallback(self.confirm_version_events)
        events = 'GUARD STREAM CIRC NS NEWCONSENSUS ORCONN NEWDESC ADDRMAP STATUS_GENERAL'
        self.protocol._bootstrap()
        # answer all the requests generated by bootstrapping etc.
        self.send("250-signal/names=")
        self.send("250 OK")
        self.send("250-version=foo")
        self.send("250 OK")
        self.send("250-events/names=" + events)
        self.send("250 OK")
        self.send("250 OK") # for USEFEATURE
        return d
    def test_bootstrap_tor_does_not_support_signal_names(self):
        """Falls back to a hard-coded signal list when GETINFO signal/names fails."""
        self.protocol._bootstrap()
        self.send('552 Unrecognized key "signal/names"')
        valid_signals = ["RELOAD", "DUMP", "DEBUG", "NEWNYM", "CLEARDNSCACHE"]
        self.assertEqual(self.protocol.valid_signals, valid_signals)
    def test_async(self):
        """
        test the example from control-spec.txt to see that we
        handle interleaved async notifications properly.
        """
        self.protocol._set_valid_events('CIRC')
        self.protocol.add_event_listener('CIRC', lambda _: None)
        self.send("250 OK")
        d = self.protocol.get_conf("SOCKSPORT ORPORT")
        # the async 650 arrives before the 250 reply to GETCONF
        self.send("650 CIRC 1000 EXTENDED moria1,moria2")
        self.send("250-SOCKSPORT=9050")
        self.send("250 ORPORT=0")
        return d
    def test_async_multiline(self):
        """Multi-line (650-) async events are accumulated and delivered whole."""
        # same as above, but i think the 650's can be multline,
        # too. Like:
        # 650-CIRC 1000 EXTENDED moria1,moria2 0xBEEF
        # 650-EXTRAMAGIC=99
        # 650 ANONYMITY=high
        self.protocol._set_valid_events('CIRC')
        self.protocol.add_event_listener(
            'CIRC',
            CallbackChecker(
                "1000 EXTENDED moria1,moria2\nEXTRAMAGIC=99\nANONYMITY=high"
            )
        )
        self.send("250 OK")
        d = self.protocol.get_conf("SOCKSPORT ORPORT")
        d.addCallback(CallbackChecker({"ORPORT": "0", "SOCKSPORT": "9050"}))
        self.send("650-CIRC 1000 EXTENDED moria1,moria2")
        self.send("650-EXTRAMAGIC=99")
        self.send("650 ANONYMITY=high")
        self.send("250-SOCKSPORT=9050")
        self.send("250 ORPORT=0")
        return d
    def test_multiline_plus(self):
        """
        A 250+key= reply accumulates lines until the lone "." terminator.
        """
        d = self.protocol.get_info("FOO")
        d.addCallback(CallbackChecker({"FOO": "\na\nb\nc"}))
        self.send("250+FOO=")
        self.send("a")
        self.send("b")
        self.send("c")
        self.send(".")
        self.send("250 OK")
        return d
    def incremental_check(self, expected, actual):
        """Helper: assert each value line equals *expected*, skipping key= lines."""
        if '=' in actual:
            return
        self.assertEqual(expected, actual)
    def test_getinfo_incremental(self):
        """get_info_incremental calls the callback per line of a 250+ reply."""
        d = self.protocol.get_info_incremental(
            "FOO",
            functools.partial(self.incremental_check, "bar")
        )
        self.send("250+FOO=")
        self.send("bar")
        self.send("bar")
        self.send(".")
        self.send("250 OK")
        return d
    def test_getinfo_incremental_continuation(self):
        """get_info_incremental also handles 250- continuation style replies."""
        d = self.protocol.get_info_incremental(
            "FOO",
            functools.partial(self.incremental_check, "bar")
        )
        self.send("250-FOO=")
        self.send("250-bar")
        self.send("250-bar")
        self.send("250 OK")
        return d
    def test_getinfo_one_line(self):
        """A single-line 250 foo=bar reply is delivered to the callback."""
        d = self.protocol.get_info(
            "foo",
            functools.partial(self.incremental_check, "bar")
        )
        self.send('250 foo=bar')
        return d
    def test_getconf(self):
        """get_conf parses multiple key=value reply lines into a dict."""
        d = self.protocol.get_conf("SOCKSPORT ORPORT")
        d.addCallback(CallbackChecker({'SocksPort': '9050', 'ORPort': '0'}))
        self.send("250-SocksPort=9050")
        self.send("250 ORPort=0")
        return d
    def test_getconf_raw(self):
        """get_conf_raw returns the reply payload unparsed."""
        d = self.protocol.get_conf_raw("SOCKSPORT ORPORT")
        d.addCallback(CallbackChecker('SocksPort=9050\nORPort=0'))
        self.send("250-SocksPort=9050")
        self.send("250 ORPort=0")
        return d
    def response_ok(self, v):
        """Helper: a plain-OK command reply carries an empty payload."""
        self.assertEqual(v, '')
    def test_setconf(self):
        """set_conf issues a SETCONF key=value command."""
        d = self.protocol.set_conf("foo", "bar").addCallback(
            functools.partial(self.response_ok)
        )
        self.send("250 OK")
        self._wait(d)
        self.assertEqual(self.transport.value(), "SETCONF foo=bar\r\n")
    def test_setconf_with_space(self):
        """Values containing spaces are quoted on the wire."""
        d = self.protocol.set_conf("foo", "a value with a space")
        d.addCallback(functools.partial(self.response_ok))
        self.send("250 OK")
        self._wait(d)
        self.assertEqual(
            self.transport.value(),
            'SETCONF foo="a value with a space"\r\n'
        )
    def test_setconf_multi(self):
        """Multiple key/value pairs go out in one SETCONF command."""
        d = self.protocol.set_conf("foo", "bar", "baz", 1)
        self.send("250 OK")
        self._wait(d)
        self.assertEqual(self.transport.value(), "SETCONF foo=bar baz=1\r\n")
    def test_quit(self):
        """quit() sends a QUIT command."""
        d = self.protocol.quit()
        self.send("250 OK")
        self._wait(d)
        self.assertEqual(self.transport.value(), "QUIT\r\n")
    def test_dot(self):
        # just checking we don't explode
        self.protocol.graphviz_data()
    def test_debug(self):
        """start_debug creates the debug log file in the working directory."""
        self.protocol.start_debug()
        self.assertTrue(exists('txtorcon-debug.log'))
    def error(self, failure):
        """Errback helper: dump the failure and fail the test."""
        print "ERROR", failure
        self.assertTrue(False)
    def test_twocommands(self):
        "Two commands on the wire before first response."
        d1 = self.protocol.get_conf("FOO")
        ht = {"a": "one", "b": "two"}
        d1.addCallback(CallbackChecker(ht)).addErrback(log.err)
        d2 = self.protocol.get_info_raw("BAR")
        d2.addCallback(CallbackChecker("bar")).addErrback(log.err)
        # replies arrive in order; the first 250 completes d1, the next d2
        self.send("250-a=one")
        self.send("250-b=two")
        self.send("250 OK")
        self.send("250 bar")
        return d2
    def test_signal_error(self):
        """signal() rejects names not in valid_signals."""
        try:
            self.protocol.signal('FOO')
            self.fail()
        except Exception, e:
            self.assertTrue('Invalid signal' in str(e))
    def test_signal(self):
        """A valid signal name is sent as a SIGNAL command."""
        self.protocol.valid_signals = ['NEWNYM']
        self.protocol.signal('NEWNYM')
        self.assertEqual(self.transport.value(), 'SIGNAL NEWNYM\r\n')
    def test_650_after_authenticate(self):
        """Async 650 events right after authentication reach listeners."""
        self.protocol._set_valid_events('CONF_CHANGED')
        self.protocol.add_event_listener(
            'CONF_CHANGED',
            CallbackChecker("Foo=bar")
        )
        self.send("250 OK")
        self.send("650-CONF_CHANGED")
        self.send("650-Foo=bar")
    def test_notify_after_getinfo(self):
        """A 650 event following a completed GETINFO reply is dispatched."""
        self.protocol._set_valid_events('CIRC')
        self.protocol.add_event_listener(
            'CIRC',
            CallbackChecker("1000 EXTENDED moria1,moria2")
        )
        self.send("250 OK")
        d = self.protocol.get_info("FOO")
        d.addCallback(CallbackChecker({'a': 'one'})).addErrback(self.fail)
        self.send("250-a=one")
        self.send("250 OK")
        self.send("650 CIRC 1000 EXTENDED moria1,moria2")
        return d
    def test_notify_error(self):
        """An event with no registered listener must not crash the protocol."""
        self.protocol._set_valid_events('CIRC')
        self.send("650 CIRC 1000 EXTENDED moria1,moria2")
    def test_getinfo(self):
        """get_info sends GETINFO and parses the key=value reply."""
        d = self.protocol.get_info("version")
        d.addCallback(CallbackChecker({'version': '0.2.2.34'}))
        d.addErrback(self.fail)
        self.send("250-version=0.2.2.34")
        self.send("250 OK")
        self.assertEqual(self.transport.value(), "GETINFO version\r\n")
        return d
    def test_addevent(self):
        """add_event_listener issues SETEVENTS and validates event names."""
        self.protocol._set_valid_events('FOO BAR')
        self.protocol.add_event_listener('FOO', lambda _: None)
        # is it dangerous/ill-advised to depend on internal state of
        # class under test?
        d = self.protocol.defer
        self.send("250 OK")
        self._wait(d)
        self.assertEqual(
            self.transport.value().split('\r\n')[-2],
            "SETEVENTS FOO"
        )
        self.transport.clear()
        self.protocol.add_event_listener('BAR', lambda _: None)
        d = self.protocol.defer
        self.send("250 OK")
        # event-set ordering is unspecified; accept either order
        self.assertTrue(self.transport.value() == "SETEVENTS FOO BAR\r\n" or
                        self.transport.value() == "SETEVENTS BAR FOO\r\n")
        self._wait(d)
        try:
            self.protocol.add_event_listener(
                'SOMETHING_INVALID', lambda _: None
            )
            self.assertTrue(False)
        except:
            pass
    def test_eventlistener(self):
        """A registered listener is invoked once per 650 STREAM event."""
        self.protocol._set_valid_events('STREAM')
        class EventListener(object):
            stream_events = 0
            def __call__(self, data):
                self.stream_events += 1
        listener = EventListener()
        self.protocol.add_event_listener('STREAM', listener)
        d = self.protocol.defer
        self.send("250 OK")
        self._wait(d)
        self.send("650 STREAM 1234 NEW 4321 1.2.3.4:555 REASON=MISC")
        self.send("650 STREAM 2345 NEW 4321 2.3.4.5:666 REASON=MISC")
        self.assertEqual(listener.stream_events, 2)
    def test_remove_eventlistener(self):
        """Removing the last listener re-issues SETEVENTS without the event."""
        self.protocol._set_valid_events('STREAM')
        class EventListener(object):
            stream_events = 0
            def __call__(self, data):
                self.stream_events += 1
        listener = EventListener()
        self.protocol.add_event_listener('STREAM', listener)
        self.assertEqual(self.transport.value(), 'SETEVENTS STREAM\r\n')
        self.protocol.lineReceived("250 OK")
        self.transport.clear()
        self.protocol.remove_event_listener('STREAM', listener)
        self.assertEqual(self.transport.value(), 'SETEVENTS \r\n')
    def test_remove_eventlistener_multiple(self):
        """SETEVENTS is only re-issued when the last listener is removed."""
        self.protocol._set_valid_events('STREAM')
        class EventListener(object):
            stream_events = 0
            def __call__(self, data):
                self.stream_events += 1
        listener0 = EventListener()
        listener1 = EventListener()
        self.protocol.add_event_listener('STREAM', listener0)
        self.assertEqual(self.transport.value(), 'SETEVENTS STREAM\r\n')
        self.protocol.lineReceived("250 OK")
        self.transport.clear()
        # add another one, shouldn't issue a tor command
        self.protocol.add_event_listener('STREAM', listener1)
        self.assertEqual(self.transport.value(), '')
        # remove one, should still not issue a tor command
        self.protocol.remove_event_listener('STREAM', listener0)
        self.assertEqual(self.transport.value(), '')
        # remove the other one, NOW should issue a command
        self.protocol.remove_event_listener('STREAM', listener1)
        self.assertEqual(self.transport.value(), 'SETEVENTS \r\n')
        # try removing invalid event
        try:
            self.protocol.remove_event_listener('FOO', listener0)
            self.fail()
        except Exception, e:
            self.assertTrue('FOO' in str(e))
    def checkContinuation(self, v):
        """Callback helper: verify the accumulated 250+ continuation payload."""
        self.assertEqual(v, "key=\nvalue0\nvalue1")
    def test_continuationLine(self):
        """250+ continuation lines are joined until the "." terminator."""
        d = self.protocol.get_info_raw("key")
        d.addCallback(self.checkContinuation)
        self.send("250+key=")
        self.send("value0")
        self.send("value1")
        self.send(".")
        self.send("250 OK")
        return d
    def test_newdesc(self):
        """
        FIXME: this test is now maybe a little silly, it's just testing
        multiline GETINFO... (Real test is in
        TorStateTests.test_newdesc_parse)
        """
        self.protocol.get_info_raw('ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A')
        d = self.protocol.defer
        d.addCallback(CallbackChecker("""ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A=
r fake YkkmgCNRV1/35OPWDvo7+1bmfoo tanLV/4ZfzpYQW0xtGFqAa46foo 2011-12-12 16:29:16 12.45.56.78 443 80
s Exit Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=518000
p accept 43,53,79-81,110,143,194,220,443,953,989-990,993,995,1194,1293,1723,1863,2082-2083,2086-2087,2095-2096,3128,4321,5050,5190,5222-5223,6679,6697,7771,8000,8008,8080-8081,8090,8118,8123,8181,8300,8443,8888"""))
        self.send("250+ns/id/624926802351575FF7E4E3D60EFA3BFB56E67E8A=")
        self.send("r fake YkkmgCNRV1/35OPWDvo7+1bmfoo tanLV/4ZfzpYQW0xtGFqAa46foo 2011-12-12 16:29:16 12.45.56.78 443 80")
        self.send("s Exit Fast Guard HSDir Named Running Stable V2Dir Valid")
        self.send("w Bandwidth=518000")
        self.send("p accept 43,53,79-81,110,143,194,220,443,953,989-990,993,995,1194,1293,1723,1863,2082-2083,2086-2087,2095-2096,3128,4321,5050,5190,5222-5223,6679,6697,7771,8000,8008,8080-8081,8090,8118,8123,8181,8300,8443,8888")
        self.send(".")
        self.send("250 OK")
        return d
    def test_plus_line_no_command(self):
        """A 650+ line with no pending command must not crash the protocol."""
        self.protocol.lineReceived("650+NS\r\n")
        self.protocol.lineReceived("r Gabor gFpAHsFOHGATy12ZUswRf0ZrqAU GG6GDp40cQfR3ODvkBT0r+Q09kw 2012-05-12 16:54:56 91.219.238.71 443 80\r\n")
    def test_minus_line_no_command(self):
        """
        haven't seen 600's use - "in the wild" but don't see why it's not
        possible
        """
        self.protocol._set_valid_events('NS')
        self.protocol.add_event_listener('NS', lambda _: None)
        self.protocol.lineReceived("650-NS\r\n")
        self.protocol.lineReceived("650 OK\r\n")
class ParseTests(unittest.TestCase):
    """Tests for parse_keywords and TorState network/circuit status parsing."""
    def setUp(self):
        self.controller = TorState(TorControlProtocol())
        self.controller.connectionMade = lambda _: None
    def test_keywords(self):
        """A single long key=value line parses to exactly one entry."""
        x = parse_keywords('events/names=CIRC STREAM ORCONN BW DEBUG INFO NOTICE WARN ERR NEWDESC ADDRMAP AUTHDIR_NEWDESCS DESCCHANGED NS STATUS_GENERAL STATUS_CLIENT STATUS_SERVER GUARD STREAM_BW CLIENTS_SEEN NEWCONSENSUS BUILDTIMEOUT_SET')
        self.assertTrue('events/names' in x)
        self.assertEqual(x['events/names'], 'CIRC STREAM ORCONN BW DEBUG INFO NOTICE WARN ERR NEWDESC ADDRMAP AUTHDIR_NEWDESCS DESCCHANGED NS STATUS_GENERAL STATUS_CLIENT STATUS_SERVER GUARD STREAM_BW CLIENTS_SEEN NEWCONSENSUS BUILDTIMEOUT_SET')
        self.assertEqual(len(x.keys()), 1)
    def test_keywords_mutli_equals(self):
        """Only the first '=' splits; the rest stays in the value."""
        x = parse_keywords('foo=something subvalue="foo"')
        self.assertEqual(len(x), 1)
        self.assertTrue('foo' in x)
        self.assertEqual(x['foo'], 'something subvalue="foo"')
    def test_default_keywords(self):
        """A bare keyword with no '=' maps to DEFAULT_VALUE."""
        x = parse_keywords('foo')
        self.assertEqual(len(x), 1)
        self.assertTrue('foo' in x)
        self.assertEqual(x['foo'], DEFAULT_VALUE)
    def test_multientry_keywords_2(self):
        """Repeated keys collect their values into a list (2 entries)."""
        x = parse_keywords('foo=bar\nfoo=zarimba')
        self.assertEqual(len(x), 1)
        self.assertTrue(isinstance(x['foo'], types.ListType))
        self.assertEqual(len(x['foo']), 2)
        self.assertEqual(x['foo'][0], 'bar')
        self.assertEqual(x['foo'][1], 'zarimba')
    def test_multientry_keywords_3(self):
        """Repeated keys collect their values into a list (3 entries)."""
        x = parse_keywords('foo=bar\nfoo=baz\nfoo=zarimba')
        self.assertEqual(len(x), 1)
        self.assertTrue(isinstance(x['foo'], types.ListType))
        self.assertEqual(len(x['foo']), 3)
        self.assertEqual(x['foo'][0], 'bar')
        self.assertEqual(x['foo'][1], 'baz')
        self.assertEqual(x['foo'][2], 'zarimba')
    def test_multientry_keywords_4(self):
        """Repeated keys collect their values into a list (4 entries)."""
        x = parse_keywords('foo=bar\nfoo=baz\nfoo=zarimba\nfoo=foo')
        self.assertEqual(len(x), 1)
        self.assertTrue(isinstance(x['foo'], types.ListType))
        self.assertEqual(len(x['foo']), 4)
        self.assertEqual(x['foo'][0], 'bar')
        self.assertEqual(x['foo'][1], 'baz')
        self.assertEqual(x['foo'][2], 'zarimba')
        self.assertEqual(x['foo'][3], 'foo')
    def test_multiline_keywords_with_spaces(self):
        """Router-status lines following a key= are folded into that key's value."""
        x = parse_keywords('''ns/name/foo=
r foo aaaam7E7h1vY5Prk8v9/nSRCydY BBBBOfum4CtAYuOgf/D33Qq5+rk 2013-10-27 06:22:18 1.2.3.4 9001 9030
s Fast Guard HSDir Running Stable V2Dir Valid
w Bandwidth=1234
ns/name/bar=
r bar aaaaHgNYtTVPw5hHTO28J4je5i8 BBBBBUaJaBFSU/HDrTxnSh+D3+fY 2013-10-27 07:48:56 1.2.4.5 9001 9030
s Exit Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=1234
OK
''')
        self.assertEqual(2, len(x))
        keys = x.keys()
        keys.sort()
        self.assertEqual(keys, ['ns/name/bar', 'ns/name/foo'])
    def test_multiline_keywords(self):
        """multiline_values toggles folding bare lines into the prior value."""
        x = parse_keywords('''Foo=bar\nBar''')
        self.assertEqual(x, {'Foo': 'bar\nBar'})
        x = parse_keywords('''Foo=bar\nBar''', multiline_values=False)
        self.assertEqual(x, {'Foo': 'bar',
                             'Bar': DEFAULT_VALUE})
    def test_unquoted_keywords(self):
        """Double quotes around values are stripped."""
        x = parse_keywords('''Tor="0.1.2.3.4-rc44"''')
        self.assertEqual(x, {'Tor': '0.1.2.3.4-rc44'})
    def test_unquoted_keywords_singlequote(self):
        """Single quotes around values are stripped too."""
        x = parse_keywords("Tor='0.1.2.3.4-rc44'")
        self.assertEqual(x, {'Tor': '0.1.2.3.4-rc44'})
    def test_unquoted_keywords_empty(self):
        """key= with nothing after it yields an empty-string value."""
        x = parse_keywords('foo=')
        self.assertEqual(x, {'foo': ''})
    def test_network_status(self):
        """_update_network_status indexes routers by both name and hash."""
        self.controller._update_network_status("""ns/all=
r right2privassy3 ADQ6gCT3DiFHKPDFr3rODBUI8HM JehnjB8l4Js47dyjLCEmE8VJqao 2011-12-02 03:36:40 50.63.8.215 9023 0
s Exit Fast Named Running Stable Valid
w Bandwidth=53
p accept 80,1194,1220,1293,1500,1533,1677,1723,1863,2082-2083,2086-2087,2095-2096,2102-2104,3128,3389,3690,4321,4643,5050,5190,5222-5223,5228,5900,6660-6669,6679,6697,8000,8008,8074,8080,8087-8088,8443,8888,9418,9999-10000,19294,19638
r Unnamed AHe2V2pmj4Yfn0H9+Np3lci7htU T/g7ZLzG/ooqCn+gdLd9Jjh+AEI 2011-12-02 15:52:09 84.101.216.232 443 9030
s Exit Fast Running V2Dir Valid
w Bandwidth=33
p reject 25,119,135-139,445,563,1214,4661-4666,6346-6429,6699,6881-6999""")
        # the routers list is always keyed with both name and hash
        self.assertEqual(len(self.controller.routers_by_name), 2)
        self.assertEqual(len(self.controller.routers_by_hash), 2)
        self.assertTrue('right2privassy3' in self.controller.routers)
        self.assertTrue('Unnamed' in self.controller.routers)
        self.controller.routers.clear()
        self.controller.routers_by_name.clear()
        self.controller.routers_by_hash.clear()
    def test_circuit_status(self):
        """_circuit_status resolves circuit hops against known routers."""
        self.controller._update_network_status("""ns/all=
r wildnl f+Ty/+B6lgYr0Ntbf67O/L2M8ZI c1iK/kPPXKGZZvwXRWbvL9eCfSc 2011-12-02 19:07:05 209.159.142.164 9001 0
s Exit Fast Named Running Stable Valid
w Bandwidth=1900
p reject 25,119,135-139,445,563,1214,4661-4666,6346-6429,6699,6881-6999
r l0l wYXUpLBpzVWfzVSMgGO0dThdd38 KIJC+W1SHeaFOj/BVsEAgxbtQNM 2011-12-02 13:43:39 94.23.168.39 443 80
s Fast Named Running Stable V2Dir Valid
w Bandwidth=22800
p reject 1-65535
r Tecumseh /xAD0tFLS50Dkz+O37xGyVLoKlk yJHbad7MFl1VW2/23RxrPKBTOIE 2011-12-02 09:44:10 76.73.48.211 22 9030
s Fast Guard HSDir Named Running Stable V2Dir Valid
w Bandwidth=18700
p reject 1-65535""")
        self.controller._circuit_status("""circuit-status=
4472 BUILT $FF1003D2D14B4B9D03933F8EDFBC46C952E82A59=Tecumseh,$C185D4A4B069CD559FCD548C8063B475385D777F=l0l,$7FE4F2FFE07A96062BD0DB5B7FAECEFCBD8CF192=wildnl PURPOSE=GENERAL""")
        self.assertEqual(len(self.controller.circuits), 1)
        self.assertTrue(4472 in self.controller.circuits)
        self.controller.routers.clear()
        self.controller.routers_by_name.clear()
        self.controller.routers_by_hash.clear()
        self.controller.circuits.clear()
|
{
"content_hash": "e51b8d9e78c3a4a53f969d54e84814b5",
"timestamp": "",
"source": "github",
"line_count": 892,
"max_line_length": 245,
"avg_line_length": 36.43834080717489,
"alnum_prop": 0.6275113066486171,
"repo_name": "isislovecruft/txtorcon",
"id": "9a03bdee9b09096e73b219b5972da7bd78736fa2",
"size": "32503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_torcontrolprotocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3638"
},
{
"name": "Python",
"bytes": "476663"
},
{
"name": "Shell",
"bytes": "681"
}
],
"symlink_target": ""
}
|
"""
Support for Tellstick switches using Tellstick Net.
This platform uses the Telldus Live online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.tellduslive/
"""
import logging
from homeassistant.components.tellduslive import TelldusLiveEntity
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up Tellstick switches discovered through Telldus Live.

    Does nothing unless discovery supplied a list of device ids.
    """
    if discovery_info is not None:
        add_devices(TelldusLiveSwitch(hass, device)
                    for device in discovery_info)
class TelldusLiveSwitch(TelldusLiveEntity, ToggleEntity):
    """A switch backed by a device in the Telldus Live cloud service."""

    @property
    def is_on(self):
        """True when the underlying Telldus device reports being on."""
        return self.device.is_on

    def turn_off(self, **kwargs):
        """Switch the device off, then publish the state change."""
        self.device.turn_off()
        self.changed()

    def turn_on(self, **kwargs):
        """Switch the device on, then publish the state change."""
        self.device.turn_on()
        self.changed()
|
{
"content_hash": "6dcb460c0b04aa12031f2ec23997102a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 27.26829268292683,
"alnum_prop": 0.6940966010733453,
"repo_name": "robjohnson189/home-assistant",
"id": "5f3901d79b8a873174f28cdac1dbd116f6a33e58",
"size": "1118",
"binary": false,
"copies": "16",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/tellduslive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1362685"
},
{
"name": "Python",
"bytes": "3499625"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
import math
import re
from django.utils.html import strip_tags
def count_words(html_string):
    """Return the number of word tokens in *html_string*, ignoring HTML tags."""
    plain_text = strip_tags(html_string)
    return len(re.findall(r'\w+', plain_text))
def get_read_time(html_string, words_per_minute=200):
    """Estimate reading time of *html_string* in whole minutes.

    Counts words after stripping HTML tags and divides by the reading
    speed. The default of 200 words per minute matches the original
    hard-coded assumption; pass a different ``words_per_minute`` to
    tune the estimate.
    """
    count = count_words(html_string)
    # float() keeps true division under both Python 2 and 3
    read_time_min = math.ceil(count / float(words_per_minute))
    return int(read_time_min)
|
{
"content_hash": "8afd62c36f5cce5543cd1576845685cd",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 24,
"alnum_prop": 0.7083333333333334,
"repo_name": "DJMedhaug/BizSprint",
"id": "f590febac7c9be4d03ae4444a0e85d47970ff6bc",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "posts/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1242519"
},
{
"name": "HTML",
"bytes": "250009"
},
{
"name": "JavaScript",
"bytes": "965426"
},
{
"name": "PHP",
"bytes": "390755"
},
{
"name": "Python",
"bytes": "129777"
},
{
"name": "Shell",
"bytes": "3675"
}
],
"symlink_target": ""
}
|
"""Musepack audio streams with APEv2 tags.
Musepack is an audio format originally based on the MPEG-1 Layer-2
algorithms. Stream versions 4 through 7 are supported.
For more information, see http://www.musepack.net/.
"""
__all__ = ["Musepack", "Open", "delete"]
import struct
from ._compat import endswith
from mutagen import StreamInfo
from mutagen.apev2 import APEv2File, error, delete
from mutagen.id3 import BitPaddedInt
from mutagen._util import cdata
from ._compat import xrange
class MusepackHeaderError(error):
    """Raised when a stream does not parse as a valid Musepack header."""
    pass
# Sample rates (Hz) indexed by the 2-bit rate field of the stream header.
RATES = [44100, 48000, 37800, 32000]
def _parse_sv8_int(fileobj, limit=9):
"""Reads (max limit) bytes from fileobj until the MSB is zero.
All 7 LSB will be merged to a big endian uint.
Raises ValueError in case not MSB is zero, or EOFError in
case the file ended before limit is reached.
Returns (parsed number, number of bytes read)
"""
num = 0
for i in xrange(limit):
c = fileobj.read(1)
if len(c) != 1:
raise EOFError
num = (num << 7) | (ord(c) & 0x7F)
if not ord(c) & 0x80:
return num, i + 1
if limit > 0:
raise ValueError
return 0, 0
def _calc_sv8_gain(gain):
# 64.82 taken from mpcdec
return 64.82 - gain / 256.0
def _calc_sv8_peak(peak):
return (10 ** (peak / (256.0 * 20.0)) / 65535.0)
class MusepackInfo(StreamInfo):
    """Musepack stream information.

    Attributes:

    * channels -- number of audio channels
    * length -- file length in seconds, as a float
    * sample_rate -- audio sampling rate in Hz
    * bitrate -- audio bitrate, in bits per second
    * version -- Musepack stream version

    Optional Attributes:

    * title_gain, title_peak -- Replay Gain and peak data for this song
    * album_gain, album_peak -- Replay Gain and peak data for this album

    These attributes are only available in stream version 7/8. The
    gains are a float, +/- some dB. The peaks are a percentage [0..1] of
    the maximum amplitude. This means to get a number comparable to
    VorbisGain, you must multiply the peak by 2.
    """

    def __init__(self, fileobj):
        """Parse stream information from *fileobj*.

        Raises MusepackHeaderError if the data is not a Musepack stream.
        """
        header = fileobj.read(4)
        if len(header) != 4:
            raise MusepackHeaderError("not a Musepack file")

        # Skip a leading ID3v2 tag, if present, to reach the audio stream.
        if header[:3] == b"ID3":
            header = fileobj.read(6)
            if len(header) != 6:
                raise MusepackHeaderError("not a Musepack file")
            # ID3v2 stores its size as a sync-safe int; the header is 10 bytes
            size = 10 + BitPaddedInt(header[2:6])
            fileobj.seek(size)
            header = fileobj.read(4)
            if len(header) != 4:
                raise MusepackHeaderError("not a Musepack file")

        # "MPCK" marks an SV8 stream; anything else is tried as SV4-SV7.
        if header.startswith(b"MPCK"):
            self.__parse_sv8(fileobj)
        else:
            self.__parse_sv467(fileobj)

        # When the header stored no bitrate, derive an average from file size.
        if not self.bitrate and self.length != 0:
            fileobj.seek(0, 2)
            self.bitrate = int(round(fileobj.tell() * 8 / self.length))

    def __parse_sv8(self, fileobj):
        # SV8: http://trac.musepack.net/trac/wiki/SV8Specification
        key_size = 2
        mandatory_packets = [b"SH", b"RG"]

        def check_frame_key(key):
            # Fixed: this previously ignored its argument and re-checked the
            # enclosing frame_type variable; validate the passed key instead.
            if len(key) != key_size or not b'AA' <= key <= b'ZZ':
                raise MusepackHeaderError("Invalid frame key.")

        frame_type = fileobj.read(key_size)
        check_frame_key(frame_type)
        # Walk packets until audio ("AP") / seek table ("SE"), or until both
        # mandatory packets (stream header, replay gain) have been seen.
        while frame_type not in (b"AP", b"SE") and mandatory_packets:
            try:
                frame_size, slen = _parse_sv8_int(fileobj)
            except (EOFError, ValueError):
                raise MusepackHeaderError("Invalid packet size.")
            # frame_size covers the key and the size field itself
            data_size = frame_size - key_size - slen
            if frame_type == b"SH":
                mandatory_packets.remove(frame_type)
                self.__parse_stream_header(fileobj, data_size)
            elif frame_type == b"RG":
                mandatory_packets.remove(frame_type)
                self.__parse_replaygain_packet(fileobj, data_size)
            else:
                fileobj.seek(data_size, 1)
            frame_type = fileobj.read(key_size)
            check_frame_key(frame_type)
        if mandatory_packets:
            raise MusepackHeaderError("Missing mandatory packets: %s." %
                                      ", ".join(map(repr, mandatory_packets)))
        self.length = float(self.samples) / self.sample_rate
        # bitrate is derived from the file size by __init__
        self.bitrate = 0

    def __parse_stream_header(self, fileobj, data_size):
        # skip the 4-byte CRC
        fileobj.seek(4, 1)
        try:
            self.version = ord(fileobj.read(1))
        except TypeError:
            raise MusepackHeaderError("SH packet ended unexpectedly.")
        try:
            samples, l1 = _parse_sv8_int(fileobj)
            samples_skip, l2 = _parse_sv8_int(fileobj)
        except (EOFError, ValueError):
            raise MusepackHeaderError(
                "SH packet: Invalid sample counts.")
        left_size = data_size - 5 - l1 - l2
        if left_size != 2:
            raise MusepackHeaderError("Invalid SH packet size.")
        data = fileobj.read(left_size)
        if len(data) != left_size:
            raise MusepackHeaderError("SH packet ended unexpectedly.")
        # top 3 bits select the sample-rate table entry; next 4 bits + 1 are
        # the channel count
        self.sample_rate = RATES[ord(data[-2:-1]) >> 5]
        self.channels = (ord(data[-1:]) >> 4) + 1
        self.samples = samples - samples_skip

    def __parse_replaygain_packet(self, fileobj, data_size):
        data = fileobj.read(data_size)
        if data_size != 9:
            raise MusepackHeaderError("Invalid RG packet size.")
        if len(data) != data_size:
            raise MusepackHeaderError("RG packet ended unexpectedly.")
        title_gain = cdata.short_be(data[1:3])
        title_peak = cdata.short_be(data[3:5])
        album_gain = cdata.short_be(data[5:7])
        album_peak = cdata.short_be(data[7:9])
        # zero fields mean "not stored"; only set attributes that exist
        if title_gain:
            self.title_gain = _calc_sv8_gain(title_gain)
        if title_peak:
            self.title_peak = _calc_sv8_peak(title_peak)
        if album_gain:
            self.album_gain = _calc_sv8_gain(album_gain)
        if album_peak:
            self.album_peak = _calc_sv8_peak(album_peak)

    def __parse_sv467(self, fileobj):
        # rewind the 4 magic bytes already consumed by __init__
        fileobj.seek(-4, 1)
        header = fileobj.read(32)
        if len(header) != 32:
            raise MusepackHeaderError("not a Musepack file")
        # SV7
        if header.startswith(b"MP+"):
            self.version = ord(header[3:4]) & 0xF
            if self.version < 7:
                raise MusepackHeaderError("not a Musepack file")
            frames = cdata.uint_le(header[4:8])
            flags = cdata.uint_le(header[8:12])
            # gain in centi-dB, peak scaled to 16 bits
            self.title_peak, self.title_gain = struct.unpack(
                "<Hh", header[12:16])
            self.album_peak, self.album_gain = struct.unpack(
                "<Hh", header[16:20])
            self.title_gain /= 100.0
            self.album_gain /= 100.0
            self.title_peak /= 65535.0
            self.album_peak /= 65535.0
            self.sample_rate = RATES[(flags >> 16) & 0x0003]
            self.bitrate = 0
        # SV4-SV6
        else:
            header_dword = cdata.uint_le(header[0:4])
            self.version = (header_dword >> 11) & 0x03FF
            if self.version < 4 or self.version > 6:
                raise MusepackHeaderError("not a Musepack file")
            self.bitrate = (header_dword >> 23) & 0x01FF
            self.sample_rate = 44100
            if self.version >= 5:
                frames = cdata.uint_le(header[4:8])
            else:
                frames = cdata.ushort_le(header[6:8])
            if self.version < 6:
                # pre-SV6 streams store a frame count one too high
                frames -= 1
        self.channels = 2
        # 1152 samples per frame, minus 576 samples of encoder delay
        self.length = float(frames * 1152 - 576) / self.sample_rate

    def pprint(self):
        """Return a human-readable one-line summary of the stream."""
        rg_data = []
        if hasattr(self, "title_gain"):
            rg_data.append("%+0.2f (title)" % self.title_gain)
        if hasattr(self, "album_gain"):
            rg_data.append("%+0.2f (album)" % self.album_gain)
        rg_data = (rg_data and ", Gain: " + ", ".join(rg_data)) or ""
        return "Musepack SV%d, %.2f seconds, %d Hz, %d bps%s" % (
            self.version, self.length, self.sample_rate, self.bitrate, rg_data)
class Musepack(APEv2File):
    """APEv2-tagged Musepack audio file."""
    _Info = MusepackInfo
    _mimes = ["audio/x-musepack", "audio/x-mpc"]
    @staticmethod
    def score(filename, fileobj, header):
        # NOTE(review): the suffix check compares against the bytes literal
        # b".mpc", which assumes callers pass a bytes filename -- confirm.
        return (header.startswith(b"MP+") + header.startswith(b"MPCK") +
                endswith(filename.lower(), b".mpc"))
Open = Musepack
|
{
"content_hash": "17523188c2003f89ca2dc2c3be6fe685",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 79,
"avg_line_length": 34.0398406374502,
"alnum_prop": 0.5727996254681648,
"repo_name": "lcharlick/ArtistAlbumArt.bundle",
"id": "da1698e5349bf1bc909ba8d02da3d79d0a683fa3",
"size": "8873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Contents/Libraries/Shared/mutagen/musepack.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "350447"
}
],
"symlink_target": ""
}
|
""" Comparing remote and local predictions
"""
from .world import world, setup_module, teardown_module, show_doc
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_model_steps as model_create
from . import create_ensemble_steps as ensemble_create
from . import create_linear_steps as linear_create
from . import create_prediction_steps as prediction_create
from . import compare_predictions_steps as prediction_compare
class TestComparePrediction(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
    def test_scenario1(self):
        """
        Scenario: Successfully comparing predictions for deepnets:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create a deepnet with objective "<objective>" and "<params>"
            And I wait until the deepnet is ready less than <time_3> secs
            And I create a local deepnet
            When I create a prediction for "<data_input>"
            Then the prediction for "<objective>" is "<prediction>"
            And I create a local prediction for "<data_input>"
            Then the local prediction is "<prediction>"
        Examples:
            | data | time_1 | time_2 | time_3 | data_input | objective | prediction | params,
        """
        # each example row: data file, three wait timeouts, prediction input,
        # objective field id, expected prediction, deepnet params
        examples = [
            ['data/iris.csv', '30', '50', '60', '{"petal width": 4}', '000004', 'Iris-virginica', '{}'],
            ['data/iris.csv', '30', '50', '60', '{"sepal length": 4.1, "sepal width": 2.4}', '000004', 'Iris-setosa', '{}'],
            ['data/iris_missing2.csv', '30', '50', '60', '{}', '000004', 'Iris-setosa', '{}'],
            ['data/grades.csv', '30', '50', '60', '{}', '000005', 42.15473, '{}'],
            ['data/spam.csv', '30', '50', '60', '{}', '000000', 'ham', '{}']]
        show_doc(self.test_scenario1, examples)
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_deepnet_with_objective_and_params(self, example[5], example[7])
            model_create.the_deepnet_is_finished_in_less_than(self, example[3])
            prediction_compare.i_create_a_local_deepnet(self)
            prediction_create.i_create_a_deepnet_prediction(self, example[4])
            prediction_create.the_prediction_is(self, example[5], example[6])
            prediction_compare.i_create_a_local_deepnet_prediction(self, example[4])
            prediction_compare.the_local_prediction_is(self, example[6])
    def test_scenario2(self):
        """
        Scenario: Successfully comparing predictions in operating points for models:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create a model
            And I wait until the model is ready less than <time_3> secs
            And I create a local model
            When I create a prediction for "<data_input>" in "<operating_point>"
            Then the prediction for "<objective>" is "<prediction>"
            And I create a local prediction for "<data_input>" in "<operating_point>"
            Then the local prediction is "<prediction>"
        Examples:
            | data | time_1 | time_2 | time_3 | data_input | prediction | operating_point
        """
        # each example row: data file, three wait timeouts, prediction input,
        # expected prediction, operating point dict, objective field id
        examples = [
            ['data/iris.csv', '10', '50', '50', '{"petal width": 4}', 'Iris-setosa', {"kind": "probability", "threshold": 0.1, "positive_class": "Iris-setosa"}, "000004"],
            ['data/iris.csv', '10', '50', '50', '{"petal width": 4}', 'Iris-versicolor', {"kind": "probability", "threshold": 0.9, "positive_class": "Iris-setosa"}, "000004"],
            ['data/iris.csv', '10', '50', '50', '{"sepal length": 4.1, "sepal width": 2.4}', 'Iris-setosa', {"kind": "confidence", "threshold": 0.1, "positive_class": "Iris-setosa"}, "000004"],
            ['data/iris.csv', '10', '50', '50', '{"sepal length": 4.1, "sepal width": 2.4}', 'Iris-versicolor', {"kind": "confidence", "threshold": 0.9, "positive_class": "Iris-setosa"}, "000004"]]
        show_doc(self.test_scenario2, examples)
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            prediction_compare.i_create_a_local_model(self)
            prediction_create.i_create_a_prediction_op(self, example[4], example[6])
            prediction_create.the_prediction_is(self, example[7], example[5])
            prediction_compare.i_create_a_local_prediction_op(self, example[4], example[6])
            prediction_compare.the_local_prediction_is(self, example[5])
def test_scenario3(self):
"""
Scenario: Successfully comparing predictions for deepnets with operating point:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a deepnet with objective "<objective>" and "<params>"
And I wait until the deepnet is ready less than <time_3> secs
And I create a local deepnet
When I create a prediction with operating point "<operating_point>" for "<data_input>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local prediction with operating point "<operating_point>" for "<data_input>"
Then the local prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input | objective | prediction | params | operating_point,
"""
examples = [
['data/iris.csv', '10', '50', '60', '{"petal width": 4}', '000004', 'Iris-versicolor', '{}', {"kind": "probability", "threshold": 1, "positive_class": "Iris-virginica"}]]
show_doc(self.test_scenario3, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_deepnet_with_objective_and_params(self, example[5], example[7])
model_create.the_deepnet_is_finished_in_less_than(self, example[3])
prediction_compare.i_create_a_local_deepnet(self)
prediction_create.i_create_a_deepnet_prediction_with_op(self, example[4], example[8])
prediction_create.the_prediction_is(self, example[5], example[6])
prediction_compare.i_create_a_local_deepnet_prediction_with_op(self, example[4], example[8])
prediction_compare.the_local_prediction_is(self, example[6])
def test_scenario4(self):
"""
Scenario: Successfully comparing predictions in operating points for ensembles:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an ensemble
And I wait until the ensemble is ready less than <time_3> secs
And I create a local ensemble
When I create a prediction for "<data_input>" in "<operating_point>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local ensemble prediction for "<data_input>" in "<operating_point>"
Then the local ensemble prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input | prediction | operating_point
"""
examples = [
['data/iris.csv', '10', '50', '50', '{"petal width": 4}', 'Iris-setosa', {"kind": "probability", "threshold": 0.1, "positive_class": "Iris-setosa"}, "000004"],
['data/iris.csv', '10', '50', '50', '{"petal width": 4}', 'Iris-virginica', {"kind": "probability", "threshold": 0.9, "positive_class": "Iris-setosa"}, "000004"],
['data/iris.csv', '10', '50', '50', '{"sepal length": 4.1, "sepal width": 2.4}', 'Iris-setosa', {"kind": "confidence", "threshold": 0.1, "positive_class": "Iris-setosa"}, "000004"],
['data/iris.csv', '10', '50', '50', '{"sepal length": 4.1, "sepal width": 2.4}', 'Iris-versicolor', {"kind": "confidence", "threshold": 0.9, "positive_class": "Iris-setosa"}, "000004"]]
show_doc(self.test_scenario4, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
ensemble_create.i_create_an_ensemble(self)
ensemble_create.the_ensemble_is_finished_in_less_than(self, example[3])
ensemble_create.create_local_ensemble(self)
prediction_create.i_create_an_ensemble_prediction_op(self, example[4], example[6])
prediction_create.the_prediction_is(self, example[7], example[5])
prediction_compare.i_create_a_local_ensemble_prediction_op(self, example[4], example[6])
prediction_compare.the_local_prediction_is(self, example[5])
def test_scenario5(self):
"""
Scenario: Successfully comparing predictions in operating kind for models:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a model
And I wait until the model is ready less than <time_3> secs
And I create a local model
When I create a prediction for "<data_input>" in "<operating_kind>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local prediction for "<data_input>" in "<operating_kind>"
Then the local prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input | prediction | operating_point
"""
examples = [
['data/iris.csv', '10', '50', '50', '{"petal length": 2.46, "sepal length": 5}', 'Iris-versicolor', "probability", "000004"],
['data/iris.csv', '10', '50', '50', '{"petal length": 2.46, "sepal length": 5}', 'Iris-versicolor', "confidence", "000004"],
['data/iris.csv', '10', '50', '50', '{"petal length": 2}', 'Iris-setosa', "probability", "000004"],
['data/iris.csv', '10', '50', '50', '{"petal length": 2}', 'Iris-setosa', "confidence", "000004"]]
show_doc(self.test_scenario5, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_model(self)
model_create.the_model_is_finished_in_less_than(self, example[3])
prediction_compare.i_create_a_local_model(self)
prediction_create.i_create_a_prediction_op_kind(self, example[4], example[6])
prediction_create.the_prediction_is(self, example[7], example[5])
prediction_compare.i_create_a_local_prediction_op_kind(self, example[4], example[6])
prediction_compare.the_local_prediction_is(self, example[5])
def test_scenario6(self):
"""
Scenario: Successfully comparing predictions for deepnets with operating kind:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a deepnet with objective "<objective>" and "<params>"
And I wait until the deepnet is ready less than <time_3> secs
And I create a local deepnet
When I create a prediction with operating kind "<operating_kind>" for "<data_input>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local prediction with operating point "<operating_kind>" for "<data_input>"
Then the local prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input | objective | prediction | params | operating_point,
"""
examples = [
['data/iris.csv', '10', '50', '60', '{"petal length": 2.46}', '000004', 'Iris-setosa', '{}', "probability"],
['data/iris.csv', '10', '50', '60', '{"petal length": 2}', '000004', 'Iris-setosa', '{}', "probability"]]
show_doc(self.test_scenario6, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_deepnet_with_objective_and_params(self, example[5], example[7])
model_create.the_deepnet_is_finished_in_less_than(self, example[3])
prediction_compare.i_create_a_local_deepnet(self)
prediction_create.i_create_a_deepnet_prediction_op_kind(self, example[4], example[8])
prediction_create.the_prediction_is(self, example[5], example[6])
prediction_compare.i_create_a_local_deepnet_prediction_op_kind(self, example[4], example[8])
prediction_compare.the_local_prediction_is(self, example[6])
def test_scenario7(self):
"""
Scenario: Successfully comparing predictions in operating points for ensembles:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create an ensemble
And I wait until the ensemble is ready less than <time_3> secs
And I create a local ensemble
When I create a prediction for "<data_input>" in "<operating_kind>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local ensemble prediction for "<data_input>" in "<operating_kind>"
Then the local ensemble prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input | prediction | operating_kind
"""
examples = [
['data/iris.csv', '10', '50', '50', '{"petal length": 2.46}', 'Iris-versicolor', "probability", "000004"],
['data/iris.csv', '10', '50', '50', '{"petal length": 2}', 'Iris-setosa', "probability", "000004"],
['data/iris.csv', '10', '50', '50', '{"petal length": 2.46}', 'Iris-versicolor', "confidence", "000004"],
['data/iris.csv', '10', '50', '50', '{"petal length": 2}', 'Iris-setosa', "confidence", "000004"],
['data/iris.csv', '10', '50', '50', '{"petal length": 2.46}', 'Iris-versicolor', "votes", "000004"],
['data/iris.csv', '10', '50', '50', '{"petal length": 1}', 'Iris-setosa', "votes", "000004"]]
show_doc(self.test_scenario7, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
ensemble_create.i_create_an_ensemble(self)
ensemble_create.the_ensemble_is_finished_in_less_than(self, example[3])
ensemble_create.create_local_ensemble(self)
prediction_create.i_create_an_ensemble_prediction_op_kind(self, example[4], example[6])
prediction_create.the_prediction_is(self, example[7], example[5])
prediction_compare.i_create_a_local_ensemble_prediction_op_kind(self, example[4], example[6])
prediction_compare.the_local_prediction_is(self, example[5])
def test_scenario8(self):
"""
Scenario: Successfully comparing predictions for logistic regressions with operating kind:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a logistic regression with objective "<objective>"
And I wait until the logistic regression is ready less than <time_3> secs
And I create a local logistic regression
When I create a prediction with operating kind "<operating_kind>" for "<data_input>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local prediction with operating point "<operating_kind>" for "<data_input>"
Then the local prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input | objective | prediction | params | operating_point,
"""
examples = [
['data/iris.csv', '10', '50', '60', '{"petal length": 5}', '000004', 'Iris-versicolor', '{}', "probability"],
['data/iris.csv', '10', '50', '60', '{"petal length": 2}', '000004', 'Iris-setosa', '{}', "probability"]]
show_doc(self.test_scenario8, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_logistic_model(self)
model_create.the_logistic_model_is_finished_in_less_than(self, example[3])
prediction_compare.i_create_a_local_logistic_model(self)
prediction_create.i_create_a_logistic_prediction_with_op_kind(self, example[4], example[8])
prediction_create.the_prediction_is(self, example[5], example[6])
prediction_compare.i_create_a_local_logistic_prediction_op_kind(self, example[4], example[8])
prediction_compare.the_local_prediction_is(self, example[6])
def test_scenario9(self):
"""
Scenario: Successfully comparing predictions for logistic regressions with operating kind and supervised model:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a logistic regression with objective "<objective>"
And I wait until the logistic regression is ready less than <time_3> secs
And I create a local supervised model
When I create a prediction with operating kind "<operating_kind>" for "<data_input>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local prediction with operating point "<operating_kind>" for "<data_input>"
Then the local prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input | objective | prediction | params | operating_point,
"""
examples = [
['data/iris.csv', '10', '50', '60', '{"petal length": 5}', '000004', 'Iris-versicolor', '{}', "probability"],
['data/iris.csv', '10', '50', '60', '{"petal length": 2}', '000004', 'Iris-setosa', '{}', "probability"]]
show_doc(self.test_scenario9, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_logistic_model(self)
model_create.the_logistic_model_is_finished_in_less_than(self, example[3])
prediction_compare.i_create_a_local_supervised_model(self, model_type="logistic_regression")
prediction_create.i_create_a_logistic_prediction_with_op_kind(self, example[4], example[8])
prediction_create.the_prediction_is(self, example[5], example[6])
prediction_compare.i_create_a_local_logistic_prediction_op_kind(self, example[4], example[8])
prediction_compare.the_local_prediction_is(self, example[6])
def test_scenario10(self):
"""
Scenario: Successfully comparing predictions for linear regression:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a linear regression with objective "<objective>" and "<params>"
And I wait until the linear regression is ready less than <time_3> secs
And I create a local linear regression
When I create a prediction for "<data_input>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local prediction for "<data_input>"
Then the local prediction is "<prediction>"
Examples:
| data | time_1 | time_2 | time_3 | data_input | objective | prediction | params
"""
examples = [
['data/grades.csv', '10', '50', '60', '{"000000": 1, "000001": 1, "000002": 1}', '000005', 29.63024, '{"input_fields": ["000000", "000001", "000002"]}'],
['data/iris.csv', '10', '50', '60', '{"000000": 1, "000001": 1, "000004": "Iris-virginica"}', '000003', 1.21187, '{"input_fields": ["000000", "000001", "000004"]}'],
['data/movies.csv', '10', '50', '60', '{"000007": "Action"}', '000009', 4.33333, '{"input_fields": ["000007"]}'],
['data/movies.csv', '10', '50', '60', '{"000006": "1999"}', '000009', 3.28427, '{"input_fields": ["000006"], "bias": false}']]
show_doc(self.test_scenario10, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
linear_create.i_create_a_linear_regression_with_objective_and_params( \
self, example[5], example[7])
linear_create.the_linear_regression_is_finished_in_less_than( \
self, example[3])
prediction_compare.i_create_a_local_linear(self)
prediction_create.i_create_a_linear_prediction(self, example[4])
prediction_create.the_prediction_is(self, example[5], example[6])
prediction_compare.i_create_a_local_linear_prediction(self, example[4])
prediction_compare.the_local_prediction_is(self, example[6])
def test_scenario11(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for linear regression:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a linear regression
And I wait until the linear regression is ready
less than <time_3> secs
And I create a local linear regression
When I create a prediction for "<data_input>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local prediction for "<data_input>"
Then the local prediction is "<prediction>"
Examples:
|data|time_1|time_2|time_3|data_input|objective|prediction
"""
examples = [
['data/dates2.csv', '20', '20', '25',
'{"time-1": "1910-05-08T19:10:23.106", "cat-0":"cat2"}',
'000002', -0.01284],
['data/dates2.csv', '20', '20', '25',
'{"time-1": "1920-06-30T20:21:20.320", "cat-0":"cat1"}',
'000002', -0.09459],
['data/dates2.csv', '20', '20', '25',
'{"time-1": "1932-01-30T19:24:11.440", "cat-0":"cat2"}',
'000002', -0.02259],
['data/dates2.csv', '20', '20', '25',
'{"time-1": "1950-11-06T05:34:05.252", "cat-0":"cat1"}',
'000002', -0.06754],
['data/dates2.csv', '20', '20', '25',
'{"time-1": "2001-01-05T23:04:04.693", "cat-0":"cat2"}',
'000002', 0.05204],
['data/dates2.csv', '20', '20', '25',
'{"time-1": "2011-04-01T00:16:45.747", "cat-0":"cat2"}',
'000002', 0.05878]]
show_doc(self.test_scenario11, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self,
example[2])
linear_create.i_create_a_linear_regression(self)
linear_create.the_linear_regression_is_finished_in_less_than(self,
example[3])
prediction_compare.i_create_a_local_linear(self)
prediction_create.i_create_a_linear_prediction(self, example[4])
prediction_create.the_prediction_is(self, example[5], example[6])
prediction_compare.i_create_a_local_linear_prediction(self,
example[4])
prediction_compare.the_local_prediction_is(self, example[6])
def test_scenario12(self):
"""
Scenario: Successfully comparing remote and local predictions
with raw date input for deepnet:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a deepnet
And I wait until the deepnet is ready
less than <time_3> secs
And I create a local deepnet
When I create a prediction for "<data_input>"
Then the prediction for "<objective>" is "<prediction>"
And I create a local prediction for "<data_input>"
Then the local prediction is "<prediction>"
Examples:
|data|time_1|time_2|time_3|data_input|objective|prediction
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1910-05-08T19:10:23.106", "cat-0":"cat2"}',
'000002', 0.04082],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "2011-04-01T00:16:45.747", "cat-0":"cat2"}',
'000002', 0.02919],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1969-W29-1T17:36:39Z", "cat-0":"cat1"}',
'000002', 0.0199],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1920-06-45T20:21:20.320", "cat-0":"cat1"}',
'000002', 0.0199],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "2001-01-05T23:04:04.693", "cat-0":"cat2"}',
'000002', 0.28517],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1950-11-06T05:34:05.602", "cat-0":"cat1"}',
'000002', -0.05673],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1932-01-30T19:24:11.440", "cat-0":"cat2"}',
'000002', 0.16183],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "Mon Jul 14 17:36 +0000 1969", "cat-0":"cat1"}',
'000002', 0.0199]
"""
examples = [
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1910-05-08T19:10:23.106", "cat-0":"cat2"}',
'000002', 0.04082],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "2011-04-01T00:16:45.747", "cat-0":"cat2"}',
'000002', 0.02919],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1969-W29-1T17:36:39Z", "cat-0":"cat1"}',
'000002', 0.0199],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1920-06-45T20:21:20.320", "cat-0":"cat1"}',
'000002', 0.0199],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "2001-01-05T23:04:04.693", "cat-0":"cat2"}',
'000002', 0.28517],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1950-11-06T05:34:05.602", "cat-0":"cat1"}',
'000002', -0.05673],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "1932-01-30T19:24:11.440", "cat-0":"cat2"}',
'000002', 0.16183],
['data/dates2.csv', '20', '45', '60',
'{"time-1": "Mon Jul 14 17:36 +0000 1969", "cat-0":"cat1"}',
'000002', 0.0199]
]
show_doc(self.test_scenario12, examples)
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_no_suggest_deepnet(self)
model_create.the_deepnet_is_finished_in_less_than(self, example[3])
prediction_compare.i_create_a_local_deepnet(self)
prediction_create.i_create_a_deepnet_prediction(self, example[4])
prediction_create.the_prediction_is(self, example[5], example[6])
prediction_compare.i_create_a_local_deepnet_prediction(self,
example[4])
prediction_compare.the_local_prediction_is(self, example[6])
|
{
"content_hash": "934e836811cf95a6703de8c94ee21a89",
"timestamp": "",
"source": "github",
"line_count": 591,
"max_line_length": 198,
"avg_line_length": 57.1912013536379,
"alnum_prop": 0.5607396449704142,
"repo_name": "mmerce/python",
"id": "00b4fdba0b9006127a4bb829103426714ff16f24",
"size": "34402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigml/tests/test_36_compare_predictions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1531559"
}
],
"symlink_target": ""
}
|
from pymocap.color_terminal import ColorTerminal
from pymocap.event import Event
import struct, os
from datetime import datetime
class NatnetFile:
    """Reader/writer for binary recordings of natnet frames.

    On-disk format, repeated per frame:
      * 4-byte binary int: byte count of the frame data
      * 4-byte binary float: frame timestamp in seconds
      * the raw binary frame data
    """

    def __init__(self, path=None, loop=True):
        self.path = path
        self.loop = loop
        # file handles
        self.read_file = None
        self.write_file = None
        # last read frame info
        self.currentFrame = None
        self.currentFrameTime = None
        self.currentFrameIndex = -1
        # events; fired every time reading wraps back to the file start
        self.loopEvent = Event()

    def __del__(self):
        self.stop()

    def startReading(self):
        """Open self.path for reading, falling back to a bundled default path."""
        self.stopReading()
        try:
            if not self.path:
                self.path = 'walk-198frames.binary.recording'
            self.read_file = open(self.path, 'rb')
            ColorTerminal().success("NatnetFile opened: %s" % self.path)
        except Exception:  # was a bare except; keep best-effort but don't eat SystemExit/KeyboardInterrupt
            ColorTerminal().fail("NatnetFile couldn't be opened: %s" % self.path)
            self.read_file = None

    def stopReading(self):
        if self.read_file:
            self.read_file.close()
            self.read_file = None
            ColorTerminal().blue('NatnetFile closed')

    def startWriting(self):
        """Open self.path for writing, defaulting to a timestamped file in /tmp."""
        self.stopWriting()
        try:
            if not self.path:
                self.path = '/tmp/natnet_'+datetime.now().strftime('%Y_%m_%d_%H_%M_%S')+'.binary'
            self.write_file = open(self.path, 'wb')
            ColorTerminal().success("NatnetFile opened for writing: %s" % self.path)
        except Exception:  # was a bare except; keep best-effort but don't eat SystemExit/KeyboardInterrupt
            ColorTerminal().fail("NatnetFile couldn't be opened for writing: %s" % self.path)
            self.write_file = None

    def stopWriting(self):
        if self.write_file:
            self.write_file.close()
            self.write_file = None
            ColorTerminal().blue('NatnetFile closed')

    def stop(self):
        """Close both the read and write handles, if open."""
        self.stopReading()
        self.stopWriting()

    def setLoop(self, loop):
        self.loop = loop

    def nextFrame(self):
        """Read the next frame; returns its binary data, or None at end-of-file
        (when looping is disabled) or on a malformed header."""
        bytecount = self._readFrameSize()  # int: bytes
        self.currentFrameTime = self._readFrameTime()  # float: seconds
        if bytecount is None or self.currentFrameTime is None:
            return None
        self.currentFrame = self.read_file.read(bytecount)
        self.currentFrameIndex += 1
        return self.currentFrame

    def _readFrameSize(self):
        """Read the 4-byte frame-size header, rewinding at EOF when looping.

        Iterative rewind (the original recursed, which could overflow the
        stack on a pathological empty file with loop enabled).
        """
        while True:
            # int is 4 bytes
            value = self.read_file.read(4)
            if value:
                # 'unpack' 4 binary bytes into integer
                return struct.unpack('i', value)[0]
            # end-of-file
            if not self.loop:
                return None
            # reset file handle and read state, then notify listeners
            self.read_file.seek(0)
            self.currentFrame = None
            self.currentFrameTime = None
            self.currentFrameIndex = -1
            self.loopEvent(self)

    def _readFrameTime(self):
        """Read the 4-byte frame-timestamp header; None at end-of-file."""
        # float of 4 bytes
        value = self.read_file.read(4)
        # end-of-file?
        if not value:
            # TODO; raise format error?
            return None
        # 'unpack' 4 binary bytes into float
        return struct.unpack('f', value)[0]

    def writeFrame(self, frameData, time=0.0):
        """Append one frame record to the write file.

        Frame format:
          * 4-byte binary integer: size of the (binary) frame data
          * 4-byte binary float: timestamp (in seconds) of the frame
          * the binary frame data
        """
        self.write_file.write(struct.pack('i', len(frameData)))
        self.write_file.write(struct.pack('f', time))
        self.write_file.write(frameData)
|
{
"content_hash": "524be6cb4aa9cc97211370d33f74ea32",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 97,
"avg_line_length": 29.643410852713178,
"alnum_prop": 0.5732217573221757,
"repo_name": "markkorput/PyMoCap",
"id": "68f9d8a8fccf4f1ba5d43b95c1782a06f5a69b3d",
"size": "3824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymocap/natnet_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "156397"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask, url_for, jsonify, g, request
from flask_login import LoginManager
from flask.ext.openid import OpenID
from openid.extensions import pape
from passlib.context import CryptContext
from .settings import WmtSettings
from .core import db
from .blueprints import register_blueprints
from .errors import ERROR_HANDLERS
class User(object):
    """Minimal flask-login user object that wraps nothing but an id.

    Every instance is considered a logged-in, active, non-anonymous user.
    """

    def __init__(self, id):
        self._id = id

    def get_id(self):
        """Return the identifier this user was created with."""
        return self._id

    def is_authenticated(self):
        """Any instantiated User counts as authenticated."""
        return True

    def is_active(self):
        """Accounts are never disabled in this app."""
        return True

    def is_anonymous(self):
        """Real users only; anonymous sessions use a different object."""
        return False
def create_app(settings_override=None, register_security_blueprint=True,
               wmt_root_path=None):
    """Create and configure the WMT flask application.

    :param settings_override: object whose attributes override file-based
        settings (applied last, so it wins).
    :param register_security_blueprint: accepted for interface compatibility;
        not consulted in this factory.
    :param wmt_root_path: root path handed to WmtSettings; defaults to the
        flask app's own root path.
    :returns: the configured Flask application.
    """
    app = Flask(__name__, instance_relative_config=True,
                template_folder='/data/web/htdocs/wmt/api/rest/bin/templates')
    #oid = OpenID(app, safe_roots=[], extension_responses=[pape.Response])
    login_manager = LoginManager()
    login_manager.init_app(app)

    @login_manager.user_loader
    def load_user(userid):
        return User(userid)

    @app.before_first_request
    def create_database():
        db.create_all()

    # Settings precedence: package defaults, then instance settings.cfg,
    # then the explicit override object.
    app.config.from_object(WmtSettings(wmt_root_path or app.root_path))
    app.config.from_pyfile('settings.cfg', silent=True)
    app.config.from_object(settings_override)
    app.config['pw'] = CryptContext.from_string(
        app.config['CRYPT_INI_CONTENTS'], section='passlib')

    import logging
    logging.basicConfig()
    app.config['log'] = logging.getLogger('wmtserver')

    db.init_app(app)

    @app.route('/')
    def site_map():
        """Return a JSON index of the API's top-level collections."""
        COLLECTIONS = ['users', 'names', 'components', 'models', 'tags',
                       'sims', 'parameters', 'files']
        # Renamed from `map`, which shadowed the builtin.
        api_map = {"@type": "api", "href": url_for('.site_map')}
        api_map['links'] = [
            {'rel': rel, 'href': url_for('.'.join([rel, 'show']))}
            for rel in COLLECTIONS]
        return jsonify(api_map)

    register_blueprints(app, __name__, __path__)

    for error, func in ERROR_HANDLERS:
        app.errorhandler(error)(func)

    return app
|
{
"content_hash": "ad08560e3862a627de651ac5aaf52274",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 26.814814814814813,
"alnum_prop": 0.6321362799263351,
"repo_name": "mcflugen/wmt-rest",
"id": "56d576696c481acd8aeec8519ebb8bb8afba3dcb",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wmt/flask/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169966"
},
{
"name": "Shell",
"bytes": "24"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.