| text (stringlengths 4–1.02M) | meta (dict) |
|---|---|
import tempfile
import os
import shutil
from biicode.client.store import hivedb
from unittest import TestCase
from biicode.common.test.conf import BII_TEST_FOLDER
from nose.plugins.attrib import attr
from biicode.client.store.migration_store import MigrationStore
from biicode.common.test.migration.migration_utils import TMigration1, TMigration2
@attr('integration')
class MigrationStoreTest(TestCase):
_suites = ['client']
_multiprocess_shared_ = True
def setUp(self):
self.hiveFolder = tempfile.mkdtemp(suffix='biicode', dir=BII_TEST_FOLDER)
self.hivedb = hivedb.factory(os.path.join(self.hiveFolder, "mytestdb.db"))
self.db = MigrationStore(self.hivedb)
def tearDown(self):
if os.path.isdir(self.hiveFolder):
self.hivedb.disconnect()
try: # Avoid windows crashes
shutil.rmtree(self.hiveFolder)
except Exception:
pass
def test_read_and_write_migrations(self):
mig1 = TMigration1()
self.db.store_executed_migration(mig1)
        self.assertEqual(self.db.read_last_migrated(), mig1)
mig2 = TMigration2()
self.db.store_executed_migration(mig2)
        self.assertEqual(self.db.read_last_migrated(), mig2)
|
{
"content_hash": "70a34ca538bf4aa4e6f55c5c25689c58",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 33.3421052631579,
"alnum_prop": 0.6898184688239937,
"repo_name": "bowlofstew/client",
"id": "65fd6f92c9a43de73edf3983344b38d04428c490",
"size": "1267",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "test/store/migration_store_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "622"
},
{
"name": "CMake",
"bytes": "90594"
},
{
"name": "Python",
"bytes": "367469"
},
{
"name": "Shell",
"bytes": "738"
}
],
"symlink_target": ""
}
|
from . api import api
from . root import root
|
{
"content_hash": "1ab654f9c7e342965f7dec5670260525",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 23,
"alnum_prop": 0.7391304347826086,
"repo_name": "jjangsangy/ExplainToMe",
"id": "904b7f58142f6eddddaf925dbff7b62a92fb1707",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ExplainToMe/views/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9228"
},
{
"name": "Dockerfile",
"bytes": "1211"
},
{
"name": "HTML",
"bytes": "12056"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "13937"
}
],
"symlink_target": ""
}
|
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SubscriptionResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, id_product=None, duration=None, download=None, trial_duration=None, trial_percent=None, trial_sub=None, limit=None, limit_duration=None, alert_type=None, alert_qty=None, active=None, recurring_payment=None):
"""
SubscriptionResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'id_product': 'int',
'duration': 'int',
'download': 'bool',
'trial_duration': 'int',
'trial_percent': 'int',
'trial_sub': 'int',
'limit': 'bool',
'limit_duration': 'int',
'alert_type': 'str',
'alert_qty': 'int',
'active': 'bool',
'recurring_payment': 'int'
}
self.attribute_map = {
'id': 'id',
'id_product': 'id_product',
'duration': 'duration',
'download': 'download',
'trial_duration': 'trial_duration',
'trial_percent': 'trial_percent',
'trial_sub': 'trial_sub',
'limit': 'limit',
'limit_duration': 'limit_duration',
'alert_type': 'alert_type',
'alert_qty': 'alert_qty',
'active': 'active',
'recurring_payment': 'recurring_payment'
}
self._id = id
self._id_product = id_product
self._duration = duration
self._download = download
self._trial_duration = trial_duration
self._trial_percent = trial_percent
self._trial_sub = trial_sub
self._limit = limit
self._limit_duration = limit_duration
self._alert_type = alert_type
self._alert_qty = alert_qty
self._active = active
self._recurring_payment = recurring_payment
@property
def id(self):
"""
Gets the id of this SubscriptionResponse.
:return: The id of this SubscriptionResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this SubscriptionResponse.
:param id: The id of this SubscriptionResponse.
:type: int
"""
self._id = id
@property
def id_product(self):
"""
Gets the id_product of this SubscriptionResponse.
:return: The id_product of this SubscriptionResponse.
:rtype: int
"""
return self._id_product
@id_product.setter
def id_product(self, id_product):
"""
Sets the id_product of this SubscriptionResponse.
:param id_product: The id_product of this SubscriptionResponse.
:type: int
"""
self._id_product = id_product
@property
def duration(self):
"""
Gets the duration of this SubscriptionResponse.
:return: The duration of this SubscriptionResponse.
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""
Sets the duration of this SubscriptionResponse.
:param duration: The duration of this SubscriptionResponse.
:type: int
"""
self._duration = duration
@property
def download(self):
"""
Gets the download of this SubscriptionResponse.
:return: The download of this SubscriptionResponse.
:rtype: bool
"""
return self._download
@download.setter
def download(self, download):
"""
Sets the download of this SubscriptionResponse.
:param download: The download of this SubscriptionResponse.
:type: bool
"""
self._download = download
@property
def trial_duration(self):
"""
Gets the trial_duration of this SubscriptionResponse.
:return: The trial_duration of this SubscriptionResponse.
:rtype: int
"""
return self._trial_duration
@trial_duration.setter
def trial_duration(self, trial_duration):
"""
Sets the trial_duration of this SubscriptionResponse.
:param trial_duration: The trial_duration of this SubscriptionResponse.
:type: int
"""
self._trial_duration = trial_duration
@property
def trial_percent(self):
"""
Gets the trial_percent of this SubscriptionResponse.
:return: The trial_percent of this SubscriptionResponse.
:rtype: int
"""
return self._trial_percent
@trial_percent.setter
def trial_percent(self, trial_percent):
"""
Sets the trial_percent of this SubscriptionResponse.
:param trial_percent: The trial_percent of this SubscriptionResponse.
:type: int
"""
self._trial_percent = trial_percent
@property
def trial_sub(self):
"""
Gets the trial_sub of this SubscriptionResponse.
:return: The trial_sub of this SubscriptionResponse.
:rtype: int
"""
return self._trial_sub
@trial_sub.setter
def trial_sub(self, trial_sub):
"""
Sets the trial_sub of this SubscriptionResponse.
:param trial_sub: The trial_sub of this SubscriptionResponse.
:type: int
"""
self._trial_sub = trial_sub
@property
def limit(self):
"""
Gets the limit of this SubscriptionResponse.
:return: The limit of this SubscriptionResponse.
:rtype: bool
"""
return self._limit
@limit.setter
def limit(self, limit):
"""
Sets the limit of this SubscriptionResponse.
:param limit: The limit of this SubscriptionResponse.
:type: bool
"""
self._limit = limit
@property
def limit_duration(self):
"""
Gets the limit_duration of this SubscriptionResponse.
:return: The limit_duration of this SubscriptionResponse.
:rtype: int
"""
return self._limit_duration
@limit_duration.setter
def limit_duration(self, limit_duration):
"""
Sets the limit_duration of this SubscriptionResponse.
:param limit_duration: The limit_duration of this SubscriptionResponse.
:type: int
"""
self._limit_duration = limit_duration
@property
def alert_type(self):
"""
Gets the alert_type of this SubscriptionResponse.
:return: The alert_type of this SubscriptionResponse.
:rtype: str
"""
return self._alert_type
@alert_type.setter
def alert_type(self, alert_type):
"""
Sets the alert_type of this SubscriptionResponse.
:param alert_type: The alert_type of this SubscriptionResponse.
:type: str
"""
self._alert_type = alert_type
@property
def alert_qty(self):
"""
Gets the alert_qty of this SubscriptionResponse.
:return: The alert_qty of this SubscriptionResponse.
:rtype: int
"""
return self._alert_qty
@alert_qty.setter
def alert_qty(self, alert_qty):
"""
Sets the alert_qty of this SubscriptionResponse.
:param alert_qty: The alert_qty of this SubscriptionResponse.
:type: int
"""
self._alert_qty = alert_qty
@property
def active(self):
"""
Gets the active of this SubscriptionResponse.
:return: The active of this SubscriptionResponse.
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""
Sets the active of this SubscriptionResponse.
:param active: The active of this SubscriptionResponse.
:type: bool
"""
self._active = active
@property
def recurring_payment(self):
"""
Gets the recurring_payment of this SubscriptionResponse.
:return: The recurring_payment of this SubscriptionResponse.
:rtype: int
"""
return self._recurring_payment
@recurring_payment.setter
def recurring_payment(self, recurring_payment):
"""
Sets the recurring_payment of this SubscriptionResponse.
:param recurring_payment: The recurring_payment of this SubscriptionResponse.
:type: int
"""
self._recurring_payment = recurring_payment
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
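# Minimal usage sketch (illustrative, not part of the generated SDK): build a
# response model and serialise it with the helpers defined above. Field values
# are made up for demonstration.
if __name__ == '__main__':
    example = SubscriptionResponse(id=1, id_product=42, duration=30,
                                   download=True, active=True)
    example.trial_duration = 7
    print(example.to_str())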
|
{
"content_hash": "bca2368862cd1c1f22aea357f91f2132",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 239,
"avg_line_length": 26.349246231155778,
"alnum_prop": 0.5604081243444264,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "880f65f3bdc3500af74ef9a5d3fa76a852d7be62",
"size": "10504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kinow_client/models/subscription_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.format import format
def prestart(env, iop_component):
import params
conf_select.select(params.stack_name, iop_component, params.version)
stack_select.select(iop_component, params.version)
|
{
"content_hash": "52f8e78c675ef68cf9f01f86d03c8df7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 42.785714285714285,
"alnum_prop": 0.8080133555926544,
"repo_name": "alexryndin/ambari",
"id": "098b5cf2480e3be8d073da17e0bb443dad320ed5",
"size": "1221",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.2/services/RANGER/package/scripts/upgrade.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from django.contrib.admin.models import User
from django.conf import settings
from challenges.models import (ExternalLink, Submission, Phase, Challenge,
Category, Project, SubmissionParent)
__all__ = ['challenge_setup', 'challenge_teardown', 'create_submissions',
'create_users']
def challenge_setup():
"""Set up some sample data to test with.
This is a bit clearer and hopefully more flexible than using fixtures.
"""
challenge_teardown() # In case other tests didn't clean up
p = Project()
p.name = 'My Project'
p.slug = getattr(settings, 'IGNITE_PROJECT_SLUG', 'my-project')
p.description = 'My super awesome project of awesomeness.'
p.long_description = 'Did I mention how awesome it was?'
p.allow_participation = True
p.save()
c = Challenge()
c.project = p
    c.title = 'My Challenge'
c.slug = getattr(settings, 'IGNITE_CHALLENGE_SLUG', 'my-challenge')
c.summary = 'Are you up to it?'
c.description = 'This is a challenge of supreme challengingness.'
c.end_date = datetime.utcnow() + timedelta(days=365)
c.save()
ph = Phase()
ph.challenge = c
ph.name = 'Ideation'
ph.order = 1
ph.save()
cat = Category()
cat.name = 'Beer'
cat.slug = 'beer'
cat.save()
def challenge_teardown():
"""Tear down any data created by these tests."""
for model in [ExternalLink, Submission, Phase, Challenge, Category, Project, User]:
model.objects.all().delete()
def create_submissions(count, phase=None, creator=None):
"""Create a number of fake submissions. Return their titles.
If a phase is not supplied, assume only one phase exists.
If a creator is not supplied, try to get a single user's profile, or create
a dummy user.
"""
if phase is None:
phase = Phase.objects.get()
if creator is None:
try:
user = User.objects.get()
except User.DoesNotExist:
user = User.objects.create_user('bob', 'bob@example.com', 'bob')
creator = user.get_profile()
category = Category.objects.all()[0]
titles = ['Submission %d' % i for i in range(1, count + 1)]
for title in titles:
foo = Submission.objects.create(title=title,
brief_description='A submission',
description='A really good submission',
phase=phase,
created_by=creator,
category=category)
# Make sure this submission has a parent
SubmissionParent.objects.create(submission=foo)
return titles
def create_users():
profile_list = []
for name in ['alex', 'bob', 'charlie']:
user = User.objects.create_user(name, '%s@example.com' % name,
password=name)
# Give the user a display name to stop 'complete your profile' redirect
profile = user.get_profile()
profile.name = '%(name)s %(name)sson' % {'name': name.capitalize()}
profile.save()
profile_list.append(profile)
return profile_list
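# Usage sketch (illustrative): a typical test built on these helpers. The test
# class and assertion below are hypothetical, not part of this fixtures module.
#
#   class SubmissionCountTest(TestCase):
#       def setUp(self):
#           challenge_setup()
#
#       def tearDown(self):
#           challenge_teardown()
#
#       def test_create_submissions(self):
#           titles = create_submissions(3)
#           self.assertEqual(Submission.objects.count(), len(titles))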
|
{
"content_hash": "fc71885ebf3f41116bafed619c86798e",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 87,
"avg_line_length": 33.05050505050505,
"alnum_prop": 0.5974938875305623,
"repo_name": "mozilla/mozilla-ignite",
"id": "ab10d64d3ea31497d9edf3fdce14cce49a049a69",
"size": "3272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/challenges/tests/fixtures/project_fixtures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "230222"
},
{
"name": "JavaScript",
"bytes": "457971"
},
{
"name": "Puppet",
"bytes": "11448"
},
{
"name": "Python",
"bytes": "4064774"
},
{
"name": "SQL",
"bytes": "71"
},
{
"name": "Shell",
"bytes": "1462"
},
{
"name": "TeX",
"bytes": "19491"
}
],
"symlink_target": ""
}
|
from pathlib import Path
import pytest
from django.conf import settings
from django.urls import reverse
from rdmo.core.constants import (VALUE_TYPE_CHOICES, VALUE_TYPE_FILE,
VALUE_TYPE_TEXT)
from ..models import Value
users = (
('owner', 'owner'),
('manager', 'manager'),
('author', 'author'),
('guest', 'guest'),
('api', 'api'),
('user', 'user'),
('site', 'site'),
('anonymous', None),
)
view_value_permission_map = {
'owner': [1, 2, 3, 4, 5],
'manager': [1, 3, 5],
'author': [1, 3, 5],
'guest': [1, 3, 5],
'api': [1, 2, 3, 4, 5],
'site': [1, 2, 3, 4, 5]
}
add_value_permission_map = change_value_permission_map = delete_value_permission_map = {
'owner': [1, 2, 3, 4, 5],
'manager': [1, 3, 5],
'author': [1, 3, 5],
'api': [1, 2, 3, 4, 5],
'site': [1, 2, 3, 4, 5]
}
urlnames = {
'list': 'v1-projects:project-value-list',
'detail': 'v1-projects:project-value-detail',
'file': 'v1-projects:project-value-file'
}
projects = [1, 2, 3, 4, 5]
values = [1, 2, 3, 4, 5, 6, 7, 238, 242, 247, 248, 249]
attribute_id = 1
option_id = 1
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_list(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
response = client.get(url)
if project_id in view_value_permission_map.get(username, []):
assert response.status_code == 200
assert isinstance(response.json(), list)
if username == 'user':
assert sorted([item['id'] for item in response.json()]) == []
else:
values_list = Value.objects.filter(project_id=project_id) \
.filter(snapshot_id=None) \
.order_by('id').values_list('id', flat=True)
assert sorted([item['id'] for item in response.json()]) == list(values_list)
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('value_id', values)
def test_detail(db, client, username, password, project_id, value_id):
client.login(username=username, password=password)
    value = Value.objects.filter(project_id=project_id, id=value_id).first()
url = reverse(urlnames['detail'], args=[project_id, value_id])
response = client.get(url)
if value and project_id in view_value_permission_map.get(username, []):
assert response.status_code == 200
assert isinstance(response.json(), dict)
assert response.json().get('id') == value_id
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('value_type,value_type_label', VALUE_TYPE_CHOICES)
def test_create_text(db, client, username, password, project_id, value_type, value_type_label):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
data = {
'attribute': attribute_id,
'set_index': 0,
'collection_index': 0,
'text': 'Lorem ipsum',
'value_type': value_type,
'unit': ''
}
response = client.post(url, data)
if project_id in add_value_permission_map.get(username, []):
assert response.status_code == 201
assert isinstance(response.json(), dict)
assert response.json().get('id') in Value.objects.filter(project_id=project_id).values_list('id', flat=True)
elif project_id in view_value_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('value_type,value_type_label', VALUE_TYPE_CHOICES)
def test_create_option(db, client, username, password, project_id, value_type, value_type_label):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
data = {
'attribute': attribute_id,
'set_index': 0,
'collection_index': 0,
'option': option_id,
'value_type': value_type,
'unit': ''
}
response = client.post(url, data)
if project_id in add_value_permission_map.get(username, []):
assert response.status_code == 201
assert isinstance(response.json(), dict)
assert response.json().get('id') in Value.objects.filter(project_id=project_id).values_list('id', flat=True)
elif project_id in view_value_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('value_type,value_type_label', VALUE_TYPE_CHOICES)
def test_create_external(db, client, username, password, project_id, value_type, value_type_label):
client.login(username=username, password=password)
url = reverse(urlnames['list'], args=[project_id])
data = {
'attribute': attribute_id,
'set_index': 0,
'collection_index': 0,
'text': 'Lorem ipsum',
'external_id': '1',
'value_type': value_type,
'unit': ''
}
response = client.post(url, data)
if project_id in add_value_permission_map.get(username, []):
assert response.status_code == 201
assert isinstance(response.json(), dict)
assert response.json().get('id') in Value.objects.filter(project_id=project_id).values_list('id', flat=True)
elif project_id in view_value_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('value_id', values)
def test_update(db, client, username, password, project_id, value_id):
client.login(username=username, password=password)
value = Value.objects.filter(project_id=project_id, id=value_id).first()
url = reverse(urlnames['detail'], args=[project_id, value_id])
data = {
'attribute': attribute_id,
'set_index': 0,
'collection_index': 0,
'text': 'Lorem ipsum',
'value_type': VALUE_TYPE_TEXT,
'unit': ''
}
response = client.put(url, data, content_type='application/json')
if value and project_id in change_value_permission_map.get(username, []):
assert response.status_code == 200
assert isinstance(response.json(), dict)
assert response.json().get('id') in Value.objects.filter(project_id=project_id).values_list('id', flat=True)
elif value and project_id in view_value_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('value_id', values)
def test_delete(db, client, username, password, project_id, value_id):
client.login(username=username, password=password)
value = Value.objects.filter(project_id=project_id, id=value_id).first()
url = reverse(urlnames['detail'], args=[project_id, value_id])
response = client.delete(url)
if value and project_id in delete_value_permission_map.get(username, []):
assert response.status_code == 204
assert not Value.objects.filter(pk=value_id).exists()
elif value and project_id in view_value_permission_map.get(username, []):
assert response.status_code == 403
assert Value.objects.filter(pk=value_id).exists()
else:
assert response.status_code == 404
assert Value.objects.filter(pk=value_id).exists()
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('value_id', values)
def test_file_get(db, client, files, username, password, project_id, value_id):
client.login(username=username, password=password)
value = Value.objects.filter(project_id=project_id, id=value_id).first()
url = reverse(urlnames['file'], args=[project_id, value_id])
response = client.get(url)
if value and value.value_type == VALUE_TYPE_FILE and project_id in view_value_permission_map.get(username, []):
assert response.status_code == 200
assert response['Content-Type'] == value.file_type
assert response['Content-Disposition'] == 'attachment; filename={}'.format(value.file_name)
assert response.content == value.file.read()
else:
assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('value_id', values)
def test_file_put(db, client, files, username, password, project_id, value_id):
client.login(username=username, password=password)
value = Value.objects.filter(project_id=project_id, id=value_id).first()
url = reverse(urlnames['file'], args=[project_id, value_id])
file_path = Path(settings.MEDIA_ROOT) / 'test_file.txt'
with file_path.open() as fp:
response = client.post(url, {'name': 'test_file.txt', 'file': fp})
if value and project_id in change_value_permission_map.get(username, []):
assert response.status_code == 200
assert response.json().get('file_name') == 'test_file.txt'
elif value and project_id in view_value_permission_map.get(username, []):
assert response.status_code == 403
else:
assert response.status_code == 404
|
{
"content_hash": "a2babc92f6dece336140073f4edc542a",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 116,
"avg_line_length": 37.70454545454545,
"alnum_prop": 0.6483825597749648,
"repo_name": "DMPwerkzeug/DMPwerkzeug",
"id": "9a4ca791603042c3a905bb08693f9d37d871e028",
"size": "9954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdmo/projects/tests/test_viewset_project_value.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9735"
},
{
"name": "HTML",
"bytes": "126570"
},
{
"name": "JavaScript",
"bytes": "46177"
},
{
"name": "Python",
"bytes": "120676"
}
],
"symlink_target": ""
}
|
from typing import Sequence
from pyramids import categorization
from pyramids.categorization import Category
from pyramids.rules.subtree_match import SubtreeMatchRule
# all_terms(),
class AllTermsMatchRule(SubtreeMatchRule):
def __str__(self) -> str:
return str(categorization.Category('all_terms', self._positive_properties,
self._negative_properties))
def __call__(self, category_list: Sequence[Category], head_index: int) -> bool:
for index in range(len(category_list)):
if index == head_index:
continue
if (not (self._positive_properties <= category_list[index].positive_properties) or
(self._negative_properties & category_list[index].positive_properties)):
return False
return True
|
{
"content_hash": "ed6cd7c3cf2ab30dfdd9ddafa59c0eb7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 94,
"avg_line_length": 38.36363636363637,
"alnum_prop": 0.6398104265402843,
"repo_name": "hosford42/pyramids",
"id": "51815a9a43591061b0fb6a82ead2f3397cf46a20",
"size": "844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyramids/rules/all_terms_match.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266486"
}
],
"symlink_target": ""
}
|
from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.extensions import l3
from neutron.openstack.common import timeutils
def _uos_extend_timestamp(res, db):
res['created_at'] = timeutils.strtime(db['created_at'])
def _uos_extend_floatingip_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_router_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_network_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_port_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_subnet_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_sg_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.FLOATINGIPS, [_uos_extend_floatingip_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.ROUTERS, [_uos_extend_router_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, [_uos_extend_network_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, [_uos_extend_port_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.SUBNETS, [_uos_extend_subnet_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
'security_groups', [_uos_extend_sg_dict_binding])
|
{
"content_hash": "898e5a1c70846b44e12441e1aaab613d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 63,
"avg_line_length": 28,
"alnum_prop": 0.7436224489795918,
"repo_name": "CingHu/neutron-ustack",
"id": "97a8667d885eac45f0cd4a4a096228b44fc247b2",
"size": "2291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/db/uos_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "11544804"
},
{
"name": "Shell",
"bytes": "29485"
}
],
"symlink_target": ""
}
|
from c7n_azure.resources.arm import ArmResourceManager
from c7n_azure.provider import resources
@resources.register('disk')
class Disk(ArmResourceManager):
"""Disk Resource
:example:
This policy will find all data disks that are not being managed by a VM.
.. code-block:: yaml
policies:
- name: orphaned-disk
resource: azure.disk
filters:
- type: value
key: managedBy
value: null
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Storage']
service = 'azure.mgmt.compute'
client = 'ComputeManagementClient'
enum_spec = ('disks', 'list', None)
default_report_fields = (
'name',
'location',
'resourceGroup',
'properties.diskState',
'sku.name'
)
resource_type = 'Microsoft.Compute/disks'
|
{
"content_hash": "18640e6d49b42a3592de6fa3802c341a",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 24.86842105263158,
"alnum_prop": 0.5682539682539682,
"repo_name": "alfredgamulo/cloud-custodian",
"id": "6caa115fdac9c6f5c484615fa6742d379134d62d",
"size": "1025",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/c7n_azure/resources/disk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2126"
},
{
"name": "Go",
"bytes": "146637"
},
{
"name": "HCL",
"bytes": "33977"
},
{
"name": "Jinja",
"bytes": "19775"
},
{
"name": "Makefile",
"bytes": "14242"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "6579430"
},
{
"name": "Shell",
"bytes": "15323"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
import setuptools
from distutils.core import setup
setup(
name='vttools',
version='0.0.x',
author='Brookhaven National Lab',
packages=["vttools",
'vttools.vtmods',
'vttools.vtmods.import_lists',
'vttools.to_wrap',
'vttools.tests'
],
package_data={'vttools.vtmods.import_lists': ['*.yaml']}
)
|
{
"content_hash": "ff57ffa90b4e25f7666eeae989ea522d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 60,
"avg_line_length": 25.8,
"alnum_prop": 0.5503875968992248,
"repo_name": "Nikea/VTTools",
"id": "07498a9ef2283db6e2cdc51981ee92eada7b25de",
"size": "388",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "146428"
},
{
"name": "Shell",
"bytes": "204"
}
],
"symlink_target": ""
}
|
import tamil
from tamil.txt2unicode import *
from transliterate import ISO, algorithm
def unicode_converter(tsci, cod):
out = ""
if cod == "t2u":
out = tamil.tscii.convert_to_unicode(tsci)
elif cod == "u2t":
temp = str(tsci)
out = unicode2tscii(temp)
elif cod == "an2u":
temp = str(tsci)
out = encode2unicode.anjal2unicode(temp)
elif cod == "bam2u":
temp = str(tsci)
out = encode2unicode.bamini2unicode(temp)
elif cod == "boom2u":
temp = str(tsci)
out = encode2unicode.boomi2unicode(temp)
elif cod == "karan2u":
temp = str(tsci)
out = encode2unicode.dinakaran2unicode(temp)
elif cod == "thanthy2u":
temp = str(tsci)
out = encode2unicode.dinathanthy2unicode(temp)
elif cod == "kavi2u":
temp = str(tsci)
out = encode2unicode.kavipriya2unicode(temp)
elif cod == "mura2u":
temp = str(tsci)
out = encode2unicode.murasoli2unicode(temp)
elif cod == "mylai2u":
temp = str(tsci)
out = encode2unicode.mylai2unicode(temp)
elif cod == "nakk2u":
temp = str(tsci)
out = encode2unicode.nakkeeran2unicode(temp)
elif cod == "roman2u":
temp = str(tsci)
out = encode2unicode.roman2unicode(temp)
elif cod == "tab2u":
temp = str(tsci)
out = encode2unicode.tab2unicode(temp)
elif cod == "tam2u":
temp = str(tsci)
out = encode2unicode.tam2unicode(temp)
elif cod == "indoweb2u":
temp = str(tsci)
out = encode2unicode.indoweb2unicode(temp)
elif cod == "koeln2u":
temp = str(tsci)
out = encode2unicode.koeln2unicode(temp)
elif cod == "libi2u":
temp = str(tsci)
out = encode2unicode.libi2unicode(temp)
elif cod == "oldvikatan2u":
temp = str(tsci)
out = encode2unicode.oldvikatan2unicode(temp)
elif cod == "webulagam2u":
temp = str(tsci)
out = encode2unicode.webulagam2unicode(temp)
elif cod == "auto2u":
temp = str(tsci)
result = []
out = encode2unicode.auto2unicode(temp, result)
out += "<BR/><B>Encoding={0}</B><BR/>".format(result)
elif cod == "dinamani2u":
temp = str(tsci)
out = encode2unicode.dinamani2unicode(temp)
elif cod == "pallavar2u":
temp = str(tsci)
out = encode2unicode.pallavar2unicode(temp)
elif cod == "diacritic2u":
temp = str(tsci)
out = encode2unicode.diacritic2unicode(temp)
elif cod == "shreelipi2u":
temp = str(tsci)
out = encode2unicode.shreelipi2unicode(temp)
elif cod == "softview2u":
temp = str(tsci)
out = encode2unicode.softview2unicode(temp)
elif cod == "tace2u":
temp = str(tsci)
out = encode2unicode.tace2unicode(temp)
elif cod == "vanavil2u":
temp = str(tsci)
out = encode2unicode.vanavil2unicode(temp)
elif cod == "indica2u":
temp = str(tsci)
out = encode2unicode.indica2unicode(temp)
elif cod == "anu2u":
temp = str(tsci)
out = encode2unicode.anu2unicode(temp)
elif cod == "shreelipiavid2u":
temp = str(tsci)
out = encode2unicode.shreelipiavid2unicode(temp)
elif cod == "unicode2anjal":
temp = str(tsci)
out = unicode2encode.unicode2anjal(temp)
elif cod == "unicode2bamini":
temp = str(tsci)
out = unicode2encode.unicode2bamini(temp)
elif cod == "unicode2boomi":
temp = str(tsci)
out = unicode2encode.unicode2boomi(temp)
elif cod == "unicode2dinakaran":
temp = str(tsci)
out = unicode2encode.unicode2dinakaran(temp)
elif cod == "unicode2dinathanthy":
temp = str(tsci)
out = unicode2encode.unicode2dinathanthy(temp)
elif cod == "unicode2kavipriya":
temp = str(tsci)
out = unicode2encode.unicode2kavipriya(temp)
elif cod == "unicode2murasoli":
temp = str(tsci)
out = unicode2encode.unicode2murasoli(temp)
elif cod == "unicode2mylai":
temp = str(tsci)
out = unicode2encode.unicode2mylai(temp)
elif cod == "unicode2nakkeeran":
temp = str(tsci)
out = unicode2encode.unicode2nakkeeran(temp)
elif cod == "unicode2roman":
temp = str(tsci)
out = unicode2encode.unicode2roman(temp)
elif cod == "unicode2tab":
temp = str(tsci)
out = unicode2encode.unicode2tab(temp)
elif cod == "unicode2tam":
temp = str(tsci)
        out = unicode2encode.unicode2tam(temp)
elif cod == "unicode2indoweb":
temp = str(tsci)
out = unicode2encode.unicode2indoweb(temp)
elif cod == "unicode2koeln":
temp = str(tsci)
out = unicode2encode.unicode2koeln(temp)
elif cod == "unicode2libi":
temp = str(tsci)
out = unicode2encode.unicode2libi(temp)
elif cod == "unicode2oldvikatan":
temp = str(tsci)
out = unicode2encode.unicode2oldvikatan(temp)
elif cod == "unicode2webulagam":
temp = str(tsci)
out = unicode2encode.unicode2webulagam(temp)
elif cod == "unicode2dinamani":
temp = str(tsci)
out = unicode2encode.unicode2dinamani(temp)
elif cod == "unicode2pallavar":
temp = str(tsci)
out = unicode2encode.unicode2pallavar(temp)
elif cod == "unicode2diacritic":
temp = str(tsci)
out = unicode2encode.unicode2diacritic(temp)
elif cod == "unicode2shreelipi":
temp = str(tsci)
out = unicode2encode.unicode2shreelipi(temp)
elif cod == "unicode2softview":
temp = str(tsci)
out = unicode2encode.unicode2softview(temp)
elif cod == "unicode2tace":
temp = str(tsci)
out = unicode2encode.unicode2tace(temp)
elif cod == "unicode2vanavil":
temp = str(tsci)
out = unicode2encode.unicode2vanavil(temp)
elif cod == "unicode2indica":
temp = str(tsci)
out = unicode2encode.unicode2indica(temp)
elif cod == "unicode2anu":
temp = str(tsci)
out = unicode2encode.unicode2anu(temp)
elif cod == "unicode2shreelipiavid":
temp = str(tsci)
out = unicode2encode.unicode2shreelipiavid(temp)
elif cod == "ISO2unicode":
temp = str(tsci)
ISO_table = ISO.Transliteration.table
top, result = algorithm.Greedy.transliterate(ISO_table, temp)
# print(result.options)
out = list(result.options)[0]
# TODO/alternative option.
# out = algorithm.Direct.transliterate(ISO_table,temp)
# join uyir mei.
elif cod == "unicode2ISO":
temp = str(tsci)
ISO_table = ISO.ReverseTransliteration.table
out = algorithm.Direct.transliterate(ISO_table, temp)
data = {"result": out}
return data
|
{
"content_hash": "b9a249dbe2b912fd5b9b34a849c8672d",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 69,
"avg_line_length": 35.34020618556701,
"alnum_prop": 0.6080805134189031,
"repo_name": "arcturusannamalai/open-tamil",
"id": "4947e8e5ef5c99c45866e130c229f344985d7c78",
"size": "6856",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "webapp/opentamilapp/webuni.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14505"
},
{
"name": "HTML",
"bytes": "5869"
},
{
"name": "Java",
"bytes": "35038"
},
{
"name": "JavaScript",
"bytes": "9250"
},
{
"name": "Makefile",
"bytes": "345"
},
{
"name": "Python",
"bytes": "603879"
},
{
"name": "Ruby",
"bytes": "26442"
},
{
"name": "Shell",
"bytes": "3686"
}
],
"symlink_target": ""
}
|
import pika
import logging
import locale
import json
class RabbitClient(object):
'''
RabbitClient is the base level class that handles rabbit exceptions, connections and logging. Subsequent
consumer/publishers will subclass RabbitClient and implement the necessary functions.
'''
_LOG_FORMAT = ('%(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(levelname) -10s %(message)s')
_logger=None
_config=None
_connection=None
_destinationQ='testq'
_exchange=None
_channel=None
_routingkey=''
_exchangeName=''
_exchangeType=''
def __init__(self, configfile=None):
if (configfile == None):
configfile = './defaultConfig.json'
locale.setlocale(locale.LC_ALL, 'en_US')
logger = logging.getLogger(self.__class__.__name__)
#load config file
try:
with open(configfile) as conffile:
self._config = json.load(conffile)
except IOError as e:
raise IOError("Error::" + str(self.__class__.__name__) + " unable to load config file: " + file + " with error : " + str(e.message))
self._destinationQ=self._config['queue']
self._exchangeName=self._config['exchange_name']
self._exchangeType=self._config['exchange_type']
self._routingkey = self._config['routing_key']
# setup logger
logfile = logging.FileHandler(self._config.get('logfile'))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
logfile.setFormatter(formatter)
logger.addHandler(logfile)
logger.setLevel(self._config['loglevel'])
self._logger = logger
self._logger.info(str(self.__class__.__name__) + " Class initialized with configuration : ")
self._logger.info(str(self._config))
# we'll do connection instantiation somewhere else.
def connect(self, onOpenCallback=None, onCloseCallback=None, closeIOLoopOnDisconnect=True):
'''
The assumption that each client has but one connection
'''
if (onOpenCallback == None):
onOpenCallback = self._defaultOpenCallback
if (onCloseCallback == None):
onCloseCallback = self._defaultCloseCallback
self._logger.info("Connecting to %s", self._config['broker_url'])
self._connection=pika.SelectConnection(pika.URLParameters(str(self._config['broker_url'])),on_open_callback=onOpenCallback,
on_open_error_callback=self._connectionErrorHandler,
on_close_callback=onCloseCallback, stop_ioloop_on_close=closeIOLoopOnDisconnect)
def close(self):
self._logger.info("Closing connection %s", self._config['q_host'])
self._connection.close()
def _defaultOpenCallback(self, connection):
self._logger.info("RabbitPublisher::Connection Opened")
self._channel = self._connection.channel(on_open_callback=self.on_open_channel_callback)
def on_open_channel_callback(self, channel):
self._logger.info("RabbitPublisher::Channel Opened!")
#declare exchange
self._channel.exchange_declare(callback=self.on_exchange_declareok, exchange=self._exchangeName, exchange_type=self._exchangeType, durable=True)
def on_exchange_declareok(self,frame):
self._logger.info("Exchange %s Declared",self._exchangeName)
self._channel.queue_declare(self.on_queue_declareok, self._destinationQ, durable=True)
def on_queue_declareok(self, frame):
self._logger.info("Queue %s declared",self._destinationQ)
self._channel.queue_bind(self.on_bindok, self._destinationQ, self._exchangeName, self._routingkey)
def on_bindok(self, bind):
self._logger.info("Queue %s is bound on %s.", self._destinationQ, self._exchangeName)
self.run_action()
def _defaultCloseCallback(self):
self._logger.info("Connection Closed!")
def _connectionErrorHandler(self, error):
self._logger.error("Error with Connection! " + str(error))
if __name__ == '__main__':
client = RabbitClient()
client.connect()
__author__ = 'Warren Chang :: https://github.com/changzone/rabbitclient'
|
{
"content_hash": "9e1aeca6cd9bd1a1fdefb63c7efe2219",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 152,
"avg_line_length": 39.89622641509434,
"alnum_prop": 0.6488531567746512,
"repo_name": "changzone/rabbitclient",
"id": "b75619f6dee4c090d2fea24350f6c91191416721",
"size": "4229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rabbitclient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7438"
}
],
"symlink_target": ""
}
|
"""The ClimaCell integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from math import ceil
from typing import Any
from pyclimacell import ClimaCellV3, ClimaCellV4
from pyclimacell.const import CURRENT, DAILY, FORECASTS, HOURLY, NOWCAST
from pyclimacell.exceptions import (
CantConnectException,
InvalidAPIKeyException,
RateLimitedException,
UnknownException,
)
from homeassistant.components.tomorrowio import DOMAIN as TOMORROW_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_API_VERSION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTRIBUTION,
CC_V3_ATTR_CLOUD_COVER,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_HUMIDITY,
CC_V3_ATTR_OZONE,
CC_V3_ATTR_PRECIPITATION,
CC_V3_ATTR_PRECIPITATION_DAILY,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
CC_V3_ATTR_PRECIPITATION_TYPE,
CC_V3_ATTR_PRESSURE,
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_VISIBILITY,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_WIND_GUST,
CC_V3_ATTR_WIND_SPEED,
CC_V3_SENSOR_TYPES,
CONF_TIMESTEP,
DEFAULT_TIMESTEP,
DOMAIN,
MAX_REQUESTS_PER_DAY,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [Platform.SENSOR, Platform.WEATHER]
def _set_update_interval(hass: HomeAssistant, current_entry: ConfigEntry) -> timedelta:
"""Recalculate update_interval based on existing ClimaCell instances and update them."""
api_calls = 4 if current_entry.data[CONF_API_VERSION] == 3 else 2
    # We check how many ClimaCell configured instances are using the same API key and
    # calculate the interval so the allowed number of requests is not exceeded: scale by
    # the number of API calls each update needs (four for the v3 API, two for v4) and
    # use only 90% of MAX_REQUESTS_PER_DAY to leave a buffer of calls at the end of the day.
other_instance_entry_ids = [
entry.entry_id
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.entry_id != current_entry.entry_id
and entry.data[CONF_API_KEY] == current_entry.data[CONF_API_KEY]
]
interval = timedelta(
minutes=(
ceil(
(24 * 60 * (len(other_instance_entry_ids) + 1) * api_calls)
/ (MAX_REQUESTS_PER_DAY * 0.9)
)
)
)
for entry_id in other_instance_entry_ids:
if entry_id in hass.data[DOMAIN]:
hass.data[DOMAIN][entry_id].update_interval = interval
return interval
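# Worked example of the arithmetic above (illustrative numbers only; assume
# MAX_REQUESTS_PER_DAY == 1000, two entries sharing one API key, v3 so api_calls == 4):
#   ceil(24 * 60 * 2 * 4 / (1000 * 0.9)) == ceil(12.8) == 13 -> poll every 13 minutes.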
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up ClimaCell API from a config entry."""
hass.data.setdefault(DOMAIN, {})
params: dict[str, Any] = {}
# If config entry options not set up, set them up
if not entry.options:
params["options"] = {
CONF_TIMESTEP: DEFAULT_TIMESTEP,
}
else:
# Use valid timestep if it's invalid
timestep = entry.options[CONF_TIMESTEP]
if timestep not in (1, 5, 15, 30):
if timestep <= 2:
timestep = 1
elif timestep <= 7:
timestep = 5
elif timestep <= 20:
timestep = 15
else:
timestep = 30
new_options = entry.options.copy()
new_options[CONF_TIMESTEP] = timestep
params["options"] = new_options
# Add API version if not found
if CONF_API_VERSION not in entry.data:
new_data = entry.data.copy()
new_data[CONF_API_VERSION] = 3
params["data"] = new_data
if params:
hass.config_entries.async_update_entry(entry, **params)
hass.async_create_task(
hass.config_entries.flow.async_init(
TOMORROW_DOMAIN,
context={"source": SOURCE_IMPORT, "old_config_entry_id": entry.entry_id},
data=entry.data,
)
)
# Eventually we will remove the code that sets up the platforms and force users to
# migrate. This will only impact users still on the V3 API because we can't
# automatically migrate them, but for V4 users, we can skip the platform setup.
if entry.data[CONF_API_VERSION] == 4:
return True
api = ClimaCellV3(
entry.data[CONF_API_KEY],
entry.data.get(CONF_LATITUDE, hass.config.latitude),
entry.data.get(CONF_LONGITUDE, hass.config.longitude),
session=async_get_clientsession(hass),
)
coordinator = ClimaCellDataUpdateCoordinator(
hass,
entry,
api,
_set_update_interval(hass, entry),
)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
hass.data[DOMAIN].pop(config_entry.entry_id, None)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
class ClimaCellDataUpdateCoordinator(DataUpdateCoordinator):
"""Define an object to hold ClimaCell data."""
def __init__(
self,
hass: HomeAssistant,
config_entry: ConfigEntry,
api: ClimaCellV3 | ClimaCellV4,
update_interval: timedelta,
) -> None:
"""Initialize."""
self._config_entry = config_entry
self._api_version = config_entry.data[CONF_API_VERSION]
self._api = api
self.name = config_entry.data[CONF_NAME]
self.data = {CURRENT: {}, FORECASTS: {}}
super().__init__(
hass,
_LOGGER,
name=config_entry.data[CONF_NAME],
update_interval=update_interval,
)
async def _async_update_data(self) -> dict[str, Any]:
"""Update data via library."""
data: dict[str, Any] = {FORECASTS: {}}
try:
data[CURRENT] = await self._api.realtime(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_HUMIDITY,
CC_V3_ATTR_PRESSURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_VISIBILITY,
CC_V3_ATTR_OZONE,
CC_V3_ATTR_WIND_GUST,
CC_V3_ATTR_CLOUD_COVER,
CC_V3_ATTR_PRECIPITATION_TYPE,
*(sensor_type.key for sensor_type in CC_V3_SENSOR_TYPES),
]
)
data[FORECASTS][HOURLY] = await self._api.forecast_hourly(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_PRECIPITATION,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
],
None,
timedelta(hours=24),
)
data[FORECASTS][DAILY] = await self._api.forecast_daily(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_PRECIPITATION_DAILY,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
],
None,
timedelta(days=14),
)
data[FORECASTS][NOWCAST] = await self._api.forecast_nowcast(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_PRECIPITATION,
],
None,
timedelta(
minutes=min(300, self._config_entry.options[CONF_TIMESTEP] * 30)
),
self._config_entry.options[CONF_TIMESTEP],
)
except (
CantConnectException,
InvalidAPIKeyException,
RateLimitedException,
UnknownException,
) as error:
raise UpdateFailed from error
return data
class ClimaCellEntity(CoordinatorEntity[ClimaCellDataUpdateCoordinator]):
"""Base ClimaCell Entity."""
def __init__(
self,
config_entry: ConfigEntry,
coordinator: ClimaCellDataUpdateCoordinator,
api_version: int,
) -> None:
"""Initialize ClimaCell Entity."""
super().__init__(coordinator)
self.api_version = api_version
self._config_entry = config_entry
@staticmethod
def _get_cc_value(
weather_dict: dict[str, Any], key: str
) -> int | float | str | None:
"""
Return property from weather_dict.
Used for V3 API.
"""
items = weather_dict.get(key, {})
# Handle cases where value returned is a list.
# Optimistically find the best value to return.
if isinstance(items, list):
if len(items) == 1:
return items[0].get("value")
return next(
(item.get("value") for item in items if "max" in item),
next(
(item.get("value") for item in items if "min" in item),
items[0].get("value", None),
),
)
return items.get("value")
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def device_info(self) -> DeviceInfo:
"""Return device registry information."""
return DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, self._config_entry.data[CONF_API_KEY])},
manufacturer="ClimaCell",
name="ClimaCell",
sw_version=f"v{self.api_version}",
)
|
{
"content_hash": "bfcadb26ae9b79246672c7a64c57c22d",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 92,
"avg_line_length": 32.0516717325228,
"alnum_prop": 0.5811284969179706,
"repo_name": "toddeye/home-assistant",
"id": "cf80b83fc36a9d93524dbceb19a26e8b87cc11e2",
"size": "10545",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/climacell/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from django.db.models.expressions import OrderByList
class OrderableAggMixin:
def __init__(self, *expressions, ordering=(), **extra):
if isinstance(ordering, (list, tuple)):
self.order_by = OrderByList(*ordering)
else:
self.order_by = OrderByList(ordering)
super().__init__(*expressions, **extra)
def resolve_expression(self, *args, **kwargs):
self.order_by = self.order_by.resolve_expression(*args, **kwargs)
return super().resolve_expression(*args, **kwargs)
def get_source_expressions(self):
if self.order_by.source_expressions:
return super().get_source_expressions() + [self.order_by]
return super().get_source_expressions()
def set_source_expressions(self, exprs):
if isinstance(exprs[-1], OrderByList):
*exprs, self.order_by = exprs
return super().set_source_expressions(exprs)
def as_sql(self, compiler, connection):
order_by_sql, order_by_params = compiler.compile(self.order_by)
sql, sql_params = super().as_sql(compiler, connection, ordering=order_by_sql)
return sql, (*sql_params, *order_by_params)
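# Usage sketch (illustrative): a concrete aggregate mixes this class with
# Aggregate so callers can pass ordering=...; the postgres aggregates such as
# ArrayAgg and StringAgg follow this pattern. The template below is an
# assumption for illustration, not copied from those classes.
#
#   from django.db.models import Aggregate
#
#   class ArrayAgg(OrderableAggMixin, Aggregate):
#       function = 'ARRAY_AGG'
#       template = '%(function)s(%(expressions)s %(ordering)s)'
#
#   Book.objects.aggregate(names=ArrayAgg('name', ordering='-pages'))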
|
{
"content_hash": "eb192d69ddc97acba5c741983fe577a2",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 40.724137931034484,
"alnum_prop": 0.6418289585097375,
"repo_name": "blighj/django",
"id": "340a9178793b850a427680a71815d7653089e361",
"size": "1181",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "django/contrib/postgres/aggregates/mixins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91756"
},
{
"name": "HTML",
"bytes": "238967"
},
{
"name": "JavaScript",
"bytes": "157514"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16145599"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|
"""
This file will:
Compile your python files into an executable using:
python setup.py py2exe
This file will not:
Copy over any templates/css/js etc into the dist folder alongside the exe
"""
import sys
import py2exe
import os
"""
We need to do this so that py2exe includes the correct dlls in the dist folder.
"""
dllList = ('qtnetwork.pyd','qtxmlpatterns4.dll',)
origIsSystemDLL = py2exe.build_exe.isSystemDLL
def isSystemDLL(pathname):
if os.path.basename(pathname).lower() in dllList:
return 0
return origIsSystemDLL(pathname)
py2exe.build_exe.isSystemDLL = isSystemDLL
""" New for 3.0 build - bundle flag (experimental).
If set to 1:
bundle required files into the executable
(including the zipfile)
"""
bundle = 0
if "bundle" in sys.argv[2:]:
bundle = 1
sys.argv.remove("bundle")
try:
# if this doesn't work, try import modulefinder
import py2exe.mf as modulefinder
import win32com
for p in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com", p)
for extra in ["win32com.shell"]: #,"win32com.mapi"
__import__(extra)
m = sys.modules[extra]
for p in m.__path__[1:]:
modulefinder.AddPackagePath(extra, p)
except ImportError:
# no build path setup, no worries.
pass
from distutils.core import setup
# SETUP PY2EXE PARAMETERS HERE:
script_list = [
{'script':'%s.py' % 'main',
'name':'SAMPLE-APP',
'version':'0.0.0.1',
'language':'en-gb',
'company_name':'defmyfunc.com',
}
]
options = {
"py2exe": {
"packages":["django", "django_offline", "mysite", "polls", "auth_fix"],
"includes": ["sip"],
"excludes":[],
"dll_excludes": ["w9xpopen.exe", "MSVCP90.dll"],
}
}
zipfile = "python/main.zip"
if bundle:
zipfile = None
options["py2exe"]["bundle_files"] = 2
setup(windows=script_list, options=options, zipfile=zipfile)
|
{
"content_hash": "a67fe118b6960d0006e9e0c5bf7766f7",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 24.337349397590362,
"alnum_prop": 0.6163366336633663,
"repo_name": "joeyjojo/django_offline",
"id": "407d79a29d96a96afa4e3cdc1659e4414199f4be",
"size": "2020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "88031"
},
{
"name": "Python",
"bytes": "4475207"
}
],
"symlink_target": ""
}
|
from django.db import models
from .helper import bulk_update
class BulkUpdateManager(models.Manager):
def bulk_update(self, objs, update_fields=None,
exclude_fields=None, batch_size=None):
return bulk_update(
objs, update_fields=update_fields,
exclude_fields=exclude_fields, using=self.db,
batch_size=batch_size)
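# Usage sketch (illustrative; the Person model is made up): attach the manager
# to a model, mutate instances in memory, then persist them in batched UPDATEs.
#
#   class Person(models.Model):
#       name = models.CharField(max_length=50)
#       objects = BulkUpdateManager()
#
#   people = list(Person.objects.all())
#   for person in people:
#       person.name = person.name.title()
#   Person.objects.bulk_update(people, update_fields=['name'], batch_size=500)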
|
{
"content_hash": "51bf99fcb7a5feae4b1cca0f259bf713",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 58,
"avg_line_length": 34.81818181818182,
"alnum_prop": 0.6553524804177546,
"repo_name": "lead-ratings/django-bulk-update",
"id": "7fe11ecb8dbea1a41d87d28a40106c4814c8d308",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_bulk_update/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48280"
}
],
"symlink_target": ""
}
|
"""Unittests for the subcmds module (mostly __init__.py than subcommands)."""
import optparse
import unittest
import subcmds
class AllCommands(unittest.TestCase):
"""Check registered all_commands."""
def test_required_basic(self):
"""Basic checking of registered commands."""
# NB: We don't test all subcommands as we want to avoid "change detection"
# tests, so we just look for the most common/important ones here that are
# unlikely to ever change.
for cmd in {'cherry-pick', 'help', 'init', 'start', 'sync', 'upload'}:
self.assertIn(cmd, subcmds.all_commands)
def test_naming(self):
"""Verify we don't add things that we shouldn't."""
for cmd in subcmds.all_commands:
# Reject filename suffixes like "help.py".
self.assertNotIn('.', cmd)
# Make sure all '_' were converted to '-'.
self.assertNotIn('_', cmd)
# Reject internal python paths like "__init__".
self.assertFalse(cmd.startswith('__'))
def test_help_desc_style(self):
"""Force some consistency in option descriptions.
Python's optparse & argparse have a few default options like --help. Their
option description text uses lowercase sentence fragments, so enforce our
options follow the same style so UI is consistent.
We enforce:
* Text starts with lowercase.
* Text doesn't end with period.
"""
for name, cls in subcmds.all_commands.items():
cmd = cls()
parser = cmd.OptionParser
for option in parser.option_list:
if option.help == optparse.SUPPRESS_HELP:
continue
c = option.help[0]
self.assertEqual(
c.lower(), c,
msg=f'subcmds/{name}.py: {option.get_opt_string()}: help text '
f'should start with lowercase: "{option.help}"')
self.assertNotEqual(
option.help[-1], '.',
msg=f'subcmds/{name}.py: {option.get_opt_string()}: help text '
f'should not end in a period: "{option.help}"')
|
{
"content_hash": "bc2831e2f7196adab8d0fc56bce1c3a1",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 34.152542372881356,
"alnum_prop": 0.6332506203473945,
"repo_name": "couchbasedeps/git-repo",
"id": "bc53051a3e6e2a89da5d621c74b6852513a360a6",
"size": "2616",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable",
"path": "tests/test_subcmds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "769532"
},
{
"name": "Shell",
"bytes": "8670"
}
],
"symlink_target": ""
}
|
import pygame
from pygame.locals import *
import os, sys
import threading
import time
"""
NOTES - pygame events and values
JOYAXISMOTION
event.axis event.value
0 - x axis left thumb (+1 is right, -1 is left)
1 - y axis left thumb (+1 is down, -1 is up)
2 - x axis right thumb (+1 is right, -1 is left)
3 - y axis right thumb (+1 is down, -1 is up)
4 - right trigger
5 - left trigger
JOYBUTTONDOWN | JOYBUTTONUP
event.button
A = 0
B = 1
X = 2
Y = 3
LB = 4
RB = 5
BACK = 6
START = 7
XBOX = 8
LEFTTHUMB = 9
RIGHTTHUMB = 10
JOYHATMOTION
event.value
[0] - horizontal
[1] - vertical
[0].0 - middle
[0].-1 - left
[0].+1 - right
[1].0 - middle
[1].-1 - bottom
[1].+1 - top
"""
#Main class for reading the xbox controller values
class XboxController(threading.Thread):
#internal ids for the xbox controls
class XboxControls():
LTHUMBX = 0
LTHUMBY = 1
RTHUMBX = 2
RTHUMBY = 3
RTRIGGER = 4
LTRIGGER = 5
A = 6
B = 7
X = 8
Y = 9
LB = 10
RB = 11
BACK = 12
START = 13
XBOX = 14
LEFTTHUMB = 15
RIGHTTHUMB = 16
DPAD = 17
#pygame axis constants for the analogue controls of the xbox controller
class PyGameAxis():
LTHUMBX = 0
LTHUMBY = 1
RTHUMBX = 2
RTHUMBY = 3
RTRIGGER = 4
LTRIGGER = 5
#pygame constants for the buttons of the xbox controller
class PyGameButtons():
A = 0
B = 1
X = 2
Y = 3
LB = 4
RB = 5
BACK = 6
START = 7
XBOX = 8
LEFTTHUMB = 9
RIGHTTHUMB = 10
#map between pygame axis (analogue stick) ids and xbox control ids
AXISCONTROLMAP = {PyGameAxis.LTHUMBX: XboxControls.LTHUMBX,
PyGameAxis.LTHUMBY: XboxControls.LTHUMBY,
PyGameAxis.RTHUMBX: XboxControls.RTHUMBX,
PyGameAxis.RTHUMBY: XboxControls.RTHUMBY}
#map between pygame axis (trigger) ids and xbox control ids
TRIGGERCONTROLMAP = {PyGameAxis.RTRIGGER: XboxControls.RTRIGGER,
PyGameAxis.LTRIGGER: XboxControls.LTRIGGER}
#map between pygame button ids and xbox control ids
BUTTONCONTROLMAP = {PyGameButtons.A: XboxControls.A,
PyGameButtons.B: XboxControls.B,
PyGameButtons.X: XboxControls.X,
PyGameButtons.Y: XboxControls.Y,
PyGameButtons.LB: XboxControls.LB,
PyGameButtons.RB: XboxControls.RB,
PyGameButtons.BACK: XboxControls.BACK,
PyGameButtons.START: XboxControls.START,
PyGameButtons.XBOX: XboxControls.XBOX,
PyGameButtons.LEFTTHUMB: XboxControls.LEFTTHUMB,
PyGameButtons.RIGHTTHUMB: XboxControls.RIGHTTHUMB}
#setup xbox controller class
def __init__(self,
controllerCallBack = None,
joystickNo = 0,
deadzone = 0.1,
scale = 1,
invertYAxis = False):
#setup threading
threading.Thread.__init__(self)
#persist values
self.running = False
self.controllerCallBack = controllerCallBack
self.joystickNo = joystickNo
self.lowerDeadzone = deadzone * -1
self.upperDeadzone = deadzone
self.scale = scale
self.invertYAxis = invertYAxis
self.controlCallbacks = {}
#setup controller properties
self.controlValues = {self.XboxControls.LTHUMBX:0,
self.XboxControls.LTHUMBY:0,
self.XboxControls.RTHUMBX:0,
self.XboxControls.RTHUMBY:0,
self.XboxControls.RTRIGGER:0,
self.XboxControls.LTRIGGER:0,
self.XboxControls.A:0,
self.XboxControls.B:0,
self.XboxControls.X:0,
self.XboxControls.Y:0,
self.XboxControls.LB:0,
self.XboxControls.RB:0,
self.XboxControls.BACK:0,
self.XboxControls.START:0,
self.XboxControls.XBOX:0,
self.XboxControls.LEFTTHUMB:0,
self.XboxControls.RIGHTTHUMB:0,
self.XboxControls.DPAD:(0,0)}
#setup pygame
self._setupPygame(joystickNo)
#Create controller properties
@property
def LTHUMBX(self):
return self.controlValues[self.XboxControls.LTHUMBX]
@property
def LTHUMBY(self):
return self.controlValues[self.XboxControls.LTHUMBY]
@property
def RTHUMBX(self):
return self.controlValues[self.XboxControls.RTHUMBX]
@property
def RTHUMBY(self):
return self.controlValues[self.XboxControls.RTHUMBY]
@property
def RTRIGGER(self):
return self.controlValues[self.XboxControls.RTRIGGER]
@property
def LTRIGGER(self):
return self.controlValues[self.XboxControls.LTRIGGER]
@property
def A(self):
return self.controlValues[self.XboxControls.A]
@property
def B(self):
return self.controlValues[self.XboxControls.B]
@property
def X(self):
return self.controlValues[self.XboxControls.X]
@property
def Y(self):
return self.controlValues[self.XboxControls.Y]
@property
def LB(self):
return self.controlValues[self.XboxControls.LB]
@property
def RB(self):
return self.controlValues[self.XboxControls.RB]
@property
def BACK(self):
return self.controlValues[self.XboxControls.BACK]
@property
def START(self):
return self.controlValues[self.XboxControls.START]
@property
def XBOX(self):
return self.controlValues[self.XboxControls.XBOX]
@property
def LEFTTHUMB(self):
return self.controlValues[self.XboxControls.LEFTTHUMB]
@property
def RIGHTTHUMB(self):
return self.controlValues[self.XboxControls.RIGHTTHUMB]
@property
def DPAD(self):
return self.controlValues[self.XboxControls.DPAD]
#setup pygame
def _setupPygame(self, joystickNo):
# set SDL to use the dummy NULL video driver, so it doesn't need a windowing system.
os.environ["SDL_VIDEODRIVER"] = "dummy"
# init pygame
pygame.init()
# create a 1x1 pixel screen; it's not used, so it doesn't matter
screen = pygame.display.set_mode((1, 1))
# init the joystick control
pygame.joystick.init()
# how many joysticks are there
#print pygame.joystick.get_count()
# get the first joystick
joy = pygame.joystick.Joystick(joystickNo)
# init that joystick
joy.init()
#called by the thread
def run(self):
self._start()
#start the controller
def _start(self):
self.running = True
#run until the controller is stopped
while(self.running):
#react to the pygame events that come from the xbox controller
if True:
#for event in pygame.event.wait():
event = pygame.event.wait()
#thumb sticks, trigger buttons
if event.type == JOYAXISMOTION:
#is this axis on our xbox controller
if event.axis in self.AXISCONTROLMAP:
#is this a y axis
yAxis = True if (event.axis == self.PyGameAxis.LTHUMBY or event.axis == self.PyGameAxis.RTHUMBY) else False
#update the control value
self.updateControlValue(self.AXISCONTROLMAP[event.axis],
self._sortOutAxisValue(event.value, yAxis))
#is this axis a trigger
if event.axis in self.TRIGGERCONTROLMAP:
#update the control value
self.updateControlValue(self.TRIGGERCONTROLMAP[event.axis],
self._sortOutTriggerValue(event.value))
#d pad
elif event.type == JOYHATMOTION:
#update control value
self.updateControlValue(self.XboxControls.DPAD, event.value)
#button pressed and unpressed
elif event.type == JOYBUTTONUP or event.type == JOYBUTTONDOWN:
#is this button on our xbox controller
if event.button in self.BUTTONCONTROLMAP:
#update control value
self.updateControlValue(self.BUTTONCONTROLMAP[event.button],
self._sortOutButtonValue(event.type))
#stops the controller
def stop(self):
self.running = False
#updates a specific value in the control dictionary
def updateControlValue(self, control, value):
#if the value has changed update it and call the callbacks
if self.controlValues[control] != value:
self.controlValues[control] = value
self.doCallBacks(control, value)
#calls the call backs if necessary
def doCallBacks(self, control, value):
#call the general callback
if self.controllerCallBack != None: self.controllerCallBack(control, value)
#has a specific callback been setup?
if control in self.controlCallbacks:
self.controlCallbacks[control](value)
#used to add a specific callback to a control
def setupControlCallback(self, control, callbackFunction):
# add callback to the dictionary
self.controlCallbacks[control] = callbackFunction
#scales the axis values, applies the deadzone
def _sortOutAxisValue(self, value, yAxis = False):
#invert yAxis
if yAxis and self.invertYAxis: value = value * -1
#scale the value
value = value * self.scale
#apply the deadzone
if value < self.upperDeadzone and value > self.lowerDeadzone: value = 0
return value
#turns the trigger value into something sensible and scales it
def _sortOutTriggerValue(self, value):
#trigger goes -1 to 1 (-1 is off, 1 is full on, half is 0) - I want this to be 0 - 1
value = max(0,(value + 1) / 2)
#scale the value
value = value * self.scale
return value
#turns the event type (up/down) into a value
def _sortOutButtonValue(self, eventType):
#if the button is down its 1, if the button is up its 0
value = 1 if eventType == JOYBUTTONDOWN else 0
return value
#tests
if __name__ == '__main__':
#generic call back
def controlCallBack(xboxControlId, value):
print "Control Id = {}, Value = {}".format(xboxControlId, value)
#specific callbacks for the left thumb (X & Y)
def leftThumbX(xValue):
print "LX {}".format(xValue)
def leftThumbY(yValue):
print "LY {}".format(yValue)
#setup xbox controller, set out the deadzone and scale, also invert the Y axis (for some reason in Pygame negative is up - weird!)
xboxCont = XboxController(controlCallBack, deadzone = 30, scale = 100, invertYAxis = True)
#setup the left thumb (X & Y) callbacks
xboxCont.setupControlCallback(xboxCont.XboxControls.LTHUMBX, leftThumbX)
xboxCont.setupControlCallback(xboxCont.XboxControls.LTHUMBY, leftThumbY)
try:
#start the controller
xboxCont.start()
print "xbox controller running"
while True:
time.sleep(1)
#Ctrl C
except KeyboardInterrupt:
print "User cancelled"
#error
except:
print "Unexpected error:", sys.exc_info()[0]
raise
finally:
#stop the controller
xboxCont.stop()
|
{
"content_hash": "4ec41f853288e911df45418e5fd3d7b7",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 134,
"avg_line_length": 33.272965879265094,
"alnum_prop": 0.5589650548236964,
"repo_name": "abramhindle/xbox-controller-supercollider",
"id": "175a5aa56e73b27cb35b0108dc237291a3a1d16a",
"size": "12837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "XboxController.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13647"
},
{
"name": "SuperCollider",
"bytes": "21279"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
from .. import DETAIL_TYPE
for subcategory in orm.CutSubcategory.objects.all():
subcategory.type = DETAIL_TYPE
subcategory.save()
def backwards(self, orm):
"Write your backwards methods here."
pass
models = {
u'core.category': {
'Meta': {'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Image']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
u'core.detail': {
'Meta': {'object_name': 'Detail'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'facts': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Subcategory']"}),
'title_image': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Image']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'core.image': {
'Meta': {'object_name': 'Image'},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'figcaption': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'core.subcategory': {
'Meta': {'object_name': 'Subcategory'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Category']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
u'cuts.cutdetail': {
'Meta': {'object_name': 'CutDetail', '_ormbases': [u'core.Detail']},
u'detail_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Detail']", 'unique': 'True', 'primary_key': 'True'})
},
u'cuts.cutsubcategory': {
'Meta': {'object_name': 'CutSubcategory', '_ormbases': [u'core.Subcategory']},
u'subcategory_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Subcategory']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['cuts']
symmetrical = True
|
{
"content_hash": "26895571359f6bd62f52515f792c191d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 194,
"avg_line_length": 66.62337662337663,
"alnum_prop": 0.560233918128655,
"repo_name": "michaupl/materialsapp",
"id": "19eb313ad6a133c3925692f1a1dd55f77c83973a",
"size": "5154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cuts/migrations/0003_set_type_on_subcategory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6799"
},
{
"name": "JavaScript",
"bytes": "9273"
},
{
"name": "Python",
"bytes": "213454"
}
],
"symlink_target": ""
}
|
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
import numpy as np
import matplotlib.pyplot as plt
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvas
def enter_axes(event):
print('enter_axes', event.inaxes)
event.inaxes.patch.set_facecolor('yellow')
event.canvas.draw()
def leave_axes(event):
print('leave_axes', event.inaxes)
event.inaxes.patch.set_facecolor('white')
event.canvas.draw()
def enter_figure(event):
print('enter_figure', event.canvas.figure)
event.canvas.figure.patch.set_facecolor('red')
event.canvas.draw()
def leave_figure(event):
print('leave_figure', event.canvas.figure)
event.canvas.figure.patch.set_facecolor('grey')
event.canvas.draw()
kv = """
<Test>:
orientation: 'vertical'
Button:
size_hint_y: None
height: 40
"""
Builder.load_string(kv)
class Test(BoxLayout):
def __init__(self, *args, **kwargs):
super(Test, self).__init__(*args, **kwargs)
self.add_plot()
def get_fc(self, i):
fig1 = plt.figure()
fig1.suptitle('mouse hover over figure or axes to trigger events' +
str(i))
ax1 = fig1.add_subplot(211)
ax2 = fig1.add_subplot(212)
wid = FigureCanvas(fig1)
fig1.canvas.mpl_connect('figure_enter_event', enter_figure)
fig1.canvas.mpl_connect('figure_leave_event', leave_figure)
fig1.canvas.mpl_connect('axes_enter_event', enter_axes)
fig1.canvas.mpl_connect('axes_leave_event', leave_axes)
return wid
def add_plot(self):
self.add_widget(self.get_fc(1))
self.add_widget(self.get_fc(2))
class TestApp(App):
def build(self):
return Test()
if __name__ == '__main__':
TestApp().run()
|
{
"content_hash": "38df0d6b45c474322f8fa6abfc5f3763",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 75,
"avg_line_length": 24.594594594594593,
"alnum_prop": 0.6395604395604395,
"repo_name": "andnovar/garden.matplotlib",
"id": "3a3c84de59c0a98e3a25b6f1ee25ec5538eac8c9",
"size": "1820",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/test_events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57020"
}
],
"symlink_target": ""
}
|
"""
Support the sensor of a BloomSky weather station.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/sensor.bloomsky/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (TEMP_FAHRENHEIT, CONF_MONITORED_CONDITIONS)
from homeassistant.helpers.entity import Entity
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['bloomsky']
# These are the available sensors
SENSOR_TYPES = ['Temperature',
'Humidity',
'Pressure',
'Luminance',
'UVIndex',
'Voltage']
# Sensor units - these do not currently align with the API documentation
SENSOR_UNITS = {'Temperature': TEMP_FAHRENHEIT,
'Humidity': '%',
'Pressure': 'inHg',
'Luminance': 'cd/m²',
'Voltage': 'mV'}
# Which sensors to format numerically
FORMAT_NUMBERS = ['Temperature', 'Pressure', 'Voltage']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_CONDITIONS, default=SENSOR_TYPES):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the available BloomSky weather sensors."""
bloomsky = get_component('bloomsky')
# Default needed in case of discovery
sensors = config.get(CONF_MONITORED_CONDITIONS, SENSOR_TYPES)
for device in bloomsky.BLOOMSKY.devices.values():
for variable in sensors:
add_devices([BloomSkySensor(bloomsky.BLOOMSKY, device, variable)])
class BloomSkySensor(Entity):
"""Representation of a single sensor in a BloomSky device."""
def __init__(self, bs, device, sensor_name):
"""Initialize a BloomSky sensor."""
self._bloomsky = bs
self._device_id = device['DeviceID']
self._sensor_name = sensor_name
self._name = '{} {}'.format(device['DeviceName'], sensor_name)
self._unique_id = 'bloomsky_sensor {}'.format(self._name)
self.update()
@property
def name(self):
"""Return the name of the BloomSky device and this sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique ID for this sensor."""
return self._unique_id
@property
def state(self):
"""Return the current state, eg. value, of this sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the sensor units."""
return SENSOR_UNITS.get(self._sensor_name, None)
def update(self):
"""Request an update from the BloomSky API."""
self._bloomsky.refresh_devices()
state = \
self._bloomsky.devices[self._device_id]['Data'][self._sensor_name]
if self._sensor_name in FORMAT_NUMBERS:
self._state = '{0:.2f}'.format(state)
else:
self._state = state
|
{
"content_hash": "1033418b3e8d7c356bee0ea02399f46d",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 78,
"avg_line_length": 31.818181818181817,
"alnum_prop": 0.6415873015873016,
"repo_name": "shaftoe/home-assistant",
"id": "62769dc049486c101815b4fc93eae0fc0c1b1a05",
"size": "3151",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/bloomsky.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1584258"
},
{
"name": "Python",
"bytes": "5479272"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15017"
}
],
"symlink_target": ""
}
|
import os
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID', None)
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN', None)
#NOTE: The mongodb will be deleted right before this repo goes public so I don't care if you can see the password.
MONGODB_HOST = os.environ.get('MONGODB_HOST', 'mongodb://ted:ted@sawyer.mongohq.com/tedxhec')
MONGODB_PORT = os.environ.get('MONGODB_PORT', 10015)
#NOTE: This is also going down... sorry internet.
REDIS_HOST = os.environ.get('REDIS_HOST', 'ec2-107-22-63-170.compute-1.amazonaws.com')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
|
{
"content_hash": "5b80766a00facbf0ed3a3afc61d3f6f7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 114,
"avg_line_length": 48.833333333333336,
"alnum_prop": 0.7389078498293515,
"repo_name": "sayar/tedxhec",
"id": "cdf403fec52547fd437abebe7ca3785435380cb9",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "5334"
},
{
"name": "Python",
"bytes": "13152"
},
{
"name": "Shell",
"bytes": "1256"
}
],
"symlink_target": ""
}
|
import os
import shutil
import sqlite3
def quotateNames(names):
return ", ".join([('"%s"' % name) for name in names])
class TagFolder:
def __init__(self, path):
self.path = path
self.tag_path = self.path + os.sep + ".tag"
self.db_path = self.tag_path + os.sep + "tag.db"
@staticmethod
def getTFByPath(path):
dirpath = os.path.abspath(path)
if os.path.isdir(dirpath + os.sep + ".tag"):
return TagFolder(dirpath)
if os.path.dirname(dirpath) == dirpath:
raise BaseException("There is no repository.")
return TagFolder.getTFByPath(os.path.dirname(dirpath))
def removeRepository(self):
shutil.rmtree(self.tag_path)
def getDb(self):
if not hasattr(self, "db"):
self.db = DbConnection(self.db_path)
return self.db
def init(self):
if not os.path.isdir(self.tag_path):
os.makedirs(self.tag_path)
os.system('attrib +h "' + self.tag_path + '"')
if os.path.exists(self.db_path):
raise BaseException("Already initialized")
self.getDb().execute('''CREATE TABLE tag (id integer primary key autoincrement not null, name char unique);''')
self.getDb().execute('''CREATE TABLE file (id integer primary key autoincrement not null, spath char unique);''')
self.getDb().execute('''CREATE TABLE files_tags(
file_id integer,
tag_id integer,
primary key (file_id, tag_id),
foreign key(file_id) references file(id) on delete cascade,
foreign key(tag_id) references tag(id) on delete cascade
);''')
def execute(self, query):
return self.getDb().execute(query)
class DbConnection:
def __init__(self, path):
self.path = path
def __del__(self):
if hasattr(self, "connection"):
self.connection.close()
def getCursor(self):
if not hasattr(self, "cursor"):
self.connection = sqlite3.connect(self.path)
self.cursor = self.connection.cursor()
return self.cursor
def execute(self, query):
cursor = self.getCursor()
result = cursor.execute(query)
self.connection.commit()
return result
class TagFile:
def __init__(self, folder):
self.folder = folder
def addMultipleTags(self, filenames, tagnames):
self.folder.execute('''INSERT or IGNORE INTO tag (name) %s;''' %
" UNION ".join([('SELECT "%s"' % tag) for tag in tagnames])
)
self.folder.execute('''INSERT or IGNORE INTO file (spath) %s;''' %
" UNION ".join([('SELECT "%s"' % spath) for spath in filenames])
)
self.folder.execute('''INSERT OR IGNORE INTO files_tags (file_id, tag_id)
SELECT file.id, tag.id FROM file CROSS JOIN tag
WHERE file.spath IN (%s) AND tag.name IN (%s);''' % (quotateNames(filenames), quotateNames(tagnames)))
def addTag(self, filepath, tagname):
tag_id = Tag(self.folder).getId(tagname, True)
file_id = File(self.folder).getId(filepath, True)
self.folder.execute('''INSERT or IGNORE INTO files_tags (file_id, tag_id) VALUES (%d, %d);''' % (file_id, tag_id))
def removeMultipleTags(self, filenames, tagnames):
self.folder.execute('''DELETE FROM files_tags WHERE rowid IN (
SELECT files_tags.rowid FROM files_tags
INNER JOIN file ON file.id = file_id
INNER JOIN tag ON tag.id = tag_id
WHERE file.spath IN (%s) AND tag.name IN (%s)
);''' % (quotateNames(filenames), quotateNames(tagnames)))
def clearFiles(self, filenames):
file_object = File(self.folder)
file_object.clear(filenames)
def clearTags(self, tagnames):
tag_object = Tag(self.folder)
tag_object.clear(tagnames)
def removeTag(self, filepath, tagname):
tag_id = Tag(self.folder).getId(tagname)
file_id = File(self.folder).getId(filepath)
self.folder.execute('''DELETE FROM files_tags WHERE file_id = %d AND tag_id = %d''' % (file_id, tag_id))
def getTags(self, filenames):
tags_raw = self.folder.execute('''SELECT spath, name FROM files_tags
INNER JOIN tag ON tag.id = tag_id
INNER JOIN file ON file.id = file_id
WHERE spath IN (%s)''' % quotateNames(filenames))
tags_all = {}
for tag_row in tags_raw.fetchall():
filename = tag_row[0]
tagname = tag_row[1]
if not filename in tags_all:
tags_all[filename] = []
tags_all[filename].append(tagname)
return tags_all
def searchTags(self, tagnames):
files_raw = self.folder.execute('''SELECT spath, name FROM files_tags
INNER JOIN tag ON tag.id = tag_id
INNER JOIN file ON file.id = file_id
WHERE name IN (%s)''' % quotateNames(tagnames))
files_all = {}
for file_row in files_raw.fetchall():
filename = file_row[0]
tagname = file_row[1]
if not tagname in files_all:
files_all[tagname] = []
files_all[tagname].append(filename)
return files_all
class AbstractModel:
def __init__(self, folder, tablename, namename):
self.folder = folder
self.tablename = tablename
self.namename = namename
def getId(self, name, add_if_not_exists = False):
if add_if_not_exists:
self.folder.execute('''INSERT or IGNORE INTO %s (%s) VALUES ("%s");''' % (self.tablename, self.namename, name))
# query by the table's own name column (name for tag, spath for file)
item_row = self.folder.execute('''SELECT id FROM %s WHERE %s = "%s";''' % (self.tablename, self.namename, name)).fetchone()
if item_row is None:
return False
return item_row[0]
def clear(self, names):
self.folder.execute('''DELETE FROM %s WHERE %s IN (%s)''' % (self.tablename, self.namename, quotateNames(names)))
class Tag(AbstractModel):
def __init__(self, folder):
return AbstractModel.__init__(self, folder,
tablename = "tag",
namename = "name"
)
class File(AbstractModel): # must subclass AbstractModel so getId/clear and the __init__ call below work
def __init__(self, folder):
return AbstractModel.__init__(self, folder,
tablename = "file",
namename = "spath"
)
|
{
"content_hash": "824c578469c4e0ab62d50090fac657aa",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 116,
"avg_line_length": 29.430851063829788,
"alnum_prop": 0.6752213988794505,
"repo_name": "modifier/tag",
"id": "efbd863dbe68c95128a5582b3e5b70901ffdc5e5",
"size": "5533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tagutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14269"
}
],
"symlink_target": ""
}
|
import copy
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
_version = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'links': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'href': {'type': 'string', 'format': 'uri'},
'rel': {'type': 'string'},
'type': {'type': 'string'},
},
'required': ['href', 'rel'],
'additionalProperties': False
}
},
'status': {'type': 'string'},
'updated': parameter_types.date_time,
'version': {'type': 'string'},
'min_version': {'type': 'string'},
'media-types': {
'type': 'array',
'properties': {
'base': {'type': 'string'},
'type': {'type': 'string'},
}
},
},
# NOTE: version and min_version have been added since Kilo,
# so they should not be required.
# NOTE(sdague): media-types only shows up in single version requests.
'required': ['id', 'links', 'status', 'updated'],
'additionalProperties': False
}
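# Illustrative version entry this schema is meant to accept (field values are
# made up; "version"/"min_version" may be absent on pre-Kilo servers):
#
#   {"id": "v2.1", "status": "CURRENT", "updated": "2013-07-23T11:33:21Z",
#    "version": "2.60", "min_version": "2.1",
#    "links": [{"href": "http://example.com/v2.1/", "rel": "self"}]}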
list_versions = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'versions': {
'type': 'array',
'items': _version
}
},
'required': ['versions'],
'additionalProperties': False
}
}
_detail_get_version = copy.deepcopy(_version)
_detail_get_version['properties'].pop('min_version')
_detail_get_version['properties'].pop('version')
_detail_get_version['properties'].pop('updated')
_detail_get_version['properties']['media-types'] = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'base': {'type': 'string'},
'type': {'type': 'string'}
}
}
}
_detail_get_version['required'] = ['id', 'links', 'status', 'media-types']
get_version = {
'status_code': [300],
'response_body': {
'type': 'object',
'properties': {
'choices': {
'type': 'array',
'items': _detail_get_version
}
},
'required': ['choices'],
'additionalProperties': False
}
}
get_one_version = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'version': _version
},
'additionalProperties': False
}
}
|
{
"content_hash": "f8a381cb1045a63a8555e89119727ec2",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 74,
"avg_line_length": 26.540816326530614,
"alnum_prop": 0.46251441753171857,
"repo_name": "Juniper/tempest",
"id": "7f56239286649578f8078b002639116346f1e9a2",
"size": "3232",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/lib/api_schema/response/compute/v2_1/versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4194970"
},
{
"name": "Shell",
"bytes": "19343"
}
],
"symlink_target": ""
}
|
""" BGP Exception """
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class BGPException(Exception):
"""Base BGP Exception.
"""
message = "An unknown exception occurred."
def __init__(self, **kwargs):
try:
super(BGPException, self).__init__(self.message % kwargs)
self.msg = self.message % kwargs
except Exception:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
# at least get the core message out if something happened
super(BGPException, self).__init__(self.message)
def __unicode__(self):
return unicode(self.msg)
class NotificationSent(Exception):
"""BGP Notification Exception.
"""
message = "Unknow Notification exception occurred"
error = 0
def __init__(self, sub_error, data=''):
try:
super(NotificationSent, self).__init__()
self.msg = self.message % {'sub_error': sub_error, 'data': data}
self.sub_error = sub_error
self.data = data
except Exception:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
# at least get the core message out if something happened
super(NotificationSent, self).__init__(self.message)
def __unicode__(self):
return unicode(self.msg)
class MessageHeaderError(NotificationSent):
error = 1
message = "BGP Message Header Error, sub error:%(sub_error)s, data:%(data)s"
class OpenMessageError(NotificationSent):
error = 2
message = "BGP Open Message Error, sub error:%(sub_error)s, data:%(data)s"
class UpdateMessageError(NotificationSent):
error = 3
message = "BGP Update Message Error, sub error:%(sub_error)s, data:%(data)s"
class HoldTimerExpiredError(NotificationSent):
error = 4
message = "BGP Hold Timer Expired, sub error:%(sub_error)s, data:%(data)s"
class FSMError(NotificationSent):
error = 5
message = "BGP FSM Error, sub error:%(sub_error)s, data:%(data)s"
class ErrCease(NotificationSent):
error = 6
message = "BGP ERR CEASE Error, sub error:%(sub_error)s, data:%(data)s"
|
{
"content_hash": "031755b76e2db588747b6ea5d6196bcf",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 80,
"avg_line_length": 28.710526315789473,
"alnum_prop": 0.6099908340971586,
"repo_name": "jack8daniels2/yabgp",
"id": "c8073cae8c62a6437d93f29a533dce12d08702de",
"size": "2817",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yabgp/common/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "256862"
}
],
"symlink_target": ""
}
|
import pytest
def get_file_contents(file_path):
file = open(file_path)
file_contents = file.read()
file.close()
return file_contents
def cleanup_stack_if_exists(heat_client, template_name):
stacks = heat_client.stacks.list()
for stack in stacks:
if stack.stack_name == template_name:
heat_client.delete_stack(stack.id)
@pytest.fixture
def HeatStack(heatclientmanager, request):
'''Fixture for creating/deleting a heat stack.'''
def manage_stack(
template_file,
stack_name,
parameters={},
teardown=True,
expect_fail=False
):
def test_teardown():
heatclientmanager.delete_stack(stack.id)
template = get_file_contents(template_file)
config = {}
config['stack_name'] = stack_name
config['template'] = template
config['parameters'] = parameters
# Call delete before create, in case previous teardown failed
cleanup_stack_if_exists(heatclientmanager, stack_name)
target_status = 'CREATE_COMPLETE'
if expect_fail:
target_status = 'CREATE_FAILED'
stack = heatclientmanager.create_stack(
config,
target_status=target_status
)
if teardown:
request.addfinalizer(test_teardown)
return heatclientmanager, stack
return manage_stack
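# Illustrative usage of the fixture above (not part of this module); the
# template path and stack name are invented for the example:
#
#   def test_stack_create(HeatStack):
#       heat_client, stack = HeatStack('templates/example.yaml', 'example_stack')
#       # create_stack has already waited for CREATE_COMPLETE at this point
#       assert stack is not None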
|
{
"content_hash": "1fe06b5228f8bba961553ff841ab91c1",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 69,
"avg_line_length": 29.520833333333332,
"alnum_prop": 0.6146788990825688,
"repo_name": "pjbreaux/f5-openstack-test",
"id": "122881af0b00731423aa1f2e23b72757ebe4e8cd",
"size": "2006",
"binary": false,
"copies": "2",
"ref": "refs/heads/liberty",
"path": "f5_os_test/heat_client_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28843"
}
],
"symlink_target": ""
}
|
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.spatialite',
'NAME': 'neo.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'neo_test',
}
}
AUTHENTICATION_BACKENDS = (
'foundry.backends.MultiBackend',
'django.contrib.auth.backends.ModelBackend',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
)
# A tuple of callables that are used to populate the context in RequestContext.
# These callables take a request object as their argument and return a
# dictionary of items to be merged into the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
'preferences.context_processors.preferences_cp',
'foundry.context_processors.foundry',
)
FOUNDRY = {
'layers': ('basic', )
}
# AppDirectoriesTypeLoader must be after filesystem loader
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'foundry.loaders.AppDirectoriesTypeLoader',
'django.template.loaders.app_directories.Loader',
)
INSTALLED_APPS = [
'atlas',
'south',
'django.contrib.auth',
'django.contrib.comments',
'django.contrib.contenttypes',
'django.contrib.sites',
'django.contrib.gis',
'django.contrib.sessions',
'category',
'preferences',
'jmbo',
'competition',
'photologue',
'secretballot',
'publisher',
'foundry',
'neo',
'compressor',
'social_auth',
]
NEO = {
'URL': 'https://neostaging.wsnet.diageo.com/MCAL/MultiChannelWebService.svc',
'APP_ID': '',
'VERSION_ID': '',
'PROMO_CODE': '',
'PASSWORD': '',
'BRAND_ID': 0,
'VERIFY_CERT': False,
'USE_MCAL': True,
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'null': {
'level': 'INFO',
'class': 'logging.NullHandler',
'formatter': 'verbose'
}
},
'loggers': {
'neo.api': {
'level': 'INFO',
'handlers': ['null'],
'propagate': False,
}
},
}
STATIC_URL = 'static/'
SITE_ID = 1
ROOT_URLCONF = 'neo.test_urls'
SOUTH_TESTS_MIGRATE = False
|
{
"content_hash": "da438662b5fa188eceedacd63f5341b4",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 95,
"avg_line_length": 24.806451612903224,
"alnum_prop": 0.618335500650195,
"repo_name": "praekelt/jmbo-neo",
"id": "d0d5efd3a3ae5e179e076b9b20890f7a2fbb1b19",
"size": "3076",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "447536"
}
],
"symlink_target": ""
}
|
"""The tests for UVC camera module."""
import socket
import unittest
from unittest import mock
import pytest
import requests
from uvcclient import camera, nvr
from homeassistant.components.camera import SUPPORT_STREAM
from homeassistant.components.uvc import camera as uvc
from homeassistant.exceptions import PlatformNotReady
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
class TestUVCSetup(unittest.TestCase):
"""Test the UVC camera platform."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_full_config(self, mock_uvc, mock_remote):
"""Test the setup with full configuration."""
config = {
"platform": "uvc",
"nvr": "foo",
"password": "bar",
"port": 123,
"key": "secret",
}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
{"uuid": "three", "name": "Old AirCam", "id": "id3"},
]
def mock_get_camera(uuid):
"""Create a mock camera."""
if uuid == "id3":
return {"model": "airCam"}
return {"model": "UVC"}
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.side_effect = mock_get_camera
mock_remote.return_value.server_version = (3, 2, 0)
assert setup_component(self.hass, "camera", {"camera": config})
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 123, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "id1", "Front", "bar"),
mock.call(mock_remote.return_value, "id2", "Back", "bar"),
]
)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_partial_config(self, mock_uvc, mock_remote):
"""Test the setup with partial configuration."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
]
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.return_value = {"model": "UVC"}
mock_remote.return_value.server_version = (3, 2, 0)
assert setup_component(self.hass, "camera", {"camera": config})
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 7080, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "id1", "Front", "ubnt"),
mock.call(mock_remote.return_value, "id2", "Back", "ubnt"),
]
)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_partial_config_v31x(self, mock_uvc, mock_remote):
"""Test the setup with a v3.1.x server."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
]
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.return_value = {"model": "UVC"}
mock_remote.return_value.server_version = (3, 1, 3)
assert setup_component(self.hass, "camera", {"camera": config})
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 7080, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "one", "Front", "ubnt"),
mock.call(mock_remote.return_value, "two", "Back", "ubnt"),
]
)
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_incomplete_config(self, mock_uvc):
"""Test the setup with incomplete configuration."""
assert setup_component(self.hass, "camera", {"platform": "uvc", "nvr": "foo"})
assert not mock_uvc.called
assert setup_component(
self.hass, "camera", {"platform": "uvc", "key": "secret"}
)
assert not mock_uvc.called
assert setup_component(
self.hass, "camera", {"platform": "uvc", "port": "invalid"}
)
assert not mock_uvc.called
@mock.patch.object(uvc, "UnifiVideoCamera")
@mock.patch("uvcclient.nvr.UVCRemote")
def setup_nvr_errors_during_indexing(self, error, mock_remote, mock_uvc):
"""Set up test for NVR errors during indexing."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_remote.return_value.index.side_effect = error
assert setup_component(self.hass, "camera", {"camera": config})
assert not mock_uvc.called
def test_setup_nvr_error_during_indexing_notauthorized(self):
"""Test for error: nvr.NotAuthorized."""
self.setup_nvr_errors_during_indexing(nvr.NotAuthorized)
def test_setup_nvr_error_during_indexing_nvrerror(self):
"""Test for error: nvr.NvrError."""
self.setup_nvr_errors_during_indexing(nvr.NvrError)
pytest.raises(PlatformNotReady)
def test_setup_nvr_error_during_indexing_connectionerror(self):
"""Test for error: requests.exceptions.ConnectionError."""
self.setup_nvr_errors_during_indexing(requests.exceptions.ConnectionError)
pytest.raises(PlatformNotReady)
@mock.patch.object(uvc, "UnifiVideoCamera")
@mock.patch("uvcclient.nvr.UVCRemote.__init__")
def setup_nvr_errors_during_initialization(self, error, mock_remote, mock_uvc):
"""Set up test for NVR errors during initialization."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_remote.return_value = None
mock_remote.side_effect = error
assert setup_component(self.hass, "camera", {"camera": config})
assert not mock_remote.index.called
assert not mock_uvc.called
def test_setup_nvr_error_during_initialization_notauthorized(self):
"""Test for error: nvr.NotAuthorized."""
self.setup_nvr_errors_during_initialization(nvr.NotAuthorized)
def test_setup_nvr_error_during_initialization_nvrerror(self):
"""Test for error: nvr.NvrError."""
self.setup_nvr_errors_during_initialization(nvr.NvrError)
pytest.raises(PlatformNotReady)
def test_setup_nvr_error_during_initialization_connectionerror(self):
"""Test for error: requests.exceptions.ConnectionError."""
self.setup_nvr_errors_during_initialization(requests.exceptions.ConnectionError)
pytest.raises(PlatformNotReady)
class TestUVC(unittest.TestCase):
"""Test class for UVC."""
def setup_method(self, method):
"""Set up the mock camera."""
self.nvr = mock.MagicMock()
self.uuid = "uuid"
self.name = "name"
self.password = "seekret"
self.uvc = uvc.UnifiVideoCamera(self.nvr, self.uuid, self.name, self.password)
self.nvr.get_camera.return_value = {
"model": "UVC Fake",
"recordingSettings": {"fullTimeRecordEnabled": True},
"host": "host-a",
"internalHost": "host-b",
"username": "admin",
"channels": [
{
"id": "0",
"width": 1920,
"height": 1080,
"fps": 25,
"bitrate": 6000000,
"isRtspEnabled": True,
"rtspUris": ["rtsp://host-a:7447/uuid_rtspchannel_0"],
},
{
"id": "1",
"width": 1024,
"height": 576,
"fps": 15,
"bitrate": 1200000,
"isRtspEnabled": False,
"rtspUris": ["rtsp://host-a:7447/uuid_rtspchannel_1"],
},
],
}
self.nvr.server_version = (3, 2, 0)
def test_properties(self):
"""Test the properties."""
assert self.name == self.uvc.name
assert self.uvc.is_recording
assert "Ubiquiti" == self.uvc.brand
assert "UVC Fake" == self.uvc.model
assert SUPPORT_STREAM == self.uvc.supported_features
def test_stream(self):
"""Test the RTSP stream URI."""
stream_source = yield from self.uvc.stream_source()
assert stream_source == "rtsp://host-a:7447/uuid_rtspchannel_0"
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login(self, mock_camera, mock_store):
"""Test the login."""
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-a", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClient")
def test_login_v31x(self, mock_camera, mock_store):
"""Test login with v3.1.x server."""
self.nvr.server_version = (3, 1, 3)
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-a", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login_tries_both_addrs_and_caches(self, mock_camera, mock_store):
"""Test the login tries."""
responses = [0]
def mock_login(*a):
"""Mock login."""
try:
responses.pop(0)
raise socket.error
except IndexError:
pass
mock_store.return_value.get_camera_password.return_value = None
mock_camera.return_value.login.side_effect = mock_login
self.uvc._login()
assert 2 == mock_camera.call_count
assert "host-b" == self.uvc._connect_addr
mock_camera.reset_mock()
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-b", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login_fails_both_properly(self, mock_camera, mock_store):
"""Test if login fails properly."""
mock_camera.return_value.login.side_effect = socket.error
assert self.uvc._login() is None
assert self.uvc._connect_addr is None
def test_camera_image_tries_login_bails_on_failure(self):
"""Test retrieving failure."""
with mock.patch.object(self.uvc, "_login") as mock_login:
mock_login.return_value = False
assert self.uvc.camera_image() is None
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
def test_camera_image_logged_in(self):
"""Test the login state."""
self.uvc._camera = mock.MagicMock()
assert self.uvc._camera.get_snapshot.return_value == self.uvc.camera_image()
def test_camera_image_error(self):
"""Test the camera image error."""
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = camera.CameraConnectError
assert self.uvc.camera_image() is None
def test_camera_image_reauths(self):
"""Test the re-authentication."""
responses = [0]
def mock_snapshot():
"""Mock snapshot."""
try:
responses.pop()
raise camera.CameraAuthError()
except IndexError:
pass
return "image"
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = mock_snapshot
with mock.patch.object(self.uvc, "_login") as mock_login:
assert "image" == self.uvc.camera_image()
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
assert [] == responses
def test_camera_image_reauths_only_once(self):
"""Test if the re-authentication only happens once."""
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = camera.CameraAuthError
with mock.patch.object(self.uvc, "_login") as mock_login:
with pytest.raises(camera.CameraAuthError):
self.uvc.camera_image()
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
|
{
"content_hash": "5eff3669aeb13724a1486e3118cb84e9",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 88,
"avg_line_length": 40.2814371257485,
"alnum_prop": 0.5886725137505574,
"repo_name": "postlund/home-assistant",
"id": "b95d940bda47772bf97254de0a2c2fc61d55a3be",
"size": "13454",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/uvc/test_camera.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
}
|
"""Test the collection viewer interface."""
from grr.gui import runtests_test
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
class TestContainerViewer(test_lib.GRRSeleniumTest):
"""Test the collection viewer interface."""
def CreateCollectionFixture(self):
with aff4.FACTORY.Create("aff4:/C.0000000000000001/analysis/FindFlowTest",
"AFF4Collection", token=self.token) as out_fd:
out_fd.CreateView(
["stat.st_mtime", "type", "stat.st_size", "size", "Age"])
for urn in [
"aff4:/C.0000000000000001/fs/os/c/bin C.0000000000000001/rbash",
"aff4:/C.0000000000000001/fs/os/c/bin C.0000000000000001/bash",
"aff4:/C.0000000000000001/fs/os/c/bin/bash",
"aff4:/C.0000000000000001/fs/os/c/bin/rbash",
]:
fd = aff4.FACTORY.Open(urn, token=self.token)
out_fd.Add(urn=urn, stat=fd.Get(fd.Schema.STAT))
def setUp(self):
super(TestContainerViewer, self).setUp()
# Create a new collection
with self.ACLChecksDisabled():
self.CreateCollectionFixture()
self.GrantClientApproval("C.0000000000000001")
def testContainerViewer(self):
self.Open("/")
self.Type("client_query", "0001")
self.Click("client_query_submit")
self.WaitUntilEqual(u"C.0000000000000001",
self.GetText, "css=span[type=subject]")
# Choose client 1
self.Click("css=td:contains('0001')")
# Go to Browse VFS
self.Click("css=a:contains('Browse Virtual Filesystem')")
# Navigate to the analysis directory
self.Click("link=analysis")
self.Click("css=span[type=subject]:contains(\"FindFlowTest\")")
self.WaitUntil(self.IsElementPresent, "css=td:contains(\"VIEW\")")
self.assert_("View details" in self.GetText(
"css=a[href=\"#"
"c=C.0000000000000001&"
"container=aff4%3A%2FC.0000000000000001%2Fanalysis%2FFindFlowTest&"
"main=ContainerViewer&"
"reason=Running+tests\"]"))
self.Click("css=a:contains(\"View details\")")
self.WaitUntil(self.IsElementPresent, "css=button[id=export]")
self.ClickUntil("css=#_C_2E0000000000000001 ins.jstree-icon",
self.IsElementPresent,
"css=#_C_2E0000000000000001-fs ins.jstree-icon")
self.ClickUntil("css=#_C_2E0000000000000001-fs ins.jstree-icon",
self.IsElementPresent,
"css=#_C_2E0000000000000001-fs-os ins.jstree-icon")
self.ClickUntil("css=#_C_2E0000000000000001-fs-os ins.jstree-icon",
self.IsElementPresent,
"link=c")
# Navigate to the bin C.0000000000000001 directory
self.Click("link=c")
# Check the filter string
self.assertEqual("subject startswith 'aff4:/C.0000000000000001/fs/os/c/'",
self.GetValue("query"))
# We should have exactly 4 files
self.WaitUntilEqual(4, self.GetCssCount,
"css=.containerFileTable tbody > tr")
# Check the rows
self.assertEqual(
"C.0000000000000001/fs/os/c/bin C.0000000000000001/bash",
self.GetText("css=.containerFileTable tbody > tr:nth(0) td:nth(1)"))
self.assertEqual(
"C.0000000000000001/fs/os/c/bin C.0000000000000001/rbash",
self.GetText("css=.containerFileTable tbody > tr:nth(1) td:nth(1)"))
self.assertEqual(
"C.0000000000000001/fs/os/c/bin/bash",
self.GetText("css=.containerFileTable tbody > tr:nth(2) td:nth(1)"))
self.assertEqual(
"C.0000000000000001/fs/os/c/bin/rbash",
self.GetText("css=.containerFileTable tbody > tr:nth(3) td:nth(1)"))
# Check that query filtering works (Pressing enter)
self.Type("query", "stat.st_size < 5000")
self.Click("css=form[name=query_form] button[type=submit]")
self.WaitUntilEqual("4874", self.GetText,
"css=.containerFileTable tbody > tr:nth(0) td:nth(4)")
# We should have exactly 1 file
self.assertEqual(
1, self.GetCssCount("css=.containerFileTable tbody > tr"))
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
{
"content_hash": "e67086e0ef214e3b0f2d9e0622da378e",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 78,
"avg_line_length": 34.04,
"alnum_prop": 0.6399529964747356,
"repo_name": "ojengwa/grr",
"id": "bb0e2658891f0f78d1ee6f637778a4b4dee6e47b",
"size": "4318",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gui/plugins/container_viewer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "7781"
},
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "37269"
},
{
"name": "HTML",
"bytes": "30838"
},
{
"name": "JavaScript",
"bytes": "831938"
},
{
"name": "Makefile",
"bytes": "6524"
},
{
"name": "Protocol Buffer",
"bytes": "170942"
},
{
"name": "Python",
"bytes": "4652186"
},
{
"name": "Ruby",
"bytes": "1131"
},
{
"name": "Shell",
"bytes": "42248"
}
],
"symlink_target": ""
}
|
from collections import Sequence, Hashable
from itertools import islice, chain
from numbers import Integral
from pyrsistent._plist import plist
class PDeque(object):
"""
Persistent double ended queue (deque). Allows quick appends and pops in both ends. Implemented
using two persistent lists.
A maximum length can be specified to create a bounded queue.
Fully supports the Sequence and Hashable protocols including indexing and slicing but
if you need fast random access go for the PVector instead.
Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to
create an instance.
Some examples:
>>> x = pdeque([1, 2, 3])
>>> x.left
1
>>> x.right
3
>>> x[0] == x.left
True
>>> x[-1] == x.right
True
>>> x.pop()
pdeque([1, 2])
>>> x.pop() == x[:-1]
True
>>> x.popleft()
pdeque([2, 3])
>>> x.append(4)
pdeque([1, 2, 3, 4])
>>> x.appendleft(4)
pdeque([4, 1, 2, 3])
>>> y = pdeque([1, 2, 3], maxlen=3)
>>> y.append(4)
pdeque([2, 3, 4], maxlen=3)
>>> y.appendleft(4)
pdeque([4, 1, 2], maxlen=3)
"""
__slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__')
def __new__(cls, left_list, right_list, length, maxlen=None):
instance = super(PDeque, cls).__new__(cls)
instance._left_list = left_list
instance._right_list = right_list
instance._length = length
if maxlen is not None:
if not isinstance(maxlen, Integral):
raise TypeError('An integer is required as maxlen')
if maxlen < 0:
raise ValueError("maxlen must be non-negative")
instance._maxlen = maxlen
return instance
@property
def right(self):
"""
Rightmost element in the deque.
"""
return PDeque._tip_from_lists(self._right_list, self._left_list)
@property
def left(self):
"""
Leftmost element in the deque.
"""
return PDeque._tip_from_lists(self._left_list, self._right_list)
@staticmethod
def _tip_from_lists(primary_list, secondary_list):
if primary_list:
return primary_list.first
if secondary_list:
return secondary_list[-1]
raise IndexError('No elements in empty deque')
def __iter__(self):
return chain(self._left_list, self._right_list.reverse())
def __repr__(self):
return "pdeque({0}{1})".format(list(self),
', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '')
__str__ = __repr__
@property
def maxlen(self):
"""
Maximum length of the queue.
"""
return self._maxlen
def pop(self, count=1):
"""
Return new deque with rightmost element removed. Popping the empty queue
        will return the empty queue. An optional count can be given to indicate the
number of elements to pop. Popping with a negative index is the same as
popleft. Executes in amortized O(k) where k is the number of elements to pop.
>>> pdeque([1, 2]).pop()
pdeque([1])
>>> pdeque([1, 2]).pop(2)
pdeque([])
>>> pdeque([1, 2]).pop(-1)
pdeque([2])
"""
if count < 0:
return self.popleft(-count)
new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
def popleft(self, count=1):
"""
Return new deque with leftmost element removed. Otherwise functionally
equivalent to pop().
>>> pdeque([1, 2]).popleft()
pdeque([2])
"""
if count < 0:
return self.pop(-count)
new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count)
return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
@staticmethod
def _pop_lists(primary_list, secondary_list, count):
new_primary_list = primary_list
new_secondary_list = secondary_list
while count > 0 and (new_primary_list or new_secondary_list):
count -= 1
if new_primary_list.rest:
new_primary_list = new_primary_list.rest
elif new_primary_list:
new_primary_list = new_secondary_list.reverse()
new_secondary_list = plist()
else:
new_primary_list = new_secondary_list.reverse().rest
new_secondary_list = plist()
return new_primary_list, new_secondary_list
def _is_empty(self):
return not self._left_list and not self._right_list
def __lt__(self, other):
if not isinstance(other, PDeque):
return NotImplemented
return tuple(self) < tuple(other)
def __eq__(self, other):
if not isinstance(other, PDeque):
return NotImplemented
if tuple(self) == tuple(other):
# Sanity check of the length value since it is redundant (there for performance)
assert len(self) == len(other)
return True
return False
def __hash__(self):
return hash(tuple(self))
def __len__(self):
return self._length
def append(self, elem):
"""
Return new deque with elem as the rightmost element.
>>> pdeque([1, 2]).append(3)
pdeque([1, 2, 3])
"""
new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem)
return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
def appendleft(self, elem):
"""
Return new deque with elem as the leftmost element.
>>> pdeque([1, 2]).appendleft(3)
pdeque([3, 1, 2])
"""
new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem)
return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
def _append(self, primary_list, secondary_list, elem):
if self._maxlen is not None and self._length == self._maxlen:
if self._maxlen == 0:
return primary_list, secondary_list, 0
new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1)
return new_primary_list, new_secondary_list.cons(elem), self._length
return primary_list, secondary_list.cons(elem), self._length + 1
@staticmethod
def _extend_list(the_list, iterable):
count = 0
for elem in iterable:
the_list = the_list.cons(elem)
count += 1
return the_list, count
def _extend(self, primary_list, secondary_list, iterable):
new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable)
new_secondary_list = secondary_list
current_len = self._length + extend_count
if self._maxlen is not None and current_len > self._maxlen:
pop_len = current_len - self._maxlen
new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len)
extend_count -= pop_len
return new_primary_list, new_secondary_list, extend_count
def extend(self, iterable):
"""
Return new deque with all elements of iterable appended to the right.
>>> pdeque([1, 2]).extend([3, 4])
pdeque([1, 2, 3, 4])
"""
new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable)
return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
def extendleft(self, iterable):
"""
Return new deque with all elements of iterable appended to the left.
NB! The elements will be inserted in reverse order compared to the order in the iterable.
>>> pdeque([1, 2]).extendleft([3, 4])
pdeque([4, 3, 1, 2])
"""
new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable)
return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
def count(self, elem):
"""
Return the number of elements equal to elem present in the queue
>>> pdeque([1, 2, 1]).count(1)
2
"""
return self._left_list.count(elem) + self._right_list.count(elem)
def remove(self, elem):
"""
Return new deque with first element from left equal to elem removed. If no such element is found
a ValueError is raised.
>>> pdeque([2, 1, 2]).remove(2)
pdeque([1, 2])
"""
try:
return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1)
except ValueError:
# Value not found in left list, try the right list
try:
# This is severely inefficient with a double reverse, should perhaps implement a remove_last()?
return PDeque(self._left_list,
self._right_list.reverse().remove(elem).reverse(), self._length - 1)
except ValueError:
raise ValueError('{0} not found in PDeque'.format(elem))
def reverse(self):
"""
Return reversed deque.
>>> pdeque([1, 2, 3]).reverse()
pdeque([3, 2, 1])
Also supports the standard python reverse function.
>>> reversed(pdeque([1, 2, 3]))
pdeque([3, 2, 1])
"""
return PDeque(self._right_list, self._left_list, self._length)
__reversed__ = reverse
def rotate(self, steps):
"""
        Return deque with elements rotated steps steps to the right (negative steps rotate left).
>>> x = pdeque([1, 2, 3])
>>> x.rotate(1)
pdeque([3, 1, 2])
>>> x.rotate(-2)
pdeque([3, 1, 2])
"""
popped_deque = self.pop(steps)
if steps >= 0:
return popped_deque.extendleft(islice(self.reverse(), steps))
return popped_deque.extend(islice(self, -steps))
def __reduce__(self):
# Pickling support
return pdeque, (list(self), self._maxlen)
def __getitem__(self, index):
if isinstance(index, slice):
if index.step is not None and index.step != 1:
# Too difficult, no structural sharing possible
return pdeque(tuple(self)[index], maxlen=self._maxlen)
result = self
if index.start is not None:
result = result.popleft(index.start % self._length)
if index.stop is not None:
result = result.pop(self._length - (index.stop % self._length))
return result
if not isinstance(index, Integral):
raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
if index >= 0:
return self.popleft(index).left
return self.pop(index).right
index = Sequence.index
Sequence.register(PDeque)
Hashable.register(PDeque)
def pdeque(iterable=(), maxlen=None):
"""
Return deque containing the elements of iterable. If maxlen is specified then
    len(iterable) - maxlen elements are discarded from the left if len(iterable) > maxlen.
>>> pdeque([1, 2, 3])
pdeque([1, 2, 3])
>>> pdeque([1, 2, 3, 4], maxlen=2)
pdeque([3, 4], maxlen=2)
"""
t = tuple(iterable)
if maxlen is not None:
t = t[-maxlen:]
length = len(t)
pivot = int(length / 2)
left = plist(t[:pivot])
right = plist(t[pivot:], reverse=True)
return PDeque(left, right, length, maxlen)
def dq(*elements):
"""
Return deque containing all arguments.
>>> dq(1, 2, 3)
pdeque([1, 2, 3])
"""
return pdeque(elements)
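# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It exercises the bounded-deque and rotation behaviour documented
# in the docstrings above, using only the factories defined in this file.
if __name__ == "__main__":
    window = pdeque([1, 2, 3], maxlen=3)
    print(window.append(4))       # pdeque([2, 3, 4], maxlen=3) - oldest element dropped
    print(window.rotate(1))       # pdeque([3, 1, 2], maxlen=3)
    print(dq(1, 2, 3).reverse())  # pdeque([3, 2, 1])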
|
{
"content_hash": "46cb5b3fb8f1e0b03d889d72da3fd0c2",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 115,
"avg_line_length": 32.34770889487871,
"alnum_prop": 0.5759520039996667,
"repo_name": "Futrell/pyrsistent",
"id": "ef89cc5441ae619ec2d959f7136c11ad1eaff8bf",
"size": "12001",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyrsistent/_pdeque.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "48009"
},
{
"name": "Python",
"bytes": "230446"
},
{
"name": "Shell",
"bytes": "247"
}
],
"symlink_target": ""
}
|
"""Variables. See the @{$python/state_ops} guide."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
shared_name=""):
"""Deprecated. Used variable_op_v2 instead."""
if not set_shape:
shape = tensor_shape.unknown_shape()
ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name,
container=container, shared_name=shared_name)
# TODO(mrry): Move this to where it is used, so we can get rid of this op
# wrapper?
if set_shape:
ret.set_shape(shape)
return ret
def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""):
"""Create a variable Operation.
See also variables.Variable.
Args:
shape: The shape of the tensor managed by this variable
dtype: The underlying type of the tensor values.
name: optional name to use for the variable op.
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
A variable tensor.
"""
return gen_state_ops.variable_v2(
shape=shape,
dtype=dtype,
name=name,
container=container,
shared_name=shared_name)
def init_variable(v, init, name="init"):
"""Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
init: Tensor to assign to v,
      or an object convertible to Tensor, e.g. a numpy array,
      or an Initializer that generates a tensor given the shape and type of v.
An "Initializer" is a callable that returns a tensor that "v" should be
set to. It will be called as init(shape, dtype).
name: Optional name for the op.
Returns:
The operation that initializes v.
"""
with ops.name_scope(None, v.op.name + "/", [v, init]):
with ops.name_scope(name) as scope:
with ops.colocate_with(v):
if callable(init):
assert v.get_shape().is_fully_defined(), "Variable shape unknown."
# TODO(mrry): Convert to v.shape when the property and
# accessor are reconciled (and all initializers support
# tf.TensorShape objects).
value = init(v.get_shape().as_list(), v.dtype.base_dtype)
value = ops.convert_to_tensor(value, name="value")
return gen_state_ops.assign(v, value, name=scope)
else:
init = ops.convert_to_tensor(init, name="init")
return gen_state_ops.assign(v, init, name=scope)
def is_variable_initialized(ref, name=None):
"""Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.is_variable_initialized(ref=ref, name=name)
# Handle resource variables.
return ref.is_initialized(name=name)
@tf_export("assign_sub")
def assign_sub(ref, value, use_locking=None, name=None):
"""Update 'ref' by subtracting 'value' from it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
      The value to be subtracted from the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_sub(
ref, value, use_locking=use_locking, name=name)
return ref.assign_sub(value)
@tf_export("assign_add")
def assign_add(ref, value, use_locking=None, name=None):
"""Update 'ref' by adding 'value' to it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
The value to be added to the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_add(
ref, value, use_locking=use_locking, name=name)
return ref.assign_add(value)
@tf_export("assign")
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
"""Update 'ref' by assigning 'value' to it.
This operation outputs a Tensor that holds the new value of 'ref' after
the value has been assigned. This makes it easier to chain operations
that need to use the reset value.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
value: A `Tensor`. Must have the same type as `ref`.
The value to be assigned to the variable.
validate_shape: An optional `bool`. Defaults to `True`.
If true, the operation will validate that the shape
of 'value' matches the shape of the Tensor being assigned to. If false,
'ref' will take on the shape of 'value'.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A `Tensor` that will hold the new value of 'ref' after
the assignment has completed.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign(
ref, value, use_locking=use_locking, name=name,
validate_shape=validate_shape)
return ref.assign(value, name=name)
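# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It shows the classic TF 1.x graph-mode pattern for the assign ops
# exported above; wrapped in a helper so nothing runs on import.
def _example_assign_usage():
  # Local import of the public API to avoid touching the module's imports.
  import tensorflow as tf
  counter = tf.Variable(0, name="counter")
  increment = tf.assign_add(counter, 1)
  with tf.Session() as sess:
    sess.run(counter.initializer)
    print(sess.run(increment))  # 1
    print(sess.run(increment))  # 2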
@tf_export("count_up_to")
def count_up_to(ref, limit, name=None):
r"""Increments 'ref' until it reaches 'limit'.
Args:
ref: A Variable. Must be one of the following types: `int32`, `int64`.
Should be from a scalar `Variable` node.
limit: An `int`.
If incrementing ref would bring it above limit, instead generates an
'OutOfRange' error.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `ref`.
A copy of the input before increment. If nothing else modifies the
input, the values produced will all be distinct.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.count_up_to(ref, limit=limit, name=name)
return gen_state_ops.resource_count_up_to(
ref.handle, limit, T=ref.dtype, name=name)
@tf_export("scatter_update")
def scatter_update(ref, indices, updates, use_locking=True, name=None):
# pylint: disable=line-too-long
r"""Applies sparse updates to a variable reference.
This operation computes
```python
# Scalar indices
ref[indices, ...] = updates[...]
# Vector indices (for each i)
ref[indices[i], ...] = updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
  If values in `ref` are to be updated more than once, because there are
duplicate entries in `indices`, the order at which the updates happen
for each value is undefined.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
</div>
Args:
ref: A `Variable`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_update(ref, indices, updates,
use_locking=use_locking, name=name)
return ref._lazy_read(gen_resource_variable_ops.resource_scatter_update( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export("scatter_nd_update")
def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
r"""Applies sparse `updates` to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
  `indices` must be an integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
  For example, say we want to update 4 scattered elements in a rank-1 tensor
  with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
      print(sess.run(update))
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See @{tf.scatter_nd} for more details about how to make updates to
slices.
Args:
ref: A Variable.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into ref.
updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `True`.
      If True, the assignment will be protected by a lock; otherwise the
      behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
The value of the variable after the update.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_nd_update(
ref, indices, updates, use_locking, name)
return ref._lazy_read(gen_state_ops.resource_scatter_nd_update( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export("scatter_add")
def scatter_add(ref, indices, updates, use_locking=False, name=None):
# pylint: disable=line-too-long
r"""Adds sparse updates to the variable referenced by `resource`.
This operation computes
```python
# Scalar indices
ref[indices, ...] += updates[...]
# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the updated value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
</div>
Args:
ref: A `Variable`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `False`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_add(ref, indices, updates,
use_locking=use_locking, name=name)
return ref._lazy_read(gen_resource_variable_ops.resource_scatter_add( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export("scatter_nd_add")
def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):
r"""Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
  `indices` must be an integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
  For example, say we want to add 4 scattered elements to a rank-1 tensor
  with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
add = tf.scatter_nd_add(ref, indices, updates)
with tf.Session() as sess:
      print(sess.run(add))
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See @{tf.scatter_nd} for more details about how to make updates to
slices.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
`qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
`uint32`, `uint64`. A mutable Tensor. Should be from a Variable node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into ref.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to add to ref.
use_locking: An optional `bool`. Defaults to `False`.
      If True, the assignment will be protected by a lock; otherwise the
      behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_nd_add(
ref, indices, updates, use_locking, name)
return ref._lazy_read(gen_state_ops.resource_scatter_nd_add( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
|
{
"content_hash": "4e47a7f18090b7b3d5f5bce12802ed81",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 110,
"avg_line_length": 37.63019693654267,
"alnum_prop": 0.6645345118334594,
"repo_name": "aselle/tensorflow",
"id": "2c93cf72c75ba27145e06abe69bcbef9418b39e0",
"size": "17887",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/state_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "321697"
},
{
"name": "C#",
"bytes": "7259"
},
{
"name": "C++",
"bytes": "46003590"
},
{
"name": "CMake",
"bytes": "207738"
},
{
"name": "Dockerfile",
"bytes": "6905"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "829230"
},
{
"name": "Jupyter Notebook",
"bytes": "2578736"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52243"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99265"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "39898642"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "447009"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
import re
import pydoop.mapreduce.api as api
import pydoop.mapreduce.pipes as pp
class Mapper(api.Mapper):
def map(self, context):
words = re.sub('[^0-9a-zA-Z]+', ' ', context.getInputValue()).split()
for w in words:
context.emit(w, 1)
class Reducer(api.Reducer):
def reduce(self, context):
s = sum(context.values)
context.emit(context.key, s)
factory = pp.Factory(mapper_class=Mapper, reducer_class=Reducer)
def __main__():
pp.run_task(factory)
|
{
"content_hash": "738c164087f5d911befb8102ba738e6d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 20.48,
"alnum_prop": 0.634765625,
"repo_name": "ilveroluca/pydoop",
"id": "758b5fc7803f2f87272973293c8fb56465f947ba",
"size": "1123",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "examples/input_format/wordcount_rr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6685"
},
{
"name": "C",
"bytes": "505872"
},
{
"name": "C++",
"bytes": "105384"
},
{
"name": "Java",
"bytes": "355906"
},
{
"name": "Makefile",
"bytes": "2351"
},
{
"name": "Python",
"bytes": "534144"
},
{
"name": "Shell",
"bytes": "45693"
},
{
"name": "XSLT",
"bytes": "1335"
}
],
"symlink_target": ""
}
|
import collections
import uuid
from eventlet import event
from eventlet import greenthread
from neutron.common import exceptions
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.vmware.vshield.tasks import constants
DEFAULT_INTERVAL = 1000
LOG = logging.getLogger(__name__)
def nop(task):
return constants.TaskStatus.COMPLETED
class TaskException(exceptions.NeutronException):
def __init__(self, message=None, **kwargs):
if message is not None:
self.message = message
super(TaskException, self).__init__(**kwargs)
class InvalidState(TaskException):
message = _("Invalid state %(state)d")
class TaskStateSkipped(TaskException):
message = _("State %(state)d skipped. Current state %(current)d")
class Task():
def __init__(self, name, resource_id, execute_callback,
status_callback=nop, result_callback=nop, userdata=None):
self.name = name
self.resource_id = resource_id
self._execute_callback = execute_callback
self._status_callback = status_callback
self._result_callback = result_callback
self.userdata = userdata
self.id = None
self.status = None
self._monitors = {
constants.TaskState.START: [],
constants.TaskState.EXECUTED: [],
constants.TaskState.RESULT: []
}
self._states = [None, None, None, None]
self._state = constants.TaskState.NONE
def _add_monitor(self, action, func):
self._monitors[action].append(func)
return self
def _move_state(self, state):
self._state = state
if self._states[state] is not None:
e = self._states[state]
self._states[state] = None
e.send()
for s in range(state):
if self._states[s] is not None:
e = self._states[s]
self._states[s] = None
e.send_exception(
TaskStateSkipped(state=s, current=self._state))
def _invoke_monitor(self, state):
for func in self._monitors[state]:
try:
func(self)
except Exception:
msg = _("Task %(task)s encountered exception in %(func)s "
"at state %(state)s") % {
'task': str(self),
'func': str(func),
'state': state}
LOG.exception(msg)
self._move_state(state)
return self
def _start(self):
return self._invoke_monitor(constants.TaskState.START)
def _executed(self):
return self._invoke_monitor(constants.TaskState.EXECUTED)
def _update_status(self, status):
if self.status == status:
return self
self.status = status
def _finished(self):
return self._invoke_monitor(constants.TaskState.RESULT)
def add_start_monitor(self, func):
return self._add_monitor(constants.TaskState.START, func)
def add_executed_monitor(self, func):
return self._add_monitor(constants.TaskState.EXECUTED, func)
def add_result_monitor(self, func):
return self._add_monitor(constants.TaskState.RESULT, func)
def wait(self, state):
if (state < constants.TaskState.START or
state > constants.TaskState.RESULT or
state == constants.TaskState.STATUS):
raise InvalidState(state=state)
if state <= self._state:
# we already passed this current state, so no wait
return
e = event.Event()
self._states[state] = e
e.wait()
def __repr__(self):
return "Task-%s-%s-%s" % (
self.name, self.resource_id, self.id)
class TaskManager():
_instance = None
_default_interval = DEFAULT_INTERVAL
def __init__(self, interval=None):
self._interval = interval or TaskManager._default_interval
# A queue to pass tasks from other threads
self._tasks_queue = collections.deque()
# A dict to store resource -> resource's tasks
self._tasks = {}
# Current task being executed in main thread
self._main_thread_exec_task = None
# New request event
self._req = event.Event()
# TaskHandler stopped event
self._stopped = False
# Periodic function trigger
self._monitor = None
self._monitor_busy = False
# Thread handling the task request
self._thread = None
def _execute(self, task):
"""Execute task."""
msg = _("Start task %s") % str(task)
LOG.debug(msg)
task._start()
try:
status = task._execute_callback(task)
except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % {
'task': str(task),
'cb': str(task._execute_callback)}
LOG.exception(msg)
status = constants.TaskStatus.ERROR
LOG.debug(_("Task %(task)s return %(status)s"), {
'task': str(task),
'status': status})
task._update_status(status)
task._executed()
return status
def _result(self, task):
"""Notify task execution result."""
try:
task._result_callback(task)
except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % {
'task': str(task),
'cb': str(task._result_callback)}
LOG.exception(msg)
LOG.debug(_("Task %(task)s return %(status)s"),
{'task': str(task), 'status': task.status})
task._finished()
def _check_pending_tasks(self):
"""Check all pending tasks status."""
for resource_id in self._tasks.keys():
if self._stopped:
# Task manager is stopped, return now
return
tasks = self._tasks[resource_id]
# only the first task is executed and pending
task = tasks[0]
try:
status = task._status_callback(task)
except Exception:
msg = _("Task %(task)s encountered exception in %(cb)s") % {
'task': str(task),
'cb': str(task._status_callback)}
LOG.exception(msg)
status = constants.TaskStatus.ERROR
task._update_status(status)
if status != constants.TaskStatus.PENDING:
self._dequeue(task, True)
def _enqueue(self, task):
if task.resource_id in self._tasks:
# append to existing resource queue for ordered processing
self._tasks[task.resource_id].append(task)
else:
# put the task to a new resource queue
tasks = collections.deque()
tasks.append(task)
self._tasks[task.resource_id] = tasks
def _dequeue(self, task, run_next):
self._result(task)
tasks = self._tasks[task.resource_id]
tasks.remove(task)
if not tasks:
# no more tasks for this resource
del self._tasks[task.resource_id]
return
if run_next:
# process next task for this resource
while tasks:
task = tasks[0]
status = self._execute(task)
if status == constants.TaskStatus.PENDING:
break
self._dequeue(task, False)
def _abort(self):
"""Abort all tasks."""
        # put all the tasks that haven't been received by the main thread
        # into the queue so the following abort handling can cover them
for t in self._tasks_queue:
self._enqueue(t)
self._tasks_queue.clear()
for resource_id in self._tasks.keys():
tasks = list(self._tasks[resource_id])
for task in tasks:
task._update_status(constants.TaskStatus.ABORT)
self._dequeue(task, False)
def _get_task(self):
"""Get task request."""
while True:
for t in self._tasks_queue:
return self._tasks_queue.popleft()
self._req.wait()
self._req.reset()
def run(self):
while True:
try:
if self._stopped:
# Gracefully terminate this thread if the _stopped
# attribute was set to true
LOG.info(_("Stopping TaskManager"))
break
# get a task from queue, or timeout for periodic status check
task = self._get_task()
if task.resource_id in self._tasks:
# this resource already has some tasks under processing,
# append the task to same queue for ordered processing
self._enqueue(task)
continue
try:
self._main_thread_exec_task = task
self._execute(task)
finally:
self._main_thread_exec_task = None
if task.status is None:
# The thread is killed during _execute(). To guarantee
# the task been aborted correctly, put it to the queue.
self._enqueue(task)
elif task.status != constants.TaskStatus.PENDING:
self._result(task)
else:
self._enqueue(task)
except Exception:
LOG.exception(_("TaskManager terminating because "
"of an exception"))
break
def add(self, task):
task.id = uuid.uuid1()
self._tasks_queue.append(task)
if not self._req.ready():
self._req.send()
return task.id
def stop(self):
if self._thread is None:
return
self._stopped = True
self._thread.kill()
self._thread = None
# Stop looping call and abort running tasks
self._monitor.stop()
if self._monitor_busy:
self._monitor.wait()
self._abort()
LOG.info(_("TaskManager terminated"))
def has_pending_task(self):
if self._tasks_queue or self._tasks or self._main_thread_exec_task:
return True
else:
return False
def show_pending_tasks(self):
for task in self._tasks_queue:
LOG.info(str(task))
for resource, tasks in self._tasks.iteritems():
for task in tasks:
LOG.info(str(task))
if self._main_thread_exec_task:
LOG.info(str(self._main_thread_exec_task))
def count(self):
count = 0
for resource_id, tasks in self._tasks.iteritems():
count += len(tasks)
return count
def start(self, interval=None):
def _inner():
self.run()
def _loopingcall_callback():
self._monitor_busy = True
try:
self._check_pending_tasks()
except Exception:
LOG.exception(_("Exception in _check_pending_tasks"))
self._monitor_busy = False
if self._thread is not None:
return self
if interval is None or interval == 0:
interval = self._interval
self._stopped = False
self._thread = greenthread.spawn(_inner)
self._monitor = loopingcall.FixedIntervalLoopingCall(
_loopingcall_callback)
self._monitor.start(interval / 1000.0,
interval / 1000.0)
# To allow the created thread start running
greenthread.sleep(0)
return self
@classmethod
def set_default_interval(cls, interval):
cls._default_interval = interval
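# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It shows how a Task is typically wired into a TaskManager using
# only names defined above; the task name and resource id are made up.
def _example_usage():
    def _execute(task):
        # Pretend the backend call completed synchronously.
        return constants.TaskStatus.COMPLETED
    def _result(task):
        LOG.debug(_("Task %(task)s finished with status %(status)s"),
                  {'task': str(task), 'status': task.status})
    manager = TaskManager().start()
    task = Task("example-task", "fake-resource-id", _execute,
                result_callback=_result)
    manager.add(task)
    # Block until the result callback has run, then shut the manager down.
    task.wait(constants.TaskState.RESULT)
    manager.stop()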
|
{
"content_hash": "6220d82dae4272be264046ffce8e7fe6",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 79,
"avg_line_length": 31.484293193717278,
"alnum_prop": 0.5416978465120146,
"repo_name": "virtualopensystems/neutron",
"id": "7037c430dd42dca92c55625485e02df7b5cea29c",
"size": "12654",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "neutron/plugins/vmware/vshield/tasks/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Python",
"bytes": "9873662"
},
{
"name": "Shell",
"bytes": "9202"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import gettext
import json
from os import path
from django.conf import settings
from django.test import (
RequestFactory, SimpleTestCase, TestCase, ignore_warnings, modify_settings,
override_settings,
)
from django.test.selenium import SeleniumTestCase
from django.urls import reverse
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.translation import (
LANGUAGE_SESSION_KEY, get_language, override,
)
from django.views.i18n import JavaScriptCatalog, get_formats
from ..urls import locale_dir
@override_settings(ROOT_URLCONF='view_tests.urls')
class SetLanguageTests(TestCase):
"""Test the django.views.i18n.set_language view."""
def _get_inactive_language_code(self):
"""Return language code for a language which is not activated."""
current_language = get_language()
return [code for code, name in settings.LANGUAGES if not code == current_language][0]
def test_setlang(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the 'next' argument if provided.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i_should_not_be_used/')
self.assertRedirects(response, '/')
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
# The language is set in a cookie.
language_cookie = self.client.cookies[settings.LANGUAGE_COOKIE_NAME]
self.assertEqual(language_cookie.value, lang_code)
self.assertEqual(language_cookie['domain'], '')
self.assertEqual(language_cookie['path'], '/')
self.assertEqual(language_cookie['max-age'], '')
self.assertEqual(language_cookie['httponly'], '')
self.assertEqual(language_cookie['samesite'], '')
self.assertEqual(language_cookie['secure'], '')
def test_setlang_unsafe_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe".
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}
response = self.client.post('/i18n/setlang/', data=post_data)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_http_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe" and its scheme is https if the request was sent over https.
"""
lang_code = self._get_inactive_language_code()
non_https_next_url = 'http://testserver/redirection/'
post_data = {'language': lang_code, 'next': non_https_next_url}
# Insecure URL in POST data.
response = self.client.post('/i18n/setlang/', data=post_data, secure=True)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
# Insecure URL in HTTP referer.
response = self.client.post('/i18n/setlang/', secure=True, HTTP_REFERER=non_https_next_url)
self.assertEqual(response.url, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_redirect_to_referer(self):
"""
The set_language view redirects to the URL in the referer header when
there isn't a "next" parameter.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i18n/')
self.assertRedirects(response, '/i18n/', fetch_redirect_response=False)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_default_redirect(self):
"""
The set_language view redirects to '/' when there isn't a referer or
"next" parameter.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data)
self.assertRedirects(response, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_performs_redirect_for_ajax_if_explicitly_requested(self):
"""
The set_language view redirects to the "next" parameter for requests
not accepting HTML response content.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_ACCEPT='application/json')
self.assertRedirects(response, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_doesnt_perform_a_redirect_to_referer_for_ajax(self):
"""
The set_language view doesn't redirect to the HTTP referer header if
the request doesn't accept HTML response content.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
headers = {'HTTP_REFERER': '/', 'HTTP_ACCEPT': 'application/json'}
response = self.client.post('/i18n/setlang/', post_data, **headers)
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_doesnt_perform_a_default_redirect_for_ajax(self):
"""
The set_language view returns 204 by default for requests not accepting
HTML response content.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code}
response = self.client.post('/i18n/setlang/', post_data, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 204)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
def test_setlang_unsafe_next_for_ajax(self):
"""
The fallback to root URL for the set_language view works for requests
not accepting HTML response content.
"""
lang_code = self._get_inactive_language_code()
post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}
response = self.client.post('/i18n/setlang/', post_data, HTTP_ACCEPT='application/json')
self.assertEqual(response.url, '/')
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
def test_session_language_deprecation(self):
msg = (
'The user language will no longer be stored in request.session '
'in Django 4.0. Read it from '
'request.COOKIES[settings.LANGUAGE_COOKIE_NAME] instead.'
)
with self.assertRaisesMessage(RemovedInDjango40Warning, msg):
self.client.session[LANGUAGE_SESSION_KEY]
def test_setlang_reversal(self):
self.assertEqual(reverse('set_language'), '/i18n/setlang/')
def test_setlang_cookie(self):
# we force saving language to a cookie rather than a session
# by excluding session middleware and those which do require it
test_settings = {
'MIDDLEWARE': ['django.middleware.common.CommonMiddleware'],
'LANGUAGE_COOKIE_NAME': 'mylanguage',
'LANGUAGE_COOKIE_AGE': 3600 * 7 * 2,
'LANGUAGE_COOKIE_DOMAIN': '.example.com',
'LANGUAGE_COOKIE_PATH': '/test/',
'LANGUAGE_COOKIE_HTTPONLY': True,
'LANGUAGE_COOKIE_SAMESITE': 'Strict',
'LANGUAGE_COOKIE_SECURE': True,
}
with self.settings(**test_settings):
post_data = {'language': 'pl', 'next': '/views/'}
response = self.client.post('/i18n/setlang/', data=post_data)
language_cookie = response.cookies.get('mylanguage')
self.assertEqual(language_cookie.value, 'pl')
self.assertEqual(language_cookie['domain'], '.example.com')
self.assertEqual(language_cookie['path'], '/test/')
self.assertEqual(language_cookie['max-age'], 3600 * 7 * 2)
self.assertIs(language_cookie['httponly'], True)
self.assertEqual(language_cookie['samesite'], 'Strict')
self.assertIs(language_cookie['secure'], True)
def test_setlang_decodes_http_referer_url(self):
"""
The set_language view decodes the HTTP_REFERER URL.
"""
# The URL & view must exist for this to work as a regression test.
self.assertEqual(reverse('with_parameter', kwargs={'parameter': 'x'}), '/test-setlang/x/')
lang_code = self._get_inactive_language_code()
encoded_url = '/test-setlang/%C3%A4/' # (%C3%A4 decodes to ä)
response = self.client.post('/i18n/setlang/', {'language': lang_code}, HTTP_REFERER=encoded_url)
self.assertRedirects(response, encoded_url, fetch_redirect_response=False)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, lang_code)
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)
@modify_settings(MIDDLEWARE={
'append': 'django.middleware.locale.LocaleMiddleware',
})
def test_lang_from_translated_i18n_pattern(self):
response = self.client.post(
'/i18n/setlang/', data={'language': 'nl'},
follow=True, HTTP_REFERER='/en/translated/'
)
self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, 'nl')
with ignore_warnings(category=RemovedInDjango40Warning):
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'nl')
self.assertRedirects(response, '/nl/vertaald/')
# And reverse
response = self.client.post(
'/i18n/setlang/', data={'language': 'en'},
follow=True, HTTP_REFERER='/nl/vertaald/'
)
self.assertRedirects(response, '/en/translated/')
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18NViewTests(SimpleTestCase):
"""Test django.views.i18n views other than set_language."""
@override_settings(LANGUAGE_CODE='de')
def test_get_formats(self):
formats = get_formats()
# Test 3 possible types in get_formats: integer, string, and list.
self.assertEqual(formats['FIRST_DAY_OF_WEEK'], 0)
self.assertEqual(formats['DECIMAL_SEPARATOR'], '.')
self.assertEqual(formats['TIME_INPUT_FORMATS'], ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'])
def test_jsi18n(self):
"""The javascript_catalog can be deployed with language settings"""
for lang_code in ['es', 'fr', 'ru']:
with override(lang_code):
catalog = gettext.translation('djangojs', locale_dir, [lang_code])
trans_txt = catalog.gettext('this is to be translated')
response = self.client.get('/jsi18n/')
self.assertEqual(response['Content-Type'], 'text/javascript; charset="utf-8"')
# response content must include a line like:
# "this is to be translated": <value of trans_txt Python variable>
# json.dumps() is used to be able to check unicode strings
self.assertContains(response, json.dumps(trans_txt), 1)
if lang_code == 'fr':
# Message with context (msgctxt)
self.assertContains(response, '"month name\\u0004May": "mai"', 1)
@override_settings(USE_I18N=False)
def test_jsi18n_USE_I18N_False(self):
response = self.client.get('/jsi18n/')
# default plural function
self.assertContains(response, 'django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };')
self.assertNotContains(response, 'var newcatalog =')
def test_jsoni18n(self):
"""
The json_catalog returns the language catalog and settings as JSON.
"""
with override('de'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode())
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertEqual(data['formats']['TIME_INPUT_FORMATS'], ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'])
self.assertEqual(data['formats']['FIRST_DAY_OF_WEEK'], 0)
self.assertIn('plural', data)
self.assertEqual(data['catalog']['month name\x04May'], 'Mai')
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertEqual(data['plural'], '(n != 1)')
def test_jsi18n_with_missing_en_files(self):
"""
The javascript_catalog shouldn't load the fallback language in the
case that the current selected language is actually the one translated
from, and hence missing translation files completely.
This happens easily when you're translating from English to other
languages and you've set settings.LANGUAGE_CODE to some other language
than English.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'esto tiene que ser traducido')
def test_jsoni18n_with_missing_en_files(self):
"""
Same as above for the json_catalog view. Here we also check for the
expected JSON format.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/jsoni18n/')
data = json.loads(response.content.decode())
self.assertIn('catalog', data)
self.assertIn('formats', data)
self.assertIn('plural', data)
self.assertEqual(data['catalog'], {})
self.assertIn('DATETIME_FORMAT', data['formats'])
self.assertIsNone(data['plural'])
def test_jsi18n_fallback_language(self):
"""
Let's make sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('fi'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'il faut le traduire')
self.assertNotContains(response, "Untranslated string")
def test_i18n_fallback_language_plural(self):
"""
The fallback to a language with less plural forms maintains the real
language's number of plural forms and correct translations.
"""
with self.settings(LANGUAGE_CODE='pt'), override('ru'):
response = self.client.get('/jsi18n/')
self.assertEqual(
response.context['catalog']['{count} plural3'],
['{count} plural3 p3', '{count} plural3 p3s', '{count} plural3 p3t']
)
self.assertEqual(
response.context['catalog']['{count} plural2'],
['{count} plural2', '{count} plural2s', '']
)
with self.settings(LANGUAGE_CODE='ru'), override('pt'):
response = self.client.get('/jsi18n/')
self.assertEqual(
response.context['catalog']['{count} plural3'],
['{count} plural3', '{count} plural3s']
)
self.assertEqual(
response.context['catalog']['{count} plural2'],
['{count} plural2', '{count} plural2s']
)
def test_i18n_english_variant(self):
with override('en-gb'):
response = self.client.get('/jsi18n/')
self.assertIn(
'"this color is to be translated": "this colour is to be translated"',
response.context['catalog_str']
)
def test_i18n_language_non_english_default(self):
"""
Check if the Javascript i18n view returns an empty language catalog
if the default language is non-English, the selected language
        is English and there is no 'en' translation available. See #13388,
#3594 and #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
@modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'})
def test_non_english_default_english_userpref(self):
"""
Same as above with the difference that there IS an 'en' translation
available. The Javascript i18n view must return a NON empty language catalog
with the proper English translations. See #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/jsi18n_english_translation/')
self.assertContains(response, 'this app0 string is to be translated')
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('none'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'Choisir une heure')
def test_escaping(self):
# Force a language via GET otherwise the gettext functions are a noop!
response = self.client.get('/jsi18n_admin/?language=de')
self.assertContains(response, '\\x04')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']})
def test_non_BMP_char(self):
"""
Non-BMP characters should not break the javascript_catalog (#21725).
"""
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n/app5/')
self.assertContains(response, 'emoji')
self.assertContains(response, '\\ud83d\\udca9')
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
def test_i18n_language_english_default(self):
"""
Check if the JavaScript i18n view returns a complete language catalog
if the default language is en-us, the selected language has a
translation available and a catalog composed by djangojs domain
translations of multiple Python packages is requested. See #13388,
#3594 and #13514 for more details.
"""
base_trans_string = 'il faut traduire cette cha\\u00eene de caract\\u00e8res de '
app1_trans_string = base_trans_string + 'app1'
app2_trans_string = base_trans_string + 'app2'
with self.settings(LANGUAGE_CODE='en-us'), override('fr'):
response = self.client.get('/jsi18n_multi_packages1/')
self.assertContains(response, app1_trans_string)
self.assertContains(response, app2_trans_string)
response = self.client.get('/jsi18n/app1/')
self.assertContains(response, app1_trans_string)
self.assertNotContains(response, app2_trans_string)
response = self.client.get('/jsi18n/app2/')
self.assertNotContains(response, app1_trans_string)
self.assertContains(response, app2_trans_string)
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']})
def test_i18n_different_non_english_languages(self):
"""
Similar to above but with neither default or requested language being
English.
"""
with self.settings(LANGUAGE_CODE='fr'), override('es-ar'):
response = self.client.get('/jsi18n_multi_packages2/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_with_locale_paths(self):
extended_locale_paths = settings.LOCALE_PATHS + [
path.join(
path.dirname(path.dirname(path.abspath(__file__))),
'app3',
'locale',
),
]
with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
with override('es-ar'):
response = self.client.get('/jsi18n/')
self.assertContains(response, 'este texto de app3 debe ser traducido')
def test_i18n_unknown_package_error(self):
view = JavaScriptCatalog.as_view()
request = RequestFactory().get('/')
msg = 'Invalid package(s) provided to JavaScriptCatalog: unknown_package'
with self.assertRaisesMessage(ValueError, msg):
view(request, packages='unknown_package')
msg += ',unknown_package2'
with self.assertRaisesMessage(ValueError, msg):
view(request, packages='unknown_package+unknown_package2')
@override_settings(ROOT_URLCONF='view_tests.urls')
class I18nSeleniumTests(SeleniumTestCase):
# The test cases use fixtures & translations from these apps.
available_apps = [
'django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'view_tests',
]
@override_settings(LANGUAGE_CODE='de')
def test_javascript_gettext(self):
self.selenium.get(self.live_server_url + '/jsi18n_template/')
elem = self.selenium.find_element_by_id("gettext")
self.assertEqual(elem.text, "Entfernen")
elem = self.selenium.find_element_by_id("ngettext_sing")
self.assertEqual(elem.text, "1 Element")
elem = self.selenium.find_element_by_id("ngettext_plur")
self.assertEqual(elem.text, "455 Elemente")
elem = self.selenium.find_element_by_id("ngettext_onnonplural")
self.assertEqual(elem.text, "Bild")
elem = self.selenium.find_element_by_id("pgettext")
self.assertEqual(elem.text, "Kann")
elem = self.selenium.find_element_by_id("npgettext_sing")
self.assertEqual(elem.text, "1 Resultat")
elem = self.selenium.find_element_by_id("npgettext_plur")
self.assertEqual(elem.text, "455 Resultate")
elem = self.selenium.find_element_by_id("formats")
self.assertEqual(
elem.text,
"DATE_INPUT_FORMATS is an object; DECIMAL_SEPARATOR is a string; FIRST_DAY_OF_WEEK is a number;"
)
@modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']})
@override_settings(LANGUAGE_CODE='fr')
def test_multiple_catalogs(self):
self.selenium.get(self.live_server_url + '/jsi18n_multi_catalogs/')
elem = self.selenium.find_element_by_id('app1string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app1')
elem = self.selenium.find_element_by_id('app2string')
self.assertEqual(elem.text, 'il faut traduire cette chaîne de caractères de app2')
|
{
"content_hash": "50433dfe6626d4203f1504a10b9ba41d",
"timestamp": "",
"source": "github",
"line_count": 503,
"max_line_length": 109,
"avg_line_length": 48.512922465208746,
"alnum_prop": 0.635439718055897,
"repo_name": "claudep/django",
"id": "dd07aa9f935fb915301ad7ec6a01d1755f3e8255",
"size": "24407",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/view_tests/tests/test_i18n.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "78915"
},
{
"name": "HTML",
"bytes": "227663"
},
{
"name": "JavaScript",
"bytes": "137605"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13662118"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.template.defaultfilters import slugify
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
views = models.IntegerField(default=0)
likes = models.IntegerField(default=0)
slug = models.SlugField(unique=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Category, self).save(*args, **kwargs)
    def __unicode__(self):  # For Python 2; on Python 3, define __str__ instead
return self.name
class Page(models.Model):
category = models.ForeignKey(Category)
title = models.CharField(max_length=128)
url = models.URLField()
views = models.IntegerField(default=0)
    def __unicode__(self):  # For Python 2; on Python 3, define __str__ instead
return self.title
#class UserProfile(models.Model):
# # A required line - links a UserProfile to User.
# user = models.OneToOneField(User)
#
# # The additional attributes we wish to include.
# website = models.URLField(blank=True)
# picture = models.ImageField(upload_to='profile_images', blank=True)
#
# def __unicode__(self):
# return self.user.username
|
{
"content_hash": "5adc7687997a7579034b07e3a4aca790",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 70,
"avg_line_length": 31.77777777777778,
"alnum_prop": 0.6870629370629371,
"repo_name": "bobbybabra/codeGuild",
"id": "dbf73ffc13bb77f8f29059f9971955d86b7e88e5",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geneGroove_project/groove/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "161"
},
{
"name": "HTML",
"bytes": "3925"
},
{
"name": "JavaScript",
"bytes": "676"
},
{
"name": "Python",
"bytes": "71225"
}
],
"symlink_target": ""
}
|
"""Files
We're getting close to being able to write
something really useful. To do that, we need to
_receive_ data from the outside world, not just
_produce_ it.
In this environment, you can access a virtual filesystem
that is part of the in-browser interpreter.
Let's do something silly: let's get
all of the lines of the Python |string| module
and print out the ones that are comment lines.
To do this, we'll use the builtin |open| function.
It takes a filename as an argument and returns a
"file-like" object. In Python-speak, this means it
supports some basic things like |read|, |write|
(if writeable), and _iteration_.
Because file objects are **iterable**, they can be
used as the sequence in a |for| loop. When used
like this, they look like a sequence of lines.
Another tidbit in the code is the use of |lstrip|
and |rstrip| on each line in the file:
- |lstrip|: strip whitespace from the left
- |rstrip|: strip whitespace from the right
(including newlines)
There is also |strip|, which strips it from both sides.
"""
__doc__ = """Files: Opening the Code"""
f = open('lib/pypyjs/lib_pypy/string.py')
for line in f:
if line.lstrip().startswith('#'): # ignore leading space
print line.rstrip() # strip trailing space, including \n.
f.close()
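# A quick, made-up illustration of the three variants (the sample text below
# is invented for demonstration and does not come from string.py):
sample = "   # indented comment   \n"
print repr(sample.lstrip())   # left side only: '# indented comment   \n'
print repr(sample.rstrip())   # right side, incl. newline: '   # indented comment'
print repr(sample.strip())    # both sides: '# indented comment'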
|
{
"content_hash": "e8482536d59f78db20bdee6fc3b56ede",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 62,
"avg_line_length": 29.558139534883722,
"alnum_prop": 0.7285601888276947,
"repo_name": "shiblon/pytour",
"id": "ffa9d8bd02e37d36a553f400e058c44c8c4aedca",
"size": "1284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "195977"
},
{
"name": "HTML",
"bytes": "2110262"
},
{
"name": "JavaScript",
"bytes": "5106892"
},
{
"name": "Python",
"bytes": "15081380"
},
{
"name": "Shell",
"bytes": "1018"
}
],
"symlink_target": ""
}
|
import codecs
import os
import sys
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
def readme():
return open(os.path.join(os.path.dirname(__file__), 'README.md'), "r").read()
setup(
name='python-cache',
packages = find_packages(exclude=["tests.*", "tests"]),
version='0.0.11',
description='Pythonic way of Caching',
long_description=readme(),
author='python-cache',
author_email='kevin830222@gmail.com, alan4chen@kimo.com',
url='https://github.com/python-cache/python-cache',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Intended Audience :: Developers'
],
install_requires=[],
include_package_data=True,
license='MIT License',
)
|
{
"content_hash": "9c3cbb5bd7d69fcc8ec47bbd33e9b52b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 25.84375,
"alnum_prop": 0.6662636033857315,
"repo_name": "python-cache/python-cache",
"id": "b04680dc7a3caf29c702e235cfe581a6a3801d31",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30545"
}
],
"symlink_target": ""
}
|
from django.db.models import Count, F
from django.shortcuts import get_object_or_404
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status as http_status
from .models import Connection, Image, Vote
from .serializers import ConnectionSerializer
class ConnectionViewSet(ReadOnlyModelViewSet):
queryset = (
Connection.objects
.select_related('image')
.annotate(
vote_count=Count('votes'),
image_url=F('image__image'),
)
)
serializer_class = ConnectionSerializer
class AddVoteView(APIView):
@staticmethod
def get_client_ip(request):
"""
http://stackoverflow.com/questions/4581789/how-do-i-get-user-ip-address-in-django
"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
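    # For example (header value invented for illustration): with
    # HTTP_X_FORWARDED_FOR = "203.0.113.7, 10.0.0.1" this returns "203.0.113.7";
    # without the header it falls back to REMOTE_ADDR.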
def get(self, request, connection_id):
ip = self.get_client_ip(request)
vote = Vote.objects.filter(ip=ip, connection_id=connection_id).first()
if vote is None:
return Response(status=http_status.HTTP_404_NOT_FOUND)
else:
return Response(status=http_status.HTTP_200_OK)
def delete(self, request, connection_id):
ip = self.get_client_ip(request)
count = Vote.objects.filter(ip=ip, connection_id=connection_id).delete()[0]
if count == 0:
return Response(status=http_status.HTTP_404_NOT_FOUND)
else:
return Response(status=http_status.HTTP_200_OK)
def post(self, request, connection_id):
ip = self.get_client_ip(request)
if ip is None:
return Response(status=http_status.HTTP_403_FORBIDDEN)
connection = get_object_or_404(Connection, pk=connection_id)
vote = Vote.objects.filter(ip=ip, connection=connection).first()
if vote is not None:
return Response(status=http_status.HTTP_409_CONFLICT)
Vote.objects.create(ip=ip, connection=connection)
return Response(status=http_status.HTTP_201_CREATED)
|
{
"content_hash": "982f01f9e910d9a52bcdcc65daeaf1f0",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 89,
"avg_line_length": 35.06153846153846,
"alnum_prop": 0.6520403685827117,
"repo_name": "hackerspace-silesia/jakniedojade",
"id": "5834a1fef62d634d40b8a166b0d300821005f696",
"size": "2279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jakniedojade/app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3025"
},
{
"name": "HTML",
"bytes": "4298"
},
{
"name": "JavaScript",
"bytes": "4841"
},
{
"name": "Python",
"bytes": "21653"
}
],
"symlink_target": ""
}
|
import sbs
import cp2112
i2c = cp2112.CP2112()
batt = sbs.Battery(i2c)
print "Manufacturer:", batt.manufacturer()
print "Device Name: ", batt.device_name()
print "Chemistry: ", batt.chemistry()
print "Temperature: ", batt.temperature(), "C"
print "Voltage: ", batt.voltage(), "V"
print "Aux Data: ", batt.manufacturer_data()
|
{
"content_hash": "5f65bc9175d511d6b042bda5da955947",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 47,
"avg_line_length": 28,
"alnum_prop": 0.6845238095238095,
"repo_name": "taiwenko/python",
"id": "f295d3caf20361ca0b1d3056c3df7dd150b2d2c5",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altimeter/query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3676"
},
{
"name": "C++",
"bytes": "9570"
},
{
"name": "Makefile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "602703"
},
{
"name": "Shell",
"bytes": "692"
}
],
"symlink_target": ""
}
|
'''
Copyright 2018 Chris McKinney
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy
of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from .utils import *
def navlist(args):
pass
def sidebar(args):
pass
|
{
"content_hash": "c25fec2797dd023fae29ef427148fa21",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 29.73913043478261,
"alnum_prop": 0.7207602339181286,
"repo_name": "NighttimeDriver50000/tachibanasite",
"id": "6d05594d895c767793eb3ad669e43ababf8656e6",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/autots/lists.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "105611"
},
{
"name": "HTML",
"bytes": "1041"
},
{
"name": "JavaScript",
"bytes": "10300"
},
{
"name": "PHP",
"bytes": "31138"
},
{
"name": "Python",
"bytes": "59314"
},
{
"name": "Shell",
"bytes": "2018"
}
],
"symlink_target": ""
}
|
'''
Description : Unit test for recsys.py
Author : Jin Kim jjinking(at)gmail(dot)com
License : MIT
Creation date : 2014.03.25
Last Modified : 2014.03.27
Modified By : Jin Kim jjinking(at)gmail(dot)com
'''
import os,sys
# Add project root to PYTHONPATH
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir))
import numpy as np
import pandas as pd
import unittest
from datsci import recsys
class TestRecommenderFrame(unittest.TestCase):
'''
Unit tests for the RecommenderFrame class
'''
def test_init(self):
'''
Test initializer method
'''
data = [['1','2','3'],['1','2','3']]
self.failUnlessRaises(ValueError, recsys.RecommenderFrame, data)
recframe = recsys.RecommenderFrame(data, columns=['user','item','rating'])
        self.assertEqual(len({'user', 'item', 'rating'}.intersection(set(recframe.columns))), 3)
def test_create_matrix(self):
'''
Test create_matrix method
'''
recframe = recsys.RecommenderFrame([['a', 'x', 1],
['a', 'y', 2],
['b', 'x', 4]],
columns=['user', 'item', 'rating'])
m = recframe.create_matrix()
self.assertEqual(sorted(m.columns), ['x','y'])
self.assertEqual(sorted(m.index), ['a','b'])
self.assertEqual(m.ix['a','x'], 1)
self.assertEqual(m.ix['a','y'], 2)
self.assertEqual(m.ix['b','x'], 4)
self.assertTrue(np.isnan(m.ix['b','y']))
class TestCollabFilterFrame(unittest.TestCase):
'''
Unit tests for the CollabFilterFrame class
'''
def setUp(self):
'''
Set up test data
'''
# Data from Programming Collective Intelligence by Toby Segaran
data = {'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane': 4.5,'You, Me and Dupree': 1.0,'Superman Returns': 4.0},
'Bob': {'Batman': 5.0},
'Joe': {'Superman': 3.0}}
# Convert data to dataframe containing user, item, rating columns
# user item rating
# 0 Jack Matthews Lady in the Water 3.0
# 1 Jack Matthews Snakes on a Plane 4.0
# 2 Jack Matthews You, Me and Dupree 3.5
self.recframe = recsys.CollabFilterFrame([(user, item, rating) for user,item_rating in data.iteritems()
for item,rating in item_rating.iteritems()],
columns=['user', 'item', 'rating'])
def test_similarity(self):
'''
Test similarity method
'''
dist_pearson = self.recframe.similarity('Lisa Rose', 'Gene Seymour', method='pearson')
self.assertEqual(round(dist_pearson, 12), 0.396059017191)
dist_euclidean = self.recframe.similarity('Lisa Rose', 'Gene Seymour', method='euclidean')
self.assertEqual(round(dist_euclidean, 12), 0.294298055086)
dist_manhattan = self.recframe.similarity('Lisa Rose', 'Gene Seymour', method='manhattan')
self.assertEqual(round(dist_manhattan, 12), 0.181818181818)
        # If two users have no overlapping items, similarity returns 0
dist_nooverlaps = self.recframe.similarity('Bob', 'Joe')
self.assertEqual(dist_nooverlaps, 0)
def test_get_user_matches(self):
'''
Test get_user_matches method
'''
tm = self.recframe.get_user_matches('Toby', n=3)
self.assertEqual([(round(item[0], 11), item[1]) for item in tm],
[(0.99124070716, 'Lisa Rose'),
(0.92447345164, 'Mick LaSalle'),
(0.89340514744, 'Claudia Puig')])
self.assertEqual(len(self.recframe.get_user_matches('Toby', n=5)), 5)
self.assertEqual(len(self.recframe.get_user_matches('Toby')), 8)
def test_get_recommendations(self):
'''
Test get_recommendations method
'''
self.assertEqual(self.recframe.get_recommendations('Toby', n=3, method='pearson'),
[(3.3477895267131013, 'The Night Listener'),
(2.8325499182641622, 'Lady in the Water'),
(2.5309807037655645, 'Just My Luck')])
self.assertEqual(self.recframe.get_recommendations('Toby', n=3, method='euclidean'),
[(3.457128694491423, 'The Night Listener'),
(2.7785840038149239, 'Lady in the Water'),
(2.4224820423619167, 'Just My Luck')])
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "24450ab030116d7c08e67e6bb5a6bd45",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 113,
"avg_line_length": 44.94074074074074,
"alnum_prop": 0.522333937695731,
"repo_name": "jjinking/datsci",
"id": "16f1531e89698b846f670f879473472b2b01a567",
"size": "6089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_recsys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95726"
}
],
"symlink_target": ""
}
|
import logging
import time
from injector import singleton, inject, Injector
from .config import ConfigModule, Config
from .data_builder import DataBuilder
from .data_log import DataLog
from .measurement import MeasurementDispatcher
from .sensor import SensorModule
from .store import StoreClient
from .store import StoreModule
from . import logger
INJECTOR = Injector(
[StoreModule(), ConfigModule(), SensorModule()])
root_logger = logging.getLogger(__name__)
root_logger.setLevel(logging.WARN)
log = root_logger
def set_log_level(log_level):
root_logger.setLevel(log_level)
def add_log_handler(log_handler):
root_logger.addHandler(log_handler)
def set_parent_logger(logger):
logger.parent = root_logger
@singleton
class Client:
@inject
def __init__(self, measurement_dispatcher: MeasurementDispatcher,
store_client: StoreClient,
data_log: DataLog,
config: Config):
self.measurement_dispatcher = measurement_dispatcher
self.store_client = store_client
self.data_log = data_log
self.config = config
def measure_and_store_periodically(self, period=30):
log.info("measure_and_store_periodically(%d)", period)
last_measurement = time.time() - period
while True:
timestamp = time.time()
if last_measurement < timestamp - period:
self.measure_and_store()
last_measurement += period
else:
self.measure()
log.info("time taken: %d", time.time() - last_measurement)
time.sleep(1)
def measure_and_store(self):
log.info("measure_and_store()", )
(timestamp, data) = self.measure()
self.store_data(data, timestamp)
def measure(self):
result = self.measurement_dispatcher.measure()
return result.timestamp, result.data
def store_data(self, data, timestamp):
try:
self.store_client.store(data)
log.info("stored data")
except Exception as e:
log.error("error during data transmission: create local log entry", e)
self.data_log.store(data, timestamp)
self.data_log.transmit_stored_data()
def client():
return INJECTOR.get(Client)
|
{
"content_hash": "759b5422b4094b7e96210d9d4599ba3c",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 82,
"avg_line_length": 27.759036144578314,
"alnum_prop": 0.6449652777777778,
"repo_name": "wuan/klimalogger",
"id": "cc9987751b5c11df99f04110fc1a0d215e1c4229",
"size": "2304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "klimalogger/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31507"
}
],
"symlink_target": ""
}
|
def domain_for_model(model):
return model._meta.db_table
|
{
"content_hash": "8a269ee8e5d936ac4fb706262ed79ab4",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 31,
"avg_line_length": 30,
"alnum_prop": 0.7333333333333333,
"repo_name": "danfairs/django-simpledb",
"id": "3731c0bf30fce83ec36b21ddd4ec457e6e72345f",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simpledb/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29541"
}
],
"symlink_target": ""
}
|
import os
import sys
from collections import defaultdict
from . import helpers as util
import requests
import json
class Empath:
def __init__(self, backend_url="http://54.148.189.209:8000"):
self.cats = defaultdict(list)
self.staging = {}
self.backend_url = backend_url
self.base_dir = os.path.dirname(util.__file__)
self.inv_cache = {}
self.load(self.base_dir+"/data/categories.tsv")
for f in os.listdir(self.base_dir+"/data/user/"):
if len(f.split(".")) > 1 and f.split(".")[1] == "empath":
self.load(self.base_dir+"/data/user/"+f)
def load(self,file):
with open(file,"r") as f:
for line in f:
cols = line.strip().split("\t")
name = cols[0]
terms = cols[1:]
for t in set(terms):
self.cats[name].append(t)
#self.invcats[t].append(name)
def analyze_term_window(self,doc,targets,categories=None,window_size=10,normalize=False):
tokenizer = util.window_tokenizer(window_size,targets)
return self.analyze(doc,categories,tokenizer,normalize)
def analyze(self,doc,categories=None,tokenizer="default",normalize=False):
if isinstance(doc,list):
doc = "\n".join(doc)
if tokenizer == "default":
tokenizer = util.default_tokenizer
elif tokenizer == "bigrams":
tokenizer = util.bigram_tokenizer
if not hasattr(tokenizer,"__call__"):
raise Exception("invalid tokenizer")
if not categories:
categories = self.cats.keys()
invcats = defaultdict(list)
key = tuple(sorted(categories))
if key in self.inv_cache:
invcats = self.inv_cache[key]
else:
for k in categories:
for t in self.cats[k]: invcats[t].append(k)
self.inv_cache[key] = invcats
count = {}
tokens = 0.0
for cat in categories: count[cat] = 0.0
for tk in tokenizer(doc):
tokens += 1.0
for cat in invcats[tk]:
count[cat]+=1.0
if normalize:
for cat in count.keys():
if tokens == 0:
return None
else:
count[cat] = count[cat] / tokens
return count
def create_category(self,name,seeds,model="fiction",size=100,write=True):
resp = requests.post(self.backend_url + "/create_category", json={"terms":seeds,"size":size,"model":model})
print(resp.text)
results = json.loads(resp.text)
self.cats[name] = list(set(results))
if write:
with open(self.base_dir+"/data/user/"+name+".empath","w") as f:
f.write("\t".join([name]+results))
def delete_category(self,name):
if name in self.cats: del self.cats[name]
filename = self.base_dir+"/data/user/"+name+".empath"
if os.path.isfile(filename):
os.remove(filename)
|
{
"content_hash": "01e95bbe7aa6978deb0015c6771e79dc",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 115,
"avg_line_length": 37.28048780487805,
"alnum_prop": 0.5528295714753025,
"repo_name": "Ejhfast/empath-client",
"id": "03da9f4d09716650efc3fab82c11bfa30868a650",
"size": "3057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "empath/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4849"
}
],
"symlink_target": ""
}
|
""" Trains an agent with Deep Q Learning or Double DQN on Breakout. Uses OpenAI Gym.
"""
import sys
import os
sys.path.insert(0,os.path.expanduser('~/Library/Python/2.7/lib/python/site-packages/'))
import numpy as np
import cPickle as pickle
import gym
from optparse import OptionParser
import itertools
import random
import time
from collections import deque, namedtuple
import copy
from scipy.misc import imresize
from malpi.layers import *
from malpi.model import *
from malpi.optimizer import Optimizer
from malpi.experience import Experience2
try:
import config
except:
print "Failed to load config file config.py."
print "Try copying config_empty.py to config.py and re-running."
exit()
import ast
from sklearn.linear_model import BayesianRidge, LinearRegression
import sklearn.gaussian_process as gp
from scipy.stats import norm
from scipy.optimize import minimize
def expected_improvement(x, gaussian_process, evaluated_loss, greater_is_better=False, n_params=1):
""" expected_improvement
Expected improvement acquisition function.
Arguments:
----------
x: array-like, shape = [n_samples, n_hyperparams]
The point for which the expected improvement needs to be computed.
gaussian_process: GaussianProcessRegressor object.
Gaussian process trained on previously evaluated hyperparameters.
evaluated_loss: Numpy array.
            Numpy array that contains the values of the loss function for the previously
evaluated hyperparameters.
greater_is_better: Boolean.
Boolean flag that indicates whether the loss function is to be maximised or minimised.
n_params: int.
Dimension of the hyperparameter space.
"""
x_to_predict = x.reshape(-1, n_params)
mu, sigma = gaussian_process.predict(x_to_predict, return_std=True)
if greater_is_better:
loss_optimum = np.max(evaluated_loss)
else:
loss_optimum = np.min(evaluated_loss)
scaling_factor = (-1) ** (not greater_is_better)
# In case sigma equals zero
with np.errstate(divide='ignore'):
Z = scaling_factor * (mu - loss_optimum) / sigma
expected_improvement = scaling_factor * (mu - loss_optimum) * norm.cdf(Z) + sigma * norm.pdf(Z)
        expected_improvement[sigma == 0.0] = 0.0
return -1 * expected_improvement
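# Illustrative use of expected_improvement (names mirror those used further
# down in train(); the candidate count of 1000 is an arbitrary choice):
#
#   x_random = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(1000, n_params))
#   ei = -1 * expected_improvement(x_random, model, yp,
#                                  greater_is_better=True, n_params=n_params)
#   next_sample = x_random[np.argmax(ei), :]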
def sample_next_hyperparameter(acquisition_func, gaussian_process, evaluated_loss, greater_is_better=False,
bounds=(0, 10), n_restarts=25):
""" sample_next_hyperparameter
Proposes the next hyperparameter to sample the loss function for.
Arguments:
----------
acquisition_func: function.
Acquisition function to optimise.
gaussian_process: GaussianProcessRegressor object.
Gaussian process trained on previously evaluated hyperparameters.
evaluated_loss: array-like, shape = [n_obs,]
            Numpy array that contains the values of the loss function for the previously
evaluated hyperparameters.
greater_is_better: Boolean.
Boolean flag that indicates whether the loss function is to be maximised or minimised.
bounds: Tuple.
Bounds for the L-BFGS optimiser.
n_restarts: integer.
Number of times to run the minimiser with different starting points.
"""
best_x = None
best_acquisition_value = 1
n_params = bounds.shape[0]
for starting_point in np.random.uniform(bounds[:, 0], bounds[:, 1], size=(n_restarts, n_params)):
res = minimize(fun=acquisition_func,
x0=starting_point.reshape(1, -1),
bounds=bounds,
method='L-BFGS-B',
args=(gaussian_process, evaluated_loss, greater_is_better, n_params))
if res.fun < best_acquisition_value:
best_acquisition_value = res.fun
best_x = res.x
return best_x
def bayesian_optimisation(n_iters, sample_loss, bounds, x0=None, n_pre_samples=5,
gp_params=None, random_search=False, alpha=1e-5, epsilon=1e-7):
""" bayesian_optimisation
Uses Gaussian Processes to optimise the loss function `sample_loss`.
Arguments:
----------
n_iters: integer.
Number of iterations to run the search algorithm.
sample_loss: function.
Function to be optimised.
bounds: array-like, shape = [n_params, 2].
Lower and upper bounds on the parameters of the function `sample_loss`.
x0: array-like, shape = [n_pre_samples, n_params].
Array of initial points to sample the loss function for. If None, randomly
samples from the loss function.
n_pre_samples: integer.
If x0 is None, samples `n_pre_samples` initial points from the loss function.
gp_params: dictionary.
Dictionary of parameters to pass on to the underlying Gaussian Process.
random_search: integer.
            If non-zero, optimise the acquisition function by random search
            with this many samples instead of L-BFGS-B.
alpha: double.
Variance of the error term of the GP.
epsilon: double.
Precision tolerance for floats.
"""
x_list = []
y_list = []
n_params = bounds.shape[0]
if x0 is None:
for params in np.random.uniform(bounds[:, 0], bounds[:, 1], (n_pre_samples, bounds.shape[0])):
x_list.append(params)
y_list.append(sample_loss(params))
else:
for params in x0:
x_list.append(params)
y_list.append(sample_loss(params))
xp = np.array(x_list)
yp = np.array(y_list)
# Create the GP
if gp_params is not None:
model = gp.GaussianProcessRegressor(**gp_params)
else:
kernel = gp.kernels.Matern()
model = gp.GaussianProcessRegressor(kernel=kernel,
alpha=alpha,
n_restarts_optimizer=10,
normalize_y=True)
for n in range(n_iters):
model.fit(xp, yp)
# Sample next hyperparameter
if random_search:
x_random = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(random_search, n_params))
ei = -1 * expected_improvement(x_random, model, yp, greater_is_better=True, n_params=n_params)
next_sample = x_random[np.argmax(ei), :]
else:
next_sample = sample_next_hyperparameter(expected_improvement, model, yp, greater_is_better=True, bounds=bounds, n_restarts=100)
# Duplicates will break the GP. In case of a duplicate, we will randomly sample a next query point.
if np.any(np.abs(next_sample - xp) <= epsilon):
next_sample = np.random.uniform(bounds[:, 0], bounds[:, 1], bounds.shape[0])
# Sample loss for new set of parameters
cv_score = sample_loss(next_sample)
# Update lists
x_list.append(next_sample)
y_list.append(cv_score)
# Update xp and yp
xp = np.array(x_list)
yp = np.array(y_list)
return xp, yp
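# Sketch of a call to bayesian_optimisation (the toy loss function and bounds
# below are hypothetical; train() further down drives its own GP loop instead):
#
#   def toy_loss(params):
#       lr, reg = params
#       return -((lr - 0.01) ** 2 + (reg - 0.005) ** 2)
#
#   toy_bounds = np.array([[1e-4, 0.1], [1e-4, 0.01]])
#   xp, yp = bayesian_optimisation(n_iters=30, sample_loss=toy_loss, bounds=toy_bounds)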
# {'epsilon_decay': 0.99957392597900963, 'epsilon': 0.96126118058910504, 'learning_rate': 0.0048160891703121133, 'batch_size': 32, 'best_score': 164.90000000000001, 'episodes': 3000, 'clip_error': False, 'learning_rate_decay': 0.99992369857077323, 'lr_decay_on_best': 0.94999999999999996, 'update_rate': 20, 'reg': 0.0050000000000000001, 'gamma': 0.99}
def readParams():
hparams = []
y = []
with open('CartPole-v0_dqn_won.txt', 'r') as f:
for line in f:
resd = ast.literal_eval(line)
if isinstance(resd,dict):
best = 195.0
if 'best_score' in resd:
best = resd['best_score']
sample = [32, 10, 200, 0.99, resd['epsilon'], resd['epsilon_decay'],resd['learning_rate'],resd['learning_rate_decay'],resd['lr_decay_on_best'],resd['clip_error'], 0.005]
elif isinstance(resd,list):
sample = resd[0:11]
best = resd[11]
hparams.append(sample)
y.append(best)
#hparams = np.array(hparams)
#y = np.array(y)
return (hparams,y)
#clf = BayesianRidge(compute_score=True)
#clf.fit(hparams, y)
#ols = LinearRegression()
#ols.fit(X, y)
#np.seterr(all='raise')
np.seterr(under='ignore')
def stats(arr, msg=""):
mi = np.min(arr)
ma = np.max(arr)
av = np.mean(arr)
std = np.std(arr)
abs_arr = np.abs(arr)
mi_abs = np.min(abs_arr)
ma_abs = np.max(abs_arr)
print "%sMin/Max/Mean/Stdev abs(Min/Max): %g/%g/%g/%g %g/%g" % (msg,mi,ma,av,std,mi_abs,ma_abs)
def saveModel( model, options ):
filename = os.path.join( options.dir_model, options.model_name + ".pickle" )
with open(filename, 'wb') as f:
pickle.dump( model, f, pickle.HIGHEST_PROTOCOL)
def initializeModel( name, number_actions, input_dim=(4,84,84), verbose=False ):
output = "FC-%d" % (number_actions,)
# layers = ["conv-32", "maxpool", "conv-64", "maxpool", "conv-64", "FC-512", output]
# layer_params = [{'filter_size':3, 'stride':1 },
# {'pool_stride': 2, 'pool_width': 2, 'pool_height': 2},
# {'filter_size':3, 'stride':1 },
# {'pool_stride': 2, 'pool_width': 2, 'pool_height': 2},
# {'filter_size':3, 'stride':2 },
# {}, {'relu':False} ]
# From the DQN paper, mostly
# layers = ["conv-32", "conv-64", "conv-64", "FC-512", output]
# layer_params = [{'filter_size':8, 'stride':4, 'pad':4 },
# {'filter_size':4, 'stride':2, 'pad':2},
# {'filter_size':3, 'stride':1, 'pad':1},
# {}, {'relu':False} ]
layers = ["FC-200", output]
layer_params = [ {}, {'relu':False} ]
model = MalpiModel(layers, layer_params, input_dim=input_dim, reg=0.005, dtype=np.float32, verbose=verbose)
model.name = name
if verbose:
print
model.describe()
print
return model
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x)) # sigmoid "squashing" function to interval [0,1]
def prepro(I):
""" prepro 210x160x3 uint8 frame into (84x84) float
"""
rgb_weights = [0.2989, 0.5870, 0.1140]
I = I[35:195] # crop
I = imresize(I, (84,84), interp='nearest' )
I = np.sum( I * rgb_weights, axis=2) # Convert to grayscale, shape = (84,84)
return I.astype(np.float) / 255.0
#return I.astype(np.float)
def discount_rewards(r, gamma, done, normalize=True):
""" take 1D float array of rewards and compute discounted reward.
if normalize is True: subtract mean and divide by std dev
"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(xrange(0, r.size)):
if not done[t]: running_add = 0 # reset the sum, since this was a game boundary
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
if normalize:
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discounted_r -= np.mean(discounted_r)
discounted_r /= np.std(discounted_r)
return discounted_r
def discount_rewards(r, gamma, normalize=True):
""" take 1D float array of rewards and compute discounted reward.
if normalize is True: subtract mean and divide by std dev
"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(xrange(0, r.size)):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
if normalize:
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discounted_r -= np.mean(discounted_r)
discounted_r /= np.std(discounted_r)
return discounted_r
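# Worked example for the definition directly above (which resets the running
# sum on any non-zero reward): with r = [0, 0, 1], gamma = 0.99 and
# normalize=False the result is [0.9801, 0.99, 1.0]. Note that this second
# definition shadows the earlier one that takes a `done` array.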
def make_epsilon_greedy_policy(estimator, epsilon, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function approximator and epsilon.
Args:
estimator: An estimator that returns q values for a given state
epsilon: The probability to select a random action . float between 0 and 1.
nA: Number of actions in the environment.
Returns:
A function that takes the observation as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(observation):
A = np.ones(nA, dtype=float) * epsilon / nA
q_values,_ = estimator.forward(observation, mode="test")
best_action = np.argmax(q_values[0])
A[best_action] += (1.0 - epsilon)
return A
return policy_fn
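# Possible usage (illustrative only; the training loop below actually relies on
# choose_epsilon_greedy rather than this helper):
#
#   policy = make_epsilon_greedy_policy(behavior, epsilon, num_actions)
#   probs = policy(state.reshape(1, 4))
#   action = np.random.choice(num_actions, p=probs)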
def choose_epsilon_greedy( estimator, observation, epsilon, nA ):
if np.random.random() < epsilon:
return np.random.randint(nA)
else:
q_values,_ = estimator.forward(observation.reshape(1,4), mode="test")
return np.argmax(q_values[0])
def check_weights( model ):
for k,w in model.params.iteritems():
smallest = np.min( np.abs(w) )
print "Smallest %s: %g" % (k,smallest)
mask_zeros = w != 0.0
mask = np.abs(w) < 1e-20
mask = np.logical_and(mask_zeros,mask)
if np.count_nonzero(mask) > 0:
print "Underflow in %s " % (k,)
def hyperparameterGenerator( oneRun = False ):
batch_size = 32 # backprop batch size
update_rate = 20 # every how many episodes to copy behavior model to target
gamma = 0.99 # discount factor for reward
epsilon = 0.5
epsilon_decay = 0.999
learning_rate = 0.01
learning_rate_decay = 0.999
lr_decay_on_best = 0.95
clip_error = True
reg = 0.005
hparams = { "reg": reg, "learning_rate": learning_rate, "learning_rate_decay":learning_rate_decay, "batch_size":batch_size, "update_rate":update_rate, "gamma":gamma, "epsilon":epsilon, "epsilon_decay":epsilon_decay,
"lr_decay_on_best":lr_decay_on_best, "clip_error":clip_error }
variations = np.array([0.9,1.0,1.1])
if oneRun:
reguls = [3.37091767808e-05]
lrs = [0.0002006801544726]
else:
count = 4
reguls = np.array([0.005])
epsilons = np.random.uniform( 0.5, 1.0, count )
epsilon_decays = np.random.uniform( 0.999, 0.9999, count )
lrs = np.random.uniform( 0.0001, 0.03, count )
lr_decays = np.random.uniform( 0.999, 0.99999, count )
decays_on_best = np.array([lr_decay_on_best])
clip_errors = np.array([True,False])
# reguls = np.array([3.37091767808e-05]) * variations
# lrs = np.array([0.0002006801544726]) * variations
#reguls = 10 ** np.random.uniform(-5, -4, 2) #[0.0001, 0.001, 0.01]
#lrs = 10 ** np.random.uniform(-6, -3, 5) #[1e-4, 1e-3, 1e-2]
#reguls = np.append([3.37091767808e-05],reguls)
#lrs = np.append([0.000182436504066],lrs)
for reg in reguls:
for lr in lrs:
for decay in lr_decays:
for epsilon in epsilons:
for epsilon_decay in epsilon_decays:
for decay_on_best in decays_on_best:
for clip_error in clip_errors:
hparams["reg"] = reg
hparams["learning_rate"] = lr
hparams["learning_rate_decay"] = decay
hparams["epsilon"] = epsilon
hparams["epsilon_decay"] = epsilon_decay
hparams["lr_decay_on_best"] = decay_on_best
hparams["clip_error"] = clip_error
yield hparams
def test(tmodel, env, options):
reward_100 = 0
for i in range(100):
episode_reward = 0
state = env.reset()
done = False
steps = 0
while not done and (steps < 1000):
if options.render: env.render()
q_values,_ = tmodel.forward(state.reshape(1,4), mode="test")
action = np.argmax(q_values[0])
state, reward, done, info = env.step(action)
episode_reward += reward
steps += 1
reward_100 += episode_reward
return (reward_100 / 100.0)
def train(env, options):
alpha=1e-5
epsilon=1e-7
kernel = gp.kernels.Matern()
model = gp.GaussianProcessRegressor(kernel=kernel,
alpha=alpha,
n_restarts_optimizer=10,
normalize_y=True)
#x_list,y_list = readParams()
good_list = [ [48.76090415229852, 1.6927175081103532, 223.02341007066963, 0.5678010007909667, 0.5549954432648416, 0.9933886373603302, 0.04461187669276121, 0.9911454128640629, 0.9563065642076264, 0.8080555822008355, 0.0015395898545990808, 165.0, 5000.0],
[44.05077224914717, 4.581278696929539, 567.454951740726, 0.872342953950116, 0.18049877148475657, 0.990163709408304, 0.062370916712252866, 0.9944033072903318, 0.9963371399648688, 0.6395886294825446, 0.0010001848382758618, 117.75, 5000.0],
[36.3723209296144, 17.13540920146732, 649.2612028561178, 0.13224300863461783, 0.543166266140875, 0.9943310250757145, 0.08187568538373177, 0.9966047176258499, 0.996227517495977, 0.4472571272004753, 0.00832929196043553, 105.12, 5000.0],
[10.916692091321929, 1.7197588754360758, 859.5984930832783, 0.9928960793644326, 0.1274628002990129, 0.9905321890913572, 0.08505446936131436, 0.9954039819492306, 0.9393970414024277, 0.20165955117569845, 0.00393562696555546, 184.0, 5000.0],
[24.61024966623437, 2.3382317127384797, 125.6807628925593, 0.7513928228888437, 0.2758971455651426, 0.9928318138327047, 0.013857939559698086, 0.9927166247992542, 0.9609541841323385, 0.4939770517123132, 0.004033141328968626, 127.14, 5000.0],
[48.414647941793945, 29.60459215462402, 929.5659155100193, 0.22797686540871967, 0.29012857317101626, 0.9902589981938963, 0.048126323473176816, 0.999365668290878, 0.9537590730846931, 0.3837955994859634, 0.0046700024476340925, 131.60344827586206, 5000.0],
[11.625857336308801, 1.7992254729400174, 834.250910881173, 0.9904487770340547, 0.1441466452323528, 0.99, 0.08112103123697603, 0.9967248247150776, 0.9628560158758284, 0.64953096598099, 0.005206558865528496, 134.0, 5000.0],
[ 32, 20, 100, 0.99, 0.7, 0.9995, 0.01, 0.9999, 0.95,True, 0.0005, 195.0, 2000.0 ]
]
x_list = []
y_list = []
for param in good_list:
x_list.append( param[0:11] )
y_list.append( param[11] )
xp = np.array(x_list)
yp = np.array(y_list)
# batch_size update_rate update_freq gamma epsilon epsilon_decay learning_rate learning_rate_decay lr_decay_on_best clip_error behavior.reg
bounds = np.array( [ [10, 50], [1,50], [100,1000], [0.1,1.0], [0.1,1.0], [0.99,1.0], [0.0001,0.1], [0.99,1.0], [0.9,1.0],[0.0,1.0], [0.0005,0.01] ] )
do_bayes = False
do_uniform = False
do_normal = False
next_sample = np.array( [ 32, 20, 100, 0.99, 0.7, 0.9995, 0.01, 0.9999, 0.95,True, 0.0005 ] )
scores = []
for i in range(100):
if do_bayes:
model.fit(xp, yp)
next_sample = sample_next_hyperparameter(expected_improvement, model, yp, greater_is_better=True, bounds=bounds, n_restarts=100)
# Duplicates will break the GP. In case of a duplicate, we will randomly sample a next query point.
if np.any(np.abs(next_sample - xp) <= epsilon):
next_sample = np.random.uniform(bounds[:, 0], bounds[:, 1], bounds.shape[0])
#next_sample = [32, 20, 200, 0.99, 0.88, 0.99957, 0.0045, 0.9999, 0.95, True, 0.005]
# Sample loss for new set of parameters
cv_score = train_one(env, next_sample, options)
scores.append(cv_score)
print "Score %f for %s" % (cv_score, next_sample)
# Update lists
x_list.append(next_sample)
y_list.append(cv_score)
# Update xp and yp
xp = np.array(x_list)
yp = np.array(y_list)
else:
if do_uniform:
next_sample = []
for b in bounds:
next_sample.append( np.random.uniform( b[0], b[1] ) )
elif do_normal:
next_sample = []
stddev = [ 5.0, 0.1, 50, 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.5, 0.001 ]
stdi = 0
for b in good_list[3][0:11]:
next_sample.append( np.random.normal( b, stddev[stdi] ) )
stdi += 1
bt = bounds.T
next_sample = np.clip( next_sample, bt[0], bt[1] )
print next_sample
cv_score = train_one(env, next_sample, options)
scores.append(cv_score)
print "100 iterations: %f / %f" % (np.mean(scores), np.std(scores))
def train_one(env, hparams, options):
ksteps = options.k_steps # number of frames to skip before selecting a new action
num_actions = env.action_space.n
batch_size = int(hparams[0])
update_rate = int(hparams[1])
update_freq = int(hparams[2])
gamma = hparams[3]
epsilon = hparams[4]
epsilon_decay = hparams[5]
learning_rate = hparams[6]
learning_rate_decay = hparams[7]
lr_decay_on_best = hparams[8]
if hparams[9] < 0.5:
clip_error = False
else:
clip_error = True
target = initializeModel( options.model_name, num_actions, input_dim=(4,1) )
target.reg = hparams[10]
target.params["W1"] *= 0.1
behavior = copy.deepcopy(target)
optim = Optimizer( "rmsprop", behavior, learning_rate=learning_rate, decay_rate=0.99, upd_frequency=update_freq)
reward_sum = 0
reward_100 = deque(maxlen=100)
best_test = 15.0 # test(target, env, options)
steps = 0
episode_steps = 0
episode_number = 0
state = env.reset()
exp_history = Experience2( 2000, state.shape )
with open( os.path.join( options.game + ".txt" ), 'a+') as f:
f.write( "%s = %s\n" % ('Start',time.strftime("%Y-%m-%d %H:%M:%S")) )
f.write( "%s = %s\n" % ('Model Name',target.name) )
if options.initialize:
f.write( "Weights initialized\n" )
f.write( str(target.layers) + "\n" )
f.write( str(target.layer_params) + "\n" )
f.write( "%s = %d\n" % ('batch_size',batch_size) )
f.write( "%s = %d\n" % ('update_rate',update_rate) )
f.write( "%s = %f\n" % ('gamma',gamma) )
f.write( "%s = %f\n" % ('epsilon',epsilon) )
f.write( "%s = %f\n" % ('epsilon_decay',epsilon_decay) )
f.write( "%s = %d\n" % ('k-steps',ksteps) )
f.write( "%s = %f\n" % ('learning_rate',learning_rate) )
f.write( "%s = %f\n" % ('learning_rate_decay',learning_rate_decay) )
f.write( "%s = %f\n" % ('lr_decay_on_best',lr_decay_on_best) )
f.write( "%s = %s\n" % ('clip_error',str(clip_error)) )
f.write( "Optimizer %s\n" % (optim.optim_type,) )
f.write( " %s = %f\n" % ('learning rate',optim.learning_rate) )
f.write( " %s = %f\n" % ('decay rate',optim.decay_rate) )
f.write( " %s = %f\n" % ('epsilon',optim.epsilon) )
f.write( " %s = %f\n" % ('update frequency',optim.upd_frequency) )
f.write( "\n" )
while (options.max_episodes == 0) or (episode_number < options.max_episodes):
if options.render: env.render()
action = choose_epsilon_greedy( behavior, state, epsilon, num_actions )
#action = np.random.randint(num_actions)
# step the environment once, or ksteps times
reward = 0
done = False
for k in range(ksteps):
next_state, r, d, info = env.step(action)
reward += r
if d:
done = True
reward_sum += reward
steps += ksteps
episode_steps += ksteps
exp_history.save( state, action, reward, done, next_state )
state = next_state
if (exp_history.size() > (batch_size * 5)):
states, actions, rewards, batch_done, new_states, _ = exp_history.batch( batch_size )
actions = actions.astype(np.int)
target_values, _ = target.forward( new_states, mode='test' )
double_dqn = True
if double_dqn:
behavior_values, _ = behavior.forward( new_states, mode='test' )
best_actions = np.argmax(behavior_values,axis=1)
q_target = rewards + batch_done * gamma * target_values[np.arange(batch_size), best_actions]
else:
q_target = rewards + batch_done * gamma * np.max(target_values, axis=1)
action_values, cache = behavior.forward(states, mode='train', verbose=False)
q_error = np.zeros( action_values.shape )
#q_error[ np.arange(batch_size), actions ] = q_target - action_values[ np.arange(batch_size), actions ]
q_error[ np.arange(batch_size), actions ] = action_values[ np.arange(batch_size), actions ] - q_target
dx = q_error
dx /= batch_size
if clip_error:
np.clip( dx, -1.0, 1.0, dx )
q_error = np.sum( np.square( q_error ) )
# dx needs to have shape(batch_size,num_actions), e.g. (32,6)
_, grad = behavior.backward(cache, q_error, dx )
optim.update( grad, check_ratio=False )
if done: # an episode finished
episode_number += 1
reward_100.append(reward_sum)
if episode_number % update_rate == 0:
target = copy.deepcopy(behavior)
treward = np.mean(reward_100) # test(target, env, options)
print
print 'Ep %d' % ( episode_number, )
print 'Reward : %0.2f %0.2f' % ( reward_sum, np.mean(reward_100) )
print "Test reward : %0.2f vs %0.2f" % (treward, best_test)
print "Learning rate: %g" % (optim.learning_rate,)
print "Epsilon : %g" % (epsilon,)
if treward > best_test:
best_test = treward
if treward > 195.0:
print "Final Learning rate: %f" % (optim.learning_rate,)
print "WON! In %d episodes" % (episode_number,)
break
if optim.learning_rate > 0.00001:
optim.learning_rate *= lr_decay_on_best
if optim.learning_rate > 0.00001:
optim.learning_rate *= learning_rate_decay
if epsilon > 0.1:
epsilon *= epsilon_decay
reward_sum = 0
episode_steps = 0
steps = 0
state = env.reset()
with open( os.path.join( options.game + "_dqn_won.txt" ), 'a+') as f:
hparams = np.append( hparams, [best_test, episode_number] )
f.write( "%s\n" % (hparams.tolist(),) )
with open( os.path.join( options.game + ".txt" ), 'a+') as f:
f.write( "%s = %f\n" % ('Final epsilon', epsilon) )
f.write( "%s = %f\n" % ('Final learning rate', optim.learning_rate) )
f.write( "%s = %f\n" % ('Best test score', best_test) )
f.write( "%s = %d\n" % ('Episodes', episode_number) )
f.write( "\n\n" )
return best_test
def getOptions():
usage = "Usage: python pg-pong [options] <model name>"
parser = OptionParser( usage=usage )
parser.add_option("-i","--initialize", action="store_true", default=False, help="Initialize model, save to <model name>.pickle, then start training.");
parser.add_option("-d","--dir_model", default="", help="Directory for finding/initializing model files. Defaults to current directory.");
parser.add_option("-r","--render", action="store_true", default=False, help="Render gym environment while training. Will greatly reduce speed.");
parser.add_option("-s","--starting_ep", type="int", default=0, help="Starting episode number (for record keeping).");
parser.add_option("-k","--k_steps", type="int", default=1, help="How many game steps to take before the model chooses a new action.");
parser.add_option("-p","--play", action="store_true", default=False, help="Play only. No training and always choose the best action.");
parser.add_option("--test_only", action="store_true", default=False, help="Run tests, then exit.");
parser.add_option("--desc", action="store_true", default=False, help="Describe the model, then exit.");
parser.add_option("-g","--game", default="Breakout-v0", help="The game environment to use. Defaults to Breakout.");
parser.add_option("-m","--max_episodes", default="0", type="int", help="Maximum number of episodes to train.");
parser.add_option("--upload", action="store_true", default=False, help="Monitor the training run and upload to OpenAI.");
(options, args) = parser.parse_args()
options.model_name = "HyperParamSearch"
if options.desc or options.test_only:
if len(args) != 1:
print usage
exit()
if args[0].endswith('.pickle'):
args[0] = args[0][:-7]
options.model_name = args[0]
if options.k_steps != 1 and options.k_steps != 4:
print "Game step sizes other than 1 and 4 are not currently supported."
exit()
options.dir_model = os.path.expanduser(options.dir_model)
return (options, args)
if __name__ == "__main__":
options, _ = getOptions()
env = gym.envs.make(options.game)
if hasattr(env,'get_action_meanings'):
print env.get_action_meanings()
if options.desc or options.test_only:
if options.initialize:
filename = os.path.join( options.dir_model, options.model_name + ".pickle" )
if os.path.exists(filename):
print "Model already exists at " + filename
print "Delete the existing file or don't use the --initialize/-i flag."
exit()
nA = env.action_space.n
print "Initializing model with %d actions..." % (nA,)
model = initializeModel( options.model_name, nA, input_dim=(4,1) )
model.params["W1"] *= 0.1
model.describe()
model.env = options.game
saveModel( model, options )
else:
print "Reading model..."
with open( os.path.join( options.dir_model, options.model_name+'.pickle'), 'rb') as f:
model = pickle.load( f )
if not hasattr(model, 'env'):
print "Warning, model may not work with the current environment."
if options.desc:
model.describe()
exit()
if options.test_only:
if hasattr(model, 'env'):
if model.env != options.game:
print "Model was not initialized for the current environment: %s vs %s" % (model.env,options.game)
exit()
treward = test(model, env, options)
print "Gym reward: %f" % treward
exit()
if options.upload:
env = gym.wrappers.Monitor(env, "./" + options.game, force=True)
train(env, options)
env.close()
if options.upload:
if hasattr(config, 'openai_key'):
gym.upload('./' + options.game, api_key=config.openai_key)
else:
print "Unable to upload results. Missing 'openai_key' in config."
|
{
"content_hash": "f84af0f659f187f504a7b31cc70b0ff6",
"timestamp": "",
"source": "github",
"line_count": 768,
"max_line_length": 352,
"avg_line_length": 41.009114583333336,
"alnum_prop": 0.5934910303222733,
"repo_name": "Bleyddyn/malpi",
"id": "625990f3961f286c4294e83ffeb2676ad1f82d63",
"size": "31495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exp/dqn-pole.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "4982"
},
{
"name": "Jupyter Notebook",
"bytes": "16321"
},
{
"name": "Python",
"bytes": "947508"
},
{
"name": "Shell",
"bytes": "1444"
}
],
"symlink_target": ""
}
|
from minimax import minimax
from game_tree import game_tree
from copy import deepcopy
class connect_four(object):
def __init__(self):
self.number_cols = 7
self.number_rows = 6
# self.board = np.zeros((self.number_rows, self.number_cols), dtype=np.int)
self.board = [[0 for col in range(self.number_cols)] for row in range(self.number_rows)]
self.move_number = 1
self.move_history = []
def print_board(self):
print(" "+"-"*17)
for i in range(6):
print(" | {} {} {} {} {} {} {} |".format(self.board[i][0],
self.board[i][1], self.board[i][2], self.board[i][3], self.board[i][4],
self.board[i][5], self.board[i][6]))
print(" "+"-"*17)
def make_move(self, col, color):
row = self.find_row(col)
if(row == -1):
self.print_message("couldn't make move --- column full")
return
else:
self.board[row][col] = color
self.move_history.append([row, col])
self.move_number += 1
def find_row(self, col):
for i in range(5,-1,-1):
if(self.board[i][col] == 0):
return i
return -1
def play_game(self):
keep_going = True
while(keep_going):
self.print_board()
column = input("enter column to move (q to quit; u to undo): ")
if(column == 'q'):
keep_going = False
elif(column == 'u'):
self.undo_move()
            elif(column not in ('1', '2', '3', '4', '5', '6', '7')):
self.print_message("please enter a valid column value 1-7")
else:
if(self.move_number % 2 == 1):
self.make_move(int(column)-1, 1)
else:
self.make_move(int(column)-1, 2)
def undo_move(self):
if(self.move_history):
last_move = self.move_history.pop()
row, col = last_move[0], last_move[1]
self.board[row][col] = 0
self.move_number -= 1
else:
self.print_message("no more moves to undo")
def print_message(self, message):
print('-------------------------------')
print(message)
print('-------------------------------')
def get_potential_moves(self):
        # don't forget to add a column-full check
game_states = []
for i in range(self.number_cols):
self.make_move(i, 1)
current_state = deepcopy(self.board)
game_states.append(current_state)
self.undo_move()
return game_states
def evaluate_board(self, board):
        evaluation = 10
        return evaluation
def main():
cf = connect_four()
# cf.play_game()
# cf.find_best_move()
if __name__=='__main__':
main()
|
{
"content_hash": "5ce732c66cdf6e960e5ce1f95207b152",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 96,
"avg_line_length": 32.28260869565217,
"alnum_prop": 0.4922558922558923,
"repo_name": "michaelneuder/connect_four",
"id": "194925ed5349e395296b3aabc0eb310da6dfad5d",
"size": "2993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/bin/back_end_scripts/connect_four_.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108140"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import
import io
import os
import re
import abc
import csv
import sys
import zipp
import operator
import functools
import itertools
import posixpath
import collections
from ._compat import (
install,
NullFinder,
ConfigParser,
suppress,
map,
FileNotFoundError,
IsADirectoryError,
NotADirectoryError,
PermissionError,
pathlib,
ModuleNotFoundError,
MetaPathFinder,
email_message_from_string,
PyPy_repr,
unique_ordered,
str,
)
from importlib import import_module
from itertools import starmap
__metaclass__ = type
__all__ = [
'Distribution',
'DistributionFinder',
'PackageNotFoundError',
'distribution',
'distributions',
'entry_points',
'files',
'metadata',
'requires',
'version',
]
class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
def __str__(self):
tmpl = "No package metadata was found for {self.name}"
return tmpl.format(**locals())
@property
def name(self):
name, = self.args
return name
class EntryPoint(
PyPy_repr,
collections.namedtuple('EntryPointBase', 'name value group')):
"""An entry point as defined by Python packaging conventions.
See `the packaging docs on entry points
<https://packaging.python.org/specifications/entry-points/>`_
for more information.
"""
pattern = re.compile(
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
"""
A regular expression describing the syntax for an entry point,
which might look like:
- module
- package.module
- package.module:attribute
- package.module:object.attribute
- package.module:attr [extra1, extra2]
Other combinations are possible as well.
The expression is lenient about whitespace around the ':',
following the attr, and following any extras.
"""
def load(self):
"""Load the entry point from its definition. If only a module
is indicated by the value, return that module. Otherwise,
return the named object.
"""
match = self.pattern.match(self.value)
module = import_module(match.group('module'))
attrs = filter(None, (match.group('attr') or '').split('.'))
return functools.reduce(getattr, attrs, module)
@property
def module(self):
match = self.pattern.match(self.value)
return match.group('module')
@property
def attr(self):
match = self.pattern.match(self.value)
return match.group('attr')
@property
def extras(self):
match = self.pattern.match(self.value)
return list(re.finditer(r'\w+', match.group('extras') or ''))
@classmethod
def _from_config(cls, config):
return [
cls(name, value, group)
for group in config.sections()
for name, value in config.items(group)
]
@classmethod
def _from_text(cls, text):
config = ConfigParser(delimiters='=')
# case sensitive: https://stackoverflow.com/q/1611799/812183
config.optionxform = str
try:
config.read_string(text)
except AttributeError: # pragma: nocover
# Python 2 has no read_string
config.readfp(io.StringIO(text))
return EntryPoint._from_config(config)
def __iter__(self):
"""
Supply iter so one may construct dicts of EntryPoints easily.
"""
return iter((self.name, self))
def __reduce__(self):
return (
self.__class__,
(self.name, self.value, self.group),
)
class PackagePath(pathlib.PurePosixPath):
"""A reference to a path in a package"""
def read_text(self, encoding='utf-8'):
with self.locate().open(encoding=encoding) as stream:
return stream.read()
def read_binary(self):
with self.locate().open('rb') as stream:
return stream.read()
def locate(self):
"""Return a path-like object for this path"""
return self.dist.locate_file(self)
class FileHash:
def __init__(self, spec):
self.mode, _, self.value = spec.partition('=')
def __repr__(self):
return '<FileHash mode: {} value: {}>'.format(self.mode, self.value)
class Distribution:
"""A Python distribution package."""
@abc.abstractmethod
def read_text(self, filename):
"""Attempt to load metadata file given by the name.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path):
"""
Given a path to a file in this distribution, return a path
to it.
"""
@classmethod
def from_name(cls, name):
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
"""
for resolver in cls._discover_resolvers():
dists = resolver(DistributionFinder.Context(name=name))
dist = next(iter(dists), None)
if dist is not None:
return dist
else:
raise PackageNotFoundError(name)
@classmethod
def discover(cls, **kwargs):
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for all packages.
"""
context = kwargs.pop('context', None)
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context)
for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path):
"""Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers."""
declared = (
getattr(finder, 'find_distributions', None)
for finder in sys.meta_path
)
return filter(None, declared)
@classmethod
def _local(cls, root='.'):
from pep517 import build, meta
system = build.compat_system(root)
builder = functools.partial(
meta.build,
source_dir=root,
system=system,
)
return PathDistribution(zipp.Path(meta.build_as_zip(builder)))
@property
def metadata(self):
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata. See PEP 566 for details.
"""
text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
return email_message_from_string(text)
@property
def version(self):
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self):
return EntryPoint._from_text(self.read_text('entry_points.txt'))
@property
def files(self):
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
missing.
Result may be empty if the metadata exists but is empty.
"""
file_lines = self._read_files_distinfo() or self._read_files_egginfo()
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
return file_lines and list(starmap(make_file, csv.reader(file_lines)))
def _read_files_distinfo(self):
"""
Read the lines of RECORD
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo(self):
"""
SOURCES.txt might contain literal commas, so wrap each line
in quotes.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self):
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return source and self._deps_from_requires_text(source)
@classmethod
def _deps_from_requires_text(cls, source):
section_pairs = cls._read_sections(source.splitlines())
sections = {
section: list(map(operator.itemgetter('line'), results))
for section, results in
itertools.groupby(section_pairs, operator.itemgetter('section'))
}
return cls._convert_egg_info_reqs_to_simple_reqs(sections)
@staticmethod
def _read_sections(lines):
section = None
for line in filter(None, lines):
section_match = re.match(r'\[(.*)\]$', line)
if section_match:
section = section_match.group(1)
continue
yield locals()
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and 'extra == "{name}"'.format(name=name)
def parse_condition(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = '({markers})'.format(markers=markers)
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
for section, deps in sections.items():
for dep in deps:
yield dep + parse_condition(section)
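    # Illustrative sketch, not part of the upstream module: given a
    # requires.txt containing
    #
    #   wheel >= 0.32
    #   [docs]
    #   sphinx
    #   [ssl:sys_platform=="win32"]
    #   wincertstore==0.2
    #
    # _deps_from_requires_text() yields the flattened requirements
    #
    #   'wheel >= 0.32'
    #   'sphinx; extra == "docs"'
    #   'wincertstore==0.2; (sys_platform=="win32") and extra == "ssl"'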
class DistributionFinder(MetaPathFinder):
"""
A MetaPathFinder capable of discovering installed distributions.
"""
class Context:
"""
Keyword arguments presented by the caller to
``distributions()`` or ``Distribution.discover()``
to narrow the scope of a search for distributions
in all DistributionFinders.
Each DistributionFinder may expect any parameters
and should attempt to honor the canonical
parameters defined below when appropriate.
"""
name = None
"""
Specific name for which a distribution finder should match.
A name of ``None`` matches all distributions.
"""
def __init__(self, **kwargs):
vars(self).update(kwargs)
@property
def path(self):
"""
The path that a distribution finder should search.
Typically refers to Python package paths and defaults
to ``sys.path``.
"""
return vars(self).get('path', sys.path)
@abc.abstractmethod
def find_distributions(self, context=Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching the ``context``,
a DistributionFinder.Context instance.
"""
class FastPath:
"""
Micro-optimized class for searching a path for
children.
"""
def __init__(self, root):
self.root = str(root)
self.base = os.path.basename(self.root).lower()
def joinpath(self, child):
return pathlib.Path(self.root, child)
def children(self):
with suppress(Exception):
return os.listdir(self.root or '')
with suppress(Exception):
return self.zip_children()
return []
def zip_children(self):
zip_path = zipp.Path(self.root)
names = zip_path.root.namelist()
self.joinpath = zip_path.joinpath
return unique_ordered(
child.split(posixpath.sep, 1)[0]
for child in names
)
def is_egg(self, search):
base = self.base
return (
base == search.versionless_egg_name
or base.startswith(search.prefix)
and base.endswith('.egg'))
def search(self, name):
for child in self.children():
n_low = child.lower()
if (n_low in name.exact_matches
or n_low.startswith(name.prefix)
and n_low.endswith(name.suffixes)
# legacy case:
or self.is_egg(name) and n_low == 'egg-info'):
yield self.joinpath(child)
class Prepared:
"""
A prepared search for metadata on a possibly-named package.
"""
normalized = ''
prefix = ''
suffixes = '.dist-info', '.egg-info'
exact_matches = [''][:0]
versionless_egg_name = ''
def __init__(self, name):
self.name = name
if name is None:
return
self.normalized = name.lower().replace('-', '_')
self.prefix = self.normalized + '-'
self.exact_matches = [
self.normalized + suffix for suffix in self.suffixes]
self.versionless_egg_name = self.normalized + '.egg'
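    # Illustrative sketch, not part of the upstream module; the package name is
    # hypothetical:
    #
    #   p = Prepared('Some-Package')
    #   p.normalized           -> 'some_package'
    #   p.prefix               -> 'some_package-'
    #   p.exact_matches        -> ['some_package.dist-info', 'some_package.egg-info']
    #   p.versionless_egg_name -> 'some_package.egg'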
@install
class MetadataPathFinder(NullFinder, DistributionFinder):
"""A degenerate finder for distribution packages on the file system.
This finder supplies only a find_distributions() method for versions
of Python that do not have a PathFinder find_distributions().
"""
def find_distributions(self, context=DistributionFinder.Context()):
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching ``context.name``
(or all names if ``None`` indicated) along the paths in the list
of directories ``context.path``.
"""
found = self._search_paths(context.name, context.path)
return map(PathDistribution, found)
@classmethod
def _search_paths(cls, name, paths):
"""Find metadata directories in paths heuristically."""
return itertools.chain.from_iterable(
path.search(Prepared(name))
for path in map(FastPath, paths)
)
class PathDistribution(Distribution):
def __init__(self, path):
"""Construct a distribution from a path to the metadata directory.
:param path: A pathlib.Path or similar object supporting
.joinpath(), __div__, .parent, and .read_text().
"""
self._path = path
def read_text(self, filename):
with suppress(FileNotFoundError, IsADirectoryError, KeyError,
NotADirectoryError, PermissionError):
return self._path.joinpath(filename).read_text(encoding='utf-8')
read_text.__doc__ = Distribution.read_text.__doc__
def locate_file(self, path):
return self._path.parent / path
def distribution(distribution_name):
"""Get the ``Distribution`` instance for the named package.
:param distribution_name: The name of the distribution package as a string.
:return: A ``Distribution`` instance (or subclass thereof).
"""
return Distribution.from_name(distribution_name)
def distributions(**kwargs):
"""Get all ``Distribution`` instances in the current environment.
:return: An iterable of ``Distribution`` instances.
"""
return Distribution.discover(**kwargs)
def metadata(distribution_name):
"""Get the metadata for the named package.
:param distribution_name: The name of the distribution package to query.
:return: An email.Message containing the parsed metadata.
"""
return Distribution.from_name(distribution_name).metadata
def version(distribution_name):
"""Get the version string for the named package.
:param distribution_name: The name of the distribution package to query.
:return: The version string for the package as defined in the package's
"Version" metadata key.
"""
return distribution(distribution_name).version
def entry_points():
"""Return EntryPoint objects for all installed packages.
:return: EntryPoint objects for all installed packages.
"""
eps = itertools.chain.from_iterable(
dist.entry_points for dist in distributions())
by_group = operator.attrgetter('group')
ordered = sorted(eps, key=by_group)
grouped = itertools.groupby(ordered, by_group)
return {
group: tuple(eps)
for group, eps in grouped
}
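# Illustrative usage, not part of the upstream module; 'console_scripts' is
# only a commonly used group name, not something this module guarantees exists:
#
#   scripts = entry_points().get('console_scripts', ())
#   mains = {ep.name: ep.load() for ep in scripts}  # each EntryPoint resolves to its object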
def files(distribution_name):
"""Return a list of files for the named package.
:param distribution_name: The name of the distribution package to query.
:return: List of files composing the distribution.
"""
return distribution(distribution_name).files
def requires(distribution_name):
"""
Return a list of requirements for the named package.
:return: An iterator of requirements, suitable for
packaging.requirement.Requirement.
"""
return distribution(distribution_name).requires
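# Illustrative usage of the module-level helpers, not part of the upstream
# module; 'wheel' is only an example of a distribution that may be installed:
#
#   version('wheel')   -> e.g. '0.33.6'
#   requires('wheel')  -> list of requirement strings, or None
#   files('wheel')     -> list of PackagePath objects, or None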
|
{
"content_hash": "ddd37ebe10bef9da43b1c583307cd56e",
"timestamp": "",
"source": "github",
"line_count": 620,
"max_line_length": 79,
"avg_line_length": 30.52741935483871,
"alnum_prop": 0.6130395730966345,
"repo_name": "kennethreitz/pipenv",
"id": "7031323db7b194b78a17a0865fd492e48df879e0",
"size": "18927",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pipenv/vendor/importlib_metadata/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2588085"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
}
|
from os import popen, system, chdir, mkdir
from os.path import exists, isdir, isfile
from sys import stderr,exit,platform
msg = """
This script will download a set of compatible BLAST executables, parameter and
database files used by the tcr-dist pipeline, and some TCR datasets for
testing and analysis. It should be run in the main tcr-dist/ directory.
Altogether, it will end up taking about 500 Megabytes of space.
(To reduce this a bit you can delete the .tgz files in external/ after
it completes successfully.)
Do you want to proceed? [Y/n] """
ans = raw_input(msg)
if ans and ans not in 'Yy':
print 'Setup aborted.'
exit()
old_directories = ['db','external','datasets','testing_ref']
found_old_directory = False
for d in old_directories:
if exists(d):
found_old_directory = True
if found_old_directory:
msg = """
It looks like you have some old directories from a previous setup.
I need to remove db/ external/ datasets/ and testing_ref/
Is that OK? [Y/n] """
ans = raw_input(msg)
if ans and ans not in 'Yy':
print 'Setup aborted.'
exit()
for d in old_directories:
if exists(d):
cmd = 'rm -rf '+d
print cmd
system(cmd)
# I don't know how reliable this is:
mac_osx = ( platform.lower() == "darwin" )
if mac_osx:
print 'Detected mac_osx operating system -- if not, hardcode mac_osx=False in setup.py'
def download_web_file( address ):
newfile = address.split('/')[-1]
if exists(newfile):
        print 'download_web_file: {} already exists, delete it to re-download'.format(newfile)
return
## try with wget
cmd = 'wget '+address
print cmd
system(cmd)
if not exists( newfile ):
print 'wget failed, trying curl'
cmd = 'curl -L {} -o {}'.format(address,newfile)
print cmd
system(cmd)
if not exists( newfile ):
print '[ERROR] unable to download (tried wget and curl) the link '+address
## check for python modules
try:
import numpy
except:
print '[ERROR] failed to import numpy'
exit(1)
try:
import scipy
except:
print '[ERROR] failed to import scipy'
exit(1)
try:
import matplotlib
except:
print '[ERROR] failed to import matplotlib'
exit(1)
try:
import sklearn
except:
print """
=============================================================================
=============================================================================
[ERROR]
[ERROR] Failed to import the python module sklearn (scikit-learn)
[ERROR] Some analyses (kernelPCA plots, adjusted_mutual_information) will fail
[ERROR] Take a look at http://scikit-learn.org/stable/install.html
[ERROR]
=============================================================================
=============================================================================
"""
#exit() ## not exiting since most stuff will probably still work...
## set up the external/ directory
print 'Making the ./external/ directory'
external_dir = 'external/'
if not isdir( external_dir ):
mkdir( external_dir )
chdir( external_dir )
## download blast
blastdir = './blast-2.2.16'
if not isdir( blastdir ):
if mac_osx:
# need to host this elsewhere while updating to newer version!
#address = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.16/blast-2.2.16-universal-macosx.tar.gz'
address = 'https://www.dropbox.com/s/x3e8qs9pk5w6szq/blast-2.2.16-universal-macosx.tar.gz'
else:
# ack-- need to update to a newer version of blast! temp fix move to dropbox
#address = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.16/blast-2.2.16-x64-linux.tar.gz'
address = 'https://www.dropbox.com/s/gurbwgcys6xcttm/blast-2.2.16-x64-linux.tar.gz'
tarfile = address.split('/')[-1]
if not exists( tarfile ):
print 'Downloading a rather old BLAST tool'
download_web_file( address )
if not exists( tarfile ):
print '[ERROR] download BLAST failed!'
exit(1)
cmd = 'tar -xzf '+tarfile
print cmd
system(cmd)
## download other db files
# switching to dropbox as the default since some users' networks don't like the port 7007 address
address = 'https://www.dropbox.com/s/kivfp27gbz2m2st/tcrdist_extras_v2.tgz'
backup_address = 'http://xfiles.fhcrc.org:7007/bradley_p/pub/tcrdist_extras_v2.tgz'
tarfile = address.split('/')[-1]
assert tarfile == backup_address.split('/')[-1]
if not exists( tarfile ):
print 'Downloading database files'
download_web_file( address )
if not exists( tarfile ):
print '[ERROR] download database files failed, trying a backup location'
download_web_file( backup_address )
if not exists( tarfile ):
print '[ERROR] download database files failed'
exit(1)
## md5sum check
lines = popen('md5sum '+tarfile).readlines()
if lines and len(lines[0].split()) == 2:
# rhino1 tcr-dist$ md5sum tcrdist_extras_v2.tgz
# 2705f3a79152cd0382aa6c5d4a81ad0b tcrdist_extras_v2.tgz
checksum = lines[0].split()[0]
expected_checksum = '2705f3a79152cd0382aa6c5d4a81ad0b'
if checksum == expected_checksum:
print "\n[SUCCESS] md5sum checksum for tarfile matches expected, phew!\n"
else:
print "[ERROR] OH NO! md5sum checksum for tarfile does not match: actual={} expected={}"\
.format( checksum, expected_checksum )
else:
print '[WARNING] md5sum command failed or gave unparseable output, unable to check the tarfile...'
download_dir = tarfile[:-4]
if not isdir( download_dir ):
cmd = 'tar -xzf '+tarfile
print cmd
system(cmd)
if not isdir( download_dir ):
print '[ERROR] tar failed or the database download was corrupted!'
exit(1)
cmd = 'mv {}/external/* .'.format(download_dir)
print cmd
system(cmd)
cmd = 'mv {}/db ../'.format(download_dir)
print cmd
system(cmd)
cmd = 'mv {}/datasets ../'.format(download_dir)
print cmd
system(cmd)
cmd = 'mv {}/testing_ref ../'.format(download_dir)
print cmd
system(cmd)
|
{
"content_hash": "db51abd74fb4d58d73c35a78b4ec97ab",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 129,
"avg_line_length": 28.44392523364486,
"alnum_prop": 0.6282240841136849,
"repo_name": "phbradley/tcr-dist",
"id": "f76f8611e09cd51e622ef51fef33bba1820891ae",
"size": "6314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1003952"
}
],
"symlink_target": ""
}
|
"""Tests for the private `_AutoShardDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def chunk(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
class AutoShardDatasetTest(reader_dataset_ops_test_base.TFRecordDatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(AutoShardDatasetTest, self).setUp()
self._num_files = 10
self._num_records = 10
self.test_filenames = self._createFiles()
def getAllDatasetElements(self, dataset):
actual = []
next_fn = self.getNext(dataset)
while True:
try:
actual.append(self.evaluate(next_fn()))
except errors.OutOfRangeError:
break
return actual
def assertDatasetProducesWithShuffle(self, dataset, expected, batch,
num_examples, shuffle):
if shuffle:
actual = []
next_fn = self.getNext(dataset)
for _ in range(num_examples):
elem = self.evaluate(next_fn())
if isinstance(elem, tuple):
actual.extend(elem)
else:
actual.extend(elem.tolist())
self.assertCountEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_fn())
else:
self.assertDatasetProduces(dataset, list(chunk(expected, batch)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testFlatMapReaderPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=shuffle)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (3, 8)
for r in range(0, 10)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(test_base.default_test_combinations())
def testZipReaderPipeline(self):
dataset1 = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=False)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=False)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
(b"Record %d of file %d" % (r, f), b"Record %d of file %d" % (r, f)) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testConcatenateReaderPipeline(self, shuffle):
dataset1 = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=shuffle)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset1 = dataset1.batch(5)
dataset2 = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=shuffle)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset2.batch(5)
dataset = dataset1.concatenate(dataset2)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
expected += expected
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 8, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testPipelineWithMap(self, shuffle):
dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(test_base.default_test_combinations())
def testDirectFilenameTFRecordReaderPipeline(self):
dataset = core_readers.TFRecordDataset(self.test_filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testValidPipelineWithRangeDataset(self, shuffle):
dataset = dataset_ops.Dataset.range(self._num_files)
dataset = dataset.map(lambda n: string_ops.string_join( # pylint:disable=g-long-lambda
[self.get_temp_dir(),
string_ops.string_format("/tf_record.{}.txt", [n])]))
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(params=[(1, 0, 10, 10), (2, 1, 20, 5),
(10, 1, 1, 10)])))
def testStandardReaderPipeline(self, params):
num_epochs, index, batch_size, parallel_reads = params
dataset = readers.make_tf_record_dataset(
file_pattern=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size,
parser_fn=None,
num_parallel_reads=parallel_reads,
drop_final_batch=True,
shuffle=False)
dataset = distribute._AutoShardDataset(dataset, 2, index)
outputs = self.getNext(dataset)
self._verify_records(
outputs,
batch_size=batch_size,
file_index=[i for i in range(index, self._num_records, 2)],
num_epochs=num_epochs,
interleave_cycle_length=parallel_reads,
drop_final_batch=True,
use_parser_fn=None)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(outputs())
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testSampleResNetPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=shuffle)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(sharding_policy=[
distribute_options.AutoShardPolicy.DATA,
distribute_options.AutoShardPolicy.AUTO
])))
def testShardByDataBeforePrefetch(self, sharding_policy):
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.apply(testing.assert_next(["Shard", "Prefetch"]))
dataset = dataset.prefetch(1)
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = sharding_policy
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
self.assertDatasetProduces(dataset, [0, 2])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.times(combinations.combine(
sharding_policy=[distribute_options.AutoShardPolicy.DATA,
distribute_options.AutoShardPolicy.FILE]),
combinations.combine(shuffle=[True, False]))))
def testReplicateAndShardProduceDisjointData(self, shuffle, sharding_policy):
dataset = dataset_ops.Dataset.list_files(self.test_filenames,
shuffle=shuffle)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
graph_def = dataset._as_serialized_graph(
strip_device_assignment=True,
external_state_policy=distribute_options.ExternalStatePolicy.WARN)
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = sharding_policy
ds1 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
dataset.element_spec)
ds2 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
dataset.element_spec)
ds1 = ds1.with_options(options)
ds2 = ds2.with_options(options)
ds1 = distribute._AutoShardDataset(ds1, 2, 0)
ds2 = distribute._AutoShardDataset(ds2, 2, 1)
elems1 = set(self.getAllDatasetElements(ds1))
elems2 = set(self.getAllDatasetElements(ds2))
self.assertEmpty(elems1.intersection(elems2))
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFilesWithDataSharding(self):
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = (
distribute_options.AutoShardPolicy.DATA)
dataset = core_readers._TFRecordDataset(self.test_filenames)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
# Should return "Record (0,5) of file (0 --> 9)" since we are sharding by
# individual elements, we should be able to get some data from all files.
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testAutoshardPolicyOff(self):
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = (
distribute_options.AutoShardPolicy.OFF)
dataset = core_readers._TFRecordDataset(self.test_filenames)
dataset = dataset.with_options(options)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
# Should return every record in every file since autosharding is turned off.
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithoutReaderDatasetOp(self):
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = (
distribute_options.AutoShardPolicy.FILE)
dataset = dataset_ops.Dataset.range(1024)
dataset = dataset.with_options(options)
# We are specifying that we want a file sharding policy, and this pipeline
# doesn't start with file reading, so we should error out.
with self.assertRaises(errors.NotFoundError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFiles(self):
dataset = dataset_ops.Dataset.list_files(self.test_filenames)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 500, 499)
self.assertDatasetProduces(dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testTFRecordReaderWithDirectFileNames(self):
# Using `_TFRecordDataset` creates a raw op rather than wrapping it around
# a flat_map automatically.
dataset = core_readers._TFRecordDataset(self.test_filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testTFRecordReaderWithDirectFileNamesAndShapes(self):
# Using `_TFRecordDataset` creates a raw op rather than wrapping it around
# a flat_map automatically.
dataset = core_readers._TFRecordDataset(self.test_filenames)
# BatchDataset contains `output_types` and `output_shapes`
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 5)
]
self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
@combinations.generate(test_base.default_test_combinations())
def testShardOutOfRange(self):
dataset = dataset_ops.Dataset.range(5)
with self.assertRaises(errors.InvalidArgumentError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testShardOutOfRangeEmptyDataset(self):
dataset = dataset_ops.Dataset.range(0)
with self.assertRaises(errors.OutOfRangeError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testNoReaderPipelines(self):
dataset = dataset_ops.Dataset.range(1024)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
self.assertDatasetProduces(dataset, [i for i in range(1024) if i % 2 == 0])
@combinations.generate(test_base.default_test_combinations())
def testUnknownOpInPipelineStillShardsAtTheEnd(self):
dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.apply(unique.unique())
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testInvalidWorkerIndex(self):
dataset = dataset_ops.Dataset.list_files(self.test_filenames)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
with self.assertRaises(errors.InvalidArgumentError):
dataset = distribute._AutoShardDataset(dataset, 2, 2)
self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testAssertCardinality(self):
dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = dataset.apply(cardinality.assert_cardinality(42))
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
@combinations.generate(test_base.default_test_combinations())
def testMakeBatchedFeaturesDataset(self):
files = 2
records_per_file = 5
def make_record(file_index):
example = example_pb2.Example(
features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[file_index])),
}))
return example.SerializeToString()
filenames = []
for file_index in range(files):
filename = os.path.join(self.get_temp_dir(),
"tf_record.%d.txt" % file_index)
filenames.append(filename)
writer = python_io.TFRecordWriter(filename)
for _ in range(records_per_file):
writer.write(make_record(file_index))
writer.close()
dataset = readers.make_batched_features_dataset(
file_pattern=filenames,
batch_size=records_per_file,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
},
reader=core_readers.TFRecordDataset,
num_epochs=1)
# We should shard at the file level, so that all records come from file 0.
dataset = distribute._AutoShardDataset(dataset, 2, 0)
dataset = dataset.unbatch()
output = self.getDatasetOutput(dataset)
files = [elem["file"] for elem in output]
self.assertEqual(files, [0] * records_per_file)
class AutoShardTextLineDatasetTest(
reader_dataset_ops_test_base.TextLineDatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(AutoShardTextLineDatasetTest, self).setUp()
self._num_files = 10
self._num_records = 10
self.test_filenames = self._createFiles(self._num_files, self._num_records)
@combinations.generate(test_base.default_test_combinations())
def testDirectFilenameTextLineReaderPipeline(self):
dataset = core_readers.TextLineDataset(self.test_filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"%d: %d" % (f, r) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
class AutoShardWithRebatchDatasetTest(
reader_dataset_ops_test_base.TFRecordDatasetTestBase,
parameterized.TestCase):
def _setUpFiles(self, num_files, num_records_per_file):
self._num_files = num_files
self._num_records = num_records_per_file
self.test_filenames = self._createFiles()
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithLegacyRebatch(self):
# Tests that RebatchDatasetV1 is a passthrough op.
self._setUpFiles(num_files=5, num_records_per_file=10)
dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False)
dataset = dataset.apply(
testing.assert_next(["Shard", "FlatMap", "Batch", "Rebatch"]))
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._LegacyRebatchDataset(dataset, num_replicas=5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [[self._record(3, i)] for i in range(10)]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithRebatch(self):
# Tests that RebatchDatasetV2 is a passthrough op.
self._setUpFiles(num_files=3, num_records_per_file=5)
dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False)
dataset = dataset.apply(
testing.assert_next(["Shard", "FlatMap", "Batch", "Rebatch"]))
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._RebatchDataset(dataset, batch_sizes=[2, 1, 2])
dataset = distribute._AutoShardDataset(dataset, 3, 1)
expected = [[self._record(1, 0), self._record(1, 1)], [self._record(1, 2)],
[self._record(1, 3), self._record(1, 4)]]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.times(
combinations.combine(sharding_policy=[
distribute_options.AutoShardPolicy.DATA,
distribute_options.AutoShardPolicy.AUTO
]), combinations.combine(with_prefetch=[True, False]))))
def testUseLegacyRebatchWithDataSharding(self, sharding_policy,
with_prefetch):
# This test simulates a distributed environment with 3 workers, each with
# 1 replica.
dataset = dataset_ops.Dataset.range(8)
dataset = dataset.batch(4)
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = sharding_policy
dataset = dataset.with_options(options)
# We expect the auto-shard rewrite to rewrite RebatchDatasetV2 to
# RebatchDataset(V1) for correctness reasons. This will modify the output
# of the dataset.
worker_a_dataset = distribute._RebatchDataset(
dataset, batch_sizes=[2, 1, 1])
if with_prefetch:
worker_a_dataset = worker_a_dataset.prefetch(1)
worker_a_dataset = distribute._AutoShardDataset(
worker_a_dataset, 3, 0, num_replicas=3)
expected = [[0, 1], [4, 5]]
self.assertDatasetProduces(worker_a_dataset, expected)
worker_b_dataset = distribute._RebatchDataset(
dataset, batch_sizes=[1, 1, 2])
if with_prefetch:
worker_b_dataset = worker_b_dataset.prefetch(1)
worker_b_dataset = distribute._AutoShardDataset(
worker_b_dataset, 3, 1, num_replicas=3)
expected = [[2, 3], [6, 7]]
self.assertDatasetProduces(worker_b_dataset, expected)
worker_c_dataset = distribute._RebatchDataset(
dataset, batch_sizes=[1, 2, 1])
if with_prefetch:
worker_c_dataset = worker_c_dataset.prefetch(1)
worker_c_dataset = distribute._AutoShardDataset(
worker_c_dataset, 3, 2, num_replicas=3)
expected = [[], []]
self.assertDatasetProduces(worker_c_dataset, expected)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "acb234ea53c54ef68f1867bb0ef01364",
"timestamp": "",
"source": "github",
"line_count": 596,
"max_line_length": 118,
"avg_line_length": 40.149328859060404,
"alnum_prop": 0.6850265368381462,
"repo_name": "davidzchen/tensorflow",
"id": "564dda0cf11599aef7ece525fb4526f816465314",
"size": "24618",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32240"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "887514"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "81865221"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867241"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "971474"
},
{
"name": "Jupyter Notebook",
"bytes": "549437"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1921657"
},
{
"name": "Makefile",
"bytes": "65901"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "316967"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19963"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37285698"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700629"
},
{
"name": "Smarty",
"bytes": "35540"
},
{
"name": "Starlark",
"bytes": "3604653"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from logging.config import fileConfig
from logging import getLogger, root, StreamHandler
import os
def configure(name, path=None):
""" Configure logging and return a logger and the location of its logging
configuration file.
This function expects:
+ A Splunk app directory structure::
<app-root>
bin
...
default
...
local
...
+ The current working directory is *<app-root>***/bin**.
Splunk guarantees this. If you are running the app outside of Splunk, be
sure to set the current working directory to *<app-root>***/bin** before
calling.
This function looks for a logging configuration file at each of these
locations, loading the first, if any, logging configuration file that it
finds::
local/{name}.logging.conf
default/{name}.logging.conf
local/logging.conf
default/logging.conf
The current working directory is set to *<app-root>* before the logging
configuration file is loaded. Hence, paths in the logging configuration
file are relative to *<app-root>*. The current directory is reset before
return.
You may short circuit the search for a logging configuration file by
providing an alternative file location in `path`. Logging configuration
files must be in `ConfigParser format`_.
    Arguments:
:param name: Logger name
:type name: str
:param path: Location of an alternative logging configuration file or `None`
:type path: str or NoneType
:returns: A logger and the location of its logging configuration file
.. _ConfigParser format: http://goo.gl/K6edZ8
"""
app_directory = os.path.dirname(os.getcwd())
if path is None:
probing_path = [
'local/%s.logging.conf' % name,
'default/%s.logging.conf' % name,
'local/logging.conf',
'default/logging.conf']
for relative_path in probing_path:
configuration_file = os.path.join(app_directory, relative_path)
if os.path.exists(configuration_file):
path = configuration_file
break
elif not os.path.isabs(path):
found = False
for conf in 'local', 'default':
configuration_file = os.path.join(app_directory, conf, path)
if os.path.exists(configuration_file):
path = configuration_file
found = True
break
if not found:
raise ValueError(
'Logging configuration file "%s" not found in local or default '
'directory' % path)
elif not os.path.exists(path):
        raise ValueError('Logging configuration file "%s" not found' % path)
if path is not None:
working_directory = os.getcwd()
os.chdir(app_directory)
try:
path = os.path.abspath(path)
fileConfig(path)
finally:
os.chdir(working_directory)
if len(root.handlers) == 0:
root.addHandler(StreamHandler())
logger = getLogger(name)
return logger, path
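# Illustrative usage, not part of the original module, assuming the current
# working directory is <app-root>/bin as described in the docstring; the
# command name is hypothetical:
#
#   logger, config_path = configure('countmatches')
#   logger.info('logging configured from %s', config_path)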
|
{
"content_hash": "636d62917a96d399765ffeafccb8e541",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 32,
"alnum_prop": 0.616875,
"repo_name": "splunk/splunk-app-twitter",
"id": "6e7b0081a5a7a9943784742987b31eb28b23a151",
"size": "3782",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "twitter2/bin/splunklib/searchcommands/logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144"
},
{
"name": "Python",
"bytes": "1405637"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import GroupAdmin
from django.contrib.auth.models import Group
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from grandchallenge.core.widgets import MarkdownEditorAdminWidget
class ReadOnlyUserInLine(admin.TabularInline):
model = get_user_model().groups.through
extra = 0
can_delete = False
def has_change_permission(self, request, obj=None):
return False
def has_add_permission(self, request, obj=None):
return False
class GroupWithUsers(GroupAdmin):
inlines = [ReadOnlyUserInLine]
class MarkdownFlatPageForm(FlatpageForm):
class Meta(FlatpageForm.Meta):
widgets = {
"content": MarkdownEditorAdminWidget(),
}
class MarkdownFlatPageAdmin(FlatPageAdmin):
form = MarkdownFlatPageForm
admin.site.unregister(Group)
admin.site.register(Group, GroupWithUsers)
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, MarkdownFlatPageAdmin)
|
{
"content_hash": "ee6f8f9b622c9c056741707bc35a79c7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 65,
"avg_line_length": 26.976744186046513,
"alnum_prop": 0.7698275862068965,
"repo_name": "comic/comic-django",
"id": "9e174691fbb4ef99532849486bfc2b3273c92c8d",
"size": "1160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/grandchallenge/core/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94300"
},
{
"name": "HTML",
"bytes": "101108"
},
{
"name": "JavaScript",
"bytes": "122734"
},
{
"name": "PHP",
"bytes": "99155"
},
{
"name": "Python",
"bytes": "486219"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
import base64
import unittest
import jmespath
from parameterized import parameterized
from tests.charts.helm_template_generator import render_chart
class ResultBackendConnectionSecretTest(unittest.TestCase):
non_chart_database_values = {
"user": "someuser",
"pass": "somepass",
"host": "somehost",
"protocol": "postgresql",
"port": 7777,
"db": "somedb",
"sslmode": "allow",
}
def test_should_not_generate_a_document_if_using_existing_secret(self):
docs = render_chart(
values={"data": {"resultBackendSecretName": "foo"}},
show_only=["templates/secrets/result-backend-connection-secret.yaml"],
)
assert 0 == len(docs)
@parameterized.expand(
[
("CeleryExecutor", 1),
("CeleryKubernetesExecutor", 1),
("LocalExecutor", 0),
]
)
def test_should_a_document_be_generated_for_executor(self, executor, expected_doc_count):
docs = render_chart(
values={"executor": executor},
show_only=["templates/secrets/result-backend-connection-secret.yaml"],
)
assert expected_doc_count == len(docs)
def _get_connection(self, values: dict) -> str:
docs = render_chart(
values=values,
show_only=["templates/secrets/result-backend-connection-secret.yaml"],
)
encoded_connection = jmespath.search("data.connection", docs[0])
return base64.b64decode(encoded_connection).decode()
def test_default_connection(self):
connection = self._get_connection({})
assert (
"db+postgresql://postgres:postgres@RELEASE-NAME-postgresql:5432/postgres?sslmode=disable"
== connection
)
def test_should_default_to_custom_metadata_db_connection_with_pgbouncer_overrides(self):
values = {
"pgbouncer": {"enabled": True},
"data": {"metadataConnection": {**self.non_chart_database_values}},
}
connection = self._get_connection(values)
# host, port, dbname still get overridden
assert (
"db+postgresql://someuser:somepass@RELEASE-NAME-pgbouncer:6543"
"/RELEASE-NAME-result-backend?sslmode=allow" == connection
)
def test_should_set_pgbouncer_overrides_when_enabled(self):
values = {"pgbouncer": {"enabled": True}}
connection = self._get_connection(values)
# host, port, dbname get overridden
assert (
"db+postgresql://postgres:postgres@RELEASE-NAME-pgbouncer:6543"
"/RELEASE-NAME-result-backend?sslmode=disable" == connection
)
def test_should_set_pgbouncer_overrides_with_non_chart_database_when_enabled(self):
values = {
"pgbouncer": {"enabled": True},
"data": {"resultBackendConnection": {**self.non_chart_database_values}},
}
connection = self._get_connection(values)
# host, port, dbname still get overridden even with an non-chart db
assert (
"db+postgresql://someuser:somepass@RELEASE-NAME-pgbouncer:6543"
"/RELEASE-NAME-result-backend?sslmode=allow" == connection
)
def test_should_default_to_custom_metadata_db_connection(self):
values = {
"data": {"metadataConnection": {**self.non_chart_database_values}},
}
connection = self._get_connection(values)
assert "db+postgresql://someuser:somepass@somehost:7777/somedb?sslmode=allow" == connection
def test_should_correctly_use_non_chart_database(self):
values = {"data": {"resultBackendConnection": {**self.non_chart_database_values}}}
connection = self._get_connection(values)
assert "db+postgresql://someuser:somepass@somehost:7777/somedb?sslmode=allow" == connection
def test_should_support_non_postgres_db(self):
values = {
"data": {
"resultBackendConnection": {
**self.non_chart_database_values,
"protocol": "mysql",
}
}
}
connection = self._get_connection(values)
# sslmode is only added for postgresql
assert "db+mysql://someuser:somepass@somehost:7777/somedb" == connection
def test_should_correctly_use_non_chart_database_when_both_db_are_external(self):
values = {
"data": {
"metadataConnection": {**self.non_chart_database_values},
"resultBackendConnection": {
**self.non_chart_database_values,
"user": "anotheruser",
"pass": "anotherpass",
},
}
}
connection = self._get_connection(values)
assert "db+postgresql://anotheruser:anotherpass@somehost:7777/somedb?sslmode=allow" == connection
def test_should_correctly_handle_password_with_special_characters(self):
values = {
"data": {
"resultBackendConnection": {
**self.non_chart_database_values,
"user": "username@123123",
"pass": "password@!@#$^&*()",
},
}
}
connection = self._get_connection(values)
assert (
"db+postgresql://username%40123123:password%40%21%40%23$%5E&%2A%28%29@somehost:7777/"
"somedb?sslmode=allow" == connection
)
|
{
"content_hash": "29a633381b76fb21b25854abfef89815",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 105,
"avg_line_length": 35.516129032258064,
"alnum_prop": 0.5874659400544959,
"repo_name": "lyft/incubator-airflow",
"id": "d32a3bcf0e5da3fde962044e58ead5812073ca34",
"size": "6291",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/charts/test_result_backend_connection_secret.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
}
|
"""Netowrk utils for the DHCP client implementation of the Anonymity Profile
([:rfc:`7844`])."""
import logging
import os.path
import subprocess
from dbus import SystemBus, Interface, DBusException
from pyroute2 import IPRoute
from pyroute2.netlink import NetlinkError
from .constants import RESOLVCONF, RESOLVCONF_ADMIN
logger = logging.getLogger(__name__)
def set_net(lease):
ipr = IPRoute()
try:
index = ipr.link_lookup(ifname=lease.interface)[0]
    except IndexError:
        logger.error('Interface %s not found, can not set IP.',
                     lease.interface)
        return
try:
ipr.addr('add', index, address=lease.address,
mask=int(lease.subnet_mask_cidr))
except NetlinkError as e:
if ipr.get_addr(index=index)[0].\
get_attrs('IFA_ADDRESS')[0] == lease.address:
logger.debug('Interface %s is already set to IP %s' %
(lease.interface, lease.address))
else:
logger.error(e)
else:
logger.debug('Interface %s set to IP %s' %
(lease.interface, lease.address))
try:
ipr.route('add', dst='0.0.0.0', gateway=lease.router, oif=index)
except NetlinkError as e:
if ipr.get_routes(table=254)[0].\
get_attrs('RTA_GATEWAY')[0] == lease.router:
logger.debug('Default gateway is already set to %s' %
(lease.router))
else:
logger.error(e)
else:
logger.debug('Default gateway set to %s', lease.router)
ipr.close()
set_dns(lease)
def set_dns(lease):
if systemd_resolved_status() is True:
set_dns_systemd_resolved(lease)
elif os.path.exists(RESOLVCONF_ADMIN):
set_dns_resolvconf_admin(lease)
elif os.path.exists(RESOLVCONF):
set_dns_resolvconf(lease)
def set_dns_resolvconf_admin(lease):
cmd = [RESOLVCONF_ADMIN, 'add', lease.interface, lease.name_server]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
(stdout, stderr) = proc.communicate()
return True
except TypeError as e:
logger.error(e)
return False
def set_dns_resolvconf(lease):
cmd = [RESOLVCONF, '-a', lease.interface]
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = '\n'.join(['nameserver ' + nm for nm in
lease.name_server.split()])
stdin = str.encode(stdin)
try:
(stdout, stderr) = proc.communicate(stdin)
return True
except TypeError as e:
logger.error(e)
return False
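# Illustrative sketch, not part of the original module: for a lease whose
# name_server is '10.0.0.1 10.0.0.2' (hypothetical addresses), the text piped
# to `resolvconf -a <interface>` above is:
#
#   nameserver 10.0.0.1
#   nameserver 10.0.0.2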
def set_dns_systemd_resolved(lease):
# NOTE: if systemd-resolved is not already running, we might not want to
# run it in case there's specific system configuration for other resolvers
ipr = IPRoute()
index = ipr.link_lookup(ifname=lease.interface)[0]
# Construct the argument to pass to DBUS.
    # The equivalent argument for:
    #   busctl call org.freedesktop.resolve1 /org/freedesktop/resolve1 \
    #     org.freedesktop.resolve1.Manager SetLinkDNS 'ia(iay)' 2 1 2 4 1 2 3 4
    # is SetLinkDNS(2, [(2, [1, 2, 3, 4])]).
iay = [(2, [int(b) for b in ns.split('.')])
for ns in lease.name_server.split()]
# if '.' in ns
# else (10, [ord(x) for x in
# socket.inet_pton(socket.AF_INET6, ns)])
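    # Illustrative sketch, not part of the original module: for a lease whose
    # name_server is '10.0.0.1 10.0.0.2' (hypothetical addresses), iay becomes
    # [(2, [10, 0, 0, 1]), (2, [10, 0, 0, 2])], i.e. one (AF_INET, octets)
    # pair per configured IPv4 name server.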
bus = SystemBus()
resolved = bus.get_object('org.freedesktop.resolve1',
'/org/freedesktop/resolve1')
manager = Interface(resolved,
dbus_interface='org.freedesktop.resolve1.Manager')
try:
manager.SetLinkDNS(index, iay)
return True
except DBusException as e:
logger.error(e)
return False
def systemd_resolved_status():
bus = SystemBus()
systemd = bus.get_object('org.freedesktop.systemd1',
'/org/freedesktop/systemd1')
manager = Interface(systemd,
dbus_interface='org.freedesktop.systemd1.Manager')
    unit = manager.LoadUnit('systemd-resolved.service')
proxy = bus.get_object('org.freedesktop.systemd1', str(unit))
r = proxy.Get('org.freedesktop.systemd1.Unit',
'ActiveState',
dbus_interface='org.freedesktop.DBus.Properties')
if str(r) == 'active':
return True
return False
|
{
"content_hash": "1cb80aa6fb769b589ee34e8cacdeff1c",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 78,
"avg_line_length": 35.046875,
"alnum_prop": 0.603655818100758,
"repo_name": "juga0/dhcpcanon",
"id": "20cc6bc1f12f146505e4052941ae1116dfa7f530",
"size": "4605",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dhcpcanon/netutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "4183"
},
{
"name": "Python",
"bytes": "113427"
},
{
"name": "Shell",
"bytes": "15246"
}
],
"symlink_target": ""
}
|
import unittest
import numpy.testing as npt
import pandas as pd
import titanic
AGE_MEDIAN = 28.0
class TestStuff(unittest.TestCase):
def assert_preconditions(self, td):
npt.assert_array_equal(
['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp',
'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked'],
td.columns,
"Expected columns")
self.assertTrue(
td['Age'].isnull().values.sum() > 100,
"has missing age values")
self.assertEqual(
AGE_MEDIAN,
td['Age'].median())
npt.assert_array_equal(
['male', 'female'],
td['Sex'].unique(),
"non numerical values for sex")
def test_preconditions(self):
td = pd.read_csv('train.csv')
self.assert_preconditions(td)
def test_preprocess(self):
td = pd.read_csv('train.csv')
preprocess_fn = titanic.make_preprocesser(td)
td_preprocessed = preprocess_fn(td, scale=False)
cabin_sectors = ["cabin_sector_{}".format(l) for l in 'bcde']
embarked_hot = ["embarked_{}".format(l) for l in "CQS"]
npt.assert_array_equal(
['PassengerId', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare'] + cabin_sectors + embarked_hot,
td_preprocessed.columns,
'Picked columns')
# Age
self.assertEqual(
td_preprocessed['Age'].isnull().values.sum(),
0,
"no missing values")
missing_age_idxs = td[['Age']].isnull().any(axis=1)
npt.assert_array_equal(
[AGE_MEDIAN],
td_preprocessed[missing_age_idxs]['Age'].unique(),
'previous NaN age values should be filled in by median')
# Sex
orig_sex = td['Sex'].head(10)
mapped_sex = td_preprocessed['Sex'].head(10)
npt.assert_array_equal(
[{'male': 0, 'female': 1}[gender] for gender in orig_sex],
mapped_sex,
'should map male/female to 0/1')
# make sure we haven't mutated the original
self.assert_preconditions(td)
def test_preprocess_with_scaling(self):
td = pd.read_csv('train.csv')
preprocess_fn = titanic.make_preprocesser(td)
td_preprocessed = preprocess_fn(td, scale=True)
|
{
"content_hash": "fc7f4867f41337c2c409a53094290c84",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 109,
"avg_line_length": 31.675675675675677,
"alnum_prop": 0.556740614334471,
"repo_name": "krosaen/ml-study",
"id": "df4d98884bbc65ead4de112a61cd4373ce7cd8b5",
"size": "2344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaggle/titanic2/titanic_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6432466"
},
{
"name": "Python",
"bytes": "39206"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from app.openapi_server.models.base_model_ import Model
from openapi_server import util
class SwapSpaceMonitorMemoryUsage2(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, _class: str=None, available_physical_memory: int=None, available_swap_space: int=None, total_physical_memory: int=None, total_swap_space: int=None): # noqa: E501
"""SwapSpaceMonitorMemoryUsage2 - a model defined in Swagger
:param _class: The _class of this SwapSpaceMonitorMemoryUsage2. # noqa: E501
:type _class: str
:param available_physical_memory: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2. # noqa: E501
:type available_physical_memory: int
:param available_swap_space: The available_swap_space of this SwapSpaceMonitorMemoryUsage2. # noqa: E501
:type available_swap_space: int
:param total_physical_memory: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2. # noqa: E501
:type total_physical_memory: int
:param total_swap_space: The total_swap_space of this SwapSpaceMonitorMemoryUsage2. # noqa: E501
:type total_swap_space: int
"""
self.swagger_types = {
'_class': str,
'available_physical_memory': int,
'available_swap_space': int,
'total_physical_memory': int,
'total_swap_space': int
}
self.attribute_map = {
'_class': '_class',
'available_physical_memory': 'availablePhysicalMemory',
'available_swap_space': 'availableSwapSpace',
'total_physical_memory': 'totalPhysicalMemory',
'total_swap_space': 'totalSwapSpace'
}
self.__class = _class
self._available_physical_memory = available_physical_memory
self._available_swap_space = available_swap_space
self._total_physical_memory = total_physical_memory
self._total_swap_space = total_swap_space
@classmethod
def from_dict(cls, dikt) -> 'SwapSpaceMonitorMemoryUsage2':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The SwapSpaceMonitorMemoryUsage2 of this SwapSpaceMonitorMemoryUsage2. # noqa: E501
:rtype: SwapSpaceMonitorMemoryUsage2
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self) -> str:
"""Gets the _class of this SwapSpaceMonitorMemoryUsage2.
:return: The _class of this SwapSpaceMonitorMemoryUsage2.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""Sets the _class of this SwapSpaceMonitorMemoryUsage2.
:param _class: The _class of this SwapSpaceMonitorMemoryUsage2.
:type _class: str
"""
self.__class = _class
@property
def available_physical_memory(self) -> int:
"""Gets the available_physical_memory of this SwapSpaceMonitorMemoryUsage2.
:return: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2.
:rtype: int
"""
return self._available_physical_memory
@available_physical_memory.setter
def available_physical_memory(self, available_physical_memory: int):
"""Sets the available_physical_memory of this SwapSpaceMonitorMemoryUsage2.
:param available_physical_memory: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2.
:type available_physical_memory: int
"""
self._available_physical_memory = available_physical_memory
@property
def available_swap_space(self) -> int:
"""Gets the available_swap_space of this SwapSpaceMonitorMemoryUsage2.
:return: The available_swap_space of this SwapSpaceMonitorMemoryUsage2.
:rtype: int
"""
return self._available_swap_space
@available_swap_space.setter
def available_swap_space(self, available_swap_space: int):
"""Sets the available_swap_space of this SwapSpaceMonitorMemoryUsage2.
:param available_swap_space: The available_swap_space of this SwapSpaceMonitorMemoryUsage2.
:type available_swap_space: int
"""
self._available_swap_space = available_swap_space
@property
def total_physical_memory(self) -> int:
"""Gets the total_physical_memory of this SwapSpaceMonitorMemoryUsage2.
:return: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2.
:rtype: int
"""
return self._total_physical_memory
@total_physical_memory.setter
def total_physical_memory(self, total_physical_memory: int):
"""Sets the total_physical_memory of this SwapSpaceMonitorMemoryUsage2.
:param total_physical_memory: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2.
:type total_physical_memory: int
"""
self._total_physical_memory = total_physical_memory
@property
def total_swap_space(self) -> int:
"""Gets the total_swap_space of this SwapSpaceMonitorMemoryUsage2.
:return: The total_swap_space of this SwapSpaceMonitorMemoryUsage2.
:rtype: int
"""
return self._total_swap_space
@total_swap_space.setter
def total_swap_space(self, total_swap_space: int):
"""Sets the total_swap_space of this SwapSpaceMonitorMemoryUsage2.
:param total_swap_space: The total_swap_space of this SwapSpaceMonitorMemoryUsage2.
:type total_swap_space: int
"""
self._total_swap_space = total_swap_space
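def _example_round_trip():
    """Illustrative usage sketch (not part of the generated model): builds the
    object from the camelCase wire names declared in attribute_map. It assumes
    util.deserialize_model maps those JSON keys back onto the snake_case
    attributes; the payload values are made up."""
    payload = {
        'availablePhysicalMemory': 2048,
        'availableSwapSpace': 512,
        'totalPhysicalMemory': 4096,
        'totalSwapSpace': 1024,
    }
    usage = SwapSpaceMonitorMemoryUsage2.from_dict(payload)
    assert usage.total_swap_space == 1024
    return usage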
|
{
"content_hash": "06317dd9acf53f362ef1fe55bb05c511",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 185,
"avg_line_length": 35.174698795180724,
"alnum_prop": 0.6655249186504538,
"repo_name": "cliffano/swaggy-jenkins",
"id": "3bd0d03e51f4ee32fc77e5ead075178fe27aeb61",
"size": "5856",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clients/python-blueplanet/generated/app/openapi_server/models/swap_space_monitor_memory_usage2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "569823"
},
{
"name": "Apex",
"bytes": "741346"
},
{
"name": "Batchfile",
"bytes": "14792"
},
{
"name": "C",
"bytes": "971274"
},
{
"name": "C#",
"bytes": "5131336"
},
{
"name": "C++",
"bytes": "7799032"
},
{
"name": "CMake",
"bytes": "20609"
},
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Clojure",
"bytes": "129018"
},
{
"name": "Crystal",
"bytes": "864941"
},
{
"name": "Dart",
"bytes": "876777"
},
{
"name": "Dockerfile",
"bytes": "7385"
},
{
"name": "Eiffel",
"bytes": "424642"
},
{
"name": "Elixir",
"bytes": "139252"
},
{
"name": "Elm",
"bytes": "187067"
},
{
"name": "Emacs Lisp",
"bytes": "191"
},
{
"name": "Erlang",
"bytes": "373074"
},
{
"name": "F#",
"bytes": "556012"
},
{
"name": "Gherkin",
"bytes": "951"
},
{
"name": "Go",
"bytes": "345227"
},
{
"name": "Groovy",
"bytes": "89524"
},
{
"name": "HTML",
"bytes": "2367424"
},
{
"name": "Haskell",
"bytes": "680841"
},
{
"name": "Java",
"bytes": "12164874"
},
{
"name": "JavaScript",
"bytes": "1959006"
},
{
"name": "Kotlin",
"bytes": "1280953"
},
{
"name": "Lua",
"bytes": "322316"
},
{
"name": "Makefile",
"bytes": "11882"
},
{
"name": "Nim",
"bytes": "65818"
},
{
"name": "OCaml",
"bytes": "94665"
},
{
"name": "Objective-C",
"bytes": "464903"
},
{
"name": "PHP",
"bytes": "4383673"
},
{
"name": "Perl",
"bytes": "743304"
},
{
"name": "PowerShell",
"bytes": "678274"
},
{
"name": "Python",
"bytes": "5529523"
},
{
"name": "QMake",
"bytes": "6915"
},
{
"name": "R",
"bytes": "840841"
},
{
"name": "Raku",
"bytes": "10945"
},
{
"name": "Ruby",
"bytes": "328360"
},
{
"name": "Rust",
"bytes": "1735375"
},
{
"name": "Scala",
"bytes": "1387368"
},
{
"name": "Shell",
"bytes": "407167"
},
{
"name": "Swift",
"bytes": "342562"
},
{
"name": "TypeScript",
"bytes": "3060093"
}
],
"symlink_target": ""
}
|
import sqlalchemy as sa
from sqlalchemy_continuum import count_versions, versioning_manager
from tests import TestCase
class TestInsert(TestCase):
def _insert(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
return article
def test_insert_creates_version(self):
article = self._insert()
version = article.versions.all()[-1]
assert version.name == u'Some article'
assert version.content == u'Some content'
assert version.transaction.id == version.transaction_id
def test_stores_operation_type(self):
article = self._insert()
assert article.versions[0].operation_type == 0
def test_multiple_consecutive_flushes(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.flush()
article2 = self.Article()
article2.name = u'Some article'
article2.content = u'Some content'
self.session.add(article2)
self.session.flush()
self.session.commit()
assert article.versions.count() == 1
assert article2.versions.count() == 1
class TestInsertWithDeferredColumn(TestCase):
def create_models(self):
class TextItem(self.Model):
__tablename__ = 'text_item'
__versioned__ = {}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.orm.deferred(sa.Column(sa.Unicode(255)))
self.TextItem = TextItem
def test_insert(self):
item = self.TextItem()
self.session.add(item)
self.session.commit()
assert count_versions(item) == 1
class TestInsertNonVersionedObject(TestCase):
def create_models(self):
class TextItem(self.Model):
__tablename__ = 'text_item'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.orm.deferred(sa.Column(sa.Unicode(255)))
class Tag(self.Model):
__tablename__ = 'tag'
__versioned__ = {}
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.orm.deferred(sa.Column(sa.Unicode(255)))
self.TextItem = TextItem
def test_does_not_create_transaction(self):
item = self.TextItem()
self.session.add(item)
self.session.commit()
assert self.session.query(
versioning_manager.transaction_cls
).count() == 0
|
{
"content_hash": "f2292af8447200b47ea91edb8c021029",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 76,
"avg_line_length": 32.28048780487805,
"alnum_prop": 0.6135247449943332,
"repo_name": "avilaton/sqlalchemy-continuum",
"id": "12b0bf048a1ded1cc1805e40f2ae3321ba396578",
"size": "2647",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_insert.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "304946"
}
],
"symlink_target": ""
}
|
import os
import shutil
import sys
import time
import unittest
from argparse import Namespace
from rnglib import SimpleRNG
from fusegen import invoke_shell, make_fuse_pkg, SH
from merkletree import MerkleTree
class TestFuseGeneration(unittest.TestCase):
def setUp(self):
self.rng = SimpleRNG(time.time())
def tearDown(self):
pass
# FUNCTIONS TO MODIFY IN-MEMORY DATA STRUCTURE AND DISK IMAGE
# XXX STUB XXX
# FUNCTIONS TO DETERMINE EQUALITY OF IN-MEMORY DATA STRUCTURE
# AND DISK IMAGE (either under mountPoint or rootDir)
# XXX STUB XXX
def fiddle_with_files(self, pkg_name, path_to_pkg, umount_cmd):
""" Enter with the file system mounted """
work_dir = os.path.join(path_to_pkg, 'workdir')
mount_point = os.path.join(work_dir, 'mount_point')
root_dir = os.path.join(work_dir, 'rootdir')
# Devise a directory structure, say M files wide, N directories deep.
# The files are of random-ish length, populated with random-ish data.
sample_name = self.rng.next_file_name(16)
path_to_sample = os.path.join(mount_point, sample_name)
# builds a directory tree with a depth of 4, 5 files (including
# directories) at each level, and 16 <= file length <= 128
self.rng.next_data_dir(path_to_dir=path_to_sample,
depth=4, width=5, max_len=128, min_len=16)
# DEBUG
print("creating tree1")
sys.stdout.flush()
# END
tree1 = MerkleTree.create_from_file_system(path_to_sample)
self.assertTrue(tree1 is not None)
# If this succeeds, we have written the directory structure on the
# mount point.
# Delete some files, modifying in-memory data structure accordingly
# Shorten some files, modifying in-memory data structure accordingly
# Lengthen some files, modifying in-memory data structure accordingly
# Unmount the file system
chatter = invoke_shell(umount_cmd)
# Verify that the expected directory structure appears below
# the root directory.
path_via_root = os.path.join(root_dir, sample_name)
# DEBUG
print("creating tree2")
sys.stdout.flush()
# END
tree2 = MerkleTree.create_from_file_system(path_via_root)
self.assertTrue(tree2 is not None)
self.assertTrue(tree1 == tree2)
# DEBUG
print("directory trees are equal")
sys.stdout.flush()
# END
return chatter
def exercise_file_system(self, pkg_name, path_to_pkg):
dir_now = os.getcwd()
os.chdir(path_to_pkg)
path_to_bin = os.path.join(path_to_pkg, 'bin')
mount_cmd = [
SH,
os.path.join(
path_to_bin,
'mount%s' %
pkg_name.upper()),
]
umount_cmd = [
SH,
os.path.join(
path_to_bin,
'umount%s' %
pkg_name.upper()),
]
chatter = ''
try:
chatter = invoke_shell(mount_cmd)
chatter += self.fiddle_with_files(pkg_name,
path_to_pkg, umount_cmd)
except Exception as exc:
print(exc)
else:
# XXX STUB XXX
pass
finally:
# unmount the file system, ignoring any exceptions
# DEBUG
print("enter finally block")
sys.stdout.flush()
# END
try:
invoke_shell(umount_cmd)
except BaseException:
pass
if chatter and chatter != '':
print(chatter)
os.chdir(dir_now)
# DEBUG
print("after fiddling with files we are back in %s" % dir_now)
sys.stdout.flush()
# END
def do_bae_test(self, logging=False, instrumenting=False):
"""
Build the selected type of file system under dev_dir and
then run exercise_file_system() on it.
"""
dev_dir = '/home/jdd/dev/c'
pkg_name = 'xxxfs'
if instrumenting:
pkg_name += 'I'
if logging:
pkg_name += 'L'
path_to_pkg = os.path.join(dev_dir, pkg_name)
if os.path.exists(path_to_pkg):
# WORKING HERE
print("directory exists: %s" % path_to_pkg)
# END HERE
shutil.rmtree(path_to_pkg)
cmds = Namespace()
setattr(cmds, 'ac_prereq', '2.6.9')
setattr(cmds, 'dev_dir', dev_dir)
setattr(cmds, 'email_addr', 'jddixon at gmail dot com')
setattr(cmds, 'force', True)
setattr(cmds, 'instrumenting', instrumenting)
setattr(cmds, 'logging', logging)
setattr(cmds, 'my_date', "%04d-%02d-%02d" % time.gmtime()[:3])
setattr(cmds, 'my_version', '1.2.3')
setattr(cmds, 'path_to_pkg', path_to_pkg)
setattr(cmds, 'pkg_name', pkg_name)
setattr(cmds, 'lc_name', pkg_name.lower())
setattr(cmds, 'uc_name', pkg_name.upper())
setattr(cmds, 'testing', False)
setattr(cmds, 'verbose', False)
# DEBUG
print(cmds)
# END
# create the target file system
make_fuse_pkg(cmds)
# invoke the build command
dir_now = os.getcwd()
os.chdir(path_to_pkg)
cmd = [SH, os.path.join(path_to_pkg, 'build'), ]
chatter = ''
try:
chatter = invoke_shell(cmd)
except Exception as exc:
print(exc)
if chatter and chatter != '':
print(chatter)
os.chdir(dir_now)
# DEBUG
print("we are back in %s" % dir_now)
# END
# run test verifying that the file system works as expected
self.exercise_file_system(pkg_name, path_to_pkg)
def do_instruments_test(self):
self.do_bae_test(instrumenting=True)
def do_logging_test(self):
self.do_bae_test(logging=True)
def do_test_logging_and_instrumented(self):
self.do_bae_test(logging=True, instrumenting=True)
def test_fuse_generation(self):
self.do_bae_test()
self.do_instruments_test()
self.do_logging_test()
self.do_test_logging_and_instrumented()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "f18ce57b33e312885fb22d305bc49e56",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 77,
"avg_line_length": 31.33170731707317,
"alnum_prop": 0.5606414448077223,
"repo_name": "jddixon/fusegen",
"id": "e58e8c6e5b8783dc781327f81e5ef3a4d62f4375",
"size": "6480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fuse_generation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "54612"
},
{
"name": "CSS",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "84646"
},
{
"name": "Shell",
"bytes": "50121"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
import numpy as np
import bosehubbard # model base
import graph # forcedirectedgraph layout
import scipy.linalg as linalg
import scipy.sparse as sparse
# $$\ $$\ $$\ $$\
# $$$\ $$$ | $$ | $$ |
# $$$$\ $$$$ | $$$$$$\ $$$$$$$ | $$$$$$\ $$ |
# $$\$$\$$ $$ |$$ __$$\ $$ __$$ |$$ __$$\ $$ |
# $$ \$$$ $$ |$$ / $$ |$$ / $$ |$$$$$$$$ |$$ |
# $$ |\$ /$$ |$$ | $$ |$$ | $$ |$$ ____|$$ |
# $$ | \_/ $$ |\$$$$$$ |\$$$$$$$ |\$$$$$$$\ $$ |
# \__| \__| \______/ \_______| \_______|\__|
class Model(bosehubbard.Model):
"""
Extended version of Bose-Hubbard model with caching of number-sectors.
"""
def __init__(self, Es, links, Us, W=None):
"""
Initiate our scattering structure.
Parameters
----------
Es : list
List of onsite energies.
links : list of lists
List of links on the form [site_1, site_2, strength].
Us : list
Onsite interaction strengths
"""
bosehubbard.Model.__init__(self, Es, links, Us, W)
self.reset()
def numbersector(self, nb):
"""
Returns a specific particle number sector object based on this model.
Cache the result for later retrieval.
Parameters
----------
nb : int
Number of bosons in the given number sector
"""
if nb not in self._cache['ns']:
self._cache['ns'][nb] = bosehubbard.NumberSector(self.n, nb, model=self)
return self._cache['ns'][nb]
def reset(self):
"""
Remove all cached sectors
"""
self._cache = {'ns': {}}
def draw(self, fig=None, ax=None):
"""
Clever force directed plot of any graph.
"""
g = graph.Graph(self.Es, None, self.links)
g.forcedirectedlayout()
g.plot(fig, ax)
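def _example_numbersector_caching():
    """Illustrative sketch (not part of the original module): repeated requests
    for the same boson number return the cached NumberSector until reset() is
    called. The Es/links/Us values below are made up for illustration."""
    model = Model(Es=[0.0, 0.0], links=[[0, 1, 1.0]], Us=[0.0, 0.0])
    ns2 = model.numbersector(2)
    assert model.numbersector(2) is ns2    # served from the cache
    model.reset()
    assert model.numbersector(2) is not ns2    # cache was cleared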
# $$$$$$\ $$\ $$\
# $$ __$$\ $$ | $$ |
# $$ / \__|$$$$$$$\ $$$$$$\ $$$$$$$\ $$$$$$$\ $$$$$$\ $$ |
# $$ | $$ __$$\ \____$$\ $$ __$$\ $$ __$$\ $$ __$$\ $$ |
# $$ | $$ | $$ | $$$$$$$ |$$ | $$ |$$ | $$ |$$$$$$$$ |$$ |
# $$ | $$\ $$ | $$ |$$ __$$ |$$ | $$ |$$ | $$ |$$ ____|$$ |
# \$$$$$$ |$$ | $$ |\$$$$$$$ |$$ | $$ |$$ | $$ |\$$$$$$$\ $$ |
# \______/ \__| \__| \_______|\__| \__|\__| \__| \_______|\__|
class Channel:
"""
Coupling between a single channel and one or *more* sites.
"""
def __init__(self, site=None, sites=None, strength=None, strengths=None, positions=None):
"""
Initialize coupling object.
Parameters
----------
site : int
Site index
sites : list
list of site indices for each coupling to this channel
strength : float
Coupling strength
strengths : list
list of coupling strengths for each coupling
positions: list
list of positions of each coupling coordinate
"""
# set sites and strenths
sites = np.atleast_1d(sites if site is None else site)
strengths = np.atleast_1d(strengths if strength is None else strength)
# set positions.
if positions is None:
positions = [0] * len(sites)
positions = np.atleast_1d(positions)
# indices of non-zero strength
idx = strengths != 0
# re-index everything
self.sites = sites[idx]
self.strengths = strengths[idx]
self.positions = positions[idx]
# number of couplings
self.n = len(self.sites)
# is the coupling local or quasi-local
self.local = np.allclose(self.positions, self.positions[0] * np.ones((self.n, )), 1e-8)
# if all couplings are local, simplify the results by fixing the positions to zero
if self.local:
self.positions = np.zeros((self.n, ), dtype=np.float64)
def gtilde(self, phi=0):
"""
Effective coupling strengths dressed by phase factors.
Note that gtilde() explicitly implements the prefactor to b^dagger and **not** b.
Parameters
----------
phi : float
The energy/wavenumber parameter in units of inverse length.
"""
return self.strengths * np.exp(1j * phi * self.positions)
@property
def gs(self):
"""Alias for all coupling strengths."""
return self.strengths
@property
def xs(self):
"""Alias for positions."""
return self.positions
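def _example_channel_phases():
    """Illustrative sketch (not part of the original module): a channel coupled
    to two sites at different positions is quasi-local, and gtilde() dresses
    each strength with exp(1j * phi * position). All numbers are made up."""
    ch = Channel(sites=[0, 2], strengths=[0.1, 0.1], positions=[0.0, 1.0])
    assert ch.local is False
    gt = ch.gtilde(phi=np.pi)    # second coupling acquires a phase of pi
    assert np.allclose(gt, [0.1, -0.1])
    local_ch = Channel(site=1, strength=0.2)    # single, purely local coupling
    assert local_ch.local is True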
# $$$$$$\ $$\
# $$ __$$\ $$ |
# $$ / \__| $$$$$$\ $$$$$$\ $$\ $$\ $$$$$$\
# \$$$$$$\ $$ __$$\\_$$ _| $$ | $$ |$$ __$$\
# \____$$\ $$$$$$$$ | $$ | $$ | $$ |$$ / $$ |
# $$\ $$ |$$ ____| $$ |$$\ $$ | $$ |$$ | $$ |
# \$$$$$$ |\$$$$$$$\ \$$$$ |\$$$$$$ |$$$$$$$ |
# \______/ \_______| \____/ \______/ $$ ____/
# $$ |
# $$ |
# \__|
class Setup:
"""
A complete quasi-local scattering Setup with
scattering structure (model), channels, and parasitic couplings.
This setup allows for a quasi-locally coupled scatterer.
We employ the Markov approximation for propagation
within the channels.
We retain the phase acquired between coupling sites, as well as
a certain directionality in terms of off-diagonal elements of the
coupling Hamiltonian. In addition we describe dynamics within the
scatterer exactly.
"""
def __init__(self, model, channels, parasites=None):
"""
Initialize the scattering setup.
Parameters
----------
model : Model object
Describes the bosehubbard scattering centre
channels : list of channels objects
List of channels
parasites : List of Coupling objects
List of parasitic coupling objects
"""
self.model = model
self.channels = tuple(channels)
self.parasites = tuple(parasites) if parasites is not None else ()
# is the setup local?
self.local = all([channel.local for channel in self.channels])
# reset all caches
self.reset(model=False)
def reset(self, model=True):
"""Delete all caches."""
if model:
self.model.reset()
self._cache = {'eigen': {}, 'trans': {}, 'trsn': {}, 'sigma': {}}
def eigenbasis(self, nb, phi=0):
"""
Calculates the generalized eigen-energies along with
the left and right eigen-basis.
Parameters
----------
nb : int
Number of bosons
phi : float
Phase factor for the relevant photonic state
"""
phi = 0 if self.local else phi
ckey = '{}-{}'.format(nb, phi)
if ckey not in self._cache['eigen']:
# generate number sector
ns1 = self.model.numbersector(nb)
# get the size of the basis
ns1size = ns1.basis.len # length of the number sector basis
# G1i = xrange(ns1size) # our Greens function?
# self energy
sigma = self.sigma(nb, phi)
# Effective Hamiltonian
H1n = ns1.hamiltonian + sigma
# Complete diagonalization
E1, psi1r = linalg.eig(H1n.toarray(), left=False)
psi1l = np.conj(np.linalg.inv(psi1r)).T
# check for dark states (throw a warning if one shows up)
# if (nb > 0):
# Setup.check_for_dark_states(nb, E1)
self._cache['eigen'][ckey] = (E1, psi1l, psi1r)
return self._cache['eigen'][ckey]
@staticmethod
def check_for_dark_states(nb, Es):
"""Check for dark states, throws a warning if it finds one."""
dark_state_indices = np.where(np.abs(np.imag(Es)) < 10 * np.spacing(1))
if len(dark_state_indices[0]) == 0:
return
import warnings
warnings.warn('The {} block contains {} dark state(s) with generalized eigenenergies: {}'.format(nb, len(dark_state_indices[0]), Es[dark_state_indices]))
def sigma(self, nb, phi=0):
"""
Local and quasi-local self energy
Parameters
----------
nb : int
number sector, number of bosons
phi : float
phase contribution in units of energy per length
"""
# Local systems have no phases
phi = 0 if self.local else phi
# Cache the local part
ckey = '{}-{}'.format(nb, phi)
if ckey not in self._cache['sigma']:
# cache the results
self._cache['sigma'][ckey] = Setup.sigma_local(self.model, self.channels + self.parasites, nb)
# Load local sigma from cache
sigmal = self._cache['sigma'][ckey]
# if it is only local: break off calculation here
if self.local:
return sigmal
# generate the additional quasi-local contribution to the self energy
# if nb == 0:
# sigmaql = np.zeros((1,))
# else:
sigmaql = Setup.sigma_quasi_local(self.model, self.channels, nb, phi)
# return local and quasi-local contribution to the self-energy
return sigmal + sigmaql
@staticmethod
def sigma_local(model, channels, nb):
"""
Computes the local self-energy
Parameters
----------
model : Model object
Model object
channels: List of Channel objects
Contains all channels (also )
nb : int
number of bosons/photons
"""
Gams = np.zeros((model.n, model.n), dtype=np.complex128)
# iterate over all sites
for channel in channels:
for n, sn in enumerate(channel.sites):
# diagonal elements
Gams[sn, sn] += - 1j * np.pi * np.abs(channel.strengths[n]) ** 2
for m, sm in enumerate(channel.sites[(n + 1):]):
# off-diagonal elements
if channel.xs[sn] == channel.xs[sm]:
Gams[sn, sm] += - 1j * np.pi * np.conjugate(channel.strengths[m]) * channel.strengths[n]
Gams[sm, sn] += - 1j * np.pi * np.conjugate(channel.strengths[n]) * channel.strengths[m]
# nb numbersector
ns = model.numbersector(nb)
# generate relevant hamiltonian
Ski, Skj, Skv = ns.hopping_hamiltonian(ns.basis, Gams, ns.basis.vs)
# construct dense matrix
Sigma = sparse.coo_matrix((Skv, (Ski, Skj)), shape=[ns.basis.len] * 2).tocsr()
return Sigma
@staticmethod
def sigma_quasi_local(model, channels, nb, phi=0):
"""
Quasi-local self-energy
Parameters
----------
model : Model object
Model object
channels: List of Channel objects
Contains all channels (also )
nb : int
number of bosons/photons
phi : float
phase contribution in units of energy per length
"""
Gams = np.zeros((model.n, model.n), dtype=np.complex128)
# iterate over all channels
for channel in channels:
# skip local channels
if channel.local is True:
continue
# iterate all couplings to this channel
for n, sn in enumerate(channel.sites):
posn = channel.positions[n]
gn = channel.strengths[n]
# iterate all couplings to this channel
for m, sm in enumerate(channel.sites):
posm = channel.positions[m]
gms = np.conjugate(channel.strengths[m])
# only one channel chirality contributes.
# I.e. "leave the model through one channel
# and return from a point further down that same channel".
if posn > posm:
Gams[sn, sm] += - 2 * 1j * np.pi * gms * gn * np.exp(1j * phi * (posn - posm))
# nb numbersector
ns = model.numbersector(nb)
# generate relevant hamiltonian
Ski, Skj, Skv = ns.hopping_hamiltonian(ns.basis, Gams, ns.basis.vs)
# construct dense matrix
Sigma = sparse.coo_matrix((Skv, (Ski, Skj)), shape=[ns.basis.len] * 2).tocsr()
return Sigma
def eigenenergies(self, nb, phi=0):
"""
Return a list of eigenenergies in a given number sector.
Parameters
----------
nb : int
Number of bosons in given number sector
phi: float
phase related to the photonic energy
"""
# no phases for local setups
phi = 0 if self.local else phi
# cache
ckey = '{}-{}'.format(nb, phi)
if ckey not in self._cache['eigen']:
self.eigenbasis(nb, phi)
# load cached result
return self._cache['eigen'][ckey][0]
def transition(self, ni, channel, nf, phi=0):
"""
Generalized transition matrix elements for a single channel
Parameters
----------
ni : int
initial charge sector
channel : int
channel index
nf : int
final number sector
phi : float
phase related to the photonic energy, applied to the channel
coupling constants and to the initial/final eigenbases
"""
# no phases for local setups
phi = 0 if self.local else phi
# cache
ckey = '{}-{}-{}-{}'.format(nf, channel, ni, phi)
if ckey not in self._cache['trans']:
# Effective coupling constant in front of b^\dagger
gt = self.channels[channel].gtilde(phi)
if nf < ni:
gt = np.conj(gt) # b = (b^dagger)^dagger
gen = (gt[i] * self.trsn(ni, self.channels[channel].sites[i], nf, phi) for i in range(self.channels[channel].n))
self._cache['trans'][ckey] = sum(gen)
return self._cache['trans'][ckey]
# gt = self.channels[channel].gtilde(phi)
# gen = (self.channels[channel].strengths[i] * self.trsn(ni, self.channels[channel].sites[i], nf) for i in xrange(len(self.channels[channel].sites)))
# self._cache['trans'][key] = sum(gen)
# return np.sum([gt[i] * self.trsn(ni, self.channels[channel].sites[i], nf, phi) for i in xrange(self.channels[channel].n)])
def trsn(self, ni, site, nf, phi=0):
"""
Bare transition matrix elements in the sites basis.
Parameters
----------
ni : int
initial charge sector
site : int
Model site index
nf : int
final number sector
phi : float
phase related to the photonic energy, applied to the initial
and final eigenbases
"""
# no phases in local setups
phi = 0 if self.local else phi
# cache
ckey = '{}-{}-{}-{}'.format(nf, site, ni, phi)
if ckey not in self._cache['trsn']:
# initial
nsi = self.model.numbersector(ni)
Ei, psiil, psiir = self.eigenbasis(ni, phi)
# final
nsf = self.model.numbersector(nf)
Ef, psifl, psifr = self.eigenbasis(nf, phi)
# transition
A = np.zeros((nsf.basis.len, nsi.basis.len), dtype=np.complex128)
for i in range(nsi.basis.len):
A[:, i] = psifl.conj().T.dot(
bosehubbard.transition(site, psiir[:, i], nsi.basis, nsf.basis)
)
self._cache['trsn'][ckey] = A
return self._cache['trsn'][ckey]
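def _example_setup():
    """Illustrative sketch (not part of the original module): a two-site
    scatterer coupled locally to two channels. With only local channels
    Setup.local is True and the phase argument phi is ignored. All parameter
    values are made up for illustration."""
    model = Model(Es=[0.0, 0.0], links=[[0, 1, 1.0]], Us=[0.0, 0.0])
    channels = [Channel(site=0, strength=0.1), Channel(site=1, strength=0.1)]
    setup = Setup(model, channels)
    assert setup.local is True
    E1 = setup.eigenenergies(1)    # complex generalized eigenenergies
    assert len(E1) == model.numbersector(1).basis.len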
# UTILITIES
def discrete_energies(E, dE, N=1024, WE=8.):
"""
Discretization of scattering energies.
Parameters
----------
E : float
Total two-particle energy
dE : float
Two-particle energy difference
N : int
Number of discretization points
WE : float
Half width of the scattering energy spectrum
"""
# Discrete energies that contains the "elastic" points:
# nu0=nu2 and nu0=nu3.
NdE = np.ceil(N / 2 / (dE / 2 + WE) * dE / 2)
Lq = N / 2 / NdE * dE / 2 if np.abs(dE) > 0 else WE
qs = np.linspace(-Lq, Lq, N, endpoint=False) + E / 2
return qs
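def _example_discrete_energies():
    """Illustrative sketch (not part of the original module): the returned grid
    is centred on E / 2 and, for dE != 0, laid out so that the elastic points
    E/2 +/- dE/2 fall on grid points. The values below are made up."""
    qs = discrete_energies(E=1.0, dE=0.2, N=1024, WE=8.)
    assert qs.shape == (1024,)
    assert np.any(np.isclose(qs, 0.5 + 0.1))    # nu0 = nu2 elastic point
    assert np.any(np.isclose(qs, 0.5 - 0.1))    # nu0 = nu3 elastic point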
|
{
"content_hash": "8deb0ab9a3a9e0d703af2a7e18f10332",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 160,
"avg_line_length": 32.79961089494164,
"alnum_prop": 0.5027581707100065,
"repo_name": "georglind/babusca",
"id": "cf01a79add88a2ec0ab46bc09dc6260ed1514425",
"size": "16859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scattering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "123567"
}
],
"symlink_target": ""
}
|
import os
APP_HOME = os.environ['GDD_HOME']
import sys
sys.path.append('%s/code' % APP_HOME)
import data_util as dutil
import argparse
### ATTENTION!!!! PLEASE PIPE THE OUTPUT OF THIS SCRIPT THROUGH sort | uniq !!! ###
### Doing it within python is a waste of resources. Linux does it much faster. ###
if __name__ == '__main__':
hpo_dag = dutil.read_hpo_dag()
parser = argparse.ArgumentParser()
parser.add_argument('--only-abnormalities', required=False, action="store_true")
args = parser.parse_args()
for line in sys.stdin:
toks = line.strip().split()
hpo_id = toks[0]
ensemble_gene = toks[1]
parent_ids = dutil.get_parents(hpo_id, hpo_dag) # includes the original hpo_id
assert hpo_id in parent_ids
if args.only_abnormalities:
if 'HP:0000118' not in parent_ids:
sys.stderr.write('"{0}": not a phenotypic abnormality\n'.format(hpo_id.strip()))
continue
parent_ids.remove('HP:0000118')
for parent_id in parent_ids:
sys.stdout.write('{0}\t{1}\n'.format(parent_id, ensemble_gene))
sys.stdout.flush()
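# Illustrative I/O sketch (not part of the original script; identifiers are
# made up). Each input line holds a whitespace-separated HPO term and gene id,
# e.g.
#
#   HP:0001250  ENSG00000123456
#
# and the script emits one tab-separated "<parent_id> <gene>" line for every
# parent returned by dutil.get_parents (the term itself included), e.g.
#
#   HP:0001250  ENSG00000123456
#   HP:0000118  ENSG00000123456   (dropped when --only-abnormalities is given)
#
# Duplicates across input lines are expected, hence the `sort | uniq` note above.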
|
{
"content_hash": "84d98a77341a1858eb24dc409a08b102",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 35.96666666666667,
"alnum_prop": 0.6617238183503243,
"repo_name": "HazyResearch/dd-genomics",
"id": "2e4404aced58ad49d97e67c5eeb49ee9b0f7714e",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onto/canonicalize_gene_phenotype.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "22186"
},
{
"name": "Java",
"bytes": "25863"
},
{
"name": "JavaScript",
"bytes": "10928"
},
{
"name": "Jupyter Notebook",
"bytes": "19968"
},
{
"name": "Python",
"bytes": "510253"
},
{
"name": "Shell",
"bytes": "196808"
}
],
"symlink_target": ""
}
|
def main(j, params, service, tags, tasklet):
"""
Create or update Alert object
"""
import time
eco = params.value
session = params.session
alertservice = j.core.osis.cmds._getOsisInstanceForCat('system', 'alert')
alerts = alertservice.search({'eco':eco['guid']}, session=session)[1:]
alert = {'eco': eco['guid'],
'errormessage': eco['errormessage'],
'errormessagePub': eco['errormessagePub'],
'category': eco['category'],
'gid': eco['gid'],
'nid': eco['nid'],
'lasttime': eco['lasttime']}
if not alerts:
alert['inittime'] = eco['epoch']
alert['state'] = 'ALERT'
alert['epoch'] = eco['lasttime']
alert['level'] = 1
alertobj = alertservice.new()
alertobj.load(alert)
alert = alertobj.dump()
else:
alertdata = alerts[0]
if alertdata['state'] in ['RESOLVED', 'CLOSED']:
alertdata['state'] = 'ALERT'
alertdata.update(alert)
alert = alertdata
alertservice.set(None, alert, session=session)
def match(j, params, service, tags, tasklet):
eco = params.value
return params.action == 'set' and eco['level'] < 3 # only critical and warning
|
{
"content_hash": "25b8d985c6d1ecdb78c7f5c9461ce02e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 33.89473684210526,
"alnum_prop": 0.5582298136645962,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "a26ec7f72ccd0ff7a8dc996013afb68c8d1f7964",
"size": "1288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/osis/logic/system/eco/tasklets/5_escalate.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
__author__ = 'benjaminwatson'
|
{
"content_hash": "4cb423c5a18897a83f849ab1cd0e07ad",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.6666666666666666,
"repo_name": "0xr0ot/lobotomy",
"id": "4330f3f111475c320ccbcefdea22f4d96ace15a5",
"size": "30",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "framework/brains/surgical/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "173"
},
{
"name": "HTML",
"bytes": "3966"
},
{
"name": "Python",
"bytes": "67822"
},
{
"name": "Shell",
"bytes": "765"
}
],
"symlink_target": ""
}
|
import base64
import webbrowser
from floo.common.handlers import floo_handler
from floo.common import msg, utils, shared as G
class AgentConnection(floo_handler.FlooHandler):
def __init__(self, owner, workspace, emacs_handler, auth, join_action):
super(AgentConnection, self).__init__(owner, workspace, auth, join_action)
self.emacs_handler = emacs_handler
def get_view_text_by_path(self, rel_path):
return self.emacs_handler.get_view_text_by_path(rel_path)
def stop(self):
super(AgentConnection, self).stop()
self.emacs_handler.stop()
def get_view(self, buf_id):
return self.emacs_handler.get_view(buf_id)
def ok_cancel_dialog(self, prompt, cb):
return self.emacs_handler.ui.user_y_or_n(self.emacs_handler, prompt, '', cb)
def to_emacs(self, name, data):
data['name'] = name
self.emacs_handler.send(data)
def stomp_prompt(self, changed_bufs, missing_bufs, new_files, ignored, cb):
def pluralize(arg):
return arg != 1 and 's' or ''
overwrite_local = ''
overwrite_remote = ''
missing = [buf['path'] for buf in missing_bufs]
changed = [buf['path'] for buf in changed_bufs]
to_remove = set(missing + ignored)
to_upload = set(new_files + changed).difference(to_remove)
to_fetch = changed + missing
to_upload_len = len(to_upload)
to_remove_len = len(to_remove)
remote_len = to_remove_len + to_upload_len
to_fetch_len = len(to_fetch)
msg.log('To fetch: ', ', '.join(to_fetch))
msg.log('To upload: ', ', '.join(to_upload))
msg.log('To remove: ', ', '.join(to_remove))
if not to_fetch:
overwrite_local = 'Fetch nothing'
elif to_fetch_len < 5:
overwrite_local = 'Fetch %s' % ', '.join(to_fetch)
else:
overwrite_local = 'Fetch %s file%s' % (to_fetch_len, pluralize(to_fetch_len))
if to_upload_len < 5:
to_upload_str = 'upload %s' % ', '.join(to_upload)
else:
to_upload_str = 'upload %s' % to_upload_len
if to_remove_len < 5:
to_remove_str = 'remove %s' % ', '.join(to_remove)
else:
to_remove_str = 'remove %s' % to_remove_len
if to_upload:
overwrite_remote += to_upload_str
if to_remove:
overwrite_remote += ' and '
if to_remove:
overwrite_remote += to_remove_str
if remote_len >= 5 and overwrite_remote:
overwrite_remote += ' files'
overwrite_remote = overwrite_remote.capitalize()
action = 'Overwrite'
# TODO: change action based on numbers of stuff
choices = [
'%s %s remote file%s (%s).' % (action, remote_len, pluralize(remote_len), overwrite_remote),
'%s %s local file%s (%s).' % (action, to_fetch_len, pluralize(to_fetch_len), overwrite_local),
'Cancel',
]
prompt = 'Your copy of %s/%s is out of sync. Do you want to:' % (self.owner, self.workspace)
self.emacs_handler.ui.user_select(self.emacs_handler, prompt, choices, None, lambda c, i: cb(i))
@utils.inlined_callbacks
def prompt_join_hangout(self, hangout_url):
join = yield self.ok_cancel_dialog, 'This workspace is being edited in a hangout. Would you like to join the hangout?'
if not join:
return
try:
webbrowser.open(hangout_url, new=2, autoraise=True)
except Exception as e:
msg.error("Couldn't open a browser: %s" % (str(e)))
def _on_room_info(self, data):
def send_room_info():
self.to_emacs('room_info', {
'perms': data['perms'],
'project_path': G.PROJECT_PATH,
'workspace_name': data['room_name']
})
self.once('room_info', send_room_info)
super(AgentConnection, self)._on_room_info(data)
def _on_create_buf(self, data):
if data['encoding'] == 'base64':
data['buf'] = base64.b64decode(data['buf'])
self.bufs[data['id']] = data
self.paths_to_ids[data['path']] = data['id']
abs_path = utils.get_full_path(data['path'])
self.to_emacs('create_buf', {
'full_path': utils.get_full_path(data['path']),
'path': data['path'],
'username': data.get('username', ''),
})
if abs_path not in self.emacs_handler.emacs_bufs:
utils.save_buf(data)
return
text = self.emacs_handler.emacs_bufs.get(abs_path)[0]
if text == data['buf']:
return
self.emacs_handler.bufs_changed.append(data['id'])
def _on_delete_buf(self, data):
buf_id = int(data['id'])
buf = self.bufs[buf_id]
path = buf['path']
try:
super(AgentConnection, self)._on_delete_buf(data)
except Exception as e:
msg.debug('Unable to delete buf %s: %s' % (path, str(e)))
else:
self.to_emacs('delete_buf', {
'full_path': utils.get_full_path(path),
'path': path,
'username': data.get('username', ''),
})
def _on_rename_buf(self, data):
# This can screw up if someone else renames the buffer around the same time as us. Oh well.
msg.debug('asdf %s' % data)
buf = self.get_buf_by_path(utils.get_full_path(data['old_path']))
if buf:
return super(AgentConnection, self)._on_rename_buf(data)
msg.debug('We already renamed %s. Skipping' % data['old_path'])
def highlight(self, user=None, **kwargs):
# Emacs stores highlight state separately, outside of python
if user is not None:
self.to_emacs('follow_user', {'username': user})
msg.log("Sent %s to emacs follow_user" % user)
def _on_highlight(self, data):
buf = self.bufs[data['id']]
# TODO: save highlights for when user opens the buffer in emacs
self.to_emacs('highlight', {
'full_path': utils.get_full_path(buf['path']),
'ranges': data['ranges'],
'user_id': data['user_id'],
'username': data.get('username', 'unknown user'),
'following': data.get('following', False),
'ping': data.get('ping', False)
})
def _on_msg(self, data):
msg.log('msg')
|
{
"content_hash": "9260d093ebee20e302adbe8f5466f6cf",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 126,
"avg_line_length": 36.610169491525426,
"alnum_prop": 0.5600308641975309,
"repo_name": "Floobits/floobits-emacs",
"id": "63971f869641ac53b59c8ed6ea3f7f039b9e57aa",
"size": "6480",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "floo/agent_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "31291"
},
{
"name": "Python",
"bytes": "241085"
}
],
"symlink_target": ""
}
|
from .decision import ClassifiedObject
from .decision import DecisionTreeGenerator
from .decision import DecisionTreeEnv
from .mutator import Mutator
|
{
"content_hash": "82fa6b82d4498430278365a5efa66ebe",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 43,
"avg_line_length": 37.5,
"alnum_prop": 0.8666666666666667,
"repo_name": "smcl/grf",
"id": "61b4ba420d3e69ad655e1cdb6b5951bf3213320b",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grf/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11586"
}
],
"symlink_target": ""
}
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("RandomForestClassifier" , "FourClass_10" , "hive")
|
{
"content_hash": "3df508ebe58b2ee24784be9bb211d2aa",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 37,
"alnum_prop": 0.7905405405405406,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "d95278c0c4d2afe980ae40358e31e0f49318fc45",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/FourClass_10/ws_FourClass_10_RandomForestClassifier_hive_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import uuid
from copy import deepcopy
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, models, transaction
from django.db.models import TimeField, UUIDField
from django.db.models.aggregates import (
Avg, Count, Max, Min, StdDev, Sum, Variance,
)
from django.db.models.expressions import (
F, Case, Col, Date, DateTime, Func, OrderBy, Random, RawSQL, Ref, Value,
When,
)
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from django.utils.timezone import utc
from .models import UUID, Company, Employee, Experiment, Number, Time
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
)
Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
)
Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann", salary=30)
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
def test_annotate_values_aggregate(self):
companies = Company.objects.annotate(
salaries=F('ceo__salary'),
).values('num_employees', 'salaries').aggregate(
result=Sum(F('salaries') + F('num_employees'),
output_field=models.IntegerField()),
)
self.assertEqual(companies['result'], 2395)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertQuerysetEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
lambda o: o
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertQuerysetEqual(
self.company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
lambda o: o
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_order_of_operations(self):
# Law of order of operations is followed
self.company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_parenthesis_priority(self):
# Law of order of operations can be overridden by parentheses
self.company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_update_with_fk(self):
# ForeignKey can become updated with the value of another ForeignKey.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: six.text_type(c.point_of_contact),
ordered=False
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [
None,
None,
],
lambda n: n.float,
ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F('ceo'))
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
with transaction.atomic():
with self.assertRaises(FieldError):
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(name="Test GmbH")
def test():
test_gmbh.point_of_contact = F("ceo")
self.assertRaises(ValueError, test)
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
self.assertRaises(FieldError, test_gmbh.save)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
test_gmbh = Company.objects.get(name="Test GmbH")
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
self.assertRaises(TypeError, acme.save)
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
@skipIfDBFeature('has_case_insensitive_like')
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertQuerysetEqual(
Employee.objects.filter(lastname__startswith=F('firstname')),
[e2], lambda x: x)
self.assertQuerysetEqual(
Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk'),
[e2, e3], lambda x: x)
def test_ticket_18375_join_reuse(self):
# Test that reverse multijoin F() references and the lookup target
# the same join. Pre #18375 the F() join was generated first, and the
# lookup couldn't reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
# The next query was dict-randomization dependent - if the "gte=1"
# was seen first, then the F() will reuse the join generated by the
# gte lookup, if F() was seen first, then it generated a join the
# other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
# Another similar case for F() than above. Now we have the same join
# in two filter kwargs, one in the lhs lookup, one in F. Here pre
# #18375 the amount of joins generated was random if dict
# randomization was enabled, that is the generated query dependent
# on which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# Test that F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
class ExpressionsTests(TestCase):
def test_F_object_deepcopy(self):
"""
Make sure F objects can be deepcopied (#23492)
"""
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_f_reuse(self):
f = F('id')
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
"""
Test that special characters (e.g. %, _ and \) stored in database are
properly escaped when using a pattern lookup with an expression
refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="John"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__contains=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__startswith=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__endswith=F('lastname')),
["<Employee: Jean-Claude Claude>"],
ordered=False)
def test_insensitive_patterns_escape(self):
"""
Test that special characters (e.g. %, _ and \) stored in database are
properly escaped when using a case insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="john"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__icontains=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__istartswith=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__iendswith=F('lastname')),
["<Employee: Jean-Claude claude>"],
ordered=False)
class ExpressionsNumericTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
We can fill a value in all objects with another value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
],
ordered=False
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_filter_not_equals_other_field(self):
"""
We can filter for objects where a value does not equal the value
of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk).update(
float=F('integer') + F('float') * 2), 1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
def test_incorrect_field_expression(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword u?'nope' into field.*"):
list(Employee.objects.filter(firstname=F('nope')))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15,
float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15,
float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2,
float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2,
float=F('float') ** 1.5)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'),
float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'),
float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'),
float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'),
float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'),
float=1.5 ** F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
def setUp(self):
sday = datetime.date(2010, 6, 25)
stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
self.deltas = []
self.delays = []
self.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
e0 = Experiment.objects.create(name='e0', assigned=sday, start=stime,
end=end, completed=end.date(), estimated_time=delta0)
self.deltas.append(delta0)
self.delays.append(e0.start -
datetime.datetime.combine(e0.assigned, midnight))
self.days_long.append(e0.completed - e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(name='e1', assigned=sday,
start=stime + delay, end=end, completed=end.date(), estimated_time=delta1)
self.deltas.append(delta1)
self.delays.append(e1.start -
datetime.datetime.combine(e1.assigned, midnight))
self.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(name='e2',
assigned=sday - datetime.timedelta(3), start=stime, end=end,
completed=end.date(), estimated_time=datetime.timedelta(hours=1))
self.deltas.append(delta2)
self.delays.append(e2.start -
datetime.datetime.combine(e2.assigned, midnight))
self.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(name='e3',
assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3)
self.deltas.append(delta3)
self.delays.append(e3.start -
datetime.datetime.combine(e3.assigned, midnight))
self.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(name='e4',
assigned=sday - datetime.timedelta(10), start=stime, end=end,
completed=end.date(), estimated_time=delta4 - datetime.timedelta(1))
self.deltas.append(delta4)
self.delays.append(e4.start -
datetime.datetime.combine(e4.assigned, midnight))
self.days_long.append(e4.completed - e4.assigned)
self.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lt=delta + F('start'))]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in
Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in
Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in
Experiment.objects.filter(assigned__gt=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(assigned__gte=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in
Experiment.objects.filter(start__lt=F('assigned') + delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__lte=F('assigned') + delay +
datetime.timedelta(1))]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [e.name for e in
Experiment.objects.filter(start=F('start') + F('estimated_time'))]
self.assertEqual(zeros, ['e0'])
end_less = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
self.assertEqual(end_less, ['e2'])
delta_math = [e.name for e in
Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1))]
self.assertEqual(delta_math, ['e4'])
@skipUnlessDBFeature("has_native_duration_field")
def test_date_subtraction(self):
under_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))]
self.assertEqual(under_estimate, ['e2'])
over_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))]
self.assertEqual(over_estimate, ['e4'])
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField()))
self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012'))
class ReprTests(TestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>"
)
self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
self.assertEqual(repr(Date('published', 'exact')), "Date(published, exact)")
self.assertEqual(repr(DateTime('published', 'exact', utc)), "DateTime(published, exact, %s)" % utc)
self.assertEqual(repr(F('published')), "F(published)")
self.assertEqual(repr(F('cost') + F('tax')), "<Expression: F(cost) + F(tax)>")
self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
self.assertEqual(repr(Random()), "Random()")
self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
self.assertEqual(repr(Value(1)), "Value(1)")
def test_functions(self):
self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length('a')), "Length(F(a))")
self.assertEqual(repr(Lower('a')), "Lower(F(a))")
self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper('a')), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg('a')), "Avg(F(a))")
self.assertEqual(repr(Count('a')), "Count(F(a), distinct=False)")
self.assertEqual(repr(Max('a')), "Max(F(a))")
self.assertEqual(repr(Min('a')), "Min(F(a))")
self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum('a')), "Sum(F(a))")
self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")
|
{
"content_hash": "558b08662356e338e1305e0e2badd1b1",
"timestamp": "",
"source": "github",
"line_count": 871,
"max_line_length": 112,
"avg_line_length": 40.954075774971294,
"alnum_prop": 0.5768271144627288,
"repo_name": "ojengwa/django-1",
"id": "59fd0631034921de39e816784bb9be8657fda58a",
"size": "35671",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/expressions/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43000"
},
{
"name": "Gettext Catalog",
"bytes": "9145447"
},
{
"name": "HTML",
"bytes": "168786"
},
{
"name": "JavaScript",
"bytes": "105614"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "10622535"
},
{
"name": "Shell",
"bytes": "3056"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import logging
from son_editor.app.database import db_session
from son_editor.app.exceptions import InvalidArgument
from son_editor.impl.private_catalogue_impl import publish_private_nsfs, query_private_nsfs
from son_editor.models.descriptor import Service, Function
from son_editor.models.project import Project
from son_editor.models.repository import Platform
from son_editor.models.workspace import Workspace
from son_editor.util.publishutil import pack_project, push_to_platform, deploy_on_platform
logger = logging.getLogger(__name__)
def publish_referenced_functions(ws_id, proj_id, descriptor):
vnfs = descriptor["network_functions"]
session = db_session()
for vnf in vnfs:
function = session.query(Function).join(Project). \
filter(Project.id == proj_id). \
filter(Function.name == vnf['vnf_name']). \
filter(Function.vendor == vnf["vnf_vendor"]). \
filter(Function.version == vnf["vnf_version"]).first()
publish_private_nsfs(ws_id, function.as_dict()["descriptor"], True)
def create_service_on_platform(ws_id, platform_id, service_data):
"""
Deploys the service on the referenced Platform
:param ws_id:
:param platform_id:
:param service_data:
:return: A message if the function was deployed successfully
"""
service_id = int(service_data['id'])
session = db_session()
try:
workspace = session.query(Workspace).filter(Workspace.id == ws_id).first()
project = session.query(Project). \
join(Service). \
filter(Project.services.any(Service.id == service_id)). \
filter(Project.workspace == workspace). \
first() # type: Project
if not len(project.services) == 1:
raise InvalidArgument(
"Project must have exactly one service "
"to push to platform. Number of services: {}".format(
len(project.services)))
platform = session.query(Platform).filter(Platform.id == platform_id). \
filter(Platform.workspace == workspace).first()
package_path = pack_project(project)
service_uuid = push_to_platform(package_path, platform)
logger.info("Pushed to platform: " + str(service_uuid))
message = deploy_on_platform(service_uuid, platform)
# deploy to private catalogue
service = project.services[0].as_dict()
publish_private_nsfs(ws_id, service["descriptor"], is_vnf=False)
publish_referenced_functions(ws_id, project.id, service["descriptor"])
return {'message': 'Deployed successfully: {}'.format(message)}
finally:
session.commit()
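# Illustrative call sketch, not part of the original module: the IDs below are
# placeholders for an existing workspace, platform and service in the database.
#
#   result = create_service_on_platform(ws_id=1, platform_id=2,
#                                        service_data={'id': 3})
#   print(result['message'])  # e.g. "Deployed successfully: <platform reply>"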
|
{
"content_hash": "513d1a7f77125fd08d8c4d82ff658d61",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 91,
"avg_line_length": 43.5,
"alnum_prop": 0.6588802373007044,
"repo_name": "Jmanuel4SandMan/upb-son-editor-backend",
"id": "f9fcabc215678c70b5e8f616b7e799ce197b31bc",
"size": "2697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/son_editor/impl/platform_connector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "788"
},
{
"name": "HTML",
"bytes": "1459"
},
{
"name": "Makefile",
"bytes": "611"
},
{
"name": "Nginx",
"bytes": "492"
},
{
"name": "Python",
"bytes": "246505"
},
{
"name": "Shell",
"bytes": "332"
}
],
"symlink_target": ""
}
|
import logging
from oidc_provider.lib.errors import *
from oidc_provider.lib.utils.params import *
from oidc_provider.lib.utils.token import *
from oidc_provider.models import *
logger = logging.getLogger(__name__)
class AuthorizeEndpoint(object):
def __init__(self, request):
self.request = request
self.params = Params()
# Because in this endpoint we handle both GET
# and POST request.
self.query_dict = (self.request.POST if self.request.method == 'POST'
else self.request.GET)
self._extract_params()
# Determine which flow to use.
if self.params.response_type in ['code']:
self.grant_type = 'authorization_code'
elif self.params.response_type in ['id_token', 'id_token token']:
self.grant_type = 'implicit'
self._extract_implicit_params()
else:
self.grant_type = None
def _extract_params(self):
"""
Get all the params used by the Authorization Code Flow
(and also for the Implicit).
See: http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest
"""
self.params.client_id = self.query_dict.get('client_id', '')
self.params.redirect_uri = self.query_dict.get('redirect_uri', '')
self.params.response_type = self.query_dict.get('response_type', '')
self.params.scope = self.query_dict.get('scope', '').split()
self.params.state = self.query_dict.get('state', '')
def _extract_implicit_params(self):
"""
Get specific params used by the Implicit Flow.
See: http://openid.net/specs/openid-connect-core-1_0.html#ImplicitAuthRequest
"""
self.params.nonce = self.query_dict.get('nonce', '')
def validate_params(self):
if not self.params.redirect_uri:
raise RedirectUriError()
if not ('openid' in self.params.scope):
raise AuthorizeError(
self.params.redirect_uri,
'invalid_scope',
self.grant_type)
try:
self.client = Client.objects.get(client_id=self.params.client_id)
if not (self.params.redirect_uri in self.client.redirect_uris):
raise RedirectUriError()
if not self.grant_type or not (self.params.response_type == self.client.response_type):
raise AuthorizeError(
self.params.redirect_uri,
'unsupported_response_type',
self.grant_type)
except Client.DoesNotExist:
raise ClientIdError()
def create_response_uri(self, allow):
if not allow:
raise AuthorizeError(
self.params.redirect_uri,
'access_denied',
self.grant_type)
try:
self.validate_params()
if self.grant_type == 'authorization_code':
code = create_code(
user=self.request.user,
client=self.client,
scope=self.params.scope)
code.save()
# Create the response uri.
uri = self.params.redirect_uri + '?code={0}'.format(code.code)
else: # Implicit Flow
id_token_dic = create_id_token(
user=self.request.user,
aud=self.client.client_id)
token = create_token(
user=self.request.user,
client=self.client,
id_token_dic=id_token_dic,
scope=self.params.scope)
# Store the token.
token.save()
id_token = encode_id_token(
id_token_dic, self.client.client_secret)
# Create the response uri.
uri = self.params.redirect_uri + \
'#token_type={0}&id_token={1}&expires_in={2}'.format(
'bearer',
id_token,
60 * 10,
)
# Check if response_type is 'id_token token' then
# add access_token to the fragment.
if self.params.response_type == 'id_token token':
uri += '&access_token={0}'.format(token.access_token)
except:
            logger.error('Authorization server error, grant_type: %s' % self.grant_type, extra={
                'redirect_uri': self.params.redirect_uri,
'state': self.params.state
})
raise AuthorizeError(
self.params.redirect_uri,
'server_error',
self.grant_type)
# Add state if present.
uri += ('&state={0}'.format(self.params.state) if self.params.state else '')
return uri
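# Illustrative usage sketch, not part of the original module: a hypothetical
# Django view that wires the endpoint together once the user has accepted the
# consent form. HttpResponseRedirect is standard Django; the view name and URL
# wiring are made up for demonstration.
#
#   from django.http import HttpResponseRedirect
#
#   def authorize_accept(request):
#       endpoint = AuthorizeEndpoint(request)
#       endpoint.validate_params()
#       uri = endpoint.create_response_uri(allow=True)
#       return HttpResponseRedirect(uri)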
|
{
"content_hash": "dcd426183db96bf5ebd6659ab4b91a60",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 99,
"avg_line_length": 33.40136054421769,
"alnum_prop": 0.5307535641547861,
"repo_name": "django-py/django-openid-provider",
"id": "2ea36656e4beccca558886141d89372c4f2a83dc",
"size": "4910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oidc_provider/lib/endpoints/authorize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10784"
},
{
"name": "Python",
"bytes": "66177"
}
],
"symlink_target": ""
}
|
"""Conversion tool from EDF+,BDF to FIF
"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD (3-clause)
import os
import calendar
import datetime
import re
import warnings
from math import ceil, floor
import numpy as np
from scipy.interpolate import interp1d
from ...utils import verbose, logger
from ..base import _BaseRaw, _check_update_montage
from ..meas_info import Info
from ..pick import pick_types
from ..constants import FIFF
from ...filter import resample
from ...externals.six.moves import zip
class RawEDF(_BaseRaw):
"""Raw object from EDF+,BDF file
Parameters
----------
input_fname : str
Path to the EDF+,BDF file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0).
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the electrodes in the
edf file. Default is None.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes in the
edf file. Default is None.
stim_channel : str | int | None
The channel name or channel index (starting at 0).
-1 corresponds to the last channel (default).
If None, there will be no stim channel added.
annot : str | None
Path to annotation file.
If None, no derived stim channel will be added (for files requiring
annotation file to interpret stim channel).
annotmap : str | None
Path to annotation map file containing mapping from label to trigger.
Must be specified if annot is not None.
tal_channel : int | None
The channel index (starting at 0).
Index of the channel containing EDF+ annotations.
-1 corresponds to the last channel.
If None, the annotation channel is not used.
Note: this is overruled by the annotation file if specified.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, montage, eog=None, misc=None,
stim_channel=-1, annot=None, annotmap=None, tal_channel=None,
preload=False, verbose=None):
logger.info('Extracting edf Parameters from %s...' % input_fname)
input_fname = os.path.abspath(input_fname)
self.info, self._edf_info = _get_edf_info(input_fname, stim_channel,
annot, annotmap, tal_channel,
eog, misc, preload)
logger.info('Creating Raw.info structure...')
_check_update_montage(self.info, montage)
if bool(annot) != bool(annotmap):
warnings.warn(("Stimulus Channel will not be annotated. "
"Both 'annot' and 'annotmap' must be specified."))
# Raw attributes
self.verbose = verbose
self.preload = False
self._filenames = list()
self._projector = None
self.first_samp = 0
self.last_samp = self._edf_info['nsamples'] - 1
self.comp = None # no compensation for EDF
self.proj = False
self._first_samps = np.array([self.first_samp])
self._last_samps = np.array([self.last_samp])
self._raw_lengths = np.array([self._edf_info['nsamples']])
self.rawdirs = np.array([])
self.cals = np.array([ch['cal'] for ch in self.info['chs']])
self.orig_format = 'int'
if preload:
self.preload = preload
logger.info('Reading raw data from %s...' % input_fname)
self._data, _ = self._read_segment()
assert len(self._data) == self.info['nchan']
# Add time info
self.first_samp, self.last_samp = 0, self._data.shape[1] - 1
self._times = np.arange(self.first_samp, self.last_samp + 1,
dtype=np.float64)
self._times /= self.info['sfreq']
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs'
% (self.first_samp, self.last_samp,
float(self.first_samp) / self.info['sfreq'],
float(self.last_samp) / self.info['sfreq']))
logger.info('Ready.')
def __repr__(self):
n_chan = self.info['nchan']
data_range = self.last_samp - self.first_samp + 1
s = ('%r' % os.path.basename(self.info['file_id']),
"n_channels x n_times : %s x %s" % (n_chan, data_range))
return "<RawEDF | %s>" % ', '.join(s)
def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
projector=None):
"""Read a chunk of raw data
Parameters
----------
start : int, (optional)
first sample to include (first is 0). If omitted, defaults to the
first sample in data.
stop : int, (optional)
First sample to not include.
If omitted, data is included to the end.
sel : array, optional
Indices of channels to select.
projector : array
SSP operator to apply to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data : array, [channels x samples]
the data matrix (channels x samples).
times : array, [samples]
returns the time values corresponding to the samples.
"""
if sel is None:
sel = list(range(self.info['nchan']))
elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
return (666, 666)
if projector is not None:
raise NotImplementedError('Currently does not handle projections.')
if stop is None:
stop = self.last_samp + 1
elif stop > self.last_samp + 1:
stop = self.last_samp + 1
# Initial checks
start = int(start)
stop = int(stop)
sfreq = self.info['sfreq']
n_chan = self.info['nchan']
data_size = self._edf_info['data_size']
data_offset = self._edf_info['data_offset']
stim_channel = self._edf_info['stim_channel']
tal_channel = self._edf_info['tal_channel']
annot = self._edf_info['annot']
annotmap = self._edf_info['annotmap']
# this is used to deal with indexing in the middle of a sampling period
blockstart = int(floor(float(start) / sfreq) * sfreq)
blockstop = int(ceil(float(stop) / sfreq) * sfreq)
if blockstop > self.last_samp:
blockstop = self.last_samp + 1
if start >= stop:
raise ValueError('No data in this range')
logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %
(start, stop - 1, start / float(sfreq),
(stop - 1) / float(sfreq)))
# gain constructor
physical_range = np.array([ch['range'] for ch in self.info['chs']])
cal = np.array([ch['cal'] for ch in self.info['chs']], float)
gains = np.atleast_2d(self._edf_info['units'] * (physical_range / cal))
with open(self.info['file_id'], 'rb') as fid:
# extract data
fid.seek(data_offset)
buffer_size = blockstop - blockstart
pointer = blockstart * n_chan * data_size
fid.seek(data_offset + pointer)
datas = np.zeros((n_chan, buffer_size), dtype=float)
blocks = int(ceil(float(buffer_size) / sfreq))
if 'n_samps' in self._edf_info:
n_samps = self._edf_info['n_samps']
# bdf data: 24bit data
if self._edf_info['subtype'] == '24BIT':
# loop over 10s increment to not tax the memory
buffer_step = int(sfreq * 10)
for k, block in enumerate(range(buffer_size, 0, -buffer_step)):
step = buffer_step
if block < step:
step = block
samp = int(step * n_chan * data_size)
blocks = int(ceil(float(step) / sfreq))
data = np.fromfile(fid, dtype=np.uint8, count=samp)
data = data.reshape(-1, 3).astype(np.int32)
# this converts to 24-bit little endian integer
# # no support in numpy
data = (data[:, 0] + (data[:, 1] << 8) +
(data[:, 2] << 16))
# 24th bit determines the sign
data[data >= (1 << 23)] -= (1 << 24)
data = data.reshape((int(sfreq), n_chan, blocks),
order='F')
for i in range(blocks):
start_pt = int((sfreq * i) + (k * buffer_step))
stop_pt = int(start_pt + sfreq)
datas[:, start_pt:stop_pt] = data[:, :, i].T
else:
# complicated edf: various sampling rates within file
if 'n_samps' in self._edf_info:
data = []
for i in range(blocks):
for samp in n_samps:
chan_data = np.fromfile(fid, dtype='<i2',
count=samp)
data.append(chan_data)
for j, samp in enumerate(n_samps):
chan_data = data[j::n_chan]
chan_data = np.hstack(chan_data)
if j == tal_channel:
# don't resample tal_channel,
# pad with zeros instead.
n_missing = int(sfreq - samp) * blocks
chan_data = np.hstack([chan_data, [0] * n_missing])
elif j == stim_channel and samp < sfreq:
if annot and annotmap or tal_channel is not None:
# don't bother with resampling the stim channel
# because it gets overwritten later on.
chan_data = np.zeros(sfreq)
else:
warnings.warn('Interpolating stim channel. '
'Events may jitter.')
oldrange = np.linspace(0, 1, samp * blocks + 1,
True)
newrange = np.linspace(0, 1, sfreq * blocks,
False)
chan_data = interp1d(oldrange,
np.append(chan_data, 0),
kind='zero')(newrange)
elif samp != sfreq:
mult = sfreq / samp
chan_data = resample(x=chan_data, up=mult,
down=1, npad=0)
stop_pt = chan_data.shape[0]
datas[j, :stop_pt] = chan_data
# simple edf
else:
data = np.fromfile(fid, dtype='<i2',
count=buffer_size * n_chan)
data = data.reshape((int(sfreq), n_chan, blocks),
order='F')
for i in range(blocks):
start_pt = int(sfreq * i)
stop_pt = int(start_pt + sfreq)
datas[:, start_pt:stop_pt] = data[:, :, i].T
datas *= gains.T
if stim_channel is not None:
if annot and annotmap:
datas[stim_channel] = 0
evts = _read_annot(annot, annotmap, sfreq, self.last_samp)
datas[stim_channel, :evts.size] = evts[start:stop]
elif tal_channel is not None:
evts = _parse_tal_channel(datas[tal_channel])
self._edf_info['events'] = evts
unique_annots = sorted(set([e[2] for e in evts]))
mapping = dict((a, n + 1) for n, a in enumerate(unique_annots))
datas[stim_channel] = 0
for t_start, t_duration, annotation in evts:
evid = mapping[annotation]
n_start = int(t_start * sfreq)
n_stop = int(t_duration * sfreq) + n_start - 1
# make sure events without duration get one sample
n_stop = n_stop if n_stop > n_start else n_start + 1
if any(datas[stim_channel][n_start:n_stop]):
raise NotImplementedError('EDF+ with overlapping '
'events not supported.')
datas[stim_channel][n_start:n_stop] = evid
else:
stim = np.array(datas[stim_channel], int)
mask = 255 * np.ones(stim.shape, int)
stim = np.bitwise_and(stim, mask)
datas[stim_channel] = stim
datastart = start - blockstart
datastop = stop - blockstart
datas = datas[sel, datastart:datastop]
logger.info('[done]')
times = np.arange(start, stop, dtype=float) / self.info['sfreq']
return datas, times
def _parse_tal_channel(tal_channel_data):
"""Parse time-stamped annotation lists (TALs) in stim_channel
and return list of events.
Parameters
----------
tal_channel_data : ndarray, shape = [n_samples]
channel data in EDF+ TAL format
Returns
-------
events : list
List of events. Each event contains [start, duration, annotation].
References
----------
http://www.edfplus.info/specs/edfplus.html#tal
"""
# convert tal_channel to an ascii string
tals = bytearray()
for s in tal_channel_data:
i = int(s)
tals.extend([i % 256, i // 256])
regex_tal = '([+-]\d+\.?\d*)(\x15(\d+\.?\d*))?(\x14.*?)\x14\x00'
tal_list = re.findall(regex_tal, tals.decode('ascii'))
events = []
for ev in tal_list:
onset = float(ev[0])
duration = float(ev[2]) if ev[2] else 0
for annotation in ev[3].split('\x14')[1:]:
if annotation:
events.append([onset, duration, annotation])
return events
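# Illustrative sketch, not part of the original module: packing a hypothetical
# TAL annotation ('Lights off' at t=180s, no duration) into the two-characters-
# per-sample layout that _parse_tal_channel expects, then parsing it back.
#
#   text = '+180\x14Lights off\x14\x00\x00'   # padded to an even length
#   samples = [ord(text[i]) + 256 * ord(text[i + 1])
#              for i in range(0, len(text), 2)]
#   _parse_tal_channel(samples)               # -> [[180.0, 0, 'Lights off']]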
def _get_edf_info(fname, stim_channel, annot, annotmap, tal_channel,
eog, misc, preload):
"""Extracts all the information from the EDF+,BDF file.
Parameters
----------
fname : str
Raw EDF+,BDF file to be read.
stim_channel : str | int | None
The channel name or channel index (starting at 0).
-1 corresponds to the last channel.
If None, there will be no stim channel added.
annot : str | None
Path to annotation file.
If None, no derived stim channel will be added (for files requiring
annotation file to interpret stim channel).
annotmap : str | None
Path to annotation map file containing mapping from label to trigger.
Must be specified if annot is not None.
tal_channel : int | None
The channel index (starting at 0).
Index of the channel containing EDF+ annotations.
-1 corresponds to the last channel.
If None, the annotation channel is not used.
Note: this is overruled by the annotation file if specified.
eog : list of str | None
Names of channels that should be designated EOG channels. Names should
correspond to the electrodes in the edf file. Default is None.
misc : list of str | None
Names of channels that should be designated MISC channels. Names
should correspond to the electrodes in the edf file. Default is None.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
Returns
-------
info : instance of Info
The measurement info.
edf_info : dict
A dict containing all the EDF+,BDF specific parameters.
"""
if eog is None:
eog = []
if misc is None:
misc = []
info = Info()
info['file_id'] = fname
# Add info for fif object
info['meas_id'] = None
info['projs'] = []
info['comps'] = []
info['bads'] = []
info['acq_pars'], info['acq_stim'] = None, None
info['filename'] = fname
info['ctf_head_t'] = None
info['dev_ctf_t'] = []
info['dig'] = None
info['dev_head_t'] = None
info['proj_id'] = None
info['proj_name'] = None
info['experimenter'] = None
info['line_freq'] = None
info['subject_info'] = None
edf_info = dict()
edf_info['annot'] = annot
edf_info['annotmap'] = annotmap
edf_info['events'] = []
with open(fname, 'rb') as fid:
assert(fid.tell() == 0)
fid.seek(8)
_ = fid.read(80).strip().decode() # subject id
_ = fid.read(80).strip().decode() # recording id
day, month, year = [int(x) for x in re.findall('(\d+)',
fid.read(8).decode())]
hour, minute, sec = [int(x) for x in re.findall('(\d+)',
fid.read(8).decode())]
date = datetime.datetime(year + 2000, month, day, hour, minute, sec)
info['meas_date'] = calendar.timegm(date.utctimetuple())
edf_info['data_offset'] = header_nbytes = int(fid.read(8).decode())
subtype = fid.read(44).strip().decode()[:5]
edf_info['subtype'] = subtype
edf_info['n_records'] = n_records = int(fid.read(8).decode())
# record length in seconds
edf_info['record_length'] = record_length = float(fid.read(8).decode())
info['nchan'] = nchan = int(fid.read(4).decode())
channels = list(range(info['nchan']))
ch_names = [fid.read(16).strip().decode() for _ in channels]
_ = [fid.read(80).strip().decode() for _ in channels] # transducer
units = [fid.read(8).strip().decode() for _ in channels]
for i, unit in enumerate(units):
if unit == 'uV':
units[i] = 1e-6
else:
units[i] = 1
edf_info['units'] = units
physical_min = np.array([float(fid.read(8).decode())
for _ in channels])
physical_max = np.array([float(fid.read(8).decode())
for _ in channels])
digital_min = np.array([float(fid.read(8).decode())
for _ in channels])
digital_max = np.array([float(fid.read(8).decode())
for _ in channels])
prefiltering = [fid.read(80).strip().decode() for _ in channels][:-1]
highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
for filt in prefiltering])
lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
for filt in prefiltering])
high_pass_default = 0.
if highpass.size == 0:
info['highpass'] = high_pass_default
elif all(highpass):
if highpass[0] == 'NaN':
info['highpass'] = high_pass_default
elif highpass[0] == 'DC':
info['highpass'] = 0.
else:
info['highpass'] = float(highpass[0])
else:
info['highpass'] = float(np.min(highpass))
            warnings.warn('%s' % ('Channels contain different highpass '
                                  'filters. Highest filter setting will '
                                  'be stored.'))
if lowpass.size == 0:
info['lowpass'] = None
elif all(lowpass):
if lowpass[0] == 'NaN':
info['lowpass'] = None
else:
info['lowpass'] = float(lowpass[0])
else:
info['lowpass'] = float(np.min(lowpass))
warnings.warn('%s' % ('Channels contain different lowpass filters.'
' Lowest filter setting will be stored.'))
# number of samples per record
n_samps = np.array([int(fid.read(8).decode()) for _ in channels])
if np.unique(n_samps).size != 1:
edf_info['n_samps'] = n_samps
if not preload:
                raise RuntimeError('%s' % ('Channels contain different '
'sampling rates. '
'Must set preload=True'))
fid.read(32 * info['nchan']).decode() # reserved
assert fid.tell() == header_nbytes
physical_ranges = physical_max - physical_min
cals = digital_max - digital_min
# Some keys to be consistent with FIF measurement info
info['description'] = None
info['buffer_size_sec'] = 10.
info['orig_blocks'] = None
if edf_info['subtype'] == '24BIT':
edf_info['data_size'] = 3 # 24-bit (3 byte) integers
else:
edf_info['data_size'] = 2 # 16-bit (2 byte) integers
# Creates a list of dicts of eeg channels for raw.info
logger.info('Setting channel info structure...')
info['chs'] = []
info['ch_names'] = ch_names
if stim_channel == -1:
stim_channel = info['nchan'] - 1
for idx, ch_info in enumerate(zip(ch_names, physical_ranges, cals)):
ch_name, physical_range, cal = ch_info
chan_info = {}
chan_info['cal'] = cal
chan_info['logno'] = idx + 1
chan_info['scanno'] = idx + 1
chan_info['range'] = physical_range
chan_info['unit_mul'] = 0.
chan_info['ch_name'] = ch_name
chan_info['unit'] = FIFF.FIFF_UNIT_V
chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
chan_info['kind'] = FIFF.FIFFV_EEG_CH
chan_info['eeg_loc'] = np.zeros(3)
chan_info['loc'] = np.zeros(12)
if ch_name in eog or idx in eog or idx - nchan in eog:
chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
chan_info['kind'] = FIFF.FIFFV_EOG_CH
if ch_name in misc or idx in misc or idx - nchan in misc:
chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
chan_info['kind'] = FIFF.FIFFV_MISC_CH
check1 = stim_channel == ch_name
check2 = stim_channel == idx
check3 = info['nchan'] > 1
stim_check = np.logical_and(np.logical_or(check1, check2), check3)
if stim_check:
chan_info['range'] = 1
chan_info['cal'] = 1
chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
chan_info['unit'] = FIFF.FIFF_UNIT_NONE
chan_info['kind'] = FIFF.FIFFV_STIM_CH
chan_info['ch_name'] = 'STI 014'
info['ch_names'][idx] = chan_info['ch_name']
if isinstance(stim_channel, str):
stim_channel = idx
info['chs'].append(chan_info)
edf_info['stim_channel'] = stim_channel
# sfreq defined as the max sampling rate of eeg
picks = pick_types(info, meg=False, eeg=True)
info['sfreq'] = n_samps[picks].max() / float(record_length)
edf_info['nsamples'] = int(n_records * info['sfreq'])
if info['lowpass'] is None:
info['lowpass'] = info['sfreq'] / 2.
# TODO: automatic detection of the tal_channel?
if tal_channel == -1:
edf_info['tal_channel'] = info['nchan'] - 1
else:
edf_info['tal_channel'] = tal_channel
if tal_channel and not preload:
raise RuntimeError('%s' % ('EDF+ Annotations (TAL) channel needs to be'
                                   ' parsed completely on loading. '
'Must set preload=True'))
return info, edf_info
def _read_annot(annot, annotmap, sfreq, data_length):
"""Annotation File Reader
Parameters
----------
annot : str
Path to annotation file.
annotmap : str
Path to annotation map file containing mapping from label to trigger.
sfreq : float
Sampling frequency.
data_length : int
Length of the data file.
Returns
-------
stim_channel : ndarray
An array containing stimulus trigger events.
"""
pat = '([+/-]\d+.\d+),(\w+)'
annot = open(annot).read()
triggers = re.findall(pat, annot)
times, values = zip(*triggers)
times = [float(time) * sfreq for time in times]
pat = '(\w+):(\d+)'
annotmap = open(annotmap).read()
mappings = re.findall(pat, annotmap)
maps = {}
for mapping in mappings:
maps[mapping[0]] = mapping[1]
triggers = [int(maps[value]) for value in values]
stim_channel = np.zeros(data_length)
for time, trigger in zip(times, triggers):
        stim_channel[int(time)] = trigger
return stim_channel
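# Illustrative sketch, not part of the original module: hypothetical file
# contents matching the two regexes above, and a call using them. Each annot
# line is "<onset in seconds>,<label>"; each annotmap line is "<label>:<trigger>".
#
#   annot file line:     +1.50,stimulus
#   annotmap file line:  stimulus:1
#
#   events = _read_annot('annotations.txt', 'annotmap.txt',
#                        sfreq=256., data_length=2560)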
def read_raw_edf(input_fname, montage=None, eog=None, misc=None,
stim_channel=-1, annot=None, annotmap=None, tal_channel=None,
preload=False, verbose=None):
"""Reader function for EDF+, BDF conversion to FIF
Parameters
----------
input_fname : str
Path to the EDF+,BDF file.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0).
eog : list or tuple
Names of channels or list of indices that should be designated
EOG channels. Values should correspond to the electrodes in the
edf file. Default is None.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes in the
edf file. Default is None.
stim_channel : str | int | None
The channel name or channel index (starting at 0).
-1 corresponds to the last channel (default).
If None, there will be no stim channel added.
annot : str | None
Path to annotation file.
If None, no derived stim channel will be added (for files requiring
annotation file to interpret stim channel).
annotmap : str | None
Path to annotation map file containing mapping from label to trigger.
Must be specified if annot is not None.
tal_channel : int | None
The channel index (starting at 0).
Index of the channel containing EDF+ annotations.
-1 corresponds to the last channel.
If None, the annotation channel is not used.
Note: this is overruled by the annotation file if specified.
preload : bool
If True, all data are loaded at initialization.
If False, data are not read until save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawEDF(input_fname=input_fname, montage=montage, eog=eog, misc=misc,
stim_channel=stim_channel, annot=annot, annotmap=annotmap,
tal_channel=tal_channel, preload=preload, verbose=verbose)
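# Illustrative usage sketch, not part of the original module; the file name is a
# placeholder and the slicing assumes the usual indexing interface inherited
# from _BaseRaw.
#
#   raw = read_raw_edf('recording.bdf', stim_channel=-1, preload=True)
#   data, times = raw[:, :1000]   # first 1000 samples of every channel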
|
{
"content_hash": "b084cc9070c2460754d4007d2d677133",
"timestamp": "",
"source": "github",
"line_count": 683,
"max_line_length": 79,
"avg_line_length": 40.825768667642755,
"alnum_prop": 0.5322407115191508,
"repo_name": "effigies/mne-python",
"id": "a3e535b691af02dd3106f69fc5063479a0dfec9a",
"size": "27884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/io/edf/edf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16734"
},
{
"name": "Makefile",
"bytes": "3645"
},
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "3718090"
},
{
"name": "Shell",
"bytes": "4057"
}
],
"symlink_target": ""
}
|
import ape.lib.basecommand
import os
import argparse
class Command(ape.lib.basecommand.BaseCommand):
prog_name = 'hello'
prog_description = 'Speak to an AI.'
def add_arguments(self):
self.parser.add_argument('bot',
default = 'chimpbot',
help = "Name of the bot to talk to. Available bots: chimpbot (default), eliza.",
nargs="?")
def call(self):
"""Start a conversation with a bot."""
bot = self.args.bot
if bot == "chimpbot":
print("\nType 'quit' to exit.\n")
from ape.lib.chimpbot import Chimpbot
bot = Chimpbot(os.path.dirname(os.path.realpath(__file__)) + '/../../data/default_dict.dat')
bot.talk()
# chimpbot.talk()
elif bot == 'eliza':
print("\nType '(quit)' to exit.\n")
import ape.lib.eliza
ape.lib.eliza.start()
else:
print('Who?')
if __name__ == "__main__":
Command().run('')
|
{
"content_hash": "35dbe165476d0fb3bb5d128a59db9566",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 104,
"avg_line_length": 25.70731707317073,
"alnum_prop": 0.5104364326375711,
"repo_name": "mrchimp/ape",
"id": "4323e96fd2846f3d36dc6b5ae765bbaadff21306",
"size": "1055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ape/commands/hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45941"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
__version__=''' $Id $ '''
__doc__='''Superclass for renderers to factor out common functionality and default implementations.'''
from reportlab.graphics.shapes import *
from reportlab.lib.validators import DerivedValue
from reportlab import rl_config
def inverse(A):
"For A affine 2D represented as 6vec return 6vec version of A**(-1)"
# I checked this RGB
det = float(A[0]*A[3] - A[2]*A[1])
R = [A[3]/det, -A[1]/det, -A[2]/det, A[0]/det]
return tuple(R+[-R[0]*A[4]-R[2]*A[5],-R[1]*A[4]-R[3]*A[5]])
def mmult(A, B):
"A postmultiplied by B"
# I checked this RGB
# [a0 a2 a4] [b0 b2 b4]
# [a1 a3 a5] * [b1 b3 b5]
# [ 1 ] [ 1 ]
#
return (A[0]*B[0] + A[2]*B[1],
A[1]*B[0] + A[3]*B[1],
A[0]*B[2] + A[2]*B[3],
A[1]*B[2] + A[3]*B[3],
A[0]*B[4] + A[2]*B[5] + A[4],
A[1]*B[4] + A[3]*B[5] + A[5])
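# A quick illustrative check, not part of the original module: composing an
# affine 6-vector with its inverse should give back the identity transform.
#
#   A = (2, 0, 0, 2, 5, 7)         # scale by 2, then translate by (5, 7)
#   mmult(A, inverse(A))           # -> (1.0, 0.0, 0.0, 1.0, 0.0, 0.0)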
def getStateDelta(shape):
"""Used to compute when we need to change the graphics state.
For example, if we have two adjacent red shapes we don't need
to set the pen color to red in between. Returns the effect
the given shape would have on the graphics state"""
delta = {}
for prop, value in shape.getProperties().items():
if prop in STATE_DEFAULTS:
delta[prop] = value
return delta
class StateTracker:
"""Keeps a stack of transforms and state
properties. It can contain any properties you
want, but the keys 'transform' and 'ctm' have
special meanings. The getCTM()
method returns the current transformation
matrix at any point, without needing to
invert matrixes when you pop."""
def __init__(self, defaults=None):
# one stack to keep track of what changes...
self._deltas = []
# and another to keep track of cumulative effects. Last one in
# list is the current graphics state. We put one in to simplify
# loops below.
self._combined = []
if defaults is None:
defaults = STATE_DEFAULTS.copy()
#ensure that if we have a transform, we have a CTM
if 'transform' in defaults:
defaults['ctm'] = defaults['transform']
self._combined.append(defaults)
def push(self,delta):
"""Take a new state dictionary of changes and push it onto
the stack. After doing this, the combined state is accessible
through getState()"""
newstate = self._combined[-1].copy()
for key, value in delta.items():
if key == 'transform': #do cumulative matrix
newstate['transform'] = delta['transform']
newstate['ctm'] = mmult(self._combined[-1]['ctm'], delta['transform'])
#print 'statetracker transform = (%0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f)' % tuple(newstate['transform'])
#print 'statetracker ctm = (%0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f)' % tuple(newstate['ctm'])
else: #just overwrite it
newstate[key] = value
self._combined.append(newstate)
self._deltas.append(delta)
def pop(self):
"""steps back one, and returns a state dictionary with the
deltas to reverse out of wherever you are. Depending
on your back end, you may not need the return value,
since you can get the complete state afterwards with getState()"""
del self._combined[-1]
newState = self._combined[-1]
lastDelta = self._deltas[-1]
del self._deltas[-1]
#need to diff this against the last one in the state
reverseDelta = {}
#print 'pop()...'
for key, curValue in lastDelta.items():
#print ' key=%s, value=%s' % (key, curValue)
prevValue = newState[key]
if prevValue != curValue:
#print ' state popping "%s"="%s"' % (key, curValue)
if key == 'transform':
reverseDelta[key] = inverse(lastDelta['transform'])
else: #just return to previous state
reverseDelta[key] = prevValue
return reverseDelta
def getState(self):
"returns the complete graphics state at this point"
return self._combined[-1]
def getCTM(self):
"returns the current transformation matrix at this point"""
return self._combined[-1]['ctm']
def __getitem__(self,key):
"returns the complete graphics state value of key at this point"
return self._combined[-1][key]
def __setitem__(self,key,value):
"sets the complete graphics state value of key to value"
self._combined[-1][key] = value
def testStateTracker():
print('Testing state tracker')
defaults = {'fillColor':None, 'strokeColor':None,'fontName':None, 'transform':[1,0,0,1,0,0]}
from reportlab.graphics.shapes import _baseGFontName
deltas = [
{'fillColor':'red'},
{'fillColor':'green', 'strokeColor':'blue','fontName':_baseGFontName},
{'transform':[0.5,0,0,0.5,0,0]},
{'transform':[0.5,0,0,0.5,2,3]},
{'strokeColor':'red'}
]
st = StateTracker(defaults)
print('initial:', st.getState())
print()
for delta in deltas:
print('pushing:', delta)
st.push(delta)
print('state: ',st.getState(),'\n')
for delta in deltas:
print('popping:',st.pop())
print('state: ',st.getState(),'\n')
def _expandUserNode(node,canvas):
if isinstance(node, UserNode):
try:
if hasattr(node,'_canvas'):
ocanvas = 1
else:
node._canvas = canvas
ocanvas = None
onode = node
node = node.provideNode()
finally:
if not ocanvas: del onode._canvas
return node
def renderScaledDrawing(d):
renderScale = d.renderScale
if renderScale!=1.0:
o = d
d = d.__class__(o.width*renderScale,o.height*renderScale)
d.__dict__ = o.__dict__.copy()
d.scale(renderScale,renderScale)
d.renderScale = 1.0
return d
class Renderer:
"""Virtual superclass for graphics renderers."""
def __init__(self):
self._tracker = StateTracker()
self._nodeStack = [] #track nodes visited
def undefined(self, operation):
raise ValueError("%s operation not defined at superclass class=%s" %(operation, self.__class__))
def draw(self, drawing, canvas, x=0, y=0, showBoundary=rl_config._unset_):
"""This is the top level function, which draws the drawing at the given
location. The recursive part is handled by drawNode."""
#stash references for ease of communication
if showBoundary is rl_config._unset_: showBoundary=rl_config.showBoundary
self._canvas = canvas
canvas.__dict__['_drawing'] = self._drawing = drawing
drawing._parent = None
try:
#bounding box
if showBoundary: canvas.rect(x, y, drawing.width, drawing.height)
canvas.saveState()
self.initState(x,y) #this is the push()
self.drawNode(drawing)
self.pop()
canvas.restoreState()
finally:
#remove any circular references
del self._canvas, self._drawing, canvas._drawing, drawing._parent
def initState(self,x,y):
deltas = STATE_DEFAULTS.copy()
deltas['transform'] = [1,0,0,1,x,y]
self._tracker.push(deltas)
self.applyStateChanges(deltas, {})
def pop(self):
self._tracker.pop()
def drawNode(self, node):
"""This is the recursive method called for each node
in the tree"""
# Undefined here, but with closer analysis probably can be handled in superclass
self.undefined("drawNode")
def getStateValue(self, key):
"""Return current state parameter for given key"""
currentState = self._tracker._combined[-1]
return currentState[key]
def fillDerivedValues(self, node):
"""Examine a node for any values which are Derived,
and replace them with their calculated values.
Generally things may look at the drawing or their
parent.
"""
for key, value in node.__dict__.items():
if isinstance(value, DerivedValue):
#just replace with default for key?
#print ' fillDerivedValues(%s)' % key
newValue = value.getValue(self, key)
#print ' got value of %s' % newValue
node.__dict__[key] = newValue
def drawNodeDispatcher(self, node):
"""dispatch on the node's (super) class: shared code"""
canvas = getattr(self,'_canvas',None)
# replace UserNode with its contents
try:
node = _expandUserNode(node,canvas)
if not node: return
if hasattr(node,'_canvas'):
ocanvas = 1
else:
node._canvas = canvas
ocanvas = None
self.fillDerivedValues(node)
dtcb = getattr(node,'_drawTimeCallback',None)
if dtcb:
dtcb(node,canvas=canvas,renderer=self)
#draw the object, or recurse
if isinstance(node, Line):
self.drawLine(node)
elif isinstance(node, Image):
self.drawImage(node)
elif isinstance(node, Rect):
self.drawRect(node)
elif isinstance(node, Circle):
self.drawCircle(node)
elif isinstance(node, Ellipse):
self.drawEllipse(node)
elif isinstance(node, PolyLine):
self.drawPolyLine(node)
elif isinstance(node, Polygon):
self.drawPolygon(node)
elif isinstance(node, Path):
self.drawPath(node)
elif isinstance(node, String):
self.drawString(node)
elif isinstance(node, Group):
self.drawGroup(node)
elif isinstance(node, Wedge):
self.drawWedge(node)
else:
print('DrawingError','Unexpected element %s in drawing!' % str(node))
finally:
if not ocanvas: del node._canvas
_restores = {'stroke':'_stroke','stroke_width': '_lineWidth','stroke_linecap':'_lineCap',
'stroke_linejoin':'_lineJoin','fill':'_fill','font_family':'_font',
'font_size':'_fontSize'}
def drawGroup(self, group):
# just do the contents. Some renderers might need to override this
# if they need a flipped transform
canvas = getattr(self,'_canvas',None)
for node in group.getContents():
node = _expandUserNode(node,canvas)
if not node: continue
#here is where we do derived values - this seems to get everything. Touch wood.
self.fillDerivedValues(node)
try:
if hasattr(node,'_canvas'):
ocanvas = 1
else:
node._canvas = canvas
ocanvas = None
node._parent = group
self.drawNode(node)
finally:
del node._parent
if not ocanvas: del node._canvas
def drawWedge(self, wedge):
# by default ask the wedge to make a polygon of itself and draw that!
#print "drawWedge"
P = wedge.asPolygon()
if isinstance(P,Path):
self.drawPath(P)
else:
self.drawPolygon(P)
def drawPath(self, path):
polygons = path.asPolygons()
for polygon in polygons:
self.drawPolygon(polygon)
def drawRect(self, rect):
# could be implemented in terms of polygon
self.undefined("drawRect")
def drawLine(self, line):
self.undefined("drawLine")
def drawCircle(self, circle):
self.undefined("drawCircle")
def drawPolyLine(self, p):
self.undefined("drawPolyLine")
def drawEllipse(self, ellipse):
self.undefined("drawEllipse")
def drawPolygon(self, p):
self.undefined("drawPolygon")
def drawString(self, stringObj):
self.undefined("drawString")
def applyStateChanges(self, delta, newState):
"""This takes a set of states, and outputs the operators
needed to set those properties"""
self.undefined("applyStateChanges")
if __name__=='__main__':
print("this file has no script interpretation")
print(__doc__)
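# Illustrative sketch, not part of the original module: the minimal shape of a
# concrete renderer built on this base class. The body is a placeholder showing
# the push/dispatch/pop pattern; a real renderer would also call
# applyStateChanges() with the pushed delta and implement the draw* primitives.
#
#   class ExampleRenderer(Renderer):
#       def drawNode(self, node):
#           self._tracker.push(getStateDelta(node))
#           self.drawNodeDispatcher(node)
#           self.pop()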
|
{
"content_hash": "92c326286375aedda6d6cadd6d73988f",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 123,
"avg_line_length": 35.90934844192635,
"alnum_prop": 0.5686336383717261,
"repo_name": "Distrotech/reportlab",
"id": "64a64cc8255463c7d79e33b3154e771007af0b90",
"size": "12869",
"binary": false,
"copies": "29",
"ref": "refs/heads/master",
"path": "src/reportlab/graphics/renderbase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "721758"
},
{
"name": "C++",
"bytes": "668"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2988317"
},
{
"name": "Shell",
"bytes": "2506"
}
],
"symlink_target": ""
}
|
import os
import logging.config
import pkg_resources
import enum
from functools import wraps
import pytoml as toml
import click
from .settings import settings
from .connect import Reddit # noqa: F401
__version__ = '0.1.0'
def _setup_logging():
try:
with open('logging.toml') as f:
raw = f.read()
except FileNotFoundError:
try:
with open(os.path.expanduser('~/.config/ion/logging.toml')) as f:
raw = f.read()
except FileNotFoundError:
raw = pkg_resources.resource_string(__name__, 'data/logging.toml')
conf = toml.loads(raw)
logging.config.dictConfig(conf)
_setup_logging()
# that's all of em. I'm sure we can trim it down a bit
OAUTH_SCOPE = [
'account',
'creddits',
'edit',
'flair',
'history',
'identity',
'livemanage',
'modconfig',
'modcontributors',
'modflair',
'modlog',
'modothers',
'modposts',
'modself',
'modwiki',
'mysubreddits',
'privatemessages',
'read',
'report',
'save',
'submit',
'subscribe',
'vote',
'wikiedit',
'wikiread',
]
@enum.unique
class AcceptFlag(enum.Enum):
OK = 1
OK_IF_WHITELISTED = 2
BAD = 3
def _update_settings(ctx, param, value):
for p in value:
settings.load(p)
def command(f):
@wraps(f)
def wrapper(*args, **kwargs):
g = click.command()(
click.option(
'-c', '--config', help='additional configuration file',
multiple=True, is_eager=True, expose_value=False,
callback=_update_settings, type=click.Path(exists=True))(f))
return g(*args, **kwargs)
return wrapper
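# Illustrative usage sketch, not part of the original module: a hypothetical CLI
# entry point built with the ``command`` decorator above, which layers the shared
# '-c/--config' option on top of whatever click options the function declares.
#
#   @command
#   @click.option('--subreddit', default='all')
#   def scan(subreddit):
#       click.echo('scanning ' + subreddit)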
|
{
"content_hash": "1f3e8b0a8016e760df4fc02bf68a8b43",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 78,
"avg_line_length": 20.152941176470588,
"alnum_prop": 0.5808523058960887,
"repo_name": "amici-ursi/ImagesOfNetwork",
"id": "5391333728a99dd841f997abb1d4e97b2470f6f1",
"size": "1713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "images_of/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67132"
}
],
"symlink_target": ""
}
|
"""Tests for converters for producing compute counter messages from
notification events.
"""
import copy
from oslotest import base
from ceilometer.compute.notifications import cpu
METRICS_UPDATE = {
u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650',
u'_context_quota_class': None,
u'event_type': u'compute.metrics.update',
u'_context_service_catalog': [],
u'_context_auth_token': None,
u'_context_user_id': None,
u'payload': {
u'metrics': [
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.frequency', 'value': 1600,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.user.time', 'value': 17421440000000L,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.kernel.time', 'value': 7852600000000L,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.idle.time', 'value': 1307374400000000L,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.iowait.time', 'value': 11697470000000L,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.user.percent', 'value': 0.012959045637294348,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.kernel.percent', 'value': 0.005841204961898534,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.idle.percent', 'value': 0.9724985141658965,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.iowait.percent', 'value': 0.008701235234910634,
'source': 'libvirt.LibvirtDriver'},
{'timestamp': u'2013-07-29T06:51:34.472416',
'name': 'cpu.percent', 'value': 0.027501485834103515,
'source': 'libvirt.LibvirtDriver'}],
u'nodename': u'tianst.sh.intel.com',
u'host': u'tianst',
u'host_id': u'10.0.1.1'},
u'priority': u'INFO',
u'_context_is_admin': True,
u'_context_user': None,
u'publisher_id': u'compute.tianst.sh.intel.com',
u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee',
u'_context_remote_address': None,
u'_context_roles': [],
u'timestamp': u'2013-07-29 06:51:34.474815',
u'_context_timestamp': u'2013-07-29T06:51:34.348091',
u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2',
u'_context_project_name': None,
u'_context_read_deleted': u'no',
u'_context_tenant': None,
u'_context_instance_lock_checked': False,
u'_context_project_id': None,
u'_context_user_name': None
}
RES_ID = '%s_%s' % (METRICS_UPDATE['payload']['host'],
METRICS_UPDATE['payload']['nodename'])
class TestMetricsNotifications(base.BaseTestCase):
def _process_notification(self, ic):
self.assertIn(METRICS_UPDATE['event_type'],
ic.event_types)
samples = list(ic.process_notification(METRICS_UPDATE))
self.assertEqual(RES_ID, samples[0].resource_id)
return samples[0]
def test_compute_metrics(self):
ERROR_METRICS = copy.copy(METRICS_UPDATE)
ERROR_METRICS['payload'] = {"metric_err": []}
ic = cpu.CpuFrequency(None)
info = ic._get_sample(METRICS_UPDATE, 'cpu.frequency')
info_none = ic._get_sample(METRICS_UPDATE, 'abc.efg')
info_error = ic._get_sample(ERROR_METRICS, 'cpu.frequency')
self.assertEqual('cpu.frequency', info['payload']['name'])
self.assertIsNone(info_none)
self.assertIsNone(info_error)
def test_compute_cpu_frequency(self):
c = self._process_notification(cpu.CpuFrequency(None))
self.assertEqual('compute.node.cpu.frequency', c.name)
self.assertEqual(1600, c.volume)
def test_compute_cpu_user_time(self):
c = self._process_notification(cpu.CpuUserTime(None))
self.assertEqual('compute.node.cpu.user.time', c.name)
self.assertEqual(17421440000000L, c.volume)
def test_compute_cpu_kernel_time(self):
c = self._process_notification(cpu.CpuKernelTime(None))
self.assertEqual('compute.node.cpu.kernel.time', c.name)
self.assertEqual(7852600000000L, c.volume)
def test_compute_cpu_idle_time(self):
c = self._process_notification(cpu.CpuIdleTime(None))
self.assertEqual('compute.node.cpu.idle.time', c.name)
self.assertEqual(1307374400000000L, c.volume)
def test_compute_cpu_iowait_time(self):
c = self._process_notification(cpu.CpuIowaitTime(None))
self.assertEqual('compute.node.cpu.iowait.time', c.name)
self.assertEqual(11697470000000L, c.volume)
def test_compute_cpu_kernel_percent(self):
c = self._process_notification(cpu.CpuKernelPercent(None))
self.assertEqual('compute.node.cpu.kernel.percent', c.name)
self.assertEqual(0.5841204961898534, c.volume)
def test_compute_cpu_idle_percent(self):
c = self._process_notification(cpu.CpuIdlePercent(None))
self.assertEqual('compute.node.cpu.idle.percent', c.name)
self.assertEqual(97.24985141658965, c.volume)
def test_compute_cpu_user_percent(self):
c = self._process_notification(cpu.CpuUserPercent(None))
self.assertEqual('compute.node.cpu.user.percent', c.name)
self.assertEqual(1.2959045637294348, c.volume)
def test_compute_cpu_iowait_percent(self):
c = self._process_notification(cpu.CpuIowaitPercent(None))
self.assertEqual('compute.node.cpu.iowait.percent', c.name)
self.assertEqual(0.8701235234910634, c.volume)
def test_compute_cpu_percent(self):
c = self._process_notification(cpu.CpuPercent(None))
self.assertEqual('compute.node.cpu.percent', c.name)
self.assertEqual(2.7501485834103515, c.volume)
|
{
"content_hash": "5a84da6ba4c3bee71277cddf55dd59c9",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 73,
"avg_line_length": 43.36363636363637,
"alnum_prop": 0.6308659893565554,
"repo_name": "Juniper/ceilometer",
"id": "09e91416dcb3c44904091aac3778edbd4472ea7b",
"size": "6773",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ceilometer/tests/compute/notifications/test_cpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6027"
},
{
"name": "Python",
"bytes": "2857750"
},
{
"name": "Shell",
"bytes": "4136"
}
],
"symlink_target": ""
}
|
import sys, os
sys.path = [os.path.join(os.getcwd(), "..", "..") ] + sys.path
from flapp.app import App
from flapp.glRenderer3D import glRenderer3D
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL import GLUT
class glInitObj:
def draw(self):
glEnable(GL_DEPTH_TEST)
class ScreenClearer:
def draw(self, renderer):
glClearColor(.5,.8,0.5,0.)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
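# Minimal manual test: opens a 320x280 window, positions the camera at
# (5, 5, 10) and renders a single red GLUT solid cube (size 2) each frame.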
def run():
windowWidth = 320
windowHeight = 280
app = App(windowWidth, windowHeight)
renderer = glRenderer3D()
renderer.camera.setPos(5, 5, 10)
renderer.addGlSetupObj(glInitObj())
renderer.addFrameSetupObj(ScreenClearer())
app.setRenderer(renderer)
app.initialize()
renderer.init(windowWidth, windowHeight)
glDisable(GL_LIGHTING)
class glBox:
def __init__(self):
self.hasDrawFunc=True
self.hasEraseDrawFunc=True
self.visible = True
def update(self, app, secs):
pass
def eraseDraw(self, app):
pass
def draw(self, renderer):
glClearColor(.8, .8, .8, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0, 0, 0)
GLUT.glutSolidCube(2)
box = glBox()
print "after make box"
app.addDynamicObject(box)
print "after adddyn"
app.drawBounds = 0
app.appDoesCollisionChecks = False
print "Running app"
app.run()
#app.runWithTwisted()
if __name__ == "__main__":
run()
|
{
"content_hash": "d89d808134b51db08b4172c0ec1c71e8",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 62,
"avg_line_length": 22.761194029850746,
"alnum_prop": 0.6131147540983607,
"repo_name": "rpwagner/tiled-display",
"id": "bd5747acabdaac20014faf4af61cea4367d3e974",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flapp/test/test3D.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import time
from ambari_commons import constants
from resource_management.core import shell
from resource_management.core.source import Template
from resource_management.core.resources.system import File, Execute, Directory
from resource_management.core.resources.service import Service
from resource_management.libraries.functions import namenode_ha_utils
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
from resource_management.libraries.functions import Direction
from ambari_commons import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
from utils import get_dfsadmin_base_command
from utils import set_up_zkfc_security
if OSCheck.is_windows_family():
from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from utils import service, safe_zkfc_op, is_previous_fs_image
from setup_ranger_hdfs import setup_ranger_hdfs, create_ranger_audit_hdfs_directories
import namenode_upgrade
def wait_for_safemode_off(hdfs_binary, afterwait_sleep=0, execute_kinit=False, retries=115, sleep_seconds=10):
"""
During NonRolling (aka Express Upgrade), after starting NameNode, which is still in safemode, and then starting
all of the DataNodes, we need for NameNode to receive all of the block reports and leave safemode.
If HA is present, then this command will run individually on each NameNode, which checks for its own address.
"""
import params
sleep_minutes = int(sleep_seconds * retries / 60)
Logger.info("Waiting up to {0} minutes for the NameNode to leave Safemode...".format(sleep_minutes))
if params.security_enabled and execute_kinit:
kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
Execute(kinit_command, user=params.hdfs_user, logoutput=True)
try:
# Note, this fails if namenode_address isn't prefixed with "params."
dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary, use_specific_namenode=True)
is_namenode_safe_mode_off = dfsadmin_base_command + " -safemode get | grep 'Safe mode is OFF'"
# Wait up to 30 mins
Execute(is_namenode_safe_mode_off, tries=retries, try_sleep=sleep_seconds,
user=params.hdfs_user, logoutput=True)
# Wait a bit more since YARN still depends on block reports coming in.
# Also saw intermittent errors with HBASE service check if it was done too soon.
time.sleep(afterwait_sleep)
except Fail:
Logger.error("The NameNode is still in Safemode. Please be careful with commands that need Safemode OFF.")
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
upgrade_suspended=False, env=None):
if action is None:
raise Fail('"action" parameter is required for function namenode().')
if action in ["start", "stop"] and hdfs_binary is None:
raise Fail('"hdfs_binary" parameter is required for function namenode().')
if action == "configure":
import params
    #we need this directory to be present before any action (HA manual steps for
    #additional namenode)
create_name_dirs(params.dfs_name_dir)
    # set up failover / secure zookeeper ACLs, this feature is supported from HDP 2.6 onwards
set_up_zkfc_security(params)
elif action == "start":
Logger.info("Called service {0} with upgrade_type: {1}".format(action, str(upgrade_type)))
setup_ranger_hdfs(upgrade_type=upgrade_type)
import params
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group
)
if params.hdfs_include_file:
File(params.include_file_path,
content=Template("include_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group
)
pass
if do_format and not params.hdfs_namenode_format_disabled:
format_namenode()
pass
if params.dfs_ha_enabled and \
params.dfs_ha_namenode_standby is not None and \
(params.hostname == params.dfs_ha_namenode_standby or params.public_hostname == params.dfs_ha_namenode_standby):
# if the current host is the standby NameNode in an HA deployment
# run the bootstrap command, to start the NameNode in standby mode
# this requires that the active NameNode is already up and running,
# so this execute should be re-tried upon failure, up to a timeout
success = bootstrap_standby_namenode(params)
if not success:
raise Fail("Could not bootstrap standby namenode")
if upgrade_type == constants.UPGRADE_TYPE_ROLLING and params.dfs_ha_enabled:
# Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
# to kill ZKFC manually, so we need to start it if not already running.
safe_zkfc_op(action, env)
options = ""
if upgrade_type == constants.UPGRADE_TYPE_ROLLING:
if params.upgrade_direction == Direction.UPGRADE:
options = "-rollingUpgrade started"
elif params.upgrade_direction == Direction.DOWNGRADE:
options = "-rollingUpgrade downgrade"
elif upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
is_previous_image_dir = is_previous_fs_image()
Logger.info("Previous file system image dir present is {0}".format(str(is_previous_image_dir)))
if params.upgrade_direction == Direction.UPGRADE:
options = "-rollingUpgrade started"
elif params.upgrade_direction == Direction.DOWNGRADE:
options = "-rollingUpgrade downgrade"
elif upgrade_type == constants.UPGRADE_TYPE_HOST_ORDERED:
# nothing special to do for HOU - should be very close to a normal restart
pass
elif upgrade_type is None and upgrade_suspended is True:
# the rollingUpgrade flag must be passed in during a suspended upgrade when starting NN
if os.path.exists(namenode_upgrade.get_upgrade_in_progress_marker()):
options = "-rollingUpgrade started"
else:
Logger.info("The NameNode upgrade marker file {0} does not exist, yet an upgrade is currently suspended. "
"Assuming that the upgrade of NameNode has not occurred yet.".format(namenode_upgrade.get_upgrade_in_progress_marker()))
Logger.info("Options for start command are: {0}".format(options))
service(
action="start",
name="namenode",
user=params.hdfs_user,
options=options,
create_pid_dir=True,
create_log_dir=True
)
if params.security_enabled:
Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
user = params.hdfs_user)
# ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
# no-HA | ON -> OFF | Yes |
# HA and active | ON -> OFF | Yes |
# HA and standby | no change | No |
# RU with HA on active | ON -> OFF | Yes |
# RU with HA on standby | ON -> OFF | Yes |
# EU with HA on active | ON -> OFF | No |
# EU with HA on standby | ON -> OFF | No |
# EU non-HA | ON -> OFF | No |
# because we do things like create directories after starting NN,
# the vast majority of the time this should be True - it should only
# be False if this is HA and we are the Standby NN
ensure_safemode_off = True
# True if this is the only NameNode (non-HA) or if its the Active one in HA
is_active_namenode = True
if params.dfs_ha_enabled:
Logger.info("Waiting for the NameNode to broadcast whether it is Active or Standby...")
if is_this_namenode_active() is False:
# we are the STANDBY NN
is_active_namenode = False
# we are the STANDBY NN and this restart is not part of an upgrade
if upgrade_type is None:
ensure_safemode_off = False
# During an Express Upgrade, NameNode will not leave SafeMode until the DataNodes are started,
# so always disable the Safemode check
if upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
ensure_safemode_off = False
# some informative logging separate from the above logic to keep things a little cleaner
if ensure_safemode_off:
Logger.info("Waiting for this NameNode to leave Safemode due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}".format(
params.dfs_ha_enabled, is_active_namenode, upgrade_type))
else:
Logger.info("Skipping Safemode check due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}".format(
params.dfs_ha_enabled, is_active_namenode, upgrade_type))
# wait for Safemode to end
if ensure_safemode_off:
if params.rolling_restart and params.rolling_restart_safemode_exit_timeout:
calculated_retries = int(params.rolling_restart_safemode_exit_timeout) / 30
wait_for_safemode_off(hdfs_binary, afterwait_sleep=30, retries=calculated_retries, sleep_seconds=30)
else:
wait_for_safemode_off(hdfs_binary)
# Always run this on the "Active" NN unless Safemode has been ignored
# in the case where safemode was ignored (like during an express upgrade), then
# NN will be in SafeMode and cannot have directories created
if is_active_namenode and ensure_safemode_off:
create_hdfs_directories()
create_ranger_audit_hdfs_directories()
else:
Logger.info("Skipping creation of HDFS directories since this is either not the Active NameNode or we did not wait for Safemode to finish.")
elif action == "stop":
import params
service(
action="stop", name="namenode",
user=params.hdfs_user
)
elif action == "status":
import status_params
check_process_status(status_params.namenode_pid_file)
elif action == "decommission":
decommission()
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
upgrade_suspended=False, env=None):
if action is None:
raise Fail('"action" parameter is required for function namenode().')
if action in ["start", "stop"] and hdfs_binary is None:
raise Fail('"hdfs_binary" parameter is required for function namenode().')
if action == "configure":
pass
elif action == "start":
import params
#TODO: Replace with format_namenode()
namenode_format_marker = os.path.join(params.hadoop_conf_dir,"NN_FORMATTED")
if not os.path.exists(namenode_format_marker):
hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
Execute("%s namenode -format" % (hadoop_cmd), logoutput=True)
open(namenode_format_marker, 'a').close()
Service(params.namenode_win_service_name, action=action)
elif action == "stop":
import params
Service(params.namenode_win_service_name, action=action)
elif action == "status":
import status_params
check_windows_service_status(status_params.namenode_win_service_name)
elif action == "decommission":
decommission()
def create_name_dirs(directories):
import params
dirs = directories.split(",")
Directory(dirs,
mode=0755,
owner=params.hdfs_user,
group=params.user_group,
create_parents = True,
cd_access="a",
)
def create_hdfs_directories():
import params
params.HdfsResource(params.hdfs_tmp_dir,
type="directory",
action="create_on_execute",
owner=params.hdfs_user,
mode=0777,
)
params.HdfsResource(params.smoke_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.smoke_user,
mode=params.smoke_hdfs_user_mode,
)
params.HdfsResource(None,
action="execute",
)
def format_namenode(force=None):
import params
old_mark_dir = params.namenode_formatted_old_mark_dirs
mark_dir = params.namenode_formatted_mark_dirs
dfs_name_dir = params.dfs_name_dir
hdfs_user = params.hdfs_user
hadoop_conf_dir = params.hadoop_conf_dir
if not params.dfs_ha_enabled:
if force:
ExecuteHadoop('namenode -format',
bin_dir=params.hadoop_bin_dir,
conf_dir=hadoop_conf_dir,
logoutput=True)
else:
if not is_namenode_formatted(params):
Execute(format("hdfs --config {hadoop_conf_dir} namenode -format -nonInteractive"),
user = params.hdfs_user,
path = [params.hadoop_bin_dir],
logoutput=True
)
for m_dir in mark_dir:
Directory(m_dir,
create_parents = True
)
else:
if params.dfs_ha_namenode_active is not None and \
(params.hostname == params.dfs_ha_namenode_active or params.public_hostname == params.dfs_ha_namenode_active):
# check and run the format command in the HA deployment scenario
# only format the "active" namenode in an HA deployment
if force:
ExecuteHadoop('namenode -format',
bin_dir=params.hadoop_bin_dir,
conf_dir=hadoop_conf_dir,
logoutput=True)
else:
nn_name_dirs = params.dfs_name_dir.split(',')
if not is_namenode_formatted(params):
try:
Execute(format("hdfs --config {hadoop_conf_dir} namenode -format -nonInteractive"),
user = params.hdfs_user,
path = [params.hadoop_bin_dir],
logoutput=True
)
except Fail:
          # We need to clean up the name directories, so we can re-run format next time.
for nn_name_dir in nn_name_dirs:
Execute(format("rm -rf {nn_name_dir}/*"),
user = params.hdfs_user,
)
raise
for m_dir in mark_dir:
Directory(m_dir,
create_parents = True
)
def is_namenode_formatted(params):
old_mark_dirs = params.namenode_formatted_old_mark_dirs
mark_dirs = params.namenode_formatted_mark_dirs
nn_name_dirs = params.dfs_name_dir.split(',')
marked = False
# Check if name directories have been marked as formatted
for mark_dir in mark_dirs:
if os.path.isdir(mark_dir):
marked = True
Logger.info(format("{mark_dir} exists. Namenode DFS already formatted"))
  # Ensure that all mark dirs are created for all name directories
if marked:
for mark_dir in mark_dirs:
Directory(mark_dir,
create_parents = True
)
return marked
# Move all old format markers to new place
for old_mark_dir in old_mark_dirs:
if os.path.isdir(old_mark_dir):
for mark_dir in mark_dirs:
Execute(('cp', '-ar', old_mark_dir, mark_dir),
sudo = True
)
marked = True
Directory(old_mark_dir,
action = "delete"
)
elif os.path.isfile(old_mark_dir):
for mark_dir in mark_dirs:
Directory(mark_dir,
create_parents = True,
)
Directory(old_mark_dir,
action = "delete"
)
marked = True
if marked:
return True
# Check if name dirs are not empty
for name_dir in nn_name_dirs:
code, out = shell.call(("ls", name_dir))
dir_exists_and_valid = bool(not code)
if not dir_exists_and_valid: # situations if disk exists but is crashed at the moment (ls: reading directory ...: Input/output error)
Logger.info(format("NameNode will not be formatted because the directory {name_dir} is missing or cannot be checked for content. {out}"))
return True
try:
Execute(format("ls {name_dir} | wc -l | grep -q ^0$"),
)
except Fail:
Logger.info(format("NameNode will not be formatted since {name_dir} exists and contains content"))
return True
return False
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def decommission():
import params
hdfs_user = params.hdfs_user
conf_dir = params.hadoop_conf_dir
user_group = params.user_group
nn_kinit_cmd = params.nn_kinit_cmd
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=hdfs_user,
group=user_group
)
if params.hdfs_include_file:
File(params.include_file_path,
content=Template("include_hosts_list.j2"),
owner=params.hdfs_user,
group=params.user_group
)
pass
if not params.update_files_only:
Execute(nn_kinit_cmd,
user=hdfs_user
)
if params.dfs_ha_enabled:
# due to a bug in hdfs, refreshNodes will not run on both namenodes so we
# need to execute each command scoped to a particular namenode
nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
else:
nn_refresh_cmd = format('dfsadmin -fs {namenode_address} -refreshNodes')
ExecuteHadoop(nn_refresh_cmd,
user=hdfs_user,
conf_dir=conf_dir,
bin_dir=params.hadoop_bin_dir)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def decommission():
import params
hdfs_user = params.hdfs_user
conf_dir = params.hadoop_conf_dir
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=hdfs_user
)
if params.hdfs_include_file:
File(params.include_file_path,
content=Template("include_hosts_list.j2"),
owner=params.hdfs_user
)
pass
if params.dfs_ha_enabled:
# due to a bug in hdfs, refreshNodes will not run on both namenodes so we
# need to execute each command scoped to a particular namenode
nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
else:
nn_refresh_cmd = format('cmd /c hadoop dfsadmin -fs {namenode_address} -refreshNodes')
Execute(nn_refresh_cmd, user=hdfs_user)
def bootstrap_standby_namenode(params, use_path=False):
mark_dirs = params.namenode_bootstrapped_mark_dirs
bin_path = os.path.join(params.hadoop_bin_dir, '') if use_path else ""
try:
iterations = 50
bootstrapped = False
bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive")
    # Blueprint-based deployments start both NN in parallel and occasionally
# the first attempt to bootstrap may fail. Depending on how it fails the
# second attempt may not succeed (e.g. it may find the folder and decide that
# bootstrap succeeded). The solution is to call with -force option but only
# during initial start
if params.command_phase == "INITIAL_START":
# force bootstrap in INITIAL_START phase
bootstrap_cmd = format("{bin_path}hdfs namenode -bootstrapStandby -nonInteractive -force")
elif is_namenode_bootstrapped(params):
      # Once out of INITIAL_START phase bootstrap only if we couldn't bootstrap during cluster deployment
return True
    Logger.info("Bootstrapping standby namenode: %s" % (bootstrap_cmd))
for i in range(iterations):
Logger.info('Try %d out of %d' % (i+1, iterations))
code, out = shell.call(bootstrap_cmd, logoutput=False, user=params.hdfs_user)
if code == 0:
Logger.info("Standby namenode bootstrapped successfully")
bootstrapped = True
break
elif code == 5:
Logger.info("Standby namenode already bootstrapped")
bootstrapped = True
break
else:
Logger.warning('Bootstrap standby namenode failed with %d error code. Will retry' % (code))
except Exception as ex:
Logger.error('Bootstrap standby namenode threw an exception. Reason %s' %(str(ex)))
if bootstrapped:
for mark_dir in mark_dirs:
Directory(mark_dir,
create_parents = True
)
return bootstrapped
def is_namenode_bootstrapped(params):
mark_dirs = params.namenode_bootstrapped_mark_dirs
nn_name_dirs = params.dfs_name_dir.split(',')
marked = False
# Check if name directories have been marked as formatted
for mark_dir in mark_dirs:
if os.path.isdir(mark_dir):
marked = True
Logger.info(format("{mark_dir} exists. Standby Namenode already bootstrapped"))
break
  # Ensure that all mark dirs are created for all name directories
if marked:
for mark_dir in mark_dirs:
Directory(mark_dir,
create_parents = True
)
return marked
def find_timeout():
import params
if isinstance(params.command_timeout, (int, long)):
return params.command_timeout
return int(params.command_timeout)
@retry(sleep_time=5, backoff_factor=2, err_class=Fail, timeout_func=find_timeout)
def is_this_namenode_active():
"""
Gets whether the current NameNode is Active. This function will wait until the NameNode is
listed as being either Active or Standby before returning a value. This is to ensure that
that if the other NameNode is Active, we ensure that this NameNode has fully loaded and
registered in the event that the other NameNode is going to be restarted. This prevents
a situation where we detect the other NameNode as Active before this NameNode has fully booted.
If the other Active NameNode is then restarted, there can be a loss of service if this
NameNode has not entered Standby.
"""
import params
# returns ([('nn1', 'c6401.ambari.apache.org:50070')], [('nn2', 'c6402.ambari.apache.org:50070')], [])
# 0 1 2
# or
# returns ([], [('nn1', 'c6401.ambari.apache.org:50070')], [('nn2', 'c6402.ambari.apache.org:50070')], [])
# 0 1 2
#
namenode_states = namenode_ha_utils.get_namenode_states(params.hdfs_site, params.security_enabled,
params.hdfs_user, times=5, sleep_time=5, backoff_factor=2)
# unwraps [('nn1', 'c6401.ambari.apache.org:50070')]
active_namenodes = [] if len(namenode_states[0]) < 1 else namenode_states[0]
# unwraps [('nn2', 'c6402.ambari.apache.org:50070')]
standby_namenodes = [] if len(namenode_states[1]) < 1 else namenode_states[1]
# check to see if this is the active NameNode
for entry in active_namenodes:
if params.namenode_id in entry:
return True
# if this is not the active NameNode, then we must wait for it to register as standby
for entry in standby_namenodes:
if params.namenode_id in entry:
return False
  # at this point, this NameNode is neither active nor standby - we must wait to ensure it
  # enters at least one of these roles before returning a verdict - the annotation will catch
  # this failure and retry the function automatically
raise Fail(format("The NameNode {namenode_id} is not listed as Active or Standby, waiting..."))
|
{
"content_hash": "54c1a03ff275f648753b1b1176399689",
"timestamp": "",
"source": "github",
"line_count": 611,
"max_line_length": 146,
"avg_line_length": 39.91653027823241,
"alnum_prop": 0.6637418508343925,
"repo_name": "radicalbit/ambari",
"id": "cac6e9c89295118845dc3bba0936140081d42780",
"size": "24389",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
}
|
import numpy as np
import warnings
from ._base import NeighborsBase
from ._base import KNeighborsMixin
from ..base import OutlierMixin
from numbers import Real
from ..utils._param_validation import Interval, StrOptions
from ..utils.metaestimators import available_if
from ..utils.validation import check_is_fitted
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(KNeighborsMixin, OutlierMixin, NeighborsBase):
"""Unsupervised Outlier Detection using the Local Outlier Factor (LOF).
The anomaly score of each sample is called the Local Outlier Factor.
It measures the local deviation of the density of a given sample with respect
to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of its
neighbors, one can identify samples that have a substantially lower density
than their neighbors. These are considered outliers.
.. versionadded:: 0.19
Parameters
----------
n_neighbors : int, default=20
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
        Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`, in which
case only "nonzero" elements may be considered neighbors.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
p : int, default=2
Parameter for the Minkowski metric from
:func:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this
is equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
contamination : 'auto' or float, default='auto'
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the scores of the samples.
- if 'auto', the threshold is determined as in the
original paper,
- if a float, the contamination should be in the range (0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
novelty : bool, default=False
By default, LocalOutlierFactor is only meant to be used for outlier
detection (novelty=False). Set novelty to True if you want to use
LocalOutlierFactor for novelty detection. In this case be aware that
you should only use predict, decision_function and score_samples
on new unseen data and not on the training set; and note that the
results obtained this way may differ from the standard LOF results.
.. versionadded:: 0.20
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
negative_outlier_factor_ : ndarray of shape (n_samples,)
The opposite LOF of the training samples. The higher, the more normal.
Inliers tend to have a LOF score close to 1
(``negative_outlier_factor_`` close to -1), while outliers tend to have
a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : int
The actual number of neighbors used for :meth:`kneighbors` queries.
offset_ : float
Offset used to obtain binary labels from the raw scores.
Observations having a negative_outlier_factor smaller than `offset_`
are detected as abnormal.
The offset is set to -1.5 (inliers score around -1), except when a
        contamination parameter different from "auto" is provided. In that
        case, the offset is defined in such a way that we obtain the expected
number of outliers in training.
.. versionadded:: 0.20
effective_metric_ : str
The effective metric used for the distance computation.
effective_metric_params_ : dict
The effective additional keyword arguments for the metric function.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
It is the number of samples in the fitted data.
See Also
--------
sklearn.svm.OneClassSVM: Unsupervised Outlier Detection using
Support Vector Machine.
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import LocalOutlierFactor
>>> X = [[-1.1], [0.2], [101.1], [0.3]]
>>> clf = LocalOutlierFactor(n_neighbors=2)
>>> clf.fit_predict(X)
array([ 1, 1, -1, 1])
>>> clf.negative_outlier_factor_
array([ -0.9821..., -1.0370..., -73.3697..., -0.9821...])
"""
_parameter_constraints: dict = {
**NeighborsBase._parameter_constraints,
"contamination": [
StrOptions({"auto"}),
Interval(Real, 0, 0.5, closed="right"),
],
"novelty": ["boolean"],
}
_parameter_constraints.pop("radius")
def __init__(
self,
n_neighbors=20,
*,
algorithm="auto",
leaf_size=30,
metric="minkowski",
p=2,
metric_params=None,
contamination="auto",
novelty=False,
n_jobs=None,
):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.contamination = contamination
self.novelty = novelty
def _check_novelty_fit_predict(self):
if self.novelty:
msg = (
"fit_predict is not available when novelty=True. Use "
"novelty=False if you want to predict on the training set."
)
raise AttributeError(msg)
return True
@available_if(_check_novelty_fit_predict)
def fit_predict(self, X, y=None):
"""Fit the model to the training set X and return the labels.
**Not available for novelty detection (when novelty is set to True).**
Label is 1 for an inlier and -1 for an outlier according to the LOF
score and the contamination parameter.
Parameters
----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
# As fit_predict would be different from fit.predict, fit_predict is
# only available for outlier detection (novelty=False)
return self.fit(X)._predict()
def fit(self, X, y=None):
"""Fit the local outlier factor detector from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : LocalOutlierFactor
The fitted local outlier factor detector.
"""
self._validate_params()
self._fit(X)
n_samples = self.n_samples_fit_
if self.n_neighbors > n_samples:
warnings.warn(
"n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples)
)
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
n_neighbors=self.n_neighbors_
)
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_
)
# Compute lof score over training samples to define offset_:
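        # i.e. LOF_k(p) = mean over the k neighbors o of lrd(o) / lrd(p), and
        # negative_outlier_factor_ = -LOF_k(p), so inliers score close to -1.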
lrd_ratios_array = (
self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]
)
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
if self.contamination == "auto":
# inliers score around -1 (the higher, the less abnormal).
self.offset_ = -1.5
else:
self.offset_ = np.percentile(
self.negative_outlier_factor_, 100.0 * self.contamination
)
return self
def _check_novelty_predict(self):
if not self.novelty:
msg = (
"predict is not available when novelty=False, use "
"fit_predict if you want to predict on training data. Use "
"novelty=True if you want to use LOF for novelty detection "
"and predict on new unseen data."
)
raise AttributeError(msg)
return True
@available_if(_check_novelty_predict)
def predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
**Only available for novelty detection (when novelty is set to True).**
        This method allows generalizing prediction to *new observations* (not
in the training set). Note that the result of ``clf.fit(X)`` then
``clf.predict(X)`` with ``novelty=True`` may differ from the result
obtained by ``clf.fit_predict(X)`` with ``novelty=False``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
return self._predict(X)
def _predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples. If None, makes prediction on the
training data without considering them as their own neighbors.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self)
if X is not None:
X = check_array(X, accept_sparse="csr")
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
else:
is_inlier = np.ones(self.n_samples_fit_, dtype=int)
is_inlier[self.negative_outlier_factor_ < self.offset_] = -1
return is_inlier
def _check_novelty_decision_function(self):
if not self.novelty:
msg = (
"decision_function is not available when novelty=False. "
"Use novelty=True if you want to use LOF for novelty "
"detection and compute decision_function for new unseen "
"data. Note that the opposite LOF of the training samples "
"is always available by considering the "
"negative_outlier_factor_ attribute."
)
raise AttributeError(msg)
return True
@available_if(_check_novelty_decision_function)
def decision_function(self, X):
"""Shifted opposite of the Local Outlier Factor of X.
Bigger is better, i.e. large values correspond to inliers.
**Only available for novelty detection (when novelty is set to True).**
The shift offset allows a zero threshold for being an outlier.
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
shifted_opposite_lof_scores : ndarray of shape (n_samples,)
The shifted opposite of the Local Outlier Factor of each input
samples. The lower, the more abnormal. Negative scores represent
outliers, positive scores represent inliers.
"""
return self.score_samples(X) - self.offset_
def _check_novelty_score_samples(self):
if not self.novelty:
msg = (
"score_samples is not available when novelty=False. The "
"scores of the training samples are always available "
"through the negative_outlier_factor_ attribute. Use "
"novelty=True if you want to use LOF for novelty detection "
"and compute score_samples for new unseen data."
)
raise AttributeError(msg)
return True
@available_if(_check_novelty_score_samples)
def score_samples(self, X):
"""Opposite of the Local Outlier Factor of X.
        The opposite is taken so that bigger is better, i.e. large values correspond
to inliers.
**Only available for novelty detection (when novelty is set to True).**
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point. Because of this, the scores obtained via ``score_samples`` may
differ from the standard LOF scores.
The standard LOF scores for the training data is available via the
``negative_outlier_factor_`` attribute.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : ndarray of shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse="csr")
distances_X, neighbors_indices_X = self.kneighbors(
X, n_neighbors=self.n_neighbors_
)
X_lrd = self._local_reachability_density(distances_X, neighbors_indices_X)
lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
----------
distances_X : ndarray of shape (n_queries, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : ndarray of shape (n_queries, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : ndarray of shape (n_queries,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1]
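        # dist_k holds the k-distance of each neighbor; the reachability
        # distance is reach_dist_k(p, o) = max(k-distance(o), d(p, o)) and
        # lrd(p) = 1 / mean of reach_dist_k(p, o) over the k nearest neighbors o.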
reach_dist_array = np.maximum(distances_X, dist_k)
# 1e-10 to avoid `nan' when nb of duplicates > n_neighbors_:
return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)
|
{
"content_hash": "12e52fb26bf58183d0bf588b5c9f1427",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 86,
"avg_line_length": 38.62068965517241,
"alnum_prop": 0.6169642857142857,
"repo_name": "vinayak-mehta/scikit-learn",
"id": "ecfb1712f85039b781e0439558df761394d01801",
"size": "19197",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sklearn/neighbors/_lof.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668672"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10468304"
},
{
"name": "Shell",
"bytes": "41758"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import sys
import FSEvents
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
header = """# -*- coding: utf-8 -*-
# File generated by watchdog/scripts/dump_mac_constants.py
class Constants(object):
"""
def dump_constants(header):
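    # Scan the FSEvents module and emit every integer constant whose name
    # starts with 'k' as a hex-valued class attribute appended to `header`.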
output = StringIO()
output.write(header)
for attribute in dir(FSEvents):
value = getattr(FSEvents, attribute)
if attribute.startswith('k') and isinstance(value, int):
output.write(" %s = %s\n" % (attribute, hex(value)))
content = output.getvalue()
output.close()
return content
def write_constants_to_file(filename):
content = dump_constants(header)
    # text mode: `content` is a str, which 'wb' would reject on Python 3
    with open(filename, 'w') as f:
f.write(content)
if __name__ == "__main__":
if len(sys.argv) > 1:
output_file = sys.argv[1]
else:
print("Usage: scripts/dump_mac_constants.py <output_file>")
sys.exit(1)
write_constants_to_file(output_file)
|
{
"content_hash": "efb68a1a767f07161f9aa47b7f0af341",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 67,
"avg_line_length": 23.630434782608695,
"alnum_prop": 0.6384544618215271,
"repo_name": "teleyinex/watchdog",
"id": "3d27459cfefd58a70650a88ae338101c7a15049f",
"size": "1130",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tools/dump_fsevents_constants.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "19969"
},
{
"name": "Makefile",
"bytes": "2497"
},
{
"name": "Python",
"bytes": "251908"
},
{
"name": "Shell",
"bytes": "846"
}
],
"symlink_target": ""
}
|
from collections import deque
from re import sub, UNICODE
from earwigbot.wiki.copyvios.markov import EMPTY_INTERSECTION
from markupsafe import escape
__all__ = ["highlight_delta"]
def highlight_delta(context, chain, delta):
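    # Slides a window of chain.degree consecutive tokens over chain.text; any
    # window also present in delta.chain marks its words as matching, and the
    # text is re-emitted as HTML with <span class="cv-hl"> highlighting and
    # fade-in/fade-out spans at the edges of highlighted blocks.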
degree = chain.degree - 1
highlights = [False] * degree
block = deque([chain.START] * degree)
if not delta:
delta = EMPTY_INTERSECTION
for word in chain.text.split() + ([chain.END] * degree):
word = _strip_word(chain, word)
block.append(word)
if tuple(block) in delta.chain:
highlights[-1 * degree:] = [True] * degree
highlights.append(True)
else:
highlights.append(False)
block.popleft()
i = degree
numwords = len(chain.text.split())
result = []
paragraphs = deque(chain.text.split("\n"))
while paragraphs:
words = []
for i, word in enumerate(_get_next(paragraphs), i):
if highlights[i]:
before = highlights[i - 1]
after = highlights[i + 1]
first = i == degree
last = i - degree + 1 == numwords
words.append(_highlight_word(word, before, after, first, last))
else:
words.append(unicode(escape(word)))
result.append(u" ".join(words))
i += 1
return u"<br /><br />".join(result)
def _get_next(paragraphs):
body = []
while paragraphs and not body:
body = paragraphs.popleft().split()
if body and len(body) <= 3:
while paragraphs:
next = paragraphs[0].split()
if len(next) <= 3:
body += next
paragraphs.popleft()
else:
break
return body
def _highlight_word(word, before, after, first, last):
if before and after:
# Word is in the middle of a highlighted block:
res = unicode(escape(word))
if first:
res = u'<span class="cv-hl">' + res
if last:
res += u'</span>'
elif after:
# Word is the first in a highlighted block:
res = u'<span class="cv-hl">' + _fade_word(word, u"in")
if last:
res += u"</span>"
elif before:
# Word is the last in a highlighted block:
res = _fade_word(word, u"out") + u"</span>"
if first:
res = u'<span class="cv-hl">' + res
else:
res = unicode(escape(word))
return res
def _fade_word(word, dir):
if len(word) <= 4:
word = unicode(escape(word))
return u'<span class="cv-hl-{0}">{1}</span>'.format(dir, word)
if dir == u"out":
before, after = unicode(escape(word[:-4])), unicode(escape(word[-4:]))
base = u'{0}<span class="cv-hl-out">{1}</span>'
return base.format(before, after)
else:
before, after = unicode(escape(word[:4])), unicode(escape(word[4:]))
base = u'<span class="cv-hl-in">{0}</span>{1}'
return base.format(before, after)
def _strip_word(chain, word):
if word == chain.START or word == chain.END:
return word
return sub("[^\w\s-]", "", word.lower(), flags=UNICODE)
|
{
"content_hash": "0bfc5081af7624776a24bc8c8b11c395",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 32.71134020618557,
"alnum_prop": 0.5455404979514655,
"repo_name": "earwig/copyvios",
"id": "a3fb21f6b29af02176ed2fede74ea061ec931982",
"size": "3199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "copyvios/highlighter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10387"
},
{
"name": "JavaScript",
"bytes": "4819"
},
{
"name": "Mako",
"bytes": "49824"
},
{
"name": "Python",
"bytes": "41989"
}
],
"symlink_target": ""
}
|
from math import log2 #For converting numbers to log base 2
'''PIPE TO EXTERNAL FILE WITH > filename.txt'''
letters = 'abcdefghijklmnopqrstuvwxyz'
'''File to read in data from, change this name to read from other files'''
file_name = "typos20.data"
test_file = "typos20Test.data"
'''
NOTE: Spaces are uncorrupted. Words always have the same number of letters and transition to spaces at the end of the word
'''
#Converts an input file where column 1 is the correct letter, column 2 is a space, and column 3 is the observed (possibly wrong) letter; one letter per line, words separated by "_ _" lines
#Returns the two word lists (correct and observed) plus the emission, transition and initial probability dictionaries
def data_parser(name):
#Store columns
first_col = []
second_col = []
#Temporarily store words as they are built
word1 = ""
word2 = ""
#Emission dict
#Dictionary that stores the intended letter as key, and observed letters with frequencies as value
emis_freq = {}
#Fill dictionary with dictionaries, and those with letter entries (init to 0)
for i in letters:
emis_freq[i] = {}
for j in letters:
emis_freq[i][j] = 0
#Transition dictionary
#Dictionary that stores the first letter (t) as the key, and second letter (t+1) as the second key with frequencies as value
tran_freq = {}
#Fill dictionary with dictionaries, and those with letter entries (init to 0)
for i in (letters+"_"):
tran_freq[i] = {}
for j in (letters+"_"):
tran_freq[i][j] = 0
#Initial dictionary
#Dictionary to store frequency that a letter occurs in the first col (hidden, actual)
init_freq = {}
#Fill dictionary with letter entries (init to 0)
for i in (letters+"_"):
init_freq[i] = 0
#Open the file
with open(name,"r") as data_in:
#Store the last char
last_char = ""
        #Bool to see if this is the first char
first_char = True
#Iterate through the file line by line
for i in data_in.readlines():
#Initial
            #Increment the first col character's frequency in the initial dict
init_freq[i[0]] += 1
#Transition
#Make sure this isn't the first
if first_char:
first_char = False
#Otherwise add to the transition frequency dict
else:
tran_freq[last_char][i[0]] += 1
#Set the last char to be the current first col char that we have added to the dict
last_char = i[0]
#Check if this line is a separation between words ("_")
if i[0] == "_":
#Append word to list of words
first_col.append(word1)
second_col.append(word2)
                #Reset temporary word storage
word1 = ""
word2 = ""
#Otherwise line is letter
else:
#Append letters to their temporary storage containers
word1 += i[0]
word2 += i[2]
if i[2] in emis_freq[i[0]]:
emis_freq[i[0]][i[2]] += 1
else:
emis_freq[i[0]][i[2]] = 1
#Cleanup since data file doesn't end in a "_ _" line
first_col.append(word1)
second_col.append(word2)
    '''Emission Calculations'''
#Add entry to dict 'tot' that holds the total number of times the letter appears
#Iterate through keys (actual letters)
for i in emis_freq:
#Reset total
tot = 0
#Iterate through evidence keys for letter i
for j in emis_freq[i]:
tot += emis_freq[i][j]
#Add 'tot' entry to dict
emis_freq[i]["tot"] = tot
#Now take this data (total) and create a probability dictionary
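    #With add-one (Laplace) smoothing over the 26 letters this becomes
    #  P(observed j | actual i) = (count(i, j) + 1) / (count(i) + 26)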
emis_prob = {}
#Iterate through keys (actual letters)
for i in emis_freq:
#Create dictionary for this actual letter in new dict
emis_prob[i] = {}
#Iterate through evidence keys for letter i
for j in emis_freq[i]:
#Add one to the numerator and 26 (num of letters) to the denominator
emis_prob[i][j] = (emis_freq[i][j]+1)/(emis_freq[i]["tot"]+26)
#Add the very small, basically 0 chance of a "_" getting in the mix (chance is 0 in reality)
emis_prob[i]["_"] = 1/(emis_freq[i]["tot"]+26)
#Remove 'tot' key from probability dict
del emis_prob[i]["tot"]
'''Spaces are immutable, uncorruptable beasts, and have an emission probability of 1. They are not counted'''
emis_prob['_'] = {}
emis_prob['_']['_'] = 0.9999999999999999
for i in letters:
emis_prob['_'][i] = 0.0000000000000001
    '''Transition Calculations'''
#Add entry to dict 'tot' that holds the total number of times the letter appears
#Iterate through keys (actual letters)
for i in tran_freq:
#Reset total
tot = 0
#Iterate through evidence keys for letter i
for j in tran_freq[i]:
tot += tran_freq[i][j]
#Add 'tot' entry to dict
tran_freq[i]["tot"] = tot
#Now take this data (total) and create a probability dictionary
tran_prob = {}
#Iterate through keys (actual letters)
for i in tran_freq:
#Create dictionary for this actual letter in new dict
tran_prob[i] = {}
#Iterate through evidence keys for letter i
for j in tran_freq[i]:
#Add one to the numerator and 27 (num of letters + '_') to the denominator
tran_prob[i][j] = (tran_freq[i][j]+1)/(tran_freq[i]["tot"]+27)
#Remove 'tot' key from probability dict
del tran_prob[i]["tot"]
'''Initial Calculations'''
#Count the total number of characters in the first col (hidden)
tot = 0
for i in init_freq:
tot += init_freq[i]
#Dict that stores the probabilities of each letter
init_prob = {}
for i in init_freq:
init_prob[i] = (init_freq[i]/tot)#(init_freq[i]/len("_".join(first_col)))
    #Return both lists and the probability dictionaries
return first_col,second_col,emis_prob,tran_prob,init_prob
#Viterbi algorithm, returns final prob of getting to end and likely route (sequence of letters)
#Takes in: Evid (observed state sequence, one giant string with underscores for spaces), hidd (list of hidden states, eg. list of possible letters), star (dict of starting probabilities), tran (transition probability dict), emis (emission probability dict)
#Tran must be in format tran[prev][cur]
#Emis must be in format emis[hidden][observed]
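#In log space the recurrence implemented below is:
#  P[0][j] = log2(star[j]) + log2(emis[j][evid[0]])
#  P[t][j] = max over k of ( P[t-1][k] + log2(tran[k][j]) + log2(emis[j][evid[t]]) )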
def furby(evid, hidd, star, tran, emis):
'''Spaces have a 1.0 emission prob, since they are uncorrupted'''
    '''Use the math library's log2 to convert probabilities to log base 2. Convert back with pow(2, num) if desired'''
'''Log2 can still use max. log2(0.8) > log2(0.2)'''
#Create list that uses the time as the index and the value is a dict to store probability
P = [{}]
#Create a dict for the path
path = {}
    #Create dict for t(0) (seed dict with initial entries)
#Iterate through start dict (Contains all states that sequence can start with)
for i in star:
#Calculate probability with start[letter]*emission (add instead of multiply with log numbers)
P[0][i] = log2(star[i])+log2(emis[i][evid[0]])
path[i] = [i]
#Run for t > 1, start at second letter
for i in range(1,len(evid)):
#Create new dict at end of list of dicts (dict for each time value)
P.append({})
#Dict to temporarily store path for this iteration
temp_path = {}
#Iterate through all possible states that are connected to the previous state chosen
for j in hidd:
#Use list comprehension to iterate through states, calculate trans*emis*P[t-1] for each possible state, find max and store that in path
(prob, state) = max((P[i-1][k] + log2(tran[k][j]) + log2(emis[j][evid[i]]), k) for k in hidd)
P[i][j] = prob
temp_path[j] = path[state] + [j]
# Don't need to remember the old paths
path = temp_path
#Find max prob in the last iteration of the list of dicts (P)
n = len(evid)-1
(prob, state) = max((P[n][y], y) for y in hidd)
#Return the probability for the best last state and the path for it as a list of 1 char strings
return prob,path[state]
#Function that takes in 2 strings of equal length and returns the error percent. String 1 is the correct string, string 2 is checked for errors
def error_rate(correct, check):
errors = 0
for i in range(0,len(correct)):
if correct[i] != check[i]:
errors += 1
return errors/len(correct)
if __name__ == "__main__":
#Set correct and actual as lists to hold words in each column
correct,actual,conditional,transitional,initial = data_parser(file_name)
#Get the data from another file to run the algorithm on. Get the 1st and 3rd column as strings
#String that had the hidden state sequence (1st column)
test_hidden = ""
#String that stores the observed column (3rd column)
test_observed = ""
#Open file to get data from
with open(test_file,"r") as test_in:
#Iterate through lines of file
for i in test_in.readlines():
#Store first column letter
test_hidden += i[0]
#Store third column letter
test_observed += i[2]
#Run Viterbi
prob, path = furby(test_observed, letters+"_", initial, transitional, conditional)
#Calculate error rates
print("Error rate of", test_file, "before Viterbi:",error_rate(test_hidden,test_observed)*100,"%")
print("Error rate of", test_file, "after Viterbi:",error_rate(test_hidden,path)*100,"%")
print("--------State Sequence--------")
#Print final sequence in more readable format by joining list
print("".join(path))
#Print the probability of the final state for fun
print("--------Final State Probability--------")
print("In Log2:", prob)
print("In Decimal:", pow(2,prob))
''' Part 1
#Print conditional
print("----------------Condition----------------")
#Iterate through keys of a sorted dictionary
for i in sorted(conditional):
print("--------Hidden:",i,"--------")
#Iterate through keys of dict in dict (value dict to the key "i")
for j in sorted(conditional[i]):
#Print the number of occurances
print(j, conditional[i][j])
#Print transitional
print("----------------Transition----------------")
#Iterate through keys of a sorted dictionary
for i in sorted(transitional):
print("--------Previous:",i,"--------")
#Iterate through keys of dict in dict (value dict to the key "i")
for j in sorted(transitional[i]):
#Print the number of occurances
print(j, transitional[i][j])
#Print Initial
print("----------------Initial (Using Hidden)----------------")
#Iterate through key of sorted dict
for i in sorted(initial):
print(i, initial[i])
'''
|
{
"content_hash": "091446ba4f0dc8aaebc20b6d4b61a6f8",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 256,
"avg_line_length": 37,
"alnum_prop": 0.6828358208955224,
"repo_name": "mitchtz/ai3202",
"id": "7e7e13d9c9d1d094e7bba2e96f128b20c1429362",
"size": "10004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Assignment 8 - Hidden Markov Models/Zinser_Assignment8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49193"
}
],
"symlink_target": ""
}
|
from .directory import *
from .shunt import *
from .utils import *
from .context import *
from .data import *
from .timeouts import *
from .processes import *
|
{
"content_hash": "6978e14824f078a621b1cefb55311c47",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 24,
"avg_line_length": 22.714285714285715,
"alnum_prop": 0.7358490566037735,
"repo_name": "ravenac95/testkit",
"id": "47b922194c3d68bfa7f452cae712082ae798cb57",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "testkit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42548"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
description='sbsmanip is a set of utilities designed to help '
'clean a Space Engineers savefile for performance reasons',
author='Victor Robertson',
license='MIT License',
url='https://github.com/vmrob/sbsmanip',
author_email='victor.robertson.iv@gmail.com',
version='0.3.1',
install_requires=['nose'],
packages=['segc', 'sbsmanip'],
entry_points={
'console_scripts': [
'segc = segc.app:main'
]
},
name='sbsmanip'
)
|
{
"content_hash": "3659c64f383cb3458ba08a618cf163ed",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 27.636363636363637,
"alnum_prop": 0.6282894736842105,
"repo_name": "vmrob/sbsmanip",
"id": "60201d60e8169502c5cdcf11e2b0e28248f406fc",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34008"
}
],
"symlink_target": ""
}
|
"""
=========================================================
Legacy discrete Fourier transforms (:mod:`scipy.fftpack`)
=========================================================
.. warning::
This submodule is now considered legacy, new code should use
:mod:`scipy.fft`.
Fast Fourier Transforms (FFTs)
==============================
.. autosummary::
:toctree: generated/
fft - Fast (discrete) Fourier Transform (FFT)
ifft - Inverse FFT
fft2 - 2-D FFT
ifft2 - 2-D inverse FFT
fftn - N-D FFT
ifftn - N-D inverse FFT
rfft - FFT of strictly real-valued sequence
irfft - Inverse of rfft
dct - Discrete cosine transform
idct - Inverse discrete cosine transform
dctn - N-D Discrete cosine transform
idctn - N-D Inverse discrete cosine transform
dst - Discrete sine transform
idst - Inverse discrete sine transform
dstn - N-D Discrete sine transform
idstn - N-D Inverse discrete sine transform
Differential and pseudo-differential operators
==============================================
.. autosummary::
:toctree: generated/
diff - Differentiation and integration of periodic sequences
tilbert - Tilbert transform: cs_diff(x,h,h)
itilbert - Inverse Tilbert transform: sc_diff(x,h,h)
hilbert - Hilbert transform: cs_diff(x,inf,inf)
ihilbert - Inverse Hilbert transform: sc_diff(x,inf,inf)
cs_diff - cosh/sinh pseudo-derivative of periodic sequences
sc_diff - sinh/cosh pseudo-derivative of periodic sequences
ss_diff - sinh/sinh pseudo-derivative of periodic sequences
cc_diff - cosh/cosh pseudo-derivative of periodic sequences
shift - Shift periodic sequences
Helper functions
================
.. autosummary::
:toctree: generated/
fftshift - Shift the zero-frequency component to the center of the spectrum
ifftshift - The inverse of `fftshift`
fftfreq - Return the Discrete Fourier Transform sample frequencies
rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
next_fast_len - Find the optimal length to zero-pad an FFT for speed
Note that ``fftshift``, ``ifftshift`` and ``fftfreq`` are numpy functions
exposed by ``fftpack``; importing them from ``numpy`` should be preferred.
Convolutions (:mod:`scipy.fftpack.convolve`)
============================================
.. module:: scipy.fftpack.convolve
.. autosummary::
:toctree: generated/
convolve
convolve_z
init_convolution_kernel
destroy_convolve_cache
"""
from __future__ import division, print_function, absolute_import
__all__ = ['fft','ifft','fftn','ifftn','rfft','irfft',
'fft2','ifft2',
'diff',
'tilbert','itilbert','hilbert','ihilbert',
'sc_diff','cs_diff','cc_diff','ss_diff',
'shift',
'fftfreq', 'rfftfreq',
'fftshift', 'ifftshift',
'next_fast_len',
'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'
]
from .basic import *
from .pseudo_diffs import *
from .helper import *
from .realtransforms import *
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
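# Illustrative usage sketch (not part of the SciPy source): a simple round trip
# through the fft/ifft functions documented above. NumPy availability is assumed;
# the helper name is hypothetical and only for demonstration.
def _fft_round_trip_example():
    import numpy as np
    x = np.array([1.0, 2.0, 1.0, -1.0, 1.5])
    # ifft(fft(x)) recovers x up to floating-point error
    return np.allclose(ifft(fft(x)), x)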
|
{
"content_hash": "2fb16b46b594271f4beb6e6de16801bc",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 30.62135922330097,
"alnum_prop": 0.6277742549143944,
"repo_name": "arokem/scipy",
"id": "33b3050d11041b5075f2f2e4647cd794937256d8",
"size": "3154",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/fftpack/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4399737"
},
{
"name": "C++",
"bytes": "649740"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368728"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12815696"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a handler for lighttpd+fastcgi
This file has to be in the PYTHONPATH
Put something like this in the lighttpd.conf file:
server.port = 8000
server.bind = '127.0.0.1'
server.event-handler = 'freebsd-kqueue'
server.modules = ('mod_rewrite', 'mod_fastcgi')
server.error-handler-404 = '/test.fcgi'
server.document-root = '/somewhere/web2py'
server.errorlog = '/tmp/error.log'
fastcgi.server = ('.fcgi' =>
('localhost' =>
('min-procs' => 1,
'socket' => '/tmp/fcgi.sock'
)
)
)
"""
LOGGING = False
SOFTCRON = False
import sys
import os
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
import gluon.contrib.gateways.fcgi as fcgi
if LOGGING:
application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
logfilename='httpserver.log',
profilerfilename=None)
else:
application = gluon.main.wsgibase
if SOFTCRON:
from gluon.settings import global_settings
global_settings.web2py_crontype = 'soft'
fcgi.WSGIServer(application, bindAddress='/tmp/fcgi.sock').run()
|
{
"content_hash": "fa2dae8514e38e19da27e9ad1b945134",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 69,
"avg_line_length": 28.057692307692307,
"alnum_prop": 0.6196024674434544,
"repo_name": "SEA000/uw-empathica",
"id": "e81d7ee37a5649357b7b2e8a7f7b7e655fe70edc",
"size": "1506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "empathica/fcgihandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "127034"
},
{
"name": "JavaScript",
"bytes": "981904"
},
{
"name": "PHP",
"bytes": "15326"
},
{
"name": "Python",
"bytes": "3911190"
},
{
"name": "Shell",
"bytes": "31485"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from db.db_manager import db_sync_manager
from mapper.utils.filter import PathFinderTNtoSDNFilterUtils as FilterUtils
from mapper.utils.format import PathFinderTNtoSDNFormatUtils as FormatUtils
from mapper.utils.combination import PathFinderTNtoSDNCombinationUtils as CombinationUtils
from pprint import pprint
#import itertools
class PathFinderTNtoSDN(object):
def __init__(self, source_tn, destination_tn, *args, **kwargs):
# CIDs of source and destination TN endpoints
self.src_dom = source_tn
self.dst_dom = destination_tn
# Link type can be "nsi" or "gre". Empty means "all"
self.link_type = kwargs.get("link_type", "")
# Filters to match against required switches
self.src_of_cids = kwargs.get("src_of_switch_cids", [])
self.dst_of_cids = kwargs.get("dst_of_switch_cids", [])
self.of_cids_check_by_auth = kwargs.get("of_switch_cids_check_by_auth", False)
# Dummy list to reduce lines of code
self.src_dst_values = [ "src", "dst" ]
# Nodes and links from database
self.tn_nodes = [ x for x in db_sync_manager.get_tn_nodes() ]
self.se_links = [ x for x in db_sync_manager.get_se_links() ]
# Mapping structure to be returned is a list of possible src-dst paths
self.mapping_tn_se_of = []
self.organisation_name_mappings = {
"psnc": ["pionier"],
"iminds": ["iMinds"],
"kddi": ["jgn-x.jp"],
}
# Update with parameters passed
self.__dict__.update(kwargs)
def get_organisation_mappings(self, organisation_name):
# Return possible alternatives, given an organisation name
return self.organisation_name_mappings.get(organisation_name, [organisation_name])
def format_verify_tn_interface(self, tn_interface):
# Ensure that the TN interfaces match with their original names
# under resource.tn.node. This is performed to restore the
# component_id values, previously changed
tn_interfaces_cids = self.get_tn_interfaces_cids(clean=False)
return FormatUtils.format_verify_tn_interface(tn_interfaces_cids, tn_interface)
def get_tn_interfaces_cids(self, clean=False):
# Return a list with the component_id values for the TN interfaces
tn_interfaces = set()
for tn_node in self.tn_nodes:
tn_interfaces.update(FormatUtils.get_tn_interfaces_cid_from_node(tn_node, clean))
return tn_interfaces
def get_se_interfaces_cids(self, clean=False):
# Return a list with the component_id values for the SE interfaces
se_interfaces = set()
for se_link in self.se_links:
se_interfaces.add(FilterUtils.get_se_interfaces_cid_from_link(se_link, clean))
return se_interfaces
def find_tn_interfaces_for_domain(self, domain_name):
# Given a domain name (e.g. "kddi", "aist"), find possible TN interfaces
tn_interfaces_cids = self.get_tn_interfaces_cids(clean=True)
domain_names_alt = self.get_organisation_mappings(domain_name)
return FilterUtils.find_tn_interfaces_for_domain(tn_interfaces_cids, domain_names_alt, domain_name)
def filter_tn_interfaces_by_type(self, tn_interfaces_cids, link_type=""):
return FilterUtils.filter_tn_interfaces_by_type(tn_interfaces_cids, link_type)
def find_se_interfaces_for_tn_interface(self, tn_interface):
return FilterUtils.find_se_interfaces_for_tn_interface(self.se_links, tn_interface)
def find_se_interfaces_for_domain_names(self, src_domain, dst_domain):
mappings = self.organisation_name_mappings
return FilterUtils.find_se_interfaces_for_domain_names(self.se_links, mappings, src_domain, dst_domain)
def find_sdn_interfaces_for_se_interface(self, se_interface, negative_filter=[], possitive_filter=[""]):
return FilterUtils.find_sdn_interfaces_for_se_interface(self.se_links, se_interface, negative_filter, possitive_filter)
def find_se_sdn_links_for_se_node(self, se_node, negative_filter=[], possitive_filter=[""]):
return FilterUtils.find_se_sdn_links_for_se_node(self.se_links, se_node, negative_filter, possitive_filter)
def find_path_tn(self):
# Retrieve list of CIDs for TNRM interfaces
tn_interfaces_cids = self.get_tn_interfaces_cids(clean=True)
# Get proper TN interfaces for both SRC and DST TN interfaces
self.mapping_tn_se_of_src_partial = {}
self.mapping_tn_se_of_dst_partial = {}
# Get proper TN interfaces for (SRC, DST) TN interface
for src_dst_value in self.src_dst_values:
# Do a first clean of SRC and DST interface
src_dst_cid = FormatUtils.clean_tn_stp_cid(getattr(self, "%s_dom" % src_dst_value))
dst_src_tn_interface_found = False
# Use getattr/setattr keyed on "src"/"dst" so both endpoints
# are handled by the same code path in a single place
for tn_interface_cid in tn_interfaces_cids:
if src_dst_cid in tn_interface_cid and src_dst_cid.startswith("urn"):
dst_src_tn_interface_found = True
break
if dst_src_tn_interface_found == True:
setattr(self, "tn_candidates_%s" % src_dst_value, [ src_dst_cid ])
else:
# Set is converted to a list for ease of use
list_interfaces = map(list, self.find_tn_interfaces_for_domain(src_dst_cid))[0]
# NOTE: only the first TN interface is retrieved...
# Filter by link type, if requested by user
setattr(self, "tn_candidates_%s" % src_dst_value, list(\
self.filter_tn_interfaces_by_type(list_interfaces, self.link_type)))
# Initialize structure with dictionary and append SRC and DST interfaces to the set
setattr(self, "mapping_tn_se_of_%s_partial" % src_dst_value, { "tn": set() })
for tn_candidate in getattr(self, "tn_candidates_%s" % src_dst_value):
mapping_partial = getattr(self, "mapping_tn_se_of_%s_partial" % src_dst_value)
mapping_partial["tn"].add(tn_candidate)
# Place every path into the final structure
#combinations_src_dst_stps = zip(self.mapping_tn_se_of_src_partial["tn"], self.mapping_tn_se_of_dst_partial["tn"])
# Find all possible combinations (order-independent)
src_stps = self.mapping_tn_se_of_src_partial["tn"]
dst_stps = self.mapping_tn_se_of_dst_partial["tn"]
combinations_src_dst_stps = CombinationUtils.yield_combinations_stp_pairs(src_stps, dst_stps)
# Filter out combinations whose STP have different types (i.e. NSI-GRE)
combinations_src_dst_stps_filter = []
for src_dst_stp in combinations_src_dst_stps:
stp_link_tmp = FilterUtils.ensure_same_type_tn_interfaces([src_dst_stp[0], src_dst_stp[1]])
if len(stp_link_tmp) == 2:
combinations_src_dst_stps_filter.append(stp_link_tmp)
combinations_src_dst_stps = combinations_src_dst_stps_filter
for tn_src_dst_pair in combinations_src_dst_stps:
# Tuple: 1st element (src), 2nd element (dst)
self.mapping_tn_se_of.append({"src": {"tn": tn_src_dst_pair[0]}, "dst": {"tn": tn_src_dst_pair[1]}})
def find_path_se(self):
# Get SE interfaces for both SRC and DST TN interfaces
for path_source in self.mapping_tn_se_of:
for src_dst_value in self.src_dst_values:
# Preparing list of links for SE-SDN
path_source[src_dst_value]["links"] = []
se_candidates = self.find_se_interfaces_for_tn_interface(path_source[src_dst_value]["tn"])
# Fill mapping structure
path_source[src_dst_value]["se"] = ""
if len(se_candidates) > 0:
path_source[src_dst_value]["se"] = se_candidates[0]
# Get SE interfaces without previous TN info
# (case of static links between islands)
# Assumption: name of 2 different islands/domains is provided
if len(self.mapping_tn_se_of) == 0:
partial_mapping = self.find_se_interfaces_for_domain_names(self.src_dom, self.dst_dom)
mapping_tn_se_of_path = {}
for src_dst_value in self.src_dst_values:
src_dst_value_struct = {}
for part in partial_mapping:
src_dst_domain = getattr(self, "%s_dom" % src_dst_value)
index_serm = [ src_dst_domain in l and "serm" in l for l in list(part) ].index(True)
index_sdnrm = len(part) - index_serm - 1
src_dst_value_struct = {}
src_dst_value_struct["se"] = part[index_serm]
part_mapping_sdn = part[index_sdnrm] if "ofam" in part[index_sdnrm] else None
# Only add proper links structure when both endpoints (SDN, SE) are correct
src_dst_value_struct["links"] = []
if part_mapping_sdn is not None:
src_dst_value_struct["links"] = [{"se": part[index_serm], "sdn": part_mapping_sdn }]
# Add SE-SE paths for SRC and DST
mapping_tn_se_of_path[src_dst_value] = src_dst_value_struct
# Append to final structure
self.mapping_tn_se_of.append(mapping_tn_se_of_path)
def find_path_sdn(self):
# Get SDN interfaces for (SRC, DST) SE interface
negative_filter = [ "tnrm" ]
for path_source in self.mapping_tn_se_of:
for src_dst_value in self.src_dst_values:
# Domains connected through the VPN may not have SE links (skip)
if "se" not in path_source[src_dst_value]:
return
#possitive_filter_of_switches = [ FormatUtils.remove_port_cid(f) for f in getattr(self, "%s_of_cids" % src_dst_value) ]
se_interface = path_source[src_dst_value]["se"]
# Possible SE-SDN links
sdn_candidates = []
if se_interface is not None and len(se_interface) > 0:
# Search for *every* connection between SE and SDN devices
se_node = FormatUtils.remove_port_cid(se_interface)
sdn_candidates = self.find_se_sdn_links_for_se_node(se_node, negative_filter)
for se_sdn_link in sdn_candidates:
se_sdn_link = FormatUtils.format_verify_se_sdn_links(se_sdn_link)
path_source[src_dst_value]["links"].append(se_sdn_link)
def format_structure(self):
# Restore the full CID of the source and destination TN interfaces
for mapping in self.mapping_tn_se_of:
for src_dst_value in self.src_dst_values:
# Domains connected through static links may not have "tn" data
if "tn" in mapping[src_dst_value]:
mapping[src_dst_value]["tn"] = self.format_verify_tn_interface(mapping[src_dst_value]["tn"])
# Remove paths where either source or destination are invalid
self.mapping_tn_se_of = FilterUtils.prune_invalid_paths(self.mapping_tn_se_of)
self.mapping_tn_se_of = FilterUtils.prune_unlinked_dpids(self.mapping_tn_se_of, self.src_of_cids, \
self.dst_of_cids, self.of_cids_check_by_auth)
return self.mapping_tn_se_of
def find_paths(self):
# Find path from given TN to SDN, passing through SE
self.find_path_tn()
self.find_path_se()
self.find_path_sdn()
# Prepare structure (clean up, correct, etc)
self.mapping_tn_se_of = self.format_structure()
return self.mapping_tn_se_of
if __name__ == "__main__":
error_help = "Error using mapper. Usage: %s <src> <dst> [nsi|gre]" % (__file__)
# SRC and DST are required
if len(sys.argv) >= 3:
src_name = sys.argv[1]
dst_name = sys.argv[2]
else:
# src_name = "urn:publicid:IDN+fms:aist:tnrm+stp+urn:ogf:network:pionier.net.pl:2013:topology:felix-ge-1-0-3"
# dst_name = "urn:publicid:IDN+fms:aist:tnrm+stp+urn:ogf:network:jgn-x.jp:2013:topology:bi-felix-kddi-stp1"
src_name = "psnc"
dst_name = "aist"
# sys.exit(error_help)
# Link type is optional
if len(sys.argv) >= 4:
link_type = sys.argv[3]
else:
# link_type = "nsi"
link_type = ""
# src_of_switch_cids = [ "i2cat" ]
# dst_of_switch_cids = [ "aist" ]
src_of_switch_cids = ['urn:publicid:IDN+openflow:ocf:psnc:ofam+datapath+00:00:08:81:f4:88:f5:b0_13', 'urn:publicid:IDN+openflow:ocf:psnc:ofam+datapath+00:00:08:81:f4:88:f5:b0_17']
dst_of_switch_cids = ['urn:publicid:IDN+openflow:ocf:aist:ofam+datapath+00:00:00:00:00:00:00:01_3', 'urn:publicid:IDN+openflow:ocf:aist:ofam+datapath+00:00:00:00:00:00:00:01_5']
# Note: restrictions (src_of_switch_cids, dst_of_switch_cids)
# only to be explicitly passed (otherwise it will probably fail)
optional = {
# "src_of_switch_cids": src_of_switch_cids,
# "dst_of_switch_cids": dst_of_switch_cids,
"of_switch_cids_check_by_auth": True,
"link_type": link_type,
}
path_finder_tn_sdn = PathFinderTNtoSDN(src_name, dst_name, **optional)
pprint(path_finder_tn_sdn.find_paths())
|
{
"content_hash": "e43074e69a5cc3fe341127384ed66069",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 183,
"avg_line_length": 53.4901185770751,
"alnum_prop": 0.6213699844823765,
"repo_name": "dana-i2cat/felix",
"id": "413d6d18cdac45396de107e8f415fa6d116591c6",
"size": "13533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/resource/orchestrator/src/mapper/path_finder_tn_to_sdn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "DTrace",
"bytes": "370"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11211"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "7875883"
},
{
"name": "Shell",
"bytes": "258079"
}
],
"symlink_target": ""
}
|
AWS = {
'key': '',
'secret': '',
'use_safe_connection': True,
'validate_certs': True,
}
DEBUG = False
# From AppEngine documentation (http://goo.gl/AO8lt):
# For security purposes, the sender address of a message must be the email
# address of an administrator for the application or any valid email receiving
# address for the app (see Receiving Mail). The sender can also be the
# Google Account email address of the current user who is signed in, if the
# user's account is a Gmail account or is on a domain managed by Google Apps.
LOGGER = {
'sender': 'Cloudsnap Logger<logger@appid.appspotmail.com>',
'to': 'Your Email<you@yourdomain.com>',
}
# Template of the generated AMI's
# %(today) the current date in the format yyyy-mm-dd
# %(name) the instance name (falls back to instance_id when not found)
# %(instance_id)
AMI_NAME_TEMPLATE = "%(today)-%(name)"
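# Illustrative sketch (not part of the original settings file): one way the
# placeholders documented above could be substituted. The replace-based rendering
# and the helper name are assumptions; the real logic lives elsewhere in cloudsnap.
def _render_ami_name_example(template, today, name, instance_id):
    return (template.replace("%(today)", today)
                    .replace("%(name)", name)
                    .replace("%(instance_id)", instance_id))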
|
{
"content_hash": "2690a486f7448448e1ecebb4329c55c7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 35.56,
"alnum_prop": 0.7019122609673791,
"repo_name": "renanivo/cloudsnap",
"id": "e1f96a13399b969db1c6a48b70ec3713d3741e40",
"size": "889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudsnap/settings.example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18980"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_narmle_rifleman_rodian_male_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","rodian_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "b772b29d5e60bdcafba26cd47629a7e5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.7021943573667712,
"repo_name": "anhstudios/swganh",
"id": "b44a247e66d30168c714355bb76f391c88cc395b",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_narmle_rifleman_rodian_male_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from casexml.apps.stock.consumption import compute_consumption_or_default
from casexml.apps.stock.utils import get_current_ledger_state
from dimagi.utils.parsing import json_format_datetime
from datetime import datetime
from casexml.apps.stock.const import COMMTRACK_REPORT_XMLNS
def get_stock_payload(project, stock_settings, case_stub_list):
if project and not project.commtrack_enabled:
return
from lxml.builder import ElementMaker
E = ElementMaker(namespace=COMMTRACK_REPORT_XMLNS)
def entry_xml(id, quantity):
return E.entry(
id=id,
quantity=str(int(quantity)),
)
def state_to_xml(state):
return entry_xml(state.product_id, state.stock_on_hand)
def consumption_entry(case_id, product_id, section_id):
consumption_value = compute_consumption_or_default(
case_id,
product_id,
datetime.utcnow(),
section_id,
stock_settings.consumption_config
)
if consumption_value is not None:
return entry_xml(product_id, consumption_value)
case_ids = [case.case_id for case in case_stub_list]
all_current_ledgers = get_current_ledger_state(case_ids)
for commtrack_case_stub in case_stub_list:
case_id = commtrack_case_stub.case_id
current_ledgers = all_current_ledgers[case_id]
section_product_map = defaultdict(lambda: [])
section_timestamp_map = defaultdict(lambda: json_format_datetime(datetime.utcnow()))
for section_id in sorted(current_ledgers.keys()):
state_map = current_ledgers[section_id]
sorted_product_ids = sorted(state_map.keys())
stock_states = [state_map[p] for p in sorted_product_ids]
as_of = json_format_datetime(max(txn.last_modified_date for txn in stock_states))
section_product_map[section_id] = sorted_product_ids
section_timestamp_map[section_id] = as_of
yield E.balance(*(state_to_xml(e) for e in stock_states),
**{'entity-id': case_id, 'date': as_of, 'section-id': section_id})
for section_id, consumption_section_id in stock_settings.section_to_consumption_types.items():
if (section_id in current_ledgers or
stock_settings.force_consumption_case_filter(commtrack_case_stub)):
consumption_product_ids = stock_settings.default_product_list \
if stock_settings.default_product_list \
else section_product_map[section_id]
consumption_entries = filter(lambda e: e is not None, [
consumption_entry(case_id, p, section_id)
for p in consumption_product_ids
])
if consumption_entries:
yield E.balance(
*consumption_entries,
**{
'entity-id': case_id,
'date': section_timestamp_map[section_id],
'section-id': consumption_section_id,
}
)
|
{
"content_hash": "62c06702d566918ddbfa0ce7d51e787a",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 102,
"avg_line_length": 42.26315789473684,
"alnum_prop": 0.6014943960149439,
"repo_name": "puttarajubr/commcare-hq",
"id": "975f44f633451d760bd2372fb9c6cfd5edaab530",
"size": "3212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/ex-submodules/casexml/apps/phone/data_providers/case/stock.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from django.utils import six
import django_comments
import bogofilter
from bogofilter.models import BogofilterComment
from bogofilter.forms import BogofilterCommentForm
from . import CommentTestCase
class CommentAppAPITests(CommentTestCase):
"""Tests for the "comment app" API"""
def testGetCommentApp(self):
self.assertEqual(django_comments.get_comment_app(), bogofilter)
@override_settings(
COMMENTS_APP='missing_app',
INSTALLED_APPS=list(settings.INSTALLED_APPS) + ['missing_app'],
)
def testGetMissingCommentApp(self):
with six.assertRaisesRegex(self, ImproperlyConfigured, 'missing_app'):
_ = django_comments.get_comment_app()
def testGetForm(self):
self.assertEqual(django_comments.get_form(), BogofilterCommentForm)
def testGetFormTarget(self):
self.assertEqual(django_comments.get_form_target(), "/post/")
def testGetFlagURL(self):
c = BogofilterComment(id=12345)
self.assertEqual(django_comments.get_flag_url(c), "/flag/12345/")
def getGetDeleteURL(self):
c = BogofilterComment(id=12345)
self.assertEqual(django_comments.get_delete_url(c), "/delete/12345/")
def getGetApproveURL(self):
c = BogofilterComment(id=12345)
self.assertEqual(django_comments.get_approve_url(c), "/approve/12345/")
@override_settings(
COMMENTS_APP='custom_comments',
INSTALLED_APPS=list(settings.INSTALLED_APPS) + [
'custom_comments'],
)
class CustomCommentTest(CommentTestCase):
urls = 'testapp.urls'
def testGetCommentApp(self):
import custom_comments
self.assertEqual(django_comments.get_comment_app(), custom_comments)
def testGetModel(self):
from custom_comments.models import CustomComment
self.assertEqual(django_comments.get_model(), CustomComment)
def testGetForm(self):
from custom_comments.forms import CustomCommentForm
self.assertEqual(django_comments.get_form(), CustomCommentForm)
def testGetFormTarget(self):
self.assertEqual(django_comments.get_form_target(), "/post/")
def testGetFlagURL(self):
c = BogofilterComment(id=12345)
self.assertEqual(django_comments.get_flag_url(c), "/flag/12345/")
def getGetDeleteURL(self):
c = BogofilterComment(id=12345)
self.assertEqual(django_comments.get_delete_url(c), "/delete/12345/")
def getGetApproveURL(self):
c = BogofilterComment(id=12345)
self.assertEqual(django_comments.get_approve_url(c), "/approve/12345/")
|
{
"content_hash": "4436d18172a613e7155b47e4dbe1d614",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 33.97530864197531,
"alnum_prop": 0.7074854651162791,
"repo_name": "stefantalpalaru/django-bogofilter",
"id": "756a27dee8d201252bf092e5f3258dfaa66503af",
"size": "2752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp/tests/app_api_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "80449"
}
],
"symlink_target": ""
}
|
import numpy
def readBamp(filename):
"""
This is a general bamp reading function. returns list of numpy.complexes
representing the complex amplitudes of the wave represented by the file.
"""
temp1=numpy.fromfile(file=filename,dtype=numpy.dtype('f8'))
temp2=temp1.reshape((2,-1),order='F')
temp3=[]
for lineNumber in range(temp2.shape[1]):
temp3.append(numpy.complex(temp2[0,lineNumber],temp2[1,lineNumber]))
return temp3
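# Illustrative usage sketch (not part of the original module): writing a tiny
# bamp-style file and reading it back. The on-disk layout of interleaved
# (real, imaginary) float64 pairs is inferred from the reshape with order='F'
# above; the file name and helper name are hypothetical.
def roundTripExample(filename="example.bamp"):
    # Two complex amplitudes: 1+2j and 0.5-0.5j
    numpy.array([1.0, 2.0, 0.5, -0.5]).astype('f8').tofile(filename)
    return readBamp(filename)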
|
{
"content_hash": "008758883a36d41ae043ccd9db27cffa",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 77,
"avg_line_length": 35.84615384615385,
"alnum_prop": 0.6974248927038627,
"repo_name": "bdell/pyPWA",
"id": "6555efc3e6004c5ebdc2f0cedc3799204d62adee",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonPWA/fileHandlers/bampReader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53884"
}
],
"symlink_target": ""
}
|
from typing import Any
from django.conf.urls import include
from django.urls import path
from django.views.generic import RedirectView, TemplateView
from corporate.views.billing_page import billing_home, update_plan
from corporate.views.event_status import event_status, event_status_page
from corporate.views.portico import (
app_download_link_redirect,
apps_view,
communities_view,
hello_view,
landing_view,
plans_view,
team_view,
)
from corporate.views.session import (
start_card_update_stripe_session,
start_retry_payment_intent_session,
)
from corporate.views.support import support_request
from corporate.views.upgrade import initial_upgrade, sponsorship, upgrade
from corporate.views.webhook import stripe_webhook
from zerver.lib.rest import rest_path
from zerver.lib.url_redirects import LANDING_PAGE_REDIRECTS
i18n_urlpatterns: Any = [
# Zephyr/MIT
path("zephyr/", TemplateView.as_view(template_name="corporate/zephyr.html")),
path("zephyr-mirror/", TemplateView.as_view(template_name="corporate/zephyr-mirror.html")),
path("jobs/", TemplateView.as_view(template_name="corporate/jobs.html")),
# Billing
path("billing/", billing_home, name="billing_home"),
path("upgrade/", initial_upgrade, name="initial_upgrade"),
path("support/", support_request),
path("billing/event_status/", event_status_page, name="event_status_page"),
path("stripe/webhook/", stripe_webhook, name="stripe_webhook"),
]
v1_api_and_json_patterns = [
rest_path("billing/upgrade", POST=upgrade),
rest_path("billing/sponsorship", POST=sponsorship),
rest_path("billing/plan", PATCH=update_plan),
rest_path("billing/session/start_card_update_session", POST=start_card_update_stripe_session),
rest_path(
"billing/session/start_retry_payment_intent_session",
POST=start_retry_payment_intent_session,
),
rest_path("billing/event/status", GET=event_status),
]
landing_page_urls = [
# Landing page, features pages, signup form, etc.
path("hello/", hello_view),
path("features/", landing_view, {"template_name": "corporate/features.html"}),
path("plans/", plans_view, name="plans"),
path("apps/", apps_view),
path("apps/download/<platform>", app_download_link_redirect),
path("apps/<platform>", apps_view),
path(
"development-community/",
landing_view,
{"template_name": "corporate/development-community.html"},
),
path("attribution/", landing_view, {"template_name": "corporate/attribution.html"}),
path("team/", team_view),
path("history/", landing_view, {"template_name": "corporate/history.html"}),
path("values/", landing_view, {"template_name": "corporate/values.html"}),
path("why-zulip/", landing_view, {"template_name": "corporate/why-zulip.html"}),
path("self-hosting/", landing_view, {"template_name": "corporate/self-hosting.html"}),
path("security/", landing_view, {"template_name": "corporate/security.html"}),
# /for pages
path("use-cases/", landing_view, {"template_name": "corporate/for/use-cases.html"}),
path(
"for/communities/",
landing_view,
{"template_name": "corporate/for/communities.html"},
),
path("for/education/", landing_view, {"template_name": "corporate/for/education.html"}),
path("for/events/", landing_view, {"template_name": "corporate/for/events.html"}),
path("for/open-source/", landing_view, {"template_name": "corporate/for/open-source.html"}),
path("for/research/", landing_view, {"template_name": "corporate/for/research.html"}),
path("for/business/", landing_view, {"template_name": "corporate/for/business.html"}),
# case-studies
path(
"case-studies/idrift/",
landing_view,
{"template_name": "corporate/case-studies/idrift-case-study.html"},
),
path(
"case-studies/tum/",
landing_view,
{"template_name": "corporate/case-studies/tum-case-study.html"},
),
path(
"case-studies/ucsd/",
landing_view,
{"template_name": "corporate/case-studies/ucsd-case-study.html"},
),
path(
"case-studies/rust/",
landing_view,
{"template_name": "corporate/case-studies/rust-case-study.html"},
),
path(
"case-studies/lean/",
landing_view,
{"template_name": "corporate/case-studies/lean-case-study.html"},
),
path(
"case-studies/asciidoctor/",
landing_view,
{"template_name": "corporate/case-studies/asciidoctor-case-study.html"},
),
path(
"case-studies/recurse-center/",
landing_view,
{"template_name": "corporate/case-studies/recurse-center-case-study.html"},
),
path("communities/", communities_view),
]
# Redirects due to us having moved or combined landing pages:
for redirect in LANDING_PAGE_REDIRECTS:
old_url = redirect.old_url.lstrip("/")
landing_page_urls += [path(old_url, RedirectView.as_view(url=redirect.new_url, permanent=True))]
i18n_urlpatterns += landing_page_urls
# Make a copy of i18n_urlpatterns so that they appear without prefix for English
urlpatterns = list(i18n_urlpatterns)
urlpatterns += [
path("api/v1/", include(v1_api_and_json_patterns)),
path("json/", include(v1_api_and_json_patterns)),
]
|
{
"content_hash": "d7f0737fe2bef40129a14caaffe11cea",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 100,
"avg_line_length": 38.96350364963504,
"alnum_prop": 0.6702884975646309,
"repo_name": "zulip/zulip",
"id": "134c6531d270c49b7ffabcf5e3c437e495895795",
"size": "5338",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "corporate/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "509211"
},
{
"name": "Dockerfile",
"bytes": "4219"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "696430"
},
{
"name": "Handlebars",
"bytes": "384277"
},
{
"name": "JavaScript",
"bytes": "4098367"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112433"
},
{
"name": "Python",
"bytes": "10336945"
},
{
"name": "Ruby",
"bytes": "3166"
},
{
"name": "Shell",
"bytes": "147162"
},
{
"name": "TypeScript",
"bytes": "286785"
}
],
"symlink_target": ""
}
|
from rest_framework import viewsets
from .serializers import BookSerializer
from .models import Book
class BookViewSet(viewsets.ModelViewSet):
"""
API endpoint for my very sophisticated book model
"""
queryset = Book.objects.all()
serializer_class = BookSerializer
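# Illustrative sketch (not part of the original file): registering the viewset
# above with DRF's DefaultRouter, as would typically be done in urls.py.
# The "books" URL prefix is an assumption for demonstration.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r'books', BookViewSet)
# urlpatterns = router.urls  (normally exposed from urls.py)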
|
{
"content_hash": "d8da16f0a678b1029e523b62961aacb9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 53,
"avg_line_length": 28.6,
"alnum_prop": 0.7517482517482518,
"repo_name": "stephanpoetschner/django-vienna",
"id": "c8c0cf7c0f6853524cdd6910dbec130a35c997e6",
"size": "286",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "2014/03/django-rest-framework/src/demo2/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "131200"
},
{
"name": "HTML",
"bytes": "51555"
},
{
"name": "JavaScript",
"bytes": "215136"
},
{
"name": "Python",
"bytes": "85666"
}
],
"symlink_target": ""
}
|
"""
test_sample_project
----------------------------------
Tests for `sample_project` module.
"""
import unittest
from sample_project import sample_project
class TestSample_project(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
assert 2 == 3
def test_something_else(self):
print "I am running something else"
assert 3 == 4
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "b1de9d4ebe6f237c4f907c9c6a1550a6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 44,
"avg_line_length": 16.586206896551722,
"alnum_prop": 0.5758835758835759,
"repo_name": "saksham/sample_project",
"id": "4884ab6fb5da63c29dc0b24cc875cfc701b9b9c8",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sample_project.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10535"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
}
|
import logging
import os
import shutil
import sys
import unittest
import environment
import utils
import tablet
tablet_62344 = tablet.Tablet(62344)
tablet_62044 = tablet.Tablet(62044)
tablet_41983 = tablet.Tablet(41983)
tablet_31981 = tablet.Tablet(31981)
zkocc_server = None
def setUpModule():
try:
utils.zk_setup()
zkocc_server = utils.zkocc_start()
# start mysql instance external to the test
setup_procs = [
tablet_62344.init_mysql(),
tablet_62044.init_mysql(),
tablet_41983.init_mysql(),
tablet_31981.init_mysql(),
]
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
teardown_procs = [
tablet_62344.teardown_mysql(),
tablet_62044.teardown_mysql(),
tablet_41983.teardown_mysql(),
tablet_31981.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
if zkocc_server:
utils.zkocc_kill(zkocc_server)
utils.zk_teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
tablet_62344.remove_tree()
tablet_62044.remove_tree()
tablet_41983.remove_tree()
tablet_31981.remove_tree()
path = os.path.join(environment.vtdataroot, 'snapshot')
try:
shutil.rmtree(path)
except OSError as e:
logging.debug("removing snapshot %s: %s", path, str(e))
class TestBarnacle(unittest.TestCase):
def tearDown(self):
tablet.Tablet.check_vttablet_count()
utils.zk_wipe()
for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
t.reset_replication()
t.clean_dbs()
def test_sanity(self):
self._test_sanity()
print "hit enter"
sys.stdin.readline()
def _test_sanity(self):
# Start up a master mysql and vttablet
utils.run_vtctl('CreateKeyspace -force test_keyspace')
utils.run_vtctl('CreateShard -force test_keyspace/0')
tablet_62344.init_tablet('master', 'test_keyspace', '0', parent=False)
utils.run_vtctl('RebuildKeyspaceGraph test_keyspace')
utils.validate_topology()
# if these statements don't run before the tablet starts, it will wedge waiting
# for the db to become accessible. this is more a bug than a feature.
tablet_62344.populate('vt_test_keyspace', self._create_vt_select_test,
self._populate_vt_select_test)
tablet_62344.start_vttablet()
# make sure the query service is started right away
result, _ = utils.run_vtctl('Query test_nj test_keyspace "select * from vt_select_test"', trap_output=True)
rows = result.splitlines()
self.assertEqual(len(rows), 5, "expected 5 rows in vt_select_test: %s %s" % (str(rows), result))
# check Pings
utils.run_vtctl('Ping ' + tablet_62344.tablet_alias)
utils.run_vtctl('RpcPing ' + tablet_62344.tablet_alias)
utils.validate_topology()
utils.run_vtctl('ValidateKeyspace test_keyspace')
# not pinging tablets, as it enables replication checks, and they
# break because we only have a single master, no slaves
utils.run_vtctl('ValidateShard -ping-tablets=false test_keyspace/0')
_create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
_populate_vt_insert_test = [
"insert into vt_insert_test (msg) values ('test %s')" % x
for x in xrange(4)]
_create_vt_select_test = '''create table vt_select_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
_populate_vt_select_test = [
"insert into vt_select_test (msg) values ('test %s')" % x
for x in xrange(4)]
if __name__ == '__main__':
utils.main()
|
{
"content_hash": "44da325569ae5add1f69195991ca9322",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 111,
"avg_line_length": 28.697674418604652,
"alnum_prop": 0.6742301458670988,
"repo_name": "apmichaud/vitess-apm",
"id": "666d18dfbebb622bd552096c82d1ae6f33446397",
"size": "3721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/barnacle_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39092"
},
{
"name": "Go",
"bytes": "2605337"
},
{
"name": "Java",
"bytes": "100152"
},
{
"name": "Python",
"bytes": "604118"
},
{
"name": "Shell",
"bytes": "11552"
}
],
"symlink_target": ""
}
|
"""
Handler for create collection wizard.
"""
from datafinder.gui.user.dialogs.creation_wizard.constants import PROPERTY_PAGE_ID, SOURCE_PAGE_ID
from datafinder.gui.user.dialogs.creation_wizard.state_handler.base_state_handler import BaseStateHandler
from datafinder.gui.user.models.repository.filter.leaf_filter import LeafFilter
__version__ = "$Revision-Id:$"
class CreateCollectionHandler(BaseStateHandler):
""" Handles collection creation. """
WINDOW_TITLE = "New Collection"
_PAGEID_TITLE_SUBTITLE_MAP = {SOURCE_PAGE_ID: ("Collection", "Creates a new collection."),
PROPERTY_PAGE_ID: ("Collection Properties", "Please attach additional information to the collection.")}
_ITEMNAME_LABEL_TEXT = "Collection name:"
def __init__(self, wizard):
""" Constructor. """
BaseStateHandler.__init__(self, wizard)
self._repositoryModel = wizard.sourceRepositoryModel
self._currentSourceIndex = None
self.lockIndex = None # Redefining it because check-in pylint wants it
def nextId(self):
""" Returns the identifier of the next page. """
nextId = -1
if self._repositoryModel.hasCustomMetadataSupport \
and self._wizard.currentId() == SOURCE_PAGE_ID:
nextId = PROPERTY_PAGE_ID
return nextId
def initializePage(self, identifier):
""" Performs initialization actions for the wizard page with the given identifier. """
if identifier == SOURCE_PAGE_ID:
self._wizard.configureSourceItemPage(LeafFilter(self._repositoryModel),
[self._repositoryModel.activeIndex],
self._ITEMNAME_LABEL_TEXT,
self._repositoryModel.isManagedRepository)
else:
indexChanged = self._currentSourceIndex != self._wizard.sourceIndexes[0]
self._currentSourceIndex = self._wizard.sourceIndexes[0]
self._wizard.configurePropertyPage(self._repositoryModel, True, self._currentSourceIndex, indexChanged)
def prepareFinishSlot(self):
""" Performs the finish slot preparation. """
self.lockIndex = self._wizard.sourceIndexes[0]
self._repositoryModel.lock([self.lockIndex])
def finishSlotCallback(self):
""" Unlocks the lock index. """
self._repositoryModel.unlock(self.lockIndex)
self._repositoryModel.activeIndex = self.lockIndex
def finishSlot(self):
""" Performs specific actions when the user commits his parameters. """
self._repositoryModel.createCollection(self._wizard.sourceItemName,
self.lockIndex,
self._wizard.properties)
|
{
"content_hash": "82e086cfb96e69c82fd147993dcc9fdb",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 137,
"avg_line_length": 41.916666666666664,
"alnum_prop": 0.5964214711729622,
"repo_name": "DLR-SC/DataFinder",
"id": "c50b00f3ece86d4fd67fc9816718c9462d501cb9",
"size": "4712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datafinder/gui/user/dialogs/creation_wizard/state_handler/create_collection_state_handler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
}
|
from ctypes import cdll, c_double, c_int32
import os
lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__), "src/libgomonte.so"))
estimate_pi = lib.EstimatePi
lib.EstimatePi.restype = c_double
lib.EstimatePi.argtypes = [c_int32, c_int32]
def estimate_pi(sims, needles):
return lib.EstimatePi(sims, needles)
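# Illustrative usage sketch (not part of the original file): calling the wrapped
# Go function with a growing number of simulations. The meaning of the two integer
# arguments (simulations, needles per simulation) is an assumption based on the
# ctypes signature declared above.
if __name__ == "__main__":
    for sims in (10, 100, 1000):
        print(estimate_pi(sims, 10000))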
|
{
"content_hash": "00b3a1de0e1bd20ed3f3439061837717",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 84,
"avg_line_length": 32.1,
"alnum_prop": 0.7414330218068536,
"repo_name": "domluna/fun_with_ffi",
"id": "d9855ead81c32e35e4be8f97888c2a0c4f25c649",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monte_go/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1322"
},
{
"name": "Go",
"bytes": "913"
},
{
"name": "Python",
"bytes": "2436"
},
{
"name": "Rust",
"bytes": "1125"
}
],
"symlink_target": ""
}
|
import smbl
|
{
"content_hash": "1fa606abbccd32eaa232617805883c1a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 11,
"avg_line_length": 12,
"alnum_prop": 0.8333333333333334,
"repo_name": "karel-brinda/smbl",
"id": "610a76d8eeb4aecec8e9a2e68a712e3b1b56f190",
"size": "12",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smbl/utils/clean.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60847"
},
{
"name": "Shell",
"bytes": "2199"
}
],
"symlink_target": ""
}
|
"""``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import os
import sys
import textwrap
import types
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import PY3, raise_exc_info
try:
try:
from functools import singledispatch # py34+
except ImportError:
from singledispatch import singledispatch # backport
except ImportError:
# In most cases, singledispatch is required (to avoid
# difficult-to-diagnose problems in which the functionality
# available differs depending on which invisible packages are
# installed). However, in Google App Engine third-party
# dependencies are more trouble so we allow this module to be
# imported without it.
if 'APPENGINE_RUNTIME' not in os.environ:
raise
singledispatch = None
try:
try:
from collections.abc import Generator as GeneratorType # py35+
except ImportError:
from backports_abc import Generator as GeneratorType
try:
from inspect import isawaitable # py35+
except ImportError:
from backports_abc import isawaitable
except ImportError:
if 'APPENGINE_RUNTIME' not in os.environ:
raise
from types import GeneratorType
def isawaitable(x):
return False
if PY3:
import builtins
else:
import __builtin__ as builtins
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def _value_from_stopiteration(e):
try:
# StopIteration has a value attribute beginning in py33.
# So does our Return class.
return e.value
except AttributeError:
pass
try:
# Cython backports coroutine functionality by putting the value in
# e.args[0].
return e.args[0]
except (AttributeError, IndexError):
return None
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
# On Python 3.5, set the coroutine flag on our generator, to allow it
# to be used with 'await'.
if hasattr(types, 'coroutine'):
func = types.coroutine(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = _value_from_stopiteration(e)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(_value_from_stopiteration(e))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
# Cython recognizes subclasses of StopIteration with a .args tuple.
self.args = (value,)
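# Illustrative sketch (not part of the original module): a minimal coroutine that
# yields Futures and produces its result with ``raise Return(value)``, as described
# in the docstrings above. The function name is hypothetical; it could be driven
# with e.g. IOLoop.current().run_sync.
@coroutine
def _example_sum(futures):
    total = 0
    for f in futures:
        value = yield f
        total += value
    raise Return(total)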
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
On Python 3.5, `WaitIterator` implements the async iterator
protocol, so it can be used with the ``async for`` statement (note
that in this version the entire iteration is aborted if any value
raises an exception, while the previous example can continue past
individual errors)::
async for result in gen.WaitIterator(future1, future2):
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
.. versionadded:: 4.1
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
for future in futures:
future.add_done_callback(self._done_callback)
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
def _done_callback(self, done):
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
@coroutine
def __aiter__(self):
raise Return(self)
def __anext__(self):
if self.done():
# Lookup by name to silence pyflakes on older versions.
raise getattr(builtins, 'StopAsyncIteration')()
return self.next()
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
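# A minimal usage sketch for `Task`: it shows how a callback-based function can
# be awaited from a coroutine. `some_async_op`, `arg1`, and `arg2` are
# hypothetical placeholders (not names defined in this module); the wrapped
# function must accept a ``callback`` keyword argument.
#
#     @coroutine
#     def caller():
#         result = yield Task(some_async_op, arg1, arg2)
#         raise Return(result)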
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result_fn = self.future.result
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result_fn()
def _contains_yieldpoint(children):
"""Returns True if ``children`` contains any YieldPoints.
``children`` may be a dict or a list, as used by `MultiYieldPoint`
and `multi_future`.
"""
if isinstance(children, dict):
return any(isinstance(i, YieldPoint) for i in children.values())
if isinstance(children, list):
return any(isinstance(i, YieldPoint) for i in children)
return False
def multi(children, quiet_exceptions=()):
"""Runs multiple asynchronous operations in parallel.
``children`` may either be a list or a dict whose values are
yieldable objects. ``multi()`` returns a new yieldable
object that resolves to a parallel structure containing their
results. If ``children`` is a list, the result is a list of
results in the same order; if it is a dict, the result is a dict
with the same keys.
That is, ``results = yield multi(list_of_futures)`` is equivalent
to::
results = []
for future in list_of_futures:
results.append(yield future)
If any children raise exceptions, ``multi()`` will raise the first
one. All others will be logged, unless they are of types
contained in the ``quiet_exceptions`` argument.
If any of the inputs are `YieldPoints <YieldPoint>`, the returned
yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`.
This means that the result of `multi` can be used in a native
coroutine if and only if all of its children can be.
In a ``yield``-based coroutine, it is not normally necessary to
call this function directly, since the coroutine runner will
do it automatically when a list or dict is yielded. However,
it is necessary in ``await``-based coroutines, or to pass
the ``quiet_exceptions`` argument.
This function is available under the names ``multi()`` and ``Multi()``
for historical reasons.
.. versionchanged:: 4.2
If multiple yieldables fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Replaced the class ``Multi`` and the function ``multi_future``
with a unified function ``multi``. Added support for yieldables
other than `YieldPoint` and `.Future`.
"""
if _contains_yieldpoint(children):
return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions)
else:
return multi_future(children, quiet_exceptions=quiet_exceptions)
Multi = multi
class MultiYieldPoint(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
This class is similar to `multi`, but it always creates a stack
context even when no children require it. It is not compatible with
native coroutines.
.. versionchanged:: 4.2
If multiple ``YieldPoints`` fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi``
remains as an alias for the equivalent `multi` function.
.. deprecated:: 4.3
Use `multi` instead.
"""
def __init__(self, children, quiet_exceptions=()):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if not isinstance(i, YieldPoint):
i = convert_yielded(i)
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
self.quiet_exceptions = quiet_exceptions
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result_list = []
exc_info = None
for f in self.children:
try:
result_list.append(f.get_result())
except Exception as e:
if exc_info is None:
exc_info = sys.exc_info()
else:
if not isinstance(e, self.quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
if exc_info is not None:
raise_exc_info(exc_info)
if self.keys is not None:
return dict(zip(self.keys, result_list))
else:
return list(result_list)
def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel.
This function is similar to `multi`, but does not support
`YieldPoints <YieldPoint>`.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. deprecated:: 4.3
Use `multi` instead.
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
children = list(map(convert_yielded, children))
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
result_list = []
for f in children:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future.set_exc_info(sys.exc_info())
if not future.done():
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
f.add_done_callback(callback)
return future
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
.. deprecated:: 4.3
This function only handles ``Futures``, not other yieldable objects.
Instead of `maybe_future`, check for the non-future result types
you expect (often just ``None``), and ``yield`` anything unknown.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
            # don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
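# A minimal usage sketch for `with_timeout`, assuming it is yielded from a
# coroutine and that `datetime` is imported. `fetch_something()` is a
# hypothetical function returning a `.Future`; the 5-second deadline is an
# arbitrary example value.
#
#     @coroutine
#     def fetch_with_deadline():
#         result = yield with_timeout(datetime.timedelta(seconds=5),
#                                     fetch_something())
#         raise Return(result)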
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
if exc_info is not None:
yielded = self.gen.throw(*exc_info)
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(_value_from_stopiteration(e))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled in convert_yielded.
if _contains_yieldpoint(yielded):
yielded = multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
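# Behaviour sketch for `_argument_adapter` (callback `cb` and the argument
# values are illustrative only):
#
#     adapter = _argument_adapter(cb)
#     adapter(1)        # -> cb(1)
#     adapter(1, 2)     # -> cb(Arguments((1, 2), {}))
#     adapter(x=1)      # -> cb(Arguments((), {'x': 1}))
#     adapter()         # -> cb(None)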
# Convert Awaitables into Futures. It is unfortunately possible
# to have infinite recursion here if those Awaitables assume that
# we're using a different coroutine runner and yield objects
# we don't understand. If that happens, the solution is to
# register that runner's yieldable objects with convert_yielded.
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
@coroutine
def _wrap_awaitable(x):
if hasattr(x, '__await__'):
x = x.__await__()
return (yield from x)
"""))
else:
# Py2-compatible version for use with Cython.
# Copied from PEP 380.
@coroutine
def _wrap_awaitable(x):
if hasattr(x, '__await__'):
_i = x.__await__()
else:
_i = iter(x)
try:
_y = next(_i)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
else:
while 1:
try:
_s = yield _y
except GeneratorExit as _e:
try:
_m = _i.close
except AttributeError:
pass
else:
_m()
raise _e
except BaseException as _e:
_x = sys.exc_info()
try:
_m = _i.throw
except AttributeError:
raise _e
else:
try:
_y = _m(*_x)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
break
else:
try:
if _s is None:
_y = next(_i)
else:
_y = _i.send(_s)
except StopIteration as _e:
_r = _value_from_stopiteration(_e)
break
raise Return(_r)
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled earlier.
if isinstance(yielded, (list, dict)):
return multi(yielded)
elif is_future(yielded):
return yielded
elif isawaitable(yielded):
return _wrap_awaitable(yielded)
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
try:
# If we can import t.p.asyncio, do it for its side effect
# (registering asyncio.Future with convert_yielded).
# It's ugly to do this here, but it prevents a cryptic
# infinite recursion in _wrap_awaitable.
# Note that even with this, asyncio integration is unlikely
# to work unless the application also configures AsyncIOLoop,
# but at least the error messages in that case are more
# comprehensible than a stack overflow.
import tornado.platform.asyncio
except ImportError:
pass
else:
# Reference the imported module to make pyflakes happy.
tornado
|
{
"content_hash": "3484f60470b0056cd130712e99411361",
"timestamp": "",
"source": "github",
"line_count": 1241,
"max_line_length": 82,
"avg_line_length": 35.519742143432715,
"alnum_prop": 0.6073049001814882,
"repo_name": "arthurdarcet/tornado",
"id": "29e8cb50b53861893522b5482e16c13a0dad260f",
"size": "44080",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tornado/gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1078"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "HTML",
"bytes": "12417"
},
{
"name": "JavaScript",
"bytes": "6073"
},
{
"name": "Python",
"bytes": "1512121"
},
{
"name": "Ruby",
"bytes": "1733"
},
{
"name": "Shell",
"bytes": "4881"
}
],
"symlink_target": ""
}
|
from pos_parameters import filename_parameter, value_parameter, \
string_parameter, list_parameter,\
vector_parameter
import pos_wrappers
class preprocess_slice_volume(pos_wrappers.generic_wrapper):
_template = """pos_slice_volume \
-i {input_image} \
-o "{output_naming}" \
-s {slicing_plane} \
-r {start_slice} {end_slice} {step} \
{shift_indexes}"""
_parameters = { \
'input_image' : filename_parameter('input_image', None),
'output_naming' : filename_parameter('output_naming', None),
'slicing_plane' : value_parameter('slicing_plane', 1),
'start_slice' : value_parameter('start_slice', None),
'end_slice' : value_parameter('end_slice', None),
'step' : value_parameter('step', 1),
'shift_indexes' : value_parameter('output-filenames-offset', None, str_template="--{_name} {_value}"),
'output_dir' : string_parameter('output_dir', None),
}
class blank_slice_deformation_wrapper(pos_wrappers.generic_wrapper):
_template = """c{dimension}d {input_image} -scale 0 -dup -omc {dimension} {output_image}"""
_parameters = {\
'dimension' : value_parameter('dimension', 2),
'input_image' : filename_parameter('input_image', None),
'output_image' : filename_parameter('output_image', None),
}
class convert_slice_parent(pos_wrappers.generic_wrapper):
_template = """ -- stub -- """
_parameters = {
'dimension' : value_parameter('dimension', 2),
'input_image' : filename_parameter('input_image', None),
'output_image' : filename_parameter('output_image', None),
'scaling' : value_parameter('scaling', None, "-scale {_value}"),
'spacing' : vector_parameter('spacing', None, '-spacing {_list}mm')
}
class convert_slice_image(convert_slice_parent):
_template = """c{dimension}d -mcs {input_image}\
-foreach {spacing} {scaling} -endfor \
-omc {dimension} {output_image}"""
class convert_slice_image_grayscale(convert_slice_parent):
_template = """c{dimension}d {input_image}\
{spacing} {scaling}\
-o {output_image}"""
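# A minimal construction sketch for these wrappers. It assumes, based on the
# template/parameter pattern above, that `generic_wrapper` accepts parameter
# names as keyword arguments and substitutes their values into `_template`;
# check `pos_wrappers.generic_wrapper` for the actual interface. The file
# names below are hypothetical.
#
#     slicer = preprocess_slice_volume(
#         input_image='volume.nii.gz',
#         output_naming='slice_%04d.nii.gz',
#         slicing_plane=1,
#         start_slice=0,
#         end_slice=100)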
|
{
"content_hash": "bc67f726b91f805c8171b3c1455b2745",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 114,
"avg_line_length": 41,
"alnum_prop": 0.5651808242220353,
"repo_name": "pmajka/poSSum",
"id": "2ac70d56019dfb540f223cc39c1725fcef6a56e6",
"size": "2378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "possum/pos_deformable_wrappers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "10113"
},
{
"name": "Python",
"bytes": "685592"
},
{
"name": "Shell",
"bytes": "130154"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('inputs')
class PluginInputs(object):
"""
Allows the same input plugin to be configured multiple times in a task.
Example::
inputs:
- rss: http://feeda.com
- rss: http://feedb.com
"""
schema = {
'type': 'array',
'items': {'allOf': [{'$ref': '/schema/plugins?phase=input'}, {'maxProperties': 1, 'minProperties': 1}]}
}
def on_task_input(self, task, config):
entries = []
entry_titles = set()
entry_urls = set()
for item in config:
for input_name, input_config in item.iteritems():
input = plugin.get_plugin_by_name(input_name)
if input.api_ver == 1:
raise plugin.PluginError('Plugin %s does not support API v2' % input_name)
method = input.phase_handlers['input']
try:
result = method(task, input_config)
except plugin.PluginError as e:
log.warning('Error during input plugin %s: %s' % (input_name, e))
continue
if not result:
msg = 'Input %s did not return anything' % input_name
if getattr(task, 'no_entries_ok', False):
log.verbose(msg)
else:
log.warning(msg)
continue
for entry in result:
if entry['title'] in entry_titles:
log.debug('Title `%s` already in entry list, skipping.' % entry['title'])
continue
urls = ([entry['url']] if entry.get('url') else []) + entry.get('urls', [])
if any(url in entry_urls for url in urls):
log.debug('URL for `%s` already in entry list, skipping.' % entry['title'])
continue
entries.append(entry)
entry_titles.add(entry['title'])
entry_urls.update(urls)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(PluginInputs, 'inputs', api_ver=2)
|
{
"content_hash": "9ecc32f588511e36021ac946129ced7b",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 111,
"avg_line_length": 36.03076923076923,
"alnum_prop": 0.5106746370623398,
"repo_name": "voriux/Flexget",
"id": "80b61bc17412849de1b5a4d147b24b7b508d3259",
"size": "2342",
"binary": false,
"copies": "14",
"ref": "refs/heads/develop",
"path": "flexget/plugins/input/inputs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "1849035"
}
],
"symlink_target": ""
}
|
"""yaml_config_loader setup file."""
from setuptools import setup
setup(
name='yaml-config-loader',
version='0.1.0',
description='Yaml configuration loader',
maintainer='Greg Leeper',
maintainer_email='gleeper@google.com',
url='https://github.com/google/python-spanner-orm',
packages=['yaml_config_loader'],
install_requires=['pyyaml', 'jinja2'])
|
{
"content_hash": "9d8543cf05b32bc0f89e6505a7309a6d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6842105263157895,
"repo_name": "google/python-yaml-config",
"id": "ff0bdb8cfc637354326ce0ac0d689e4006d35c3e",
"size": "961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13315"
}
],
"symlink_target": ""
}
|
"""
Tools for visualizing dependencies between Terms.
"""
from __future__ import unicode_literals
from contextlib import contextmanager
import errno
from functools import partial
from io import BytesIO
from subprocess import Popen, PIPE
from networkx import topological_sort
from six import iteritems
from zipline.pipeline.data import BoundColumn
from zipline.pipeline import Filter, Factor, Classifier, Term
from zipline.pipeline.term import AssetExists
class NoIPython(Exception):
pass
def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]])
quote = partial(delimit, '""')
bracket = partial(delimit, '[]')
def begin_graph(f, name, **attrs):
writeln(f, "strict digraph %s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def begin_cluster(f, name, **attrs):
attrs.setdefault("label", quote(name))
writeln(f, "subgraph cluster_%s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def end_graph(f):
writeln(f, '}')
@contextmanager
def graph(f, name, **attrs):
begin_graph(f, name, **attrs)
yield
end_graph(f)
@contextmanager
def cluster(f, name, **attrs):
begin_cluster(f, name, **attrs)
yield
end_graph(f)
def roots(g):
"Get nodes from graph G with indegree 0"
return set(n for n, d in iteritems(g.in_degree()) if d == 0)
def filter_nodes(include_asset_exists, nodes):
if include_asset_exists:
return nodes
return filter(lambda n: n is not AssetExists(), nodes)
def _render(g, out, format_, include_asset_exists=False):
"""
Draw `g` as a graph to `out`, in format `format`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
include_asset_exists : bool
Whether to filter out `AssetExists()` nodes.
"""
graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}
in_nodes = g.loadable_terms
out_nodes = list(g.outputs.values())
f = BytesIO()
with graph(f, "G", **graph_attrs):
# Write outputs cluster.
with cluster(f, 'Output', labelloc='b', **cluster_attrs):
for term in filter_nodes(include_asset_exists, out_nodes):
add_term_node(f, term)
# Write inputs cluster.
with cluster(f, 'Input', **cluster_attrs):
for term in filter_nodes(include_asset_exists, in_nodes):
add_term_node(f, term)
# Write intermediate results.
for term in filter_nodes(include_asset_exists,
topological_sort(g.graph)):
if term in in_nodes or term in out_nodes:
continue
add_term_node(f, term)
# Write edges
for source, dest in g.graph.edges():
if source is AssetExists() and not include_asset_exists:
continue
add_edge(f, id(source), id(dest))
cmd = ['dot', '-T', format_]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError(
"Couldn't find `dot` graph layout program. "
"Make sure Graphviz is installed and `dot` is on your path."
)
else:
raise
f.seek(0)
proc_stdout, proc_stderr = proc.communicate(f.read())
if proc_stderr:
raise RuntimeError(
"Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
)
out.write(proc_stdout)
def display_graph(g, format='svg', include_asset_exists=False):
"""
Display a TermGraph interactively from within IPython.
"""
try:
import IPython.display as display
except ImportError:
raise NoIPython("IPython is not installed. Can't display graph.")
if format == 'svg':
display_cls = display.SVG
elif format in ("jpeg", "png"):
display_cls = partial(display.Image, format=format, embed=True)
out = BytesIO()
_render(g, out, format, include_asset_exists=include_asset_exists)
return display_cls(data=out.getvalue())
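# A minimal usage sketch, assuming an IPython session and an already-built
# TermGraph `g` (how `g` is constructed is outside this module):
#
#     svg = display_graph(g, format='svg', include_asset_exists=False)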
def writeln(f, s):
f.write((s + '\n').encode('utf-8'))
def fmt(obj):
if isinstance(obj, Term):
if hasattr(obj, 'short_repr'):
r = obj.short_repr()
else:
r = type(obj).__name__
else:
r = obj
return '"%s"' % r
def add_term_node(f, term):
declare_node(f, id(term), attrs_for_node(term))
def declare_node(f, name, attributes):
writeln(f, "{0} {1};".format(name, format_attrs(attributes)))
def add_edge(f, source, dest):
writeln(f, "{0} -> {1};".format(source, dest))
def attrs_for_node(term, **overrides):
attrs = {
'shape': 'box',
'colorscheme': 'pastel19',
'style': 'filled',
'label': fmt(term),
}
if isinstance(term, BoundColumn):
attrs['fillcolor'] = '1'
if isinstance(term, Factor):
attrs['fillcolor'] = '2'
elif isinstance(term, Filter):
attrs['fillcolor'] = '3'
elif isinstance(term, Classifier):
attrs['fillcolor'] = '4'
attrs.update(**overrides or {})
return attrs
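# Illustrative result of `attrs_for_node` for a hypothetical Factor term whose
# repr is `SimpleMovingAverage` (the label depends on the term's repr):
#
#     {'shape': 'box', 'colorscheme': 'pastel19', 'style': 'filled',
#      'label': '"SimpleMovingAverage"', 'fillcolor': '2'}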
def format_attrs(attrs):
"""
Format key, value pairs from attrs into graphviz attrs format
Example
-------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]'
"""
if not attrs:
return ''
entries = ['='.join((key, value)) for key, value in iteritems(attrs)]
return '[' + ', '.join(entries) + ']'
|
{
"content_hash": "053138a5107053e057398c5b24caa9ee",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 78,
"avg_line_length": 26.369565217391305,
"alnum_prop": 0.5929101401483924,
"repo_name": "florentchandelier/zipline",
"id": "fdc34b70af9f008ee7cd9562b97ef75832d9929f",
"size": "6065",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "zipline/pipeline/visualize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7014"
},
{
"name": "Dockerfile",
"bytes": "2480"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "162383"
},
{
"name": "PowerShell",
"bytes": "3269"
},
{
"name": "Python",
"bytes": "3677457"
},
{
"name": "Shell",
"bytes": "7420"
}
],
"symlink_target": ""
}
|